repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
tmcunningham/planning-for-drunks | [
"e012721a8a35251281dcf2f7449400818d908bcc"
] | [
"python/measure_drunks_moves.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 25 14:11:59 2021\n\n@author: Tom Cunningham\n\nThis module uses the drunk_functions module to track how many moves it takes\nall drunks to get home at a range of drunk levels. The module will produce a\nboxplot of the results.\n\n\"\"\"\n\nimport drunk_functions\nimport matplotlib\nimport timeit\n\n# Set start time to time programme\nstart_time = timeit.default_timer()\n\n# Import town data\ntown = drunk_functions.import_town(\"drunk.plan\")\n\n# Set building coordinates\nbuilding_coords = drunk_functions.get_building_coords(town)\n\n# Set front door coordinates\nfront_door_coords = drunk_functions.get_pub_front_door(building_coords)\n\n# Set back door coordinates\nback_door_coords = drunk_functions.get_pub_back_door(building_coords)\n\n# Set drunk levels to be tested\ndrunk_levels = [10, 20, 50, 100, 200]\n\n# Set number of iterations to run model for each drunk_level\niterations = 1000\n \n# Create empty list to store move counts\ndrunk_level_moves = []\n\n# Loop over all drunk levels and run model set number of times and record how\n# long it took for all drunks to get home\nfor drunk_level in drunk_levels:\n drunk_level_lower = drunk_level\n drunk_level_higher = drunk_level\n moves = []\n\n for i in range(iterations):\n drunks = drunk_functions.create_drunks(town, building_coords, \n front_door_coords, back_door_coords,\n drunk_level_lower, \n drunk_level_higher)\n j = 0\n while not all([drunk.is_home for drunk in drunks]):\n for drunk in drunks:\n drunk.move()\n drunk.sober_up()\n j += 1\n else:\n moves.append(j)\n \n print(\"Drunk level \" + str(drunk_level) + \": \" + str(i/(iterations-1)))\n \n drunk_level_moves.append(moves)\n\n# Plot results as boxplots\nmatplotlib.pyplot.boxplot(drunk_level_moves, positions = drunk_levels)\nmatplotlib.pyplot.xlabel(\"Drunk level of all drunks\")\nmatplotlib.pyplot.ylabel(\"Iterations for all drunks to get home\")\nmatplotlib.pyplot.savefig(\"plots/Number of moves boxplot.png\")\n\n# Set end time\nend_time = timeit.default_timer()\ntime_taken = end_time - start_time\n\n# Calculate time taken to run\nprint(\"Time taken: \" + str(time_taken))"
] | [
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
urakubo/ffn_windows | [
"5c3739e2d3f1d8667c994d9126eafa936c66e02b"
] | [
"postprocessing/npz_to_png.py"
] | [
"import os\nfilename = 'seg-0_0_0.npz'\noutputdir = os.getcwd() + os.sep + 'inferred_segmentation'\ninputdir = os.getcwd()\n\n\nimport numpy as np\nimport h5py\n\nimport PIL\nimport PIL.Image\nimport cv2\nimport png\n\n\ndef save_tif8(id_data, filename):\n cv2.imwrite(filename, id_data.astype('uint8'))\n\ndef save_tifc(id_data, filename, colordata):\n pilOUT = gen_col_pil(id_data, colordata)\n pilOUT.save(filename)\n\ndef save_png16(id_data, filename):\n # Use pypng to write zgray as a grayscale PNG.\n with open(filename, 'wb') as f:\n writer = png.Writer(width=id_data.shape[1], height=id_data.shape[0], bitdepth=16, greyscale=True)\n id_data_list = id_data.astype('uint16').tolist()\n writer.write(f, id_data_list)\n\ndef save_png8(id_data, filename):\n # Use pypng to write zgray as a grayscale PNG.\n with open(filename, 'wb') as f:\n writer = png.Writer(width=id_data.shape[1], height=id_data.shape[0], bitdepth=8, greyscale=True)\n id_data_list = id_data.astype('uint8').tolist()\n writer.write(f, id_data_list)\n\ndef save_pngc(id_data, filename, colordata):\n pilOUT = gen_col_pil(id_data, colordata)\n pilOUT.save(filename)\n\ndef save_npy(id_data, filename):\n np.save(filename, id_data)\n\n\ninputdir = os.getcwd()\n\n\ndata = np.load(inputdir+ os.sep+filename)\n# print data.files\n# print data['segmentation'].shape\n\nnum_z = data['segmentation'].shape[0]\nnum_y = data['segmentation'].shape[1]\nnum_x = data['segmentation'].shape[2]\n\nfor idz in range(num_z):\n tmp = outputdir + os.sep + 'z' + '%04d' % (idz) + '.png'\n save_png8(data['segmentation'][idz,:,:].transpose(), tmp)\n\n"
] | [
[
"numpy.load",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jiawei-ren/BalancedMSE | [
"4e1f44fe4cc2518159b1c67159abe3d2b0cea014"
] | [
"imdb-wiki-dir/utils.py"
] | [
"import os\nimport shutil\nimport torch\nimport logging\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter1d\nfrom scipy.signal.windows import triang\n\n\nclass AverageMeter(object):\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n logging.info('\\t'.join(entries))\n\n @staticmethod\n def _get_batch_fmtstr(num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef query_yes_no(question):\n \"\"\" Ask a yes/no question via input() and return their answer. \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n prompt = \" [Y/n] \"\n\n while True:\n print(question + prompt, end=':')\n choice = input().lower()\n if choice == '':\n return valid['y']\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")\n\n\ndef prepare_folders(args):\n folders_util = [args.store_root, os.path.join(args.store_root, args.store_name)]\n if os.path.exists(folders_util[-1]) and not args.resume and not args.pretrained and not args.evaluate:\n if query_yes_no('overwrite previous folder: {} ?'.format(folders_util[-1])):\n shutil.rmtree(folders_util[-1])\n print(folders_util[-1] + ' removed.')\n else:\n raise RuntimeError('Output folder {} already exists'.format(folders_util[-1]))\n for folder in folders_util:\n if not os.path.exists(folder):\n print(f\"===> Creating folder: {folder}\")\n os.mkdir(folder)\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n for milestone in args.schedule:\n lr *= 0.1 if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n if 'name' in param_group and param_group['name'] == 'noise_sigma':\n continue\n param_group['lr'] = lr\n\n\ndef save_checkpoint(args, state, is_best, prefix=''):\n filename = f\"{args.store_root}/{args.store_name}/{prefix}ckpt.pth.tar\"\n torch.save(state, filename)\n if is_best:\n logging.info(\"===> Saving current best checkpoint...\")\n shutil.copyfile(filename, filename.replace('pth.tar', 'best.pth.tar'))\n\n\ndef calibrate_mean_var(matrix, m1, v1, m2, v2, clip_min=0.1, clip_max=10):\n if torch.sum(v1) < 1e-10:\n return matrix\n if (v1 == 0.).any():\n valid = (v1 != 0.)\n factor = torch.clamp(v2[valid] / v1[valid], clip_min, clip_max)\n matrix[:, valid] = (matrix[:, valid] - m1[valid]) * torch.sqrt(factor) + m2[valid]\n return matrix\n\n factor = torch.clamp(v2 / v1, clip_min, clip_max)\n return (matrix - m1) * torch.sqrt(factor) + m2\n\n\ndef get_lds_kernel_window(kernel, ks, sigma):\n assert kernel in ['gaussian', 'triang', 'laplace']\n half_ks = (ks - 1) // 2\n if kernel == 'gaussian':\n base_kernel = [0.] * half_ks + [1.] + [0.] 
* half_ks\n kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))\n elif kernel == 'triang':\n kernel_window = triang(ks)\n else:\n laplace = lambda x: np.exp(-abs(x) / sigma) / (2. * sigma)\n kernel_window = list(map(laplace, np.arange(-half_ks, half_ks + 1))) / max(map(laplace, np.arange(-half_ks, half_ks + 1)))\n\n return kernel_window\n"
] | [
[
"torch.sqrt",
"numpy.arange",
"torch.sum",
"scipy.ndimage.gaussian_filter1d",
"scipy.signal.windows.triang",
"torch.clamp",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
NunoEdgarGFlowHub/autogluon | [
"714894698495ef4352706d3c4250823ad4a43ead",
"714894698495ef4352706d3c4250823ad4a43ead",
"714894698495ef4352706d3c4250823ad4a43ead"
] | [
"autogluon/searcher/bayesopt/tuning_algorithms/bo_algorithm_components.py",
"autogluon/core/decorator.py",
"tests/unittests/utils/tabular/data/test_label_cleaner.py"
] | [
"from typing import Iterable, List, Type, Optional\nimport numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nimport logging\n\nfrom .base_classes import SurrogateModel, AcquisitionFunction, ScoringFunction, LocalOptimizer\nfrom ..datatypes.common import Candidate\nfrom ..datatypes.tuning_job_state import TuningJobState\n\nlogger = logging.getLogger(__name__)\n\n\nclass IndependentThompsonSampling(ScoringFunction):\n \"\"\"\n Note: This is not Thompson sampling, but rather a variant called\n \"independent Thompson sampling\", where means and variances are drawn\n from the marginal rather than the joint distribution. This is cheap,\n but incorrect.\n\n \"\"\"\n def __init__(\n self, model: SurrogateModel,\n random_state: Optional[np.random.RandomState] = None):\n self.model = model\n if random_state is None:\n random_state = np.random.RandomState(31415629)\n self.random_state = random_state\n\n def score(self, candidates: Iterable[Candidate],\n model: Optional[SurrogateModel] = None) -> List[float]:\n if model is None:\n model = self.model\n predictions_list = model.predict_candidates(candidates)\n scores = []\n # If the model supports fantasizing, posterior_means is a matrix. In\n # that case, samples are drawn for every column, then averaged (why\n # we need np.mean)\n for posterior_means, posterior_stds in predictions_list:\n new_score = [\n np.mean(self.random_state.normal(m, s))\n for m, s in zip(posterior_means, posterior_stds)]\n scores.append(new_score)\n return list(np.mean(np.array(scores), axis=0))\n\n\nclass LBFGSOptimizeAcquisition(LocalOptimizer):\n def __init__(self, state: TuningJobState, model: SurrogateModel,\n acquisition_function_class: Type[AcquisitionFunction]):\n super().__init__(state, model, acquisition_function_class)\n # Number criterion evaluations in last recent optimize call\n self.num_evaluations = None\n\n def optimize(self, candidate: Candidate,\n model: Optional[SurrogateModel] = None) -> Candidate:\n # Before local minimization, the model for this state_id should have been fitted.\n if model is None:\n model = self.model\n state = self.state\n acquisition_function = self.acquisition_function_class(model)\n\n x0 = state.hp_ranges.to_ndarray(candidate)\n bounds = state.hp_ranges.get_ndarray_bounds()\n n_evaluations = [0] # wrapped in list to allow access from function\n\n # unwrap 2d arrays\n def f_df(x):\n n_evaluations[0] += 1\n f, df = acquisition_function.compute_acq_with_gradients(x)\n assert len(f) == 1\n assert len(df) == 1\n return f[0], df[0]\n\n res = fmin_l_bfgs_b(f_df, x0=x0, bounds=bounds, maxiter=1000)\n self.num_evaluations = n_evaluations[0]\n if res[2]['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':\n # this condition was copied from the old GPyOpt code\n # this condition was silently ignored in the old code\n logger.warning(\n f\"ABNORMAL_TERMINATION_IN_LNSRCH in lbfgs after {n_evaluations[0]} evaluations, \"\n \"returning original candidate\"\n )\n return candidate # returning original candidate\n else:\n # Clip to avoid situation where result is small epsilon out of bounds\n a_min, a_max = zip(*bounds)\n optimized_x = np.clip(res[0], a_min, a_max)\n # Make sure the above clipping does really just fix numerical rounding issues in LBFGS\n # if any bigger change was made there is a bug and we want to throw an exception\n assert np.linalg.norm(res[0] - optimized_x) < 1e-6, (res[0], optimized_x, bounds)\n result = state.hp_ranges.from_ndarray(optimized_x.flatten())\n return result\n\n\nclass NoOptimization(LocalOptimizer):\n def 
optimize(self, candidate: Candidate,\n model: Optional[SurrogateModel]=None) -> Candidate:\n return candidate\n",
"import copy\nimport logging\nimport argparse\nimport functools\nfrom collections import OrderedDict\nimport numpy as np\nimport multiprocessing as mp\nimport ConfigSpace as CS\n\nfrom .space import *\nfrom .space import _add_hp, _add_cs, _rm_hp, _strip_config_space, SPLITTER\nfrom ..utils import EasyDict as ezdict\nfrom ..utils.deprecate import make_deprecate\n\n__all__ = ['args', 'obj', 'func', 'sample_config',\n 'autogluon_register_args', 'autogluon_object', 'autogluon_function',\n 'autogluon_register_dict']\n\nlogger = logging.getLogger(__name__)\n\n\ndef sample_config(args, config):\n args = copy.deepcopy(args)\n striped_keys = [k.split(SPLITTER)[0] for k in config.keys()]\n if isinstance(args, (argparse.Namespace, argparse.ArgumentParser)):\n args_dict = vars(args)\n else:\n args_dict = args\n for k, v in args_dict.items():\n # handle different type of configurations\n if k in striped_keys:\n if isinstance(v, NestedSpace):\n sub_config = _strip_config_space(config, prefix=k)\n args_dict[k] = v.sample(**sub_config)\n else:\n if SPLITTER in k:\n continue\n args_dict[k] = config[k]\n elif isinstance(v, AutoGluonObject):\n args_dict[k] = v.init()\n return args\n\nclass _autogluon_method(object):\n SEED = mp.Value('i', 0)\n LOCK = mp.Lock()\n def __init__(self, f):\n self.f = f\n self.args = ezdict()\n functools.update_wrapper(self, f)\n\n def __call__(self, args, config={}, **kwargs):\n new_config = copy.deepcopy(config)\n self._rand_seed()\n args = sample_config(args, new_config)\n from ..scheduler.reporter import FakeReporter\n if 'reporter' not in kwargs:\n logger.debug('Creating FakeReporter for test purpose.')\n kwargs['reporter'] = FakeReporter()\n\n output = self.f(args, **kwargs)\n logger.debug('Reporter Done!')\n kwargs['reporter'](done=True)\n return output\n \n def register_args(self, default={}, **kwvars):\n if isinstance(default, (argparse.Namespace, argparse.ArgumentParser)):\n default = vars(default)\n self.kwvars = {}\n self.args = ezdict()\n self.args.update(default)\n self.update(**kwvars)\n\n def update(self, **kwargs):\n \"\"\"For searcher support ConfigSpace\n \"\"\"\n self.kwvars.update(kwargs)\n for k, v in self.kwvars.items():\n if isinstance(v, (NestedSpace)):\n self.args.update({k: v})\n elif isinstance(v, Space):\n hp = v.get_hp(name=k)\n self.args.update({k: hp.default_value})\n else:\n self.args.update({k: v})\n\n @property\n def cs(self):\n cs = CS.ConfigurationSpace()\n for k, v in self.kwvars.items():\n if isinstance(v, NestedSpace):\n _add_cs(cs, v.cs, k)\n elif isinstance(v, Space):\n hp = v.get_hp(name=k)\n _add_hp(cs, hp)\n else:\n _rm_hp(cs, k)\n return cs\n\n @property\n def kwspaces(self):\n \"\"\"For RL searcher/controller\n \"\"\"\n kw_spaces = OrderedDict()\n for k, v in self.kwvars.items():\n if isinstance(v, NestedSpace):\n if isinstance(v, Categorical):\n kw_spaces['{}{}choice'.format(k, SPLITTER)] = v\n for sub_k, sub_v in v.kwspaces.items():\n new_k = '{}{}{}'.format(k, SPLITTER, sub_k)\n kw_spaces[new_k] = sub_v\n elif isinstance(v, Space):\n kw_spaces[k] = v\n return kw_spaces\n\n def _rand_seed(self):\n _autogluon_method.SEED.value += 1\n np.random.seed(_autogluon_method.SEED.value)\n\n def __repr__(self):\n return repr(self.f)\n\n\ndef args(default=None, **kwvars):\n \"\"\"Decorator for a Python training script that registers its arguments as hyperparameters. \n Each hyperparameter takes fixed value or is a searchable space, and the arguments may either be:\n built-in Python objects (e.g. 
floats, strings, lists, etc.), AutoGluon objects (see :func:`autogluon.obj`), \n or AutoGluon search spaces (see :class:`autogluon.space.Int`, :class:`autogluon.space.Real`, etc.).\n\n Examples\n --------\n >>> import autogluon as ag\n >>> @ag.args(batch_size=10, lr=ag.Real(0.01, 0.1))\n >>> def train_func(args):\n ... print('Batch size is {}, LR is {}'.format(args.batch_size, arg.lr))\n \"\"\"\n if default is None:\n default = dict()\n kwvars['_default_config'] = default\n def registered_func(func):\n @_autogluon_method\n @functools.wraps(func)\n def wrapper_call(*args, **kwargs):\n return func(*args, **kwargs)\n\n default = kwvars['_default_config']\n wrapper_call.register_args(default=default, **kwvars)\n return wrapper_call\n\n return registered_func\n\n\ndef func(**kwvars):\n \"\"\"Decorator for a function that registers its arguments as hyperparameters. \n Each hyperparameter may take a fixed value or be a searchable space (autogluon.space).\n\n Returns\n -------\n Instance of :class:`autogluon.space.AutoGluonObject`:\n A lazily initialized object, which allows for distributed training.\n\n Examples\n --------\n >>> import autogluon as ag\n >>> from gluoncv.model_zoo import get_model\n >>> \n >>> @ag.func(pretrained=ag.space.Categorical(True, False))\n >>> def cifar_resnet(pretrained):\n ... return get_model('cifar_resnet20_v1', pretrained=pretrained)\n \"\"\"\n def _autogluon_kwargs_func(**kwvars):\n def registered_func(func):\n kwspaces = OrderedDict()\n @functools.wraps(func)\n def wrapper_call(*args, **kwargs):\n _kwvars = copy.deepcopy(kwvars)\n _kwvars.update(kwargs)\n for k, v in _kwvars.items():\n if isinstance(v, NestedSpace):\n kwspaces[k] = v\n kwargs[k] = v\n elif isinstance(v, Space):\n kwspaces[k] = v\n hp = v.get_hp(name=k)\n kwargs[k] = hp.default_value\n else:\n kwargs[k] = v\n return func(*args, **kwargs)\n wrapper_call.kwspaces = kwspaces\n return wrapper_call\n return registered_func\n\n def registered_func(func):\n class autogluonobject(AutoGluonObject):\n @_autogluon_kwargs_func(**kwvars)\n def __init__(self, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self._inited = False\n\n def sample(self, **config):\n kwargs = copy.deepcopy(self.kwargs)\n kwspaces = copy.deepcopy(autogluonobject.kwspaces)\n for k, v in kwargs.items():\n if k in kwspaces and isinstance(kwspaces[k], NestedSpace):\n sub_config = _strip_config_space(config, prefix=k)\n kwargs[k] = kwspaces[k].sample(**sub_config)\n elif k in config:\n kwargs[k] = config[k]\n \n return self.func(*self.args, **kwargs)\n\n @functools.wraps(func)\n def wrapper_call(*args, **kwargs):\n _kwvars = copy.deepcopy(kwvars)\n _kwvars.update(kwargs)\n agobj = autogluonobject(*args, **kwargs)\n agobj.kwvars = _kwvars\n return agobj\n return wrapper_call\n return registered_func\n\ndef obj(**kwvars):\n \"\"\"Decorator for a Python class that registers its arguments as hyperparameters. 
\n Each hyperparameter may take a fixed value or be a searchable space (autogluon.space).\n\n Returns\n -------\n Instance of :class:`autogluon.space.AutoGluonObject`:\n A lazily initialized object, which allows distributed training.\n\n Examples\n --------\n >>> import autogluon as ag\n >>> from mxnet import optimizer as optim\n >>> @ag.obj(\n >>> learning_rate=ag.space.Real(1e-4, 1e-1, log=True),\n >>> wd=ag.space.Real(1e-4, 1e-1),\n >>> )\n >>> class Adam(optim.Adam):\n >>> pass\n \"\"\"\n def _autogluon_kwargs_obj(**kwvars):\n def registered_func(func):\n kwspaces = OrderedDict()\n @functools.wraps(func)\n def wrapper_call(*args, **kwargs):\n kwvars.update(kwargs)\n for k, v in kwvars.items():\n if isinstance(v, NestedSpace):\n kwspaces[k] = v\n kwargs[k] = v\n elif isinstance(v, Space):\n kwspaces[k] = v\n hp = v.get_hp(name=k)\n kwargs[k] = hp.default_value\n else:\n kwargs[k] = v\n return func(*args, **kwargs)\n wrapper_call.kwspaces = kwspaces\n wrapper_call.kwvars = kwvars\n return wrapper_call\n return registered_func\n\n def registered_class(Cls):\n class autogluonobject(AutoGluonObject):\n @_autogluon_kwargs_obj(**kwvars)\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self._inited = False\n\n def sample(self, **config):\n kwargs = copy.deepcopy(self.kwargs)\n kwspaces = copy.deepcopy(autogluonobject.kwspaces)\n for k, v in kwargs.items():\n if k in kwspaces and isinstance(kwspaces[k], NestedSpace):\n sub_config = _strip_config_space(config, prefix=k)\n kwargs[k] = kwspaces[k].sample(**sub_config)\n elif k in config:\n kwargs[k] = config[k]\n\n args = self.args\n return Cls(*args, **kwargs)\n\n def __repr__(self):\n return 'AutoGluonObject -- ' + Cls.__name__\n\n autogluonobject.kwvars = autogluonobject.__init__.kwvars\n autogluonobject.__doc__ = Cls.__doc__\n autogluonobject.__name__ = Cls.__name__\n return autogluonobject\n\n return registered_class\n\n\n\nautogluon_register_args = make_deprecate(args, 'autogluon_register_args')\nautogluon_register_dict = make_deprecate(args, 'autogluon_register_dict')\nautogluon_function = make_deprecate(func, 'autogluon_function')\nautogluon_object = make_deprecate(obj, 'autogluon_object')\n",
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom autogluon.utils.tabular.data.label_cleaner import LabelCleaner, LabelCleanerBinary, LabelCleanerMulticlass, LabelCleanerMulticlassToBinary, LabelCleanerDummy\nfrom autogluon.utils.tabular.ml.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS\n\n\ndef test_label_cleaner_binary():\n # Given\n problem_type = BINARY\n input_labels_numpy = np.array(['l1', 'l2', 'l2', 'l1', 'l1', 'l2'])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array(['new', 'l1', 'l2'])\n expected_output_labels = pd.Series([0, 1, 1, 0, 0, 1])\n expected_output_labels_new = pd.Series([np.nan, 0, 1])\n expected_output_labels_new_inverse = pd.Series([np.nan, 'l1', 'l2'])\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels)\n\n # Raise exception\n with pytest.raises(AssertionError):\n LabelCleaner.construct(problem_type=problem_type, y=input_labels_new)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerBinary)\n assert label_cleaner.problem_type_transform == BINARY\n assert label_cleaner.cat_mappings_dependent_var == {0: 'l1', 1: 'l2'}\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n\ndef test_label_cleaner_multiclass():\n # Given\n problem_type = MULTICLASS\n input_labels_numpy = np.array([2, 4, 2, 2, 4, 1])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array([3, 5, 2])\n expected_output_labels = pd.Series([1, 2, 1, 1, 2, 0])\n expected_output_labels_new = pd.Series([np.nan, np.nan, 1])\n expected_output_labels_new_inverse = pd.Series([np.nan, np.nan, 2])\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=input_labels)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerMulticlass)\n assert label_cleaner.problem_type_transform == MULTICLASS\n assert label_cleaner.cat_mappings_dependent_var == {0: 1, 1: 2, 2: 4}\n\n output_labels = 
label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n\ndef test_label_cleaner_multiclass_to_binary():\n # Given\n problem_type = MULTICLASS\n input_labels_numpy = np.array(['l1', 'l2', 'l2', 'l1', 'l1', 'l2'])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_uncleaned = pd.Series(['l0', 'l1', 'l2', 'l2', 'l1', 'l1', 'l2', 'l3', 'l4'])\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array(['l0', 'l1', 'l2'])\n input_labels_proba_transformed = pd.Series([0.7, 0.2, 0.5], index=[5, 2, 8])\n expected_output_labels = pd.Series([0, 1, 1, 0, 0, 1])\n expected_output_labels_new = pd.Series([np.nan, 0, 1])\n expected_output_labels_new_inverse = pd.Series([np.nan, 'l1', 'l2'])\n expected_output_labels_proba_transformed_inverse = pd.DataFrame(\n data=[\n [0, 0.3, 0.7, 0, 0],\n [0, 0.8, 0.2, 0, 0],\n [0, 0.5, 0.5, 0, 0]\n ], index=[5, 2, 8], columns=['l0', 'l1', 'l2', 'l3', 'l4'], dtype=np.float64\n )\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=input_labels_uncleaned)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerMulticlassToBinary)\n assert label_cleaner.problem_type_transform == BINARY\n assert label_cleaner.cat_mappings_dependent_var == {0: 'l1', 1: 'l2'}\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n 
assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n output_labels_proba_transformed_inverse = label_cleaner.inverse_transform_proba(input_labels_proba_transformed, as_pandas=True)\n\n pd.testing.assert_frame_equal(expected_output_labels_proba_transformed_inverse, output_labels_proba_transformed_inverse)\n\n\ndef test_label_cleaner_regression():\n # Given\n problem_type = REGRESSION\n input_labels_numpy = np.array([2, 4, 2, 2, 4, 1])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_new = pd.Series([3, 5, 2])\n expected_output_labels = input_labels.copy()\n expected_output_labels_new = input_labels_new.copy()\n expected_output_labels_new_inverse = input_labels_new.copy()\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=None)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerDummy)\n assert label_cleaner.problem_type_transform == REGRESSION\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n\ndef test_label_softclass():\n # Given\n problem_type = SOFTCLASS\n input_labels = pd.Series([2, 4, 2, 2, 4, 1])\n\n # Raise exception\n with pytest.raises(NotImplementedError):\n LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=None)\n"
] | [
[
"numpy.clip",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.linalg.norm",
"numpy.array",
"numpy.random.RandomState"
],
[
"numpy.random.seed"
],
[
"numpy.array",
"pandas.testing.assert_frame_equal",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SKA-ScienceDataProcessor/integration-prototype | [
"5875dc0489f707232534ce75daf3707f909bcd15"
] | [
"sip/science_pipeline_workflows/ical_dask/pipelines/imaging_modeling.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Dask workflow for generating model data for the SIP example ICAL workflow.\n\nThis is code based on the test ICAL pipeline notebook from ARL.\n\"\"\"\nimport logging\nimport pickle\nimport os\nimport sys\nimport json\n\nimport numpy\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom data_models.data_model_helpers import export_blockvisibility_to_hdf5\nfrom data_models.polarisation import PolarisationFrame\n\nfrom workflows.arlexecute.execution_support.arlexecute import arlexecute\nfrom workflows.arlexecute.execution_support.dask_init import get_dask_Client\nfrom processing_components.imaging.base import advise_wide_field\nfrom workflows.arlexecute.imaging.imaging_arlexecute import predict_arlexecute\nfrom workflows.arlexecute.simulation.simulation_arlexecute import simulate_arlexecute, \\\n corrupt_arlexecute\n\nfrom processing_components.simulation.testing_support import \\\n create_low_test_image_from_gleam\n\n\nLOG = logging.getLogger('sip.ical.generate_data')\nRESULTS_DIR = 'results'\nif not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n\n\ndef init_logging():\n \"\"\"Initialise Python logging.\"\"\"\n # fmt = '%(thread)s %(asctime)s,%(msecs)d %(name)s %(levelname)s ' \\\n # '%(message)s'\n # logging.basicConfig(filename='%s/imaging_modeling.log' % RESULTS_DIR,\n # filemode='a', format=fmt, datefmt='%H:%M:%S',\n # level=logging.INFO)\n fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \\\n '| %(message)s'\n logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)\n\n\ndef main():\n \"\"\"Workflow stage application.\"\"\"\n init_logging()\n\n # Get Dask client\n arlexecute.set_client(get_dask_Client())\n arlexecute.run(init_logging)\n\n LOG.info('Results dir = %s', RESULTS_DIR)\n LOG.info(\"Starting imaging-modeling\")\n\n # Read parameters\n PARFILE = 'parameters.json'\n if len(sys.argv) > 1:\n PARFILE = sys.argv[1]\n LOG.info(\"JSON parameter file = %s\", PARFILE)\n\n try: \t\n with open(PARFILE, \"r\") as par_file:\n jspar = json.load(par_file) \n except AssertionError as error:\n LOG.critical('ERROR %s', error)\n return\n\n # Model parameters\n configuration= jspar[\"modeling\"][\"configuration\"][\"name\"]\n num_freq_win = jspar[\"modeling\"][\"configuration\"][\"num_freq_win\"] # 7\n num_times = jspar[\"modeling\"][\"configuration\"][\"num_times\"] # 11\n r_max = jspar[\"modeling\"][\"configuration\"][\"r_max\"] # 300.0\n fstart\t = jspar[\"modeling\"][\"configuration\"][\"fstart\"]\n fend\t = jspar[\"modeling\"][\"configuration\"][\"fend\"]\n timestart_pi = jspar[\"modeling\"][\"configuration\"][\"timestart_pi\"] # -1/3\n timeend_pi = jspar[\"modeling\"][\"configuration\"][\"timeend_pi\"] # 1/3\n polframe = jspar[\"modeling\"][\"configuration\"][\"PolarisationFrame\"] # StokesI\n\n frequency = numpy.linspace(fstart, fend, num_freq_win)\n channel_bw = numpy.array(num_freq_win * [frequency[1] - frequency[0]]) # 0.9e8 ... 
1.1e8\n times = numpy.linspace(numpy.pi * timestart_pi, numpy.pi * timeend_pi, num_times)\n\n phase_centre = SkyCoord(\tra =jspar[\"modeling\"][\"phasecentre\"][\"RA\"] * u.deg, \n\t\t\t\tdec =jspar[\"modeling\"][\"phasecentre\"][\"Dec\"] * u.deg,\n \tframe =jspar[\"modeling\"][\"phasecentre\"][\"frame\"], \n\t\t\t\tequinox=jspar[\"modeling\"][\"phasecentre\"][\"equinox\"])\n\n # Simulate visibilities\n vis_list = simulate_arlexecute(configuration,\n frequency=frequency,\n channel_bandwidth=channel_bw,\n times=times,\n phasecentre=phase_centre,\n order=jspar[\"modeling\"][\"simulate\"][\"order\"],\n rmax=r_max)\n\n LOG.info('%d elements in vis_list', len(vis_list))\n LOG.info('About to make visibility')\n vis_list = arlexecute.compute(vis_list, sync=True)\n LOG.debug('vis_list type: %s', type(vis_list))\n LOG.debug('vis_list element type: %s', type(vis_list[0]))\n try:\n export_blockvisibility_to_hdf5(vis_list,\n '%s/%s' % (RESULTS_DIR, jspar[\"files\"][\"vis_list\"]))\n except AssertionError as error:\n LOG.critical('ERROR %s', error)\n return\n\n wprojection_planes = jspar[\"advice\"][\"wprojection_planes\"]\n guard_band_image = jspar[\"advice\"][\"guard_band_image\"]\n delA = jspar[\"advice\"][\"delA\"]\n advice_low = advise_wide_field(vis_list[0], guard_band_image=guard_band_image,\n delA=delA,\n wprojection_planes=wprojection_planes)\n advice_high = advise_wide_field(vis_list[-1], guard_band_image=guard_band_image,\n delA=delA,\n wprojection_planes=wprojection_planes)\n\n\n vis_slices = advice_low['vis_slices']\n num_pixels = advice_high['npixels2']\n cellsize = min(advice_low['cellsize'], advice_high['cellsize'])\n\n # Create GLEAM model\n gleam_model = [\n arlexecute.execute(create_low_test_image_from_gleam)(\n npixel=num_pixels,\n frequency=[frequency[f]],\n channel_bandwidth=[channel_bw[f]],\n cellsize=cellsize,\n phasecentre=phase_centre,\n polarisation_frame=PolarisationFrame(polframe),\n flux_limit=jspar[\"modeling\"][\"gleam_model\"][\"flux_limit\"], # 1.0,\n applybeam =jspar[\"modeling\"][\"gleam_model\"][\"applybeam\"]) # True\n for f, freq in enumerate(frequency)\n ]\n\n\n LOG.info('About to make GLEAM model')\n gleam_model = arlexecute.compute(gleam_model, sync=True)\n # future_gleam_model = arlexecute.scatter(gleam_model)\n\n # Get predicted visibilities for GLEAM model\n LOG.info('About to run predict to get predicted visibility')\n future_vis_graph = arlexecute.scatter(vis_list)\n predicted_vis_list = predict_arlexecute(future_vis_graph, gleam_model,\n context=jspar[\"modeling\"][\"predict\"][\"context\"], #'wstack'\n vis_slices=vis_slices)\n predicted_vis_list = arlexecute.compute(predicted_vis_list, sync=True)\n corrupted_vis_list = corrupt_arlexecute(predicted_vis_list, phase_error=jspar[\"modeling\"][\"corrupt\"][\"phase_error\"]) #1.0\n\n LOG.info('About to run corrupt to get corrupted visibility')\n corrupted_vis_list = arlexecute.compute(corrupted_vis_list, sync=True)\n\n LOG.info('About to output predicted_vislist.hdf')\n export_blockvisibility_to_hdf5(predicted_vis_list,\n '%s/%s' % (RESULTS_DIR,jspar[\"files\"][\"predicted_vis_list\"]))\n\n LOG.info('About to output corrupted_vislist.hdf')\n\n export_blockvisibility_to_hdf5(corrupted_vis_list,\n '%s/%s' % (RESULTS_DIR, jspar[\"files\"][\"corrupted_vis_list\"]))\n # Close Dask client\n arlexecute.close()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shuoli90/PAC-confidence-set | [
"ab8dcd5205f9aba6b490aabe7bfc74e1410d0f26"
] | [
"calibration/utils.py"
] | [
"import numpy as np\nimport sys, os\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nimport pickle\nimport glob\nimport time\n\nimport torch as tc\nfrom torch import nn, optim\nimport torch.tensor as T\nimport torch.nn.functional as F\n\n\nclass CalibrationError_L1:\n def __init__(self, n_bins=15, device=tc.device(\"cuda:0\")):\n self.n_bins = n_bins\n self.device = device\n bin_boundaries = tc.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n \n def get_acc_conf_mat(self, yhs, phs, ys):\n accs = yhs.eq(ys)\n confs = phs\n \n acc_conf_mat = tc.zeros(self.n_bins, 3)\n for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):\n # Calculated |confidence - accuracy| in each bin\n if i == 0:\n in_bin = (confs >= bin_lower.item()) & (confs <= bin_upper.item())\n else:\n in_bin = (confs > bin_lower.item()) & (confs <= bin_upper.item())\n # the number of examples in this bin\n acc_conf_mat[i, 0] = in_bin.float().sum()\n if acc_conf_mat[i, 0] > 0:\n # accumulate correct label predictions\n acc_conf_mat[i, 1] = accs[in_bin].float().sum()\n # accumulate confidence predictions\n acc_conf_mat[i, 2] = confs[in_bin].float().sum()\n \n return acc_conf_mat\n\n # ECE: emprical calibration error\n def ECEmat2ECE(self, ECE_mat):\n ECE_mat = ECE_mat.clone()\n ind = ECE_mat[:, 0] > 0\n # normalize the correct label predictions\n ECE_mat[ind, 1] = ECE_mat[ind, 1].div(ECE_mat[ind, 0])\n # normalize the confidence predictions\n ECE_mat[ind, 2] = ECE_mat[ind, 2].div(ECE_mat[ind, 0])\n ECE_mat[:, 0] = ECE_mat[:, 0].div(ECE_mat[:, 0].sum())\n ECE = ECE_mat[:, 0].mul((ECE_mat[:, 1] - ECE_mat[:, 2]).abs()).sum()\n return ECE\n\n # ECE_oneside: emprical calibration error oneside\n def ECEmat2ECE_overconfidence(self, ECE_mat):\n ECE_mat = ECE_mat.clone()\n ind = ECE_mat[:, 0] > 0\n # normalize the correct label predictions\n ECE_mat[ind, 1] = ECE_mat[ind, 1].div(ECE_mat[ind, 0])\n # normalize the confidence predictions\n ECE_mat[ind, 2] = ECE_mat[ind, 2].div(ECE_mat[ind, 0])\n ECE_mat[:, 0] = ECE_mat[:, 0].div(ECE_mat[:, 0].sum())\n ECE = ECE_mat[:, 0].mul((ECE_mat[:, 2] - ECE_mat[:, 1]).clamp(0.0, np.inf)).sum()\n return ECE\n\n # MOCE: maximum-overconfident calibration error\n def ECEmat2MOCE(self, ECE_mat):\n ECE_mat = ECE_mat.clone()\n ind = ECE_mat[:, 0] > 0\n # mean accuracy\n ECE_mat[ind, 1] = ECE_mat[ind, 1].div(ECE_mat[ind, 0])\n # mean confidence\n ECE_mat[ind, 2] = ECE_mat[ind, 2].div(ECE_mat[ind, 0])\n MOCE = (ECE_mat[:, 2] - ECE_mat[:, 1]).clamp(0.0, np.inf).max()\n return MOCE\n\n # EUCE: expected-underconfident calibration error\n def ECEmat2EUCE(self, ECE_mat):\n ECE_mat = ECE_mat.clone()\n ind = ECE_mat[:, 0] > 0\n # mean accuracy\n ECE_mat[ind, 1] = ECE_mat[ind, 1].div(ECE_mat[ind, 0])\n # mean confidence\n ECE_mat[ind, 2] = ECE_mat[ind, 2].div(ECE_mat[ind, 0])\n # frequency of each bin\n ECE_mat[:, 0] = ECE_mat[:, 0].div(ECE_mat[:, 0].sum())\n #FIXME: does not count all samples, loose information, need to use with MOCE\n EUCE = ECE_mat[:, 0].mul((ECE_mat[:, 1] - ECE_mat[:, 2]).clamp(0.0, np.inf)).sum()\n return EUCE\n\n def ECEmat2MOEUCE(self, ECE_mat):\n MOCE = ECEmat2MOCE(ECE_mat)\n EUCE = ECEmat2EUCE(ECE_mat)\n return MOCE + EUCE\n\n def decomposeECEmat(self, ECE_mat):\n ECE_mat = ECE_mat.clone()\n ind = ECE_mat[:, 0] > 0\n n_samples = ECE_mat[:, 0]\n mean_accuracy = ECE_mat[:, 1]\n mean_accuracy[ind] = ECE_mat[ind, 1].div(ECE_mat[ind, 
0])\n mean_confidence = ECE_mat[:, 2]\n mean_confidence[ind] = ECE_mat[ind, 2].div(ECE_mat[ind, 0])\n return n_samples, mean_confidence, mean_accuracy\n\n \n def __call__(self, label_pred, tar_prob_pred, lds, \n measure_overconfidence=False, return_ECE_mat=False):\n ECE_mat = None\n with tc.no_grad():\n for ld in lds:\n for i, (xs, ys) in enumerate(ld):\n xs = xs.to(self.device)\n ys = ys.to(self.device)\n yhs = label_pred(xs)\n phs = tar_prob_pred(xs, yhs)\n\n ECE_mat_b = self.get_acc_conf_mat(yhs, phs, ys)\n ECE_mat = ECE_mat + ECE_mat_b if ECE_mat is not None else ECE_mat_b\n\n if measure_overconfidence:\n ECE = self.ECEmat2ECE_overconfidence(ECE_mat)\n else:\n ECE = self.ECEmat2ECE(ECE_mat)\n if return_ECE_mat:\n return ECE, ECE_mat\n else:\n return ECE\n \n def plot_reliablity_diagram(self, fig_fn, label_pred, conf_pred, lds):\n ECE, ECE_mat = self(label_pred, conf_pred, lds, False, True)\n n_samples, mean_confidence, mean_accuracy = self.decomposeECEmat(ECE_mat)\n plot_reliability_diag(self.n_bins, mean_accuracy, n_samples, fig_fn=fig_fn, fontsize=20, \n ECE=ECE)\n\n \ndef plot_reliability_diag(n_bins, mean_accuracy, n_samples, fig_fn=None, fontsize=15, ECE=None):\n out_fn = fig_fn + '_conf_acc'\n with PdfPages(out_fn + '.pdf') as pdf: \n plt.figure(1)\n plt.clf()\n plt.rc('xtick',labelsize=fontsize*0.75)\n plt.rc('ytick',labelsize=fontsize*0.75)\n\n xs_ori = tc.linspace(0, 1, n_bins+1)\n xs = xs_ori[0:-1] + (xs_ori[1:] - xs_ori[0:-1]) / 2.0\n w = (xs[1] - xs[0]) * 0.75\n\n plt.bar(xs.numpy(), mean_accuracy.numpy(), width=w, color='r', edgecolor='k')\n plt.plot(xs_ori.numpy(), xs_ori.numpy(), 'k--')\n plt.xlim([0, 1.0])\n plt.ylim([0, 1.0])\n plt.grid(True)\n plt.xlabel(\"Confidence\", fontsize=fontsize)\n plt.ylabel(\"Accuracy\", fontsize=fontsize)\n if ECE is not None:\n plt.title(\"ECE = %.2f%%\"%(ECE*100), fontsize=fontsize)\n plt.savefig(out_fn+'.png', bbox_inches='tight')\n pdf.savefig(bbox_inches='tight')\n\n out_fn = fig_fn + '_conf_acc_freq'\n with PdfPages(out_fn + '.pdf') as pdf: \n plt.figure(1)\n plt.clf()\n plt.rc('xtick',labelsize=fontsize*0.75)\n plt.rc('ytick',labelsize=fontsize*0.75)\n \n plt.bar(xs.numpy(), n_samples.div(n_samples.sum()).numpy(), \n width=w, color='r', edgecolor='k')\n plt.xlim([0, 1.0])\n plt.grid(True)\n plt.xlabel(\"Confidence\", fontsize=fontsize)\n plt.ylabel(\"Sample Ratio\", fontsize=fontsize)\n\n plt.savefig(out_fn+'.png', bbox_inches='tight')\n pdf.savefig(bbox_inches='tight')\n\n\n\ndef eval_print_ECE(lds, ld_names, model, target_model):\n \n ## calibration error for each dataset\n ECEs = []\n for ld, ld_name in zip(lds, ld_names):\n ECE = CalibrationError_L1()(model.label_pred, target_model.tar_prob_pred, [ld])\n print(\"# %s calibration error = %.2f%%\"%(ld_name, ECE * 100.0))\n ECEs.append(ECE.unsqueeze(0))\n\n if len(lds) > 1:\n ## calibration error for combined datasets\n ECE_all = CalibrationError_L1()(model.label_pred, target_model.tar_prob_pred, lds)\n print(\"# Combined calibration error = %.2f%%\"%(ECE_all * 100.0))\n\n ## average \n ECEs = tc.cat(ECEs)\n print(\"# Average calibration error = %.2f%%\"%(ECEs.mean() * 100.0))\n \n ## overconfience stats\n ECEs = []\n for ld, ld_name in zip(lds, ld_names):\n ECE = CalibrationError_L1()(model.label_pred, target_model.tar_prob_pred, [ld], True)\n print(\"# %s over-confident calibration error = %.2f%%\"%(ld_name, ECE * 100.0))\n ECEs.append(ECE.unsqueeze(0))\n\n if len(lds) > 1:\n ## calibration error for combined datasets\n ECE_all = CalibrationError_L1()(model.label_pred, 
target_model.tar_prob_pred, lds, True)\n print(\"# Combined over-confident calibration error = %.2f%%\"%(ECE_all * 100.0))\n\n ## average \n ECEs = tc.cat(ECEs)\n print(\"# Average over-confident calibration error = %.2f%%\"%(ECEs.mean() * 100.0))\n \n"
] | [
[
"matplotlib.backends.backend_pdf.PdfPages",
"torch.linspace",
"matplotlib.pyplot.title",
"torch.zeros",
"torch.cat",
"matplotlib.use",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylabel",
"torch.no_grad",
"matplotlib.pyplot.grid",
"torch.device",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
igor-krawczuk/garage | [
"aa86ce710c6d01380477d6feddc0e38427b1e3b4"
] | [
"src/garage/torch/modules/gaussian_mlp_module.py"
] | [
"\"\"\"GaussianMLPModule.\"\"\"\nimport abc\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import Normal\nfrom torch.distributions.independent import Independent\n\nfrom garage.torch.distributions import TanhNormal\nfrom garage.torch.modules.mlp_module import MLPModule\nfrom garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule\n\n\nclass GaussianMLPBaseModule(nn.Module):\n \"\"\"Base of GaussianMLPModel.\n\n Args:\n input_dim (int): Input dimension of the model.\n output_dim (int): Output dimension of the model.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a torch.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n torch.Tensor.\n learn_std (bool): Is std trainable.\n init_std (float): Initial value for std.\n (plain value - not log or exponentiated).\n std_hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for std. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer\n in the std network.\n std_hidden_w_init (callable): Initializer function for the weight\n of hidden layer (s).\n std_hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s).\n std_output_nonlinearity (callable): Activation function for output\n dense layer in the std network. It should return a torch.Tensor.\n Set it to None to maintain a linear activation.\n std_output_w_init (callable): Initializer function for the weight\n of output dense layer(s) in the std network.\n std_parameterization (str): How the std should be parametrized. There\n are two options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation.\n - softplus: the std will be computed as log(1+exp(x)).\n layer_normalization (bool): Bool for using layer normalization or not.\n normal_distribution_cls (torch.distribution): normal distribution class\n to be constructed and returned by a call to forward. 
By default, is\n `torch.distributions.Normal`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_hidden_sizes=(32, 32),\n std_hidden_nonlinearity=torch.tanh,\n std_hidden_w_init=nn.init.xavier_uniform_,\n std_hidden_b_init=nn.init.zeros_,\n std_output_nonlinearity=None,\n std_output_w_init=nn.init.xavier_uniform_,\n std_parameterization='exp',\n layer_normalization=False,\n normal_distribution_cls=Normal):\n super().__init__()\n\n self._input_dim = input_dim\n self._hidden_sizes = hidden_sizes\n self._action_dim = output_dim\n self._learn_std = learn_std\n self._std_hidden_sizes = std_hidden_sizes\n self._min_std = min_std\n self._max_std = max_std\n self._std_hidden_nonlinearity = std_hidden_nonlinearity\n self._std_hidden_w_init = std_hidden_w_init\n self._std_hidden_b_init = std_hidden_b_init\n self._std_output_nonlinearity = std_output_nonlinearity\n self._std_output_w_init = std_output_w_init\n self._std_parameterization = std_parameterization\n self._hidden_nonlinearity = hidden_nonlinearity\n self._hidden_w_init = hidden_w_init\n self._hidden_b_init = hidden_b_init\n self._output_nonlinearity = output_nonlinearity\n self._output_w_init = output_w_init\n self._output_b_init = output_b_init\n self._layer_normalization = layer_normalization\n self._norm_dist_class = normal_distribution_cls\n\n if self._std_parameterization not in ('exp', 'softplus'):\n raise NotImplementedError\n\n init_std_param = torch.Tensor([init_std]).log()\n if self._learn_std:\n self._init_std = torch.nn.Parameter(init_std_param)\n else:\n self._init_std = init_std_param\n self.register_buffer('init_std', self._init_std)\n\n self._min_std_param = self._max_std_param = None\n if min_std is not None:\n self._min_std_param = torch.Tensor([min_std]).log()\n self.register_buffer('min_std_param', self._min_std_param)\n if max_std is not None:\n self._max_std_param = torch.Tensor([max_std]).log()\n self.register_buffer('max_std_param', self._max_std_param)\n\n def to(self, *args, **kwargs):\n \"\"\"Move the module to the specified device.\n\n Args:\n *args: args to pytorch to function.\n **kwargs: keyword args to pytorch to function.\n\n \"\"\"\n super().to(*args, **kwargs)\n buffers = dict(self.named_buffers())\n if not isinstance(self._init_std, torch.nn.Parameter):\n self._init_std = buffers['init_std']\n self._min_std_param = buffers['min_std_param']\n self._max_std_param = buffers['max_std_param']\n\n @abc.abstractmethod\n def _get_mean_and_log_std(self, *inputs):\n pass\n\n def forward(self, *inputs):\n \"\"\"Forward method.\n\n Args:\n *inputs: Input to the module.\n\n Returns:\n torch.Tensor: Module output.\n\n \"\"\"\n mean, log_std_uncentered = self._get_mean_and_log_std(*inputs)\n\n if self._min_std_param or self._max_std_param:\n log_std_uncentered = log_std_uncentered.clamp(\n min=self._to_scalar_if_not_none(self._min_std_param),\n max=self._to_scalar_if_not_none(self._max_std_param))\n\n if self._std_parameterization == 'exp':\n std = log_std_uncentered.exp()\n else:\n std = log_std_uncentered.exp().exp().add(1.).log()\n dist = self._norm_dist_class(mean, std)\n # This control flow is needed because if a TanhNormal distribution is\n # wrapped by torch.distributions.Independent, then custom 
functions\n # such as rsample_with_pretanh_value of the TanhNormal distribution\n # are not accessable.\n if not isinstance(dist, TanhNormal):\n # Makes it so that a sample from the distribution is treated as a\n # single sample and not dist.batch_shape samples.\n dist = Independent(dist, 1)\n\n return dist\n\n # pylint: disable=no-self-use\n def _to_scalar_if_not_none(self, tensor):\n \"\"\"Convert torch.Tensor of a single value to a Python number.\n\n Args:\n tensor (torch.Tensor): A torch.Tensor of a single value.\n\n Returns:\n float: The value of tensor.\n\n \"\"\"\n return None if tensor is None else tensor.item()\n\n\nclass GaussianMLPModule(GaussianMLPBaseModule):\n \"\"\"GaussianMLPModule that mean and std share the same network.\n\n Args:\n input_dim (int): Input dimension of the model.\n output_dim (int): Output dimension of the model.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a torch.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n torch.Tensor.\n learn_std (bool): Is std trainable.\n init_std (float): Initial value for std.\n (plain value - not log or exponentiated).\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n std_parameterization (str): How the std should be parametrized. There\n are two options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation\n - softplus: the std will be computed as log(1+exp(x))\n layer_normalization (bool): Bool for using layer normalization or not.\n normal_distribution_cls (torch.distribution): normal distribution class\n to be constructed and returned by a call to forward. 
By default, is\n `torch.distributions.Normal`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_parameterization='exp',\n layer_normalization=False,\n normal_distribution_cls=Normal):\n super(GaussianMLPModule,\n self).__init__(input_dim=input_dim,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n learn_std=learn_std,\n init_std=init_std,\n min_std=min_std,\n max_std=max_std,\n std_parameterization=std_parameterization,\n layer_normalization=layer_normalization,\n normal_distribution_cls=normal_distribution_cls)\n\n self._mean_module = MLPModule(\n input_dim=self._input_dim,\n output_dim=self._action_dim,\n hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearity=self._output_nonlinearity,\n output_w_init=self._output_w_init,\n output_b_init=self._output_b_init,\n layer_normalization=self._layer_normalization)\n\n def _get_mean_and_log_std(self, *inputs):\n \"\"\"Get mean and std of Gaussian distribution given inputs.\n\n Args:\n *inputs: Input to the module.\n\n Returns:\n torch.Tensor: The mean of Gaussian distribution.\n torch.Tensor: The variance of Gaussian distribution.\n\n \"\"\"\n assert len(inputs) == 1\n mean = self._mean_module(*inputs)\n\n broadcast_shape = list(inputs[0].shape[:-1]) + [self._action_dim]\n uncentered_log_std = torch.zeros(*broadcast_shape) + self._init_std\n\n return mean, uncentered_log_std\n\n\nclass GaussianMLPIndependentStdModule(GaussianMLPBaseModule):\n \"\"\"GaussianMLPModule which has two different mean and std network.\n\n Args:\n input_dim (int): Input dimension of the model.\n output_dim (int): Output dimension of the model.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a torch.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). 
The function should return a\n torch.Tensor.\n learn_std (bool): Is std trainable.\n init_std (float): Initial value for std.\n (plain value - not log or exponentiated).\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n std_hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for std. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer\n in the std network.\n std_hidden_w_init (callable): Initializer function for the weight\n of hidden layer (s).\n std_hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s).\n std_output_nonlinearity (callable): Activation function for output\n dense layer in the std network. It should return a torch.Tensor.\n Set it to None to maintain a linear activation.\n std_output_w_init (callable): Initializer function for the weight\n of output dense layer(s) in the std network.\n std_parameterization (str): How the std should be parametrized. There\n are two options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation\n - softplus: the std will be computed as log(1+exp(x))\n layer_normalization (bool): Bool for using layer normalization or not.\n normal_distribution_cls (torch.distribution): normal distribution class\n to be constructed and returned by a call to forward. By default, is\n `torch.distributions.Normal`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_hidden_sizes=(32, 32),\n std_hidden_nonlinearity=torch.tanh,\n std_hidden_w_init=nn.init.xavier_uniform_,\n std_hidden_b_init=nn.init.zeros_,\n std_output_nonlinearity=None,\n std_output_w_init=nn.init.xavier_uniform_,\n std_parameterization='exp',\n layer_normalization=False,\n normal_distribution_cls=Normal):\n super(GaussianMLPIndependentStdModule,\n self).__init__(input_dim=input_dim,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n learn_std=learn_std,\n init_std=init_std,\n min_std=min_std,\n max_std=max_std,\n std_hidden_sizes=std_hidden_sizes,\n std_hidden_nonlinearity=std_hidden_nonlinearity,\n std_hidden_w_init=std_hidden_w_init,\n std_hidden_b_init=std_hidden_b_init,\n std_output_nonlinearity=std_output_nonlinearity,\n std_output_w_init=std_output_w_init,\n std_parameterization=std_parameterization,\n layer_normalization=layer_normalization,\n normal_distribution_cls=normal_distribution_cls)\n\n self._mean_module = MLPModule(\n input_dim=self._input_dim,\n output_dim=self._action_dim,\n hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearity=self._output_nonlinearity,\n 
output_w_init=self._output_w_init,\n output_b_init=self._output_b_init,\n layer_normalization=self._layer_normalization)\n\n self._log_std_module = MLPModule(\n input_dim=self._input_dim,\n output_dim=self._action_dim,\n hidden_sizes=self._std_hidden_sizes,\n hidden_nonlinearity=self._std_hidden_nonlinearity,\n hidden_w_init=self._std_hidden_w_init,\n hidden_b_init=self._std_hidden_b_init,\n output_nonlinearity=self._std_output_nonlinearity,\n output_w_init=self._std_output_w_init,\n output_b_init=self._init_std_b,\n layer_normalization=self._layer_normalization)\n\n def _init_std_b(self, b):\n \"\"\"Default bias initialization function.\n\n Args:\n b (torch.Tensor): The bias tensor.\n\n Returns:\n torch.Tensor: The bias tensor itself.\n\n \"\"\"\n return nn.init.constant_(b, self._init_std.item())\n\n def _get_mean_and_log_std(self, *inputs):\n \"\"\"Get mean and std of Gaussian distribution given inputs.\n\n Args:\n *inputs: Input to the module.\n\n Returns:\n torch.Tensor: The mean of Gaussian distribution.\n torch.Tensor: The variance of Gaussian distribution.\n\n \"\"\"\n return self._mean_module(*inputs), self._log_std_module(*inputs)\n\n\nclass GaussianMLPTwoHeadedModule(GaussianMLPBaseModule):\n \"\"\"GaussianMLPModule which has only one mean network.\n\n Args:\n input_dim (int): Input dimension of the model.\n output_dim (int): Output dimension of the model.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a torch.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n torch.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a torch.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n torch.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n torch.Tensor.\n learn_std (bool): Is std trainable.\n init_std (float): Initial value for std.\n (plain value - not log or exponentiated).\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues (plain value - not log or exponentiated).\n std_parameterization (str): How the std should be parametrized. There\n are two options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation\n - softplus: the std will be computed as log(1+exp(x))\n layer_normalization (bool): Bool for using layer normalization or not.\n normal_distribution_cls (torch.distribution): normal distribution class\n to be constructed and returned by a call to forward. 
By default, is\n `torch.distributions.Normal`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=torch.tanh,\n hidden_w_init=nn.init.xavier_uniform_,\n hidden_b_init=nn.init.zeros_,\n output_nonlinearity=None,\n output_w_init=nn.init.xavier_uniform_,\n output_b_init=nn.init.zeros_,\n learn_std=True,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_parameterization='exp',\n layer_normalization=False,\n normal_distribution_cls=Normal):\n super(GaussianMLPTwoHeadedModule,\n self).__init__(input_dim=input_dim,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n learn_std=learn_std,\n init_std=init_std,\n min_std=min_std,\n max_std=max_std,\n std_parameterization=std_parameterization,\n layer_normalization=layer_normalization,\n normal_distribution_cls=normal_distribution_cls)\n\n self._shared_mean_log_std_network = MultiHeadedMLPModule(\n n_heads=2,\n input_dim=self._input_dim,\n output_dims=self._action_dim,\n hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearities=self._output_nonlinearity,\n output_w_inits=self._output_w_init,\n output_b_inits=[\n nn.init.zeros_,\n lambda x: nn.init.constant_(x, self._init_std.item())\n ],\n layer_normalization=self._layer_normalization)\n\n def _get_mean_and_log_std(self, *inputs):\n \"\"\"Get mean and std of Gaussian distribution given inputs.\n\n Args:\n *inputs: Input to the module.\n\n Returns:\n torch.Tensor: The mean of Gaussian distribution.\n torch.Tensor: The variance of Gaussian distribution.\n\n \"\"\"\n return self._shared_mean_log_std_network(*inputs)\n"
] | [
[
"torch.nn.Parameter",
"torch.zeros",
"torch.Tensor",
"torch.distributions.independent.Independent"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
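The docstrings in the record above describe a state-independent initial std and two std parameterizations ('exp' and 'softplus'). The following is a minimal illustrative sketch of that idea in plain PyTorch; the class name, defaults, and structure are mine for illustration, not garage's API.

```python
# Illustrative sketch (not the garage implementation): a Gaussian head whose
# std is a learned, state-independent parameter stored pre-transform, so the
# initial std equals init_std under either parameterization.
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.distributions import Normal

class TinyGaussianHead(nn.Module):
    def __init__(self, in_dim, out_dim, init_std=1.0, parameterization="exp"):
        super().__init__()
        self.mean = nn.Linear(in_dim, out_dim)
        self.parameterization = parameterization
        if parameterization == "exp":
            init_param = math.log(init_std)                 # std = exp(param)
        else:
            init_param = math.log(math.exp(init_std) - 1.0)  # std = softplus(param)
        self.pre_std = nn.Parameter(torch.full((out_dim,), init_param))

    def forward(self, x):
        std = self.pre_std.exp() if self.parameterization == "exp" \
            else F.softplus(self.pre_std)
        return Normal(self.mean(x), std)

dist = TinyGaussianHead(3, 2)(torch.zeros(4, 3))
print(dist.mean.shape, dist.stddev[0])  # initial stddev is 1.0 per dimension
```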
rafvasq/ml-agents | [
"e3b86a27a2547cdd177bf9e848b4337aa71b887a"
] | [
"ml-agents/mlagents/trainers/trainer_controller.py"
] | [
"# # Unity ML-Agents Toolkit\n# ## ML-Agent Learning\n\"\"\"Launches trainers for each External Brains in a Unity Environment.\"\"\"\n\nimport os\nimport logging\nimport shutil\nimport sys\nif sys.platform.startswith('win'):\n import win32api\n import win32con\nfrom typing import *\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom mlagents.envs import BrainInfo\nfrom mlagents.envs.exception import UnityEnvironmentException\nfrom mlagents.trainers.ppo.trainer import PPOTrainer\nfrom mlagents.trainers.bc.offline_trainer import OfflineBCTrainer\nfrom mlagents.trainers.bc.online_trainer import OnlineBCTrainer\nfrom mlagents.trainers.meta_curriculum import MetaCurriculum\n\n\nclass TrainerController(object):\n def __init__(self, model_path: str, summaries_dir: str,\n run_id: str, save_freq: int, meta_curriculum: Optional[MetaCurriculum],\n load: bool, train: bool, keep_checkpoints: int, lesson: Optional[int],\n external_brains: Dict[str, BrainInfo], training_seed: int):\n \"\"\"\n :param model_path: Path to save the model.\n :param summaries_dir: Folder to save training summaries.\n :param run_id: The sub-directory name for model and summary statistics\n :param save_freq: Frequency at which to save model\n :param meta_curriculum: MetaCurriculum object which stores information about all curricula.\n :param load: Whether to load the model or randomly initialize.\n :param train: Whether to train model, or only run inference.\n :param keep_checkpoints: How many model checkpoints to keep.\n :param lesson: Start learning from this lesson.\n :param external_brains: dictionary of external brain names to BrainInfo objects.\n :param training_seed: Seed to use for Numpy and Tensorflow random number generation.\n \"\"\"\n\n self.model_path = model_path\n self.summaries_dir = summaries_dir\n self.external_brains = external_brains\n self.external_brain_names = external_brains.keys()\n self.logger = logging.getLogger('mlagents.envs')\n self.run_id = run_id\n self.save_freq = save_freq\n self.lesson = lesson\n self.load_model = load\n self.train_model = train\n self.keep_checkpoints = keep_checkpoints\n self.trainers = {}\n self.global_step = 0\n self.meta_curriculum = meta_curriculum\n self.seed = training_seed\n np.random.seed(self.seed)\n tf.set_random_seed(self.seed)\n\n def _get_measure_vals(self):\n if self.meta_curriculum:\n brain_names_to_measure_vals = {}\n for brain_name, curriculum \\\n in self.meta_curriculum.brains_to_curriculums.items():\n if curriculum.measure == 'progress':\n measure_val = (self.trainers[brain_name].get_step /\n self.trainers[brain_name].get_max_steps)\n brain_names_to_measure_vals[brain_name] = measure_val\n elif curriculum.measure == 'reward':\n measure_val = np.mean(self.trainers[brain_name]\n .reward_buffer)\n brain_names_to_measure_vals[brain_name] = measure_val\n return brain_names_to_measure_vals\n else:\n return None\n\n def _save_model(self, steps=0):\n \"\"\"\n Saves current model to checkpoint folder.\n :param steps: Current number of steps in training process.\n :param saver: Tensorflow saver for session.\n \"\"\"\n for brain_name in self.trainers.keys():\n self.trainers[brain_name].save_model()\n self.logger.info('Saved Model')\n\n def _save_model_when_interrupted(self, steps=0):\n self.logger.info('Learning was interrupted. 
Please wait '\n 'while the graph is generated.')\n self._save_model(steps)\n\n def _win_handler(self, event):\n \"\"\"\n This function gets triggered after ctrl-c or ctrl-break is pressed\n under Windows platform.\n \"\"\"\n if event in (win32con.CTRL_C_EVENT, win32con.CTRL_BREAK_EVENT):\n self._save_model_when_interrupted(self.global_step)\n self._export_graph()\n sys.exit()\n return True\n return False\n\n def _export_graph(self):\n \"\"\"\n Exports latest saved models to .nn format for Unity embedding.\n \"\"\"\n for brain_name in self.trainers.keys():\n self.trainers[brain_name].export_model()\n\n def initialize_trainers(self, trainer_config):\n \"\"\"\n Initialization of the trainers\n :param trainer_config: The configurations of the trainers\n \"\"\"\n trainer_parameters_dict = {}\n\n for brain_name in self.external_brains:\n trainer_parameters = trainer_config['default'].copy()\n trainer_parameters['summary_path'] = '{basedir}/{name}'.format(\n basedir=self.summaries_dir,\n name=str(self.run_id) + '_' + brain_name)\n trainer_parameters['model_path'] = '{basedir}/{name}'.format(\n basedir=self.model_path,\n name=brain_name)\n trainer_parameters['keep_checkpoints'] = self.keep_checkpoints\n if brain_name in trainer_config:\n _brain_key = brain_name\n while not isinstance(trainer_config[_brain_key], dict):\n _brain_key = trainer_config[_brain_key]\n for k in trainer_config[_brain_key]:\n trainer_parameters[k] = trainer_config[_brain_key][k]\n trainer_parameters_dict[brain_name] = trainer_parameters.copy()\n for brain_name in self.external_brains:\n if trainer_parameters_dict[brain_name]['trainer'] == 'offline_bc':\n self.trainers[brain_name] = OfflineBCTrainer(\n self.external_brains[brain_name],\n trainer_parameters_dict[brain_name], self.train_model,\n self.load_model, self.seed, self.run_id)\n elif trainer_parameters_dict[brain_name]['trainer'] == 'online_bc':\n self.trainers[brain_name] = OnlineBCTrainer(\n self.external_brains[brain_name],\n trainer_parameters_dict[brain_name], self.train_model,\n self.load_model, self.seed, self.run_id)\n elif trainer_parameters_dict[brain_name]['trainer'] == 'ppo':\n self.trainers[brain_name] = PPOTrainer(\n self.external_brains[brain_name],\n self.meta_curriculum\n .brains_to_curriculums[brain_name]\n .min_lesson_length if self.meta_curriculum else 0,\n trainer_parameters_dict[brain_name],\n self.train_model, self.load_model, self.seed, self.run_id)\n else:\n raise UnityEnvironmentException('The trainer config contains '\n 'an unknown trainer type for '\n 'brain {}'\n .format(brain_name))\n\n @staticmethod\n def _create_model_path(model_path):\n try:\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n except Exception:\n raise UnityEnvironmentException('The folder {} containing the '\n 'generated model could not be '\n 'accessed. 
Please make sure the '\n 'permissions are set correctly.'\n .format(model_path))\n\n def _reset_env(self, env):\n \"\"\"Resets the environment.\n\n Returns:\n A Data structure corresponding to the initial reset state of the\n environment.\n \"\"\"\n if self.meta_curriculum is not None:\n return env.reset(config=self.meta_curriculum.get_config())\n else:\n return env.reset()\n\n def start_learning(self, env, trainer_config):\n # TODO: Should be able to start learning at different lesson numbers\n # for each curriculum.\n if self.meta_curriculum is not None:\n self.meta_curriculum.set_all_curriculums_to_lesson_num(self.lesson)\n self._create_model_path(self.model_path)\n\n tf.reset_default_graph()\n\n # Prevent a single session from taking all GPU memory.\n self.initialize_trainers(trainer_config)\n for _, t in self.trainers.items():\n self.logger.info(t)\n\n curr_info = self._reset_env(env)\n if self.train_model:\n for brain_name, trainer in self.trainers.items():\n trainer.write_tensorboard_text('Hyperparameters',\n trainer.parameters)\n if sys.platform.startswith('win'):\n # Add the _win_handler function to the windows console's handler function list\n win32api.SetConsoleCtrlHandler(self._win_handler, True)\n try:\n while any([t.get_step <= t.get_max_steps \\\n for k, t in self.trainers.items()]) \\\n or not self.train_model:\n new_info = self.take_step(env, curr_info)\n self.global_step += 1\n if self.global_step % self.save_freq == 0 and self.global_step != 0 \\\n and self.train_model:\n # Save Tensorflow model\n self._save_model(steps=self.global_step)\n curr_info = new_info\n # Final save Tensorflow model\n if self.global_step != 0 and self.train_model:\n self._save_model(steps=self.global_step)\n except KeyboardInterrupt:\n if self.train_model:\n self._save_model_when_interrupted(steps=self.global_step)\n pass\n env.close()\n\n if self.train_model:\n self._export_graph()\n\n def take_step(self, env, curr_info):\n if self.meta_curriculum:\n # Get the sizes of the reward buffers.\n reward_buff_sizes = {k: len(t.reward_buffer) \\\n for (k, t) in self.trainers.items()}\n # Attempt to increment the lessons of the brains who\n # were ready.\n lessons_incremented = \\\n self.meta_curriculum.increment_lessons(\n self._get_measure_vals(),\n reward_buff_sizes=reward_buff_sizes)\n\n # If any lessons were incremented or the environment is\n # ready to be reset\n if (self.meta_curriculum\n and any(lessons_incremented.values())):\n curr_info = self._reset_env(env)\n for brain_name, trainer in self.trainers.items():\n trainer.end_episode()\n for brain_name, changed in lessons_incremented.items():\n if changed:\n self.trainers[brain_name].reward_buffer.clear()\n elif env.global_done:\n curr_info = self._reset_env(env)\n for brain_name, trainer in self.trainers.items():\n trainer.end_episode()\n\n # Decide and take an action\n take_action_vector, \\\n take_action_memories, \\\n take_action_text, \\\n take_action_value, \\\n take_action_outputs \\\n = {}, {}, {}, {}, {}\n for brain_name, trainer in self.trainers.items():\n (take_action_vector[brain_name],\n take_action_memories[brain_name],\n take_action_text[brain_name],\n take_action_value[brain_name],\n take_action_outputs[brain_name]) = \\\n trainer.take_action(curr_info)\n new_info = env.step(vector_action=take_action_vector,\n memory=take_action_memories,\n text_action=take_action_text,\n value=take_action_value)\n for brain_name, trainer in self.trainers.items():\n trainer.add_experiences(curr_info, new_info,\n 
take_action_outputs[brain_name])\n trainer.process_experiences(curr_info, new_info)\n if trainer.is_ready_update() and self.train_model \\\n and trainer.get_step <= trainer.get_max_steps:\n # Perform gradient descent with experience buffer\n trainer.update_policy()\n # Write training statistics to Tensorboard.\n if self.meta_curriculum is not None:\n trainer.write_summary(\n self.global_step,\n lesson_num=self.meta_curriculum\n .brains_to_curriculums[brain_name]\n .lesson_num)\n else:\n trainer.write_summary(self.global_step)\n if self.train_model \\\n and trainer.get_step <= trainer.get_max_steps:\n trainer.increment_step_and_update_last_reward()\n return new_info\n"
] | [
[
"tensorflow.set_random_seed",
"tensorflow.reset_default_graph",
"numpy.mean",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
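The trainer controller in the record above picks a curriculum "measure" per brain: 'progress' uses step / max_steps, while 'reward' uses the mean of the brain's reward buffer. A stripped-down, hypothetical version of that selection logic (function name and defaults are mine, not the ml-agents API):

```python
# Simplified sketch of the measure-value computation used for curricula above.
import numpy as np

def measure_value(measure, step, max_steps, reward_buffer):
    if measure == "progress":
        return step / max_steps
    if measure == "reward":
        return float(np.mean(reward_buffer)) if reward_buffer else 0.0
    raise ValueError(f"unknown measure: {measure}")

print(measure_value("progress", 2500, 10000, []))         # 0.25
print(measure_value("reward", 0, 1, [1.0, 2.0, 3.0]))      # 2.0
```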
sadielbartholomew/openscm-twolayermodel | [
"19b030571892a3238082765671e161ddd4c2ab97"
] | [
"tests/integration/test_impulse_response_integration.py"
] | [
"import numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom openscm_units import unit_registry as ur\nfrom scmdata import ScmRun\nfrom test_model_integration_base import TwoLayerVariantIntegrationTester\n\nfrom openscm_twolayermodel import ImpulseResponseModel\n\n\nclass TestTwoLayerModel(TwoLayerVariantIntegrationTester):\n\n tmodel = ImpulseResponseModel\n\n def test_run_scenarios_single(self):\n inp = self.tinp.copy()\n\n model = self.tmodel()\n\n res = model.run_scenarios(inp)\n\n model.set_drivers(\n inp.values.squeeze() * ur(inp.get_unique_meta(\"unit\", no_duplicates=True))\n )\n model.reset()\n model.run()\n\n npt.assert_allclose(\n res.filter(variable=\"Surface Temperature\").values.squeeze(),\n model._temp1_mag + model._temp2_mag,\n )\n assert (\n res.filter(variable=\"Surface Temperature\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res.filter(variable=\"Surface Temperature|Box 1\").values.squeeze(),\n model._temp1_mag,\n )\n assert (\n res.filter(variable=\"Surface Temperature|Box 1\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res.filter(variable=\"Surface Temperature|Box 2\").values.squeeze(),\n model._temp2_mag,\n )\n assert (\n res.filter(variable=\"Surface Temperature|Box 2\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res.filter(variable=\"Heat Uptake\").values.squeeze(), model._rndt_mag\n )\n assert (\n res.filter(variable=\"Heat Uptake\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"W/m^2\"\n )\n\n def test_run_scenarios_multiple(self):\n ts1_erf = np.linspace(0, 4, 101)\n ts2_erf = np.sin(np.linspace(0, 4, 101))\n\n inp = ScmRun(\n data=np.vstack([ts1_erf, ts2_erf]).T,\n index=np.linspace(1750, 1850, 101).astype(int),\n columns={\n \"scenario\": [\"test_scenario_1\", \"test_scenario_2\"],\n \"model\": \"unspecified\",\n \"climate_model\": \"junk input\",\n \"variable\": \"Effective Radiative Forcing\",\n \"unit\": \"W/m^2\",\n \"region\": \"World\",\n },\n )\n\n model = self.tmodel()\n\n res = model.run_scenarios(inp)\n\n for scenario_ts in inp.groupby(\"scenario\"):\n scenario = scenario_ts.get_unique_meta(\"scenario\", no_duplicates=True)\n\n model.set_drivers(\n scenario_ts.values.squeeze()\n * ur(inp.get_unique_meta(\"unit\", no_duplicates=True))\n )\n model.reset()\n model.run()\n\n res_scen = res.filter(scenario=scenario)\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Surface Temperature\").values.squeeze(),\n model._temp1_mag + model._temp2_mag,\n )\n assert (\n res_scen.filter(variable=\"Surface Temperature\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Surface Temperature|Box 1\").values.squeeze(),\n model._temp1_mag,\n )\n assert (\n res_scen.filter(variable=\"Surface Temperature|Box 1\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Surface Temperature|Box 2\").values.squeeze(),\n model._temp2_mag,\n )\n assert (\n res_scen.filter(variable=\"Surface Temperature|Box 2\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Heat Uptake\").values.squeeze(),\n model._rndt_mag,\n )\n assert (\n res.filter(variable=\"Heat Uptake\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"W/m^2\"\n )\n\n 
@pytest.mark.parametrize(\n \"driver_var\",\n (\"Effective Radiative Forcing\", \"Effective Radiative Forcing|CO2\",),\n )\n def test_run_scenarios_multiple_drive_var(self, driver_var):\n ts1_erf = np.linspace(0, 4, 101)\n ts1_erf_co2 = 0.9 * ts1_erf\n ts2_erf = np.sin(np.linspace(0, 4, 101))\n ts2_erf_co2 = np.cos(np.linspace(0, 4, 101)) * ts2_erf\n\n inp = ScmRun(\n data=np.vstack([ts1_erf, ts1_erf_co2, ts2_erf, ts2_erf_co2]).T,\n index=np.linspace(1750, 1850, 101).astype(int),\n columns={\n \"scenario\": [\n \"test_scenario_1\",\n \"test_scenario_1\",\n \"test_scenario_2\",\n \"test_scenario_2\",\n ],\n \"model\": \"unspecified\",\n \"climate_model\": \"junk input\",\n \"variable\": [\n \"Effective Radiative Forcing\",\n \"Effective Radiative Forcing|CO2\",\n \"Effective Radiative Forcing\",\n \"Effective Radiative Forcing|CO2\",\n ],\n \"unit\": \"W/m^2\",\n \"region\": \"World\",\n },\n )\n\n model = self.tmodel()\n\n res = model.run_scenarios(inp, driver_var=driver_var)\n\n for scenario_ts in inp.groupby(\"scenario\"):\n scenario = scenario_ts.get_unique_meta(\"scenario\", no_duplicates=True)\n\n driver = scenario_ts.filter(variable=driver_var)\n model.set_drivers(\n driver.values.squeeze()\n * ur(inp.get_unique_meta(\"unit\", no_duplicates=True))\n )\n model.reset()\n model.run()\n\n res_scen = res.filter(scenario=scenario)\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Surface Temperature|Box 1\").values.squeeze(),\n model._temp1_mag,\n )\n assert (\n res.filter(variable=\"Surface Temperature|Box 1\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Surface Temperature|Box 2\").values.squeeze(),\n model._temp2_mag,\n )\n assert (\n res.filter(variable=\"Surface Temperature|Box 2\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"delta_degC\"\n )\n\n npt.assert_allclose(\n res_scen.filter(variable=\"Heat Uptake\").values.squeeze(),\n model._rndt_mag,\n )\n assert (\n res.filter(variable=\"Heat Uptake\").get_unique_meta(\n \"unit\", no_duplicates=True\n )\n == \"W/m^2\"\n )\n\n def test_run_scenario_timestep_followed(self, check_equal_pint):\n inp = self.tinp.copy()\n\n model = self.tmodel()\n\n res = model.run_scenarios(inp)\n check_equal_pint(model.delta_t, 1 * ur(\"yr\"))\n\n inp_monthly = inp.resample(\"MS\")\n res_monthly = model.run_scenarios(inp_monthly)\n check_equal_pint(model.delta_t, 1 * ur(\"month\"))\n\n comp_filter = {\n \"variable\": \"Surface Temperature\",\n \"year\": int(\n res[\"year\"].iloc[-1]\n ), # scmdata bug that you have to wrap this with int()\n \"month\": 1,\n }\n\n # running with two different timesteps should give approximately same results\n npt.assert_allclose(\n res.filter(**comp_filter).values.squeeze(),\n res_monthly.filter(**comp_filter).values.squeeze(),\n rtol=6 * 1e-3,\n )\n res.filter(variable=\"Surface Temperature\")\n"
] | [
[
"numpy.vstack",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
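The last test in the record compares a yearly-step and a monthly-step run of the same model and asserts they agree only up to a relative tolerance. The sketch below reproduces that pattern on a toy explicit-Euler integration; the decay equation, parameters, and tolerance are made up for illustration and are not the two-layer model's.

```python
# Toy illustration of the coarse-vs-fine timestep comparison pattern above.
import numpy.testing as npt

def integrate(forcing, dt, n_steps, tau=4.0):
    """Explicit Euler for dT/dt = forcing - T / tau."""
    temp = 0.0
    for _ in range(n_steps):
        temp += dt * (forcing - temp / tau)
    return temp

coarse = integrate(1.0, dt=1.0, n_steps=10)        # "yearly" steps
fine = integrate(1.0, dt=1.0 / 12, n_steps=120)    # "monthly" steps
npt.assert_allclose(coarse, fine, rtol=6e-2)       # approximately equal only
```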
tanglemontree/StockPrediction-1 | [
"80e95bcdd6d77b9250d3b6a452850c1184fad29a"
] | [
"LSTM3/Technical.py"
] | [
"# coding: utf-8\n\n# In[4]:\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\n\n\n\n# In[7]:\n\ndef moving_average(values, period):\n \"\"\"\n 移動平均を計算するのです。\n * values: 調整後終値を指定するのです。\n * period: 期間なのです。\n \"\"\"\n return DataFrame(values).rolling(period).mean()\n\n\n# In[8]:\n\nif __name__ == '__main__':\n print(moving_average(adj_end, 25))\n print(moving_average(adj_end, 75))\n\n\n# In[9]:\n\ndef moving_average_deviation_rate(values, period):\n \"\"\"\n 移動平均乖離率を計算するのです。\n * values: 調整後終値を指定するのです。\n * period: 期間なのです。\n \"\"\"\n _values = DataFrame(values)\n ma = moving_average(_values, period)\n return (_values - ma) / ma\n\n\n# In[10]:\n\nif __name__ == '__main__':\n print(moving_average_deviation_rate(adj_end, 5))\n print(moving_average_deviation_rate(adj_end, 25))\n\n\n# In[11]:\n\ndef macd(values, short_period, long_period, signal_period):\n \"\"\"\n MACD とその signal を計算するのです。\n * values: 調整後終値を指定するのです。\n * short_period: 短期の期間なのです。\n * long_period: 長期の期間なのです。\n * signal_period: signal の期間なのです。\n * return: MACD と MACD Signal を返すのです。\n \"\"\"\n _values = DataFrame(values)\n shorts = _values.ewm(span=short_period).mean()\n longs = _values.ewm(span=long_period).mean()\n _macd = shorts - longs\n return _macd, _macd.ewm(span=signal_period).mean()\n\n\n# In[12]:\n\nif __name__ == '__main__':\n print(macd(adj_end, 12, 26, 9))\n\n\n# In[13]:\n\ndef momentum(values, period):\n \"\"\"\n モメンタムを計算するのです。\n * values: 調整後終値を指定するのです。\n * period: 期間なのです。\n * return: Momentum を返すのです。\n \"\"\"\n _values = DataFrame(values)\n pasts = _values.shift(period)\n return (_values - pasts) / period\n\n\n# In[14]:\n\nif __name__ == '__main__':\n print(momentum(adj_end, 9))\n\n\n# In[15]:\n\ndef roc(values, period):\n \"\"\"\n ROC を計算するのです。\n * values: 調整後終値を指定するのです。\n * period: 期間なのです。\n * return: 終値ベースの ROC を返すのです。\n \"\"\"\n _values = DataFrame(values)\n pasts = _values.shift(period)\n return (_values - pasts) / _values\n\n\n# In[16]:\n\nif __name__ == '__main__':\n print(roc(adj_end, 12))\n\n\n# In[17]:\n\ndef rsi(values, period):\n \"\"\"\n Wilder の RSI を計算するのです。\n * values: 調整後終値を指定するのです。\n * period: 期間なのです。\n * return: Wilder の RSI の値なのです。\n \"\"\"\n _values = DataFrame(values)\n # 前日との差\n _diff = _values.diff(1)\n # 上がったやつ\n _posi = _diff.clip_lower(0).ewm(alpha=1/period).mean()\n # 下がったやつ\n _nega = _diff.clip_upper(0).ewm(alpha=1/period).mean()\n return _posi / (_posi - _nega)\n\n\n# In[18]:\n\nif __name__ == '__main__':\n print(rsi(adj_end, 14))\n\n\n# In[19]:\n\ndef stochastic_K(values_end, values_high, values_low, period):\n \"\"\"\n ストキャスティクス の %K を計算するのです。\n * values_end: 終値を指定するのです。\n * values_high: 高値を指定するのです。\n * values_low: 安値を指定するのです。\n * period: 期間なのです。\n * return: %K の値なのです。\n \"\"\"\n \"\"\"\n %K={ (C-L9)÷(H9-L9) }×100%\n C:当日終値\n L9:過去x日間の最安値。xとしては、14, 9, 5 などが使用されることが多い。\n H9:過去x日間の最高値\n \"\"\"\n end = DataFrame(values_end)\n high = DataFrame(values_high)\n low = DataFrame(values_low)\n \n hline = high.rolling(period).max()\n lline = low.rolling(period).min()\n return (end - lline) / (hline - lline)\n\n\ndef stochastic_D(values_end, values_high, values_low, period_K, period):\n \"\"\"\n ストキャスティクス の %D を計算するのです。\n * values_end: 終値を指定するのです。\n * values_high: 高値を指定するのです。\n * values_low: 安値を指定するのです。\n * period_K: %K の期間なのです。\n * period: 期間なのです。\n * return: %D の値なのです。\n \"\"\"\n \"\"\"\n %D=(H3÷L3)×100%\n H3:(C-L9)のy日間合計。(C-L9)の単純移動平均。yとしては3が使われることが多い。\n L3:(H9-L9)のy日間合計。(H9-L9)の単純移動平均。\n \"\"\"\n end = DataFrame(values_end)\n high = DataFrame(values_high)\n low = 
DataFrame(values_low)\n \n hline = high.rolling(period_K).max()\n lline = low.rolling(period_K).min()\n \n sumlow = (end - lline).rolling(period).sum()\n sumhigh = (hline - lline).rolling(period).sum()\n \n return sumlow / sumhigh\n\ndef stochastic_slowD(values_end, values_high, values_low, period_K, period_D, period):\n d = stochastic_D(values_end, values_high, values_low, period_K, period_D)\n return d.rolling(period).mean()\n\n\n# In[21]:\n\nif __name__ == '__main__':\n print(stochastic_K(end, high, low, 5))\n print(stochastic_D(end, high, low, 5, 3))\n\n print(stochastic_D(end, high, low, 15, 3))\n print(stochastic_slowD(end, high, low, 15, 3, 3))\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
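The Technical.py record above quotes the stochastic oscillator formula %K = (C - L9) / (H9 - L9) and relies on the deprecated pandas clip_lower/clip_upper helpers. A small sketch of the same %K computation with current pandas rolling methods; the function name and sample data are illustrative only.

```python
# Stochastic %K as described in the record, using current pandas APIs.
import pandas as pd

def stochastic_k(close, high, low, period=14):
    """%K = (close - lowest low) / (highest high - lowest low) over `period`."""
    lowest = pd.Series(low).rolling(period).min()
    highest = pd.Series(high).rolling(period).max()
    return (pd.Series(close) - lowest) / (highest - lowest)

close = pd.Series([10, 11, 12, 11, 13, 14, 13, 15], dtype=float)
print(stochastic_k(close, close + 1, close - 1, period=3))
```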
alexanu/tf-quant-finance | [
"d0eb0e778d2422c6190844ef8f8c457ae25f9265",
"d0eb0e778d2422c6190844ef8f8c457ae25f9265",
"d0eb0e778d2422c6190844ef8f8c457ae25f9265",
"d0eb0e778d2422c6190844ef8f8c457ae25f9265",
"d0eb0e778d2422c6190844ef8f8c457ae25f9265"
] | [
"tf_quant_finance/models/joined_ito_process_test.py",
"tf_quant_finance/experimental/instruments/overnight_index_linked_futures.py",
"tf_quant_finance/experimental/dates/schedules.py",
"tf_quant_finance/experimental/dates/holiday_calendar_v2.py",
"tf_quant_finance/models/utils_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the join of Ito processes.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass JoinedItoProcessTest(tf.test.TestCase):\n\n def test_join_hull_white(self):\n \"\"\"Tests that join of Hull White is the same as VectorHullWhite.\"\"\"\n tf.random.set_seed(42) # Fix global random seed\n dtype = np.float64\n instant_forward_rate_fn_1 = lambda t: 2 * [0.2]\n process_1 = tff.models.hull_white.VectorHullWhiteModel(\n dim=2, mean_reversion=[0.1, 0.2], volatility=[0.1, 0.2],\n instant_forward_rate_fn=instant_forward_rate_fn_1,\n dtype=dtype)\n instant_forward_rate_fn_2 = lambda t: 3 * [0.1]\n process_2 = tff.models.hull_white.VectorHullWhiteModel(\n dim=3, mean_reversion=[0.3, 0.4, 0.5], volatility=[0.1, 0.1, 0.1],\n instant_forward_rate_fn=instant_forward_rate_fn_2,\n dtype=dtype)\n # Correlation structure\n corr_1 = [[1.0, 0.3, 0.2],\n [0.3, 1.0, 0.5],\n [0.2, 0.5, 1.0]]\n def corr_2(t):\n del t\n return [[1.0, 0.1], [0.1, 1.0]]\n matrices = [corr_1, corr_2]\n process_join = tff.models.JoinedItoProcess([process_1, process_2], matrices)\n expected_corr_matrix = np.array([[1.0, 0.3, 0.2, 0.0, 0.0],\n [0.3, 1.0, 0.5, 0.0, 0.0],\n [0.2, 0.5, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.1],\n [0.0, 0.0, 0.0, 0.1, 1.0]])\n expected_mean = [0.0109434, 0.02356047, 0.01500711, 0.01915375, 0.0230985]\n expected_var = [0.00475813, 0.01812692, 0.0043197, 0.004121, 0.00393469]\n num_samples = 110000\n samples = process_join.sample_paths(\n times=[0.1, 0.5], time_step=0.01, num_samples=num_samples,\n random_type=tff.math.random.RandomType.PSEUDO_ANTITHETIC,\n seed=42)\n self.assertEqual(samples.dtype, dtype)\n self.assertEqual(samples.shape, [num_samples, 2, 5])\n samples = self.evaluate(samples)\n self.assertAllClose(np.corrcoef(samples[:, -1, :], rowvar=False),\n expected_corr_matrix, rtol=1e-2, atol=1e-2)\n self.assertAllClose(np.mean(samples[:, -1, :], axis=0),\n expected_mean, rtol=1e-3, atol=1e-3)\n self.assertAllClose(np.var(samples[:, -1, :], axis=0),\n expected_var, rtol=1e-3, atol=1e-3)\n\n def test_invalid_processes(self):\n \"\"\"Tests that all proceses should be `ItoProcess`es.\"\"\"\n def drift_fn(t, x):\n del t, x\n return -1. / 2\n def vol_fn(t, x):\n del t\n return tf.ones([1, 1], dtype=x.dtype)\n process = tff.models.GenericItoProcess(\n dim=1, drift_fn=drift_fn, volatility_fn=vol_fn)\n with self.assertRaises(ValueError):\n tff.models.JoinedItoProcess([process, lambda x: x], [[1.0], [1.0]])\n\n def test_inconsistent_dtype(self):\n \"\"\"Tests that all proceses should have the same dtype.\"\"\"\n def drift_fn(t, x):\n del t, x\n return -1. 
/ 2\n def vol_fn(t, x):\n del t\n return tf.ones([1, 1], dtype=x.dtype)\n process_1 = tff.models.GenericItoProcess(\n dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=np.float32)\n process_2 = tff.models.GenericItoProcess(\n dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=np.float64)\n with self.assertRaises(ValueError):\n tff.models.JoinedItoProcess([process_1, process_2], [[1.0], [1.0]])\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Futures contracts on overnight rates.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom tf_quant_finance.experimental import dates\nfrom tf_quant_finance.experimental.instruments import rates_common as rc\n\n\nclass OvernightIndexLinkedFutures:\n \"\"\"Represents a collection of futures linked to an average of overnight rates.\n\n Overnight index futures are exchange traded futures contracts where the\n underlying reference rates are the published overnight rates such as\n Secured Overnight Financing Rate (SOFR), Effective Fed Funds Rate (EFFR) etc.\n These contracts are generally cash settled where the settlement price is\n evaluated on the basis of realized reference rate values during the contract\n reference period (or delivery period). Typically the settlement price is\n based on componding the published daily reference rate during the delivery\n period or based on the arithmetic average of the reference rate during the\n delivery period.\n An overnight index future contract on the settlement date T settles at the\n price\n\n `100 * (1 - R)`\n\n If R is evaluated based on compunding the realized index values during the\n reference period then:\n\n `R = [Product[(1 + tau_i * r_i), 1 <= i <= N] - 1] / Sum[tau_i, 1 <= i <= N]`\n\n If R is evaulated based on the arithmetic average of the realized index\n during the reference period, then:\n\n `R = Sum(r_i, 1 <= i <= N) / N`\n\n where `i` is the variable indexing the business days within the delivery\n period, tau_i denotes the year fractions between successive business days\n taking into account the appropriate daycount convention and N is the number of\n calendar days in the delivery period. See [1] for SOFR futures on CME.\n\n The OvernightIndexLinkedFutures class can be used to create and price multiple\n contracts simultaneously. 
However all contracts within an object must be\n priced using a common reference curve.\n\n #### Example:\n The following example illustrates the construction of an overnight index\n future instrument and calculating its price.\n\n ```python\n\n import numpy as np\n import tensorflow as tf\n import tf_quant_finance as tff\n\n dates = tff.experimental.dates\n instruments = tff.experimental.instruments\n\n dtype = np.float64\n notional = 1.\n contract_start_date = dates.convert_to_date_tensor([(2021, 2, 8)])\n contract_end_date = dates.convert_to_date_tensor([(2021, 5, 8)])\n valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])\n\n future = instruments.OvernightIndexLinkedFutures(\n contract_start_date, contract_end_date, dtype=dtype)\n\n curve_dates = valuation_date + dates.periods.PeriodTensor(\n [1, 2, 3, 12, 24, 60], dates.PeriodType.MONTH)\n reference_curve = instruments.RateCurve(\n curve_dates,\n np.array([0.02, 0.025, 0.0275, 0.03, 0.035, 0.0325], dtype=dtype),\n dtype=dtype)\n\n market = instruments.InterestRateMarket(reference_curve=reference_curve,\n discount_curve=None)\n\n price = future.price(valuation_date, market)\n\n #### References:\n [1]: SOFR futures settlement calculation.\n https://www.cmegroup.com/education/files/sofr-futures-settlement-calculation-methodologies.pdf\n \"\"\"\n\n def __init__(self,\n contract_start_date,\n contract_end_date,\n daycount_convention=None,\n averaging_type=None,\n contract_unit=1.,\n holiday_calendar=None,\n dtype=None,\n name=None):\n \"\"\"Initialize the Overnight index futures object.\n\n Args:\n contract_start_date: A Rank 1 `DateTensor` specifying the start dates of\n the reference period (or delivery period) of each futures contract. The\n published overnight index during the reference period determines the\n final settlement price of the futures contract.\n contract_end_date: A Rank 1 `DateTensor` specifying the ending dates of\n the reference period (or delivery period) of each futures contract.\n daycount_convention: An optional scalar `DayCountConvention` corresponding\n to the day count convention for the underlying rate for each contract.\n Default value: None in which case each the day count convention equal to\n DayCountConvention.ACTUAL_360 is used.\n averaging_type: An optional `AverageType` corresponding to how the\n final settlement rate is computed from daily rates.\n Default value: None, in which case `AverageType.COMPOUNDING` is used.\n contract_unit: An optional scalar or Rank 1 `Tensor` of real dtype\n specifying the notional amount for the contract. If the notional is\n entered as a scalar, it is assumed that all of the contracts have a\n notional equal to the input value.\n Default value: 1.0\n holiday_calendar: An instance of `dates.HolidayCalenday` to specify\n weekends and holidays.\n Default value: None in which case a holiday calendar would be created\n with Saturday and Sunday being the holidays.\n dtype: `tf.Dtype`. If supplied the dtype for the real variables or ops\n either supplied to the EurodollarFuture object or created by the\n EurodollarFuture object.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. 
The name to give to the ops created by this class.\n Default value: `None` which maps to 'eurodollar_future'.\n \"\"\"\n self._name = name or 'overnight_rate_futures'\n\n with tf.compat.v2.name_scope(self._name):\n self._contract_unit = tf.convert_to_tensor(\n contract_unit, dtype=dtype)\n self._dtype = dtype if dtype else self._contract_unit.dtype\n self._start_date = dates.convert_to_date_tensor(contract_start_date)\n self._end_date = dates.convert_to_date_tensor(contract_end_date)\n self._batch_size = self._start_date.shape[0]\n\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n\n if averaging_type is None:\n averaging_type = rc.AverageType.COMPOUNDING\n\n if holiday_calendar is None:\n holiday_calendar = dates.HolidayCalendar2(\n weekend_mask=dates.WeekendMask.SATURDAY_SUNDAY)\n\n self._daycount_convention = daycount_convention\n self._averaging_type = averaging_type\n self._holiday_calendar = holiday_calendar\n self._rate_tenor = dates.periods.PeriodTensor(1, dates.PeriodType.DAY)\n\n self._setup()\n\n def price(self, valuation_date, market, model=None, name=None):\n \"\"\"Returns the price of the contract on the valuation date.\n\n Args:\n valuation_date: A scalar `DateTensor` specifying the date on which\n valuation is being desired.\n market: An object of type `InterestRateMarket` which contains the\n necessary information for pricing the FRA instrument.\n model: Reserved for future use.\n name: Python string. The name to give this op.\n Default value: `None` which maps to `price`.\n\n Returns:\n A Rank 1 `Tensor` of real type containing the modeled price of each\n futures contract based on the input market data.\n \"\"\"\n\n del model, valuation_date\n\n name = name or (self._name + '_price')\n with tf.name_scope(name):\n reference_curve = market.reference_curve\n\n df1 = reference_curve.get_discount_factor(self._accrual_start_dates)\n df2 = reference_curve.get_discount_factor(self._accrual_end_dates)\n\n fwd_rates = (df1 / df2 - 1.) / self._accrual_daycount\n\n total_accrual = tf.math.segment_sum(self._daycount_fractions,\n self._contract_idx)\n if self._averaging_type == rc.AverageType.ARITHMETIC_AVERAGE:\n\n settlement_rate = tf.math.segment_sum(\n fwd_rates * self._daycount_fractions,\n self._contract_idx) / total_accrual\n else:\n settlement_rate = (tf.math.segment_prod(\n 1. + fwd_rates * self._daycount_fractions, self._contract_idx) -\n 1.) / total_accrual\n\n return 100. * (1. 
- settlement_rate)\n\n def _setup(self):\n \"\"\"Setup relevant tensors for efficient computations.\"\"\"\n\n reset_dates = []\n contract_idx = []\n daycount_fractions = []\n for i in range(self._batch_size):\n instr_reset_dates = dates.PeriodicSchedule(\n start_date=self._start_date[i] + self._rate_tenor,\n end_date=self._end_date[i],\n tenor=self._rate_tenor,\n holiday_calendar=self._holiday_calendar,\n roll_convention=dates.BusinessDayConvention.FOLLOWING).dates()\n\n # Append the start_date of the contract\n instr_reset_dates = dates.DateTensor.concat([\n self._start_date[i].expand_dims(axis=0),\n instr_reset_dates], axis=0)\n\n # Add one day beyond the end of the delivery period to compute the\n # accrual on the last day of the delivery.\n one_period_past_enddate = self._end_date[i] + self._rate_tenor\n instr_reset_dates = dates.DateTensor.concat([\n instr_reset_dates,\n one_period_past_enddate.expand_dims(axis=0)], axis=0)\n\n instr_daycount_fractions = rc.get_daycount_fraction(\n instr_reset_dates[:-1], instr_reset_dates[1:],\n self._daycount_convention, self._dtype)\n\n reset_dates.append(instr_reset_dates[:-1])\n daycount_fractions.append(instr_daycount_fractions)\n contract_idx.append(tf.fill(tf.shape(instr_daycount_fractions), i))\n\n self._reset_dates = dates.DateTensor.concat(reset_dates, axis=0)\n self._accrual_start_dates = self._reset_dates\n self._accrual_end_dates = self._reset_dates + self._rate_tenor\n self._accrual_daycount = rc.get_daycount_fraction(\n self._accrual_start_dates, self._accrual_end_dates,\n self._daycount_convention, self._dtype)\n self._daycount_fractions = tf.concat(daycount_fractions, axis=0)\n self._contract_idx = tf.concat(contract_idx, axis=0)\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Functions for creating schedules.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance.experimental.dates import constants\nfrom tf_quant_finance.experimental.dates import date_tensor\n\n\n_MIN_DAYS_IN_PERIOD = {\n constants.PeriodType.DAY: 1,\n constants.PeriodType.WEEK: 7,\n constants.PeriodType.MONTH: 28,\n constants.PeriodType.YEAR: 365\n}\n\n\nclass PeriodicSchedule:\n \"\"\"Defines an array of dates specified by a regular schedule.\"\"\"\n\n def __init__(self,\n *,\n start_date,\n end_date,\n tenor,\n holiday_calendar=None,\n roll_convention=constants.BusinessDayConvention.NONE,\n backward=False,\n end_of_month=False):\n \"\"\"Initializes the schedule.\n\n Initializes a schedule with a given tenor, date range and holiday calendar.\n\n A schedule is an increasing sequence of dates at a regular interval subject\n to holiday adjustments.\n\n The rules for schedule generation (accessed via the `dates()` method)\n are as follows.\n\n (a) If `backward=False`, take `start_date` and add `tenor` multiplied by\n 0, 1, 2, etc. until the resulting date is greater than `end_date`.\n (b) If `backward=True`, take `end_date` and subtract `tenor` multiplied by\n 0, 1, 2, etc. until the resulting date is smaller than `start_date`.\n Ensure that the result is in ascending order.\n (c) Both `start_date` and `end_date` are included, even if the distance\n between then is not an integer number of tenor periods.\n (d) If `holiday_calendar` is specified, roll all the dates according to\n `roll_convention`. The rolling includes `start_date` and `end_date` when\n they are part of resulting schedule. Thus if `start_date` or `end_date`\n fall on holidays, they will change and may go out of the\n [`start_date`, `end_date`] interval.\n\n Note that `tenor = PeriodType.DAY` is treated as an actual day, not as\n a business day. So a schedule with `tenor = periods.days(7)` is the same\n as one with `tenor = periods.week()`.\n\n The `dates()` can create multiple schedules simultaneously.\n The start and end dates may have any (compatible) shape.\n The `DateTensor` returned by `dates()` has the shape\n `start_date.shape + (n,)`, where `n` is the maximum length of schedules in\n the batch. If schedules have different lengths, the extra elements will be\n padded with extra `end_date` elements at the end, if `backward=False`\n and with extra `start_date` elements in the beginning if\n `backward=True`. 
In all cases each schedule in the batch is monotonic.\n\n The following examples demonstrate the batch and non-batch usage.\n\n #### Example Usage (Non-batch)\n\n ```python\n start_date = dates.from_tuples([(2020, 1, 18)])\n end_date = dates.from_tuples([(2021, 3, 25)])\n tenor = dates.periods.months(3)\n backward = False\n holiday_calendar = dates.HolidayCalendar(start_year=2020, end_year=2021)\n roll_convention = dates.BusinessDayConvention.FOLLOWING\n schedule = dates.PeriodicSchedule(\n start_date=start_date,\n end_date=end_date,\n tenor=tenor,\n holiday_calendar=holiday_calendar,\n roll_convention=dates.BusinessDayConvention.FOLLOWING,\n backward=backward).dates()\n # schedule is a DateTensor of\n # [[(2020, 1, 18), (2020, 4, 20), (2020, 7, 20), (2020, 10, 19),\n # (2021, 1, 18), (2021, 3, 25)]] for backward = False and\n # [[(2020, 1, 18), (2020, 3, 25), (2020, 6, 25), (2020, 9, 25),\n # (2020, 12, 25), (2021, 3, 25)]] for backward = True.\n ```\n\n The following example demonstrates this batching property.\n\n #### Example Usage (Batch)\n\n ```python\n start_date = dates.from_tuples([(2020, 1, 15), (2020, 4, 15)])\n end_date = dates.from_tuples([(2021, 3, 31), (2021, 1, 1)])\n tenor = dates.months([4, 3])\n schedule = dates.PeriodicSchedule(\n start_dates,\n end_dates,\n tenors,\n dates.HolidayCalendar(start_year=2020, end_year=2021),\n roll_convention=dates.BusinessDayConvention.FOLLOWING,\n backward=False).dates()\n # Returns DateTensor of\n # [[(2020, 1, 15), (2020, 5, 15), (2020, 9, 15), (2021, 1, 15),\n # (2021, 3, 31)],\n # [(2020, 4, 15), (2020, 7, 15), (2020, 10, 15), (2021, 1, 1),\n # (2021, 1, 1)]].\n ```\n\n Args:\n start_date: `dates.DateTensor`. Defines the lower boundary of schedule. If\n `backward=True` must be broadcastable to `end_date`, otherwise has\n arbitrary shape.\n end_date: `dates.DateTensor`. Defines the upper boundary of the schedule.\n If `backward=False` must be broadcastable to `start_date`, otherwise has\n arbitrary shape.\n tenor: `periods.PeriodTensor`. Defines the frequency of the schedule. Must\n be broadcastable to `start_date` if `backward=False`, and to `end_date`\n if `backward=True`.\n holiday_calendar: `dates.HolidayCalendar`. If `None`, the dates in the\n schedule will not be rolled to business days.\n roll_convention: BusinessDayConvention. Defines how dates in the schedule\n should be rolled to business days if they fall on holidays. Ignored if\n `holiday_calendar = None`.\n Default value: BusinessDayConvention.NONE (i.e. no rolling).\n backward: Python `bool`. Whether to build the schedule from the\n `start_date` moving forwards or from the `end_date` and moving\n backwards.\n end_of_month: Python `bool`. If `True`, shifts all dates in schedule to\n the ends of corresponding months, if `start_date` or `end_date` (\n depending on `backward`) is at the end of a month. The shift is applied\n before applying `roll_convention`. 
In the batched case, only those\n schedules in a batch, whose corresponding `start_date` (or `end_date`)\n are at ends of months, will be shifted.\n \"\"\"\n if end_of_month and tenor.period_type() not in [constants.PeriodType.MONTH,\n constants.PeriodType.YEAR]:\n raise ValueError(\n \"end_of_month may only be used with tenors of PeriodType.MONTH or \"\n \"PeriodType.YEAR\"\n )\n\n self._start_date = start_date\n self._end_date = end_date\n self._tenor = tenor\n self._holiday_calendar = holiday_calendar\n self._roll_convention = roll_convention\n self._backward = backward\n self._end_of_month = end_of_month\n\n def dates(self):\n \"\"\"Returns the dates as computed from the schedule as a DateTensor.\n\n Constructs the date schedule from the supplied data. For more details see\n the initializer docstring.\n\n Returns:\n `DateTensor` of rank one more than `start_date` or `end_date`\n (depending on `backwards`), representing schedules for each element\n of the input.\n \"\"\"\n return _gen_periodic_schedule(\n self._start_date,\n self._end_date,\n self._tenor,\n holiday_calendar=self._holiday_calendar,\n roll_convention=self._roll_convention,\n backward=self._backward,\n end_of_month=self._end_of_month)\n\n @property\n def start_date(self):\n return self._start_date\n\n @property\n def end_date(self):\n return self._end_date\n\n @property\n def tenor(self):\n return self._tenor\n\n @property\n def holiday_calendar(self):\n return self._holiday_calendar\n\n @property\n def roll_convention(self):\n return self._roll_convention\n\n @property\n def generate_backwards(self):\n \"\"\"Returns whether the schedule is generated from the end date.\"\"\"\n return self._backward\n\n @property\n def end_of_month(self):\n return self._end_of_month\n\n\nclass BusinessDaySchedule:\n \"\"\"Generates schedules containing every business day in a period.\"\"\"\n\n def __init__(self,\n *,\n start_date,\n end_date,\n holiday_calendar,\n backward=False):\n \"\"\"Initializes the schedule.\n\n Initializes a schedule with a given date range and holiday calendar.\n\n The schedule includes all business days between and including `start_date`\n and `end_date`.\n\n Can create multiple schedules simultaneously. The start and end dates may\n have any (compatible) shape. The `DateTensor` returned by `dates()` has the\n shape `start_date.shape + (n,)`, where `n` is the maximum length of\n schedules in the batch. If schedules have different lengths, the extra\n elements will be padded with extra `end_date` elements at the end, if\n `backward=False` and with extra `start_date` elements in the beginning if\n `backward=True`. 
In all cases each schedule in the batch is monotonic.\n\n #### Example Usage (Non-batch)\n\n ```python\n start_date = dates.from_tuples([(2020, 3, 19)])\n end_date = dates.from_tuples([(2021, 3, 25)])\n holiday_calendar = dates.HolidayCalendar(start_year=2020, end_year=2021)\n schedule = dates.BusinessDaysSchedule(\n start_date=start_date,\n end_date=end_date,\n holiday_calendar=holiday_calendar,\n roll_convention=dates.BusinessDayConvention.FOLLOWING,\n backward=False).dates()\n # schedule is a DateTensor of\n # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),\n # (2021, 3, 25)]] regardless of `backward`.\n ```\n\n #### Example Usage (Batch)\n\n ```python\n start_date = dates.from_tuples([(2020, 3, 19), (2020, 4, 15)])\n end_date = dates.from_tuples([(2021, 3, 13), (2021, 3, 17)])\n schedule = dates.BusinessDaysSchedule(\n start_dates,\n end_dates,\n dates.HolidayCalendar(start_year=2020, end_year=2021),\n backward=False).dates()\n # Returns DateTensor of\n # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),\n # (2021, 3, 25)],\n # [(2020, 3, 13), (2020, 3, 16), (2020, 3, 17), (2020, 3, 17),\n # (2021, 3, 17)]], if `backward` is True.\n # [[(2020, 3, 19), (2020, 3, 20), (2020, 3, 23), (2020, 3, 24),\n # (2021, 3, 25)],\n # [(2020, 3, 13), (2020, 3, 13), (2020, 3, 13), (2020, 3, 16),\n # (2021, 3, 17)]], if `backward` is True.\n ```\n Args:\n start_date: `dates.DateTensor`. Defines the lower boundary of schedule. If\n `backward=True` must be broadcastable to `end_date`, otherwise has\n arbitrary shape.\n end_date: `dates.DateTensor`. Defines the upper boundary of the schedule.\n If `backward=False` must be broadcastable to `start_date`, otherwise has\n arbitrary shape.\n holiday_calendar: `dates.HolidayCalendar` that defines which days will be\n included.\n backward: Python `bool`. Defines the way padding is applied in case of\n batching. If schedules in a batch have different lengths, the extra\n elements will be padded with extra `end_date` elements at the end, if\n `backward=False` and with extra `start_date` elements in the beginning\n if `backward=True`.\n \"\"\"\n self._start_date = start_date\n self._end_date = end_date\n self._holiday_calendar = holiday_calendar\n self._backward = backward\n\n def dates(self):\n \"\"\"Returns the dates as computed from the schedule as a DateTensor.\n\n Constructs the date schedule from the supplied data. 
For more details see\n the initializer docstring.\n\n Returns:\n `DateTensor` of rank one more than `start_date` or `end_date`\n (depending on `backwards`), representing schedules for each element\n of the input.\n \"\"\"\n return _gen_business_days(self._start_date,\n self._end_date,\n self._holiday_calendar,\n self._backward)\n\n @property\n def holiday_calendar(self):\n return self._holiday_calendar\n\n @property\n def start_date(self):\n return self._start_date\n\n @property\n def end_date(self):\n return self._end_date\n\n @property\n def generate_backwards(self):\n return self._backward\n\n\ndef _gen_periodic_schedule(start_date,\n end_date,\n tenor,\n holiday_calendar=None,\n roll_convention=constants.BusinessDayConvention.NONE,\n backward=False,\n end_of_month=False):\n \"\"\"Generates a periodic schedule, see PeriodicSchedule.\"\"\"\n\n # Validate inputs.\n control_deps = [\n tf.debugging.assert_greater_equal(end_date.ordinal(),\n start_date.ordinal()),\n tf.debugging.assert_positive(tenor.quantity())\n ]\n\n with tf.compat.v1.control_dependencies(control_deps):\n # Reshape the input Tensors.\n if backward:\n start_date = start_date.broadcast_to(end_date.shape)\n tenor = tenor.broadcast_to(end_date.shape)\n else:\n end_date = end_date.broadcast_to(start_date.shape)\n tenor = tenor.broadcast_to(start_date.shape)\n start_date = start_date.expand_dims(axis=-1)\n end_date = end_date.expand_dims(axis=-1)\n tenor = tenor.expand_dims(axis=-1)\n\n # Figure out the upper bound of the schedule length.\n min_days_in_period = _MIN_DAYS_IN_PERIOD[tenor.period_type()]\n days_between = end_date.ordinal() - start_date.ordinal() + 1\n schedule_len_upper_bound = tf.cast(\n tf.math.ceil(tf.math.reduce_max(\n days_between / (tenor.quantity() * min_days_in_period))),\n dtype=tf.int32)\n\n # Add the periods.\n if backward:\n # Subtract tenor * n, where n = n_max, ..., 2, 1, 0.\n tenors_expanded = tenor * tf.range(schedule_len_upper_bound - 1, -1, -1,\n dtype=tf.int32)\n schedules = end_date - tenors_expanded\n # Prepend start_date to ensure we always include it.\n schedules = date_tensor.DateTensor.concat((start_date, schedules),\n axis=-1)\n in_bounds = schedules.ordinal() >= start_date.ordinal()\n\n # Pad with start_date.\n schedules = date_tensor.DateTensor.where(in_bounds, schedules, start_date)\n\n # Find how much we overestimated max schedule length and trim the extras.\n not_start_date = tf.math.not_equal(schedules.ordinal(),\n start_date.ordinal())\n max_schedule_len_error = (\n tf.math.reduce_min(tf.where(not_start_date)[..., -1]) - 1)\n schedules = schedules[..., max_schedule_len_error:]\n else:\n # Add tenor * n, where n = 0, 1, 2, ..., n_max.\n tenors_expanded = tenor * tf.range(schedule_len_upper_bound,\n dtype=tf.int32)\n schedules = start_date + tenors_expanded\n # Append end_date to ensure we always include it.\n schedules = date_tensor.DateTensor.concat((schedules, end_date), axis=-1)\n\n in_bounds = schedules.ordinal() <= end_date.ordinal()\n\n # Pad with end_date.\n schedules = date_tensor.DateTensor.where(in_bounds, schedules, end_date)\n\n # Find the actual schedule length and trim the extras.\n not_end_date = tf.math.not_equal(schedules.ordinal(), end_date.ordinal())\n max_schedule_len = tf.math.reduce_max(tf.where(not_end_date)[..., -1]) + 2\n schedules = schedules[..., :max_schedule_len]\n\n # Move to the end of month where necessary.\n if end_of_month:\n where_cond = (end_date if backward else start_date).is_end_of_month()\n schedules = 
date_tensor.DateTensor.where(where_cond,\n schedules.to_end_of_month(),\n schedules)\n\n # Roll to business days.\n if holiday_calendar is not None:\n schedules = holiday_calendar.roll_to_business_day(schedules,\n roll_convention)\n\n return schedules\n\n\ndef _gen_business_days(start_date, end_date, holiday_calendar, backward=False):\n \"\"\"Generates business days between given dates, see BusinessDaySchedule.\"\"\"\n # Handle the case when start_date or end_date fall on holidays.\n start_date = holiday_calendar.roll_to_business_day(\n start_date, roll_convention=constants.BusinessDayConvention.FOLLOWING)\n end_date = holiday_calendar.roll_to_business_day(\n end_date, roll_convention=constants.BusinessDayConvention.PRECEDING)\n\n # Validate inputs.\n control_deps = [\n tf.debugging.assert_greater_equal(end_date.ordinal(),\n start_date.ordinal()),\n ]\n with tf.compat.v1.control_dependencies(control_deps):\n # Reshape the input Tensors.\n if backward:\n start_date = start_date.broadcast_to(end_date.shape)\n else:\n end_date = end_date.broadcast_to(start_date.shape)\n start_date = start_date.expand_dims(axis=-1)\n end_date = end_date.expand_dims(axis=-1)\n\n # Find the longest schedule in the batch.\n max_len = tf.math.abs(tf.math.reduce_max(\n holiday_calendar.business_days_between(start_date, end_date))) + 1\n\n if backward:\n # Subtract n days, where n = max_len-1, ..., 2, 1, 0.\n days = tf.range(-max_len + 1, 1, dtype=tf.int32)\n schedules = holiday_calendar.add_business_days(end_date, days)\n in_bounds = schedules.ordinal() >= start_date.ordinal()\n # Pad with start_date.\n schedules = date_tensor.DateTensor.where(in_bounds, schedules, start_date)\n else:\n # Add n days, where n = 0, 1, 2, ..., max_len-1.\n days = tf.range(max_len, dtype=tf.int32)\n schedules = holiday_calendar.add_business_days(start_date, days)\n in_bounds = schedules.ordinal() <= end_date.ordinal()\n # Pad with end_date.\n schedules = date_tensor.DateTensor.where(in_bounds, schedules, end_date)\n\n return schedules\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"HolidayCalendar definition.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance.experimental.dates import constants\nfrom tf_quant_finance.experimental.dates import date_tensor as dt\nfrom tf_quant_finance.experimental.dates import date_utils as du\nfrom tf_quant_finance.experimental.dates import holiday_utils as hol\nfrom tf_quant_finance.experimental.dates import periods\n\n\nclass HolidayCalendar(object):\n \"\"\"Represents a holiday calendar.\n\n Differs from dates.HolidayCalendar in implementation. This implementation\n supports weekends and holiday supplied as `Tensor`s. However, it is\n (potentially significantly) slower than the dates.HolidayCalendar\n implementation.\n\n Provides methods for manipulating the dates taking into account the holidays,\n and the business day roll conventions. Weekends are treated as holidays.\n \"\"\"\n\n def __init__(self, weekend_mask=None, holidays=None):\n \"\"\"Initializer.\n\n Args:\n weekend_mask: Boolean `Tensor` of 7 elements one for each day of the week\n starting with Monday at index 0. A `True` value indicates the day is\n considered a weekend day and a `False` value implies a week day.\n Default value: None which means no weekends are applied.\n holidays: Defines the holidays that are added to the weekends defined by\n `weekend_mask`. An instance of `dates.DateTensor` or an object\n convertible to `DateTensor`.\n Default value: None which means no holidays other than those implied by\n the weekends (if any).\n \"\"\"\n if weekend_mask is not None:\n weekend_mask = tf.cast(weekend_mask, dtype=tf.bool)\n if holidays is not None:\n holidays = dt.convert_to_date_tensor(holidays).ordinal()\n self._to_biz_space, self._from_biz_space = hol.business_day_mappers(\n weekend_mask=weekend_mask, holidays=holidays)\n\n def is_business_day(self, date_tensor):\n \"\"\"Returns a tensor of bools for whether given dates are business days.\"\"\"\n ordinals = dt.convert_to_date_tensor(date_tensor).ordinal()\n return self._to_biz_space(ordinals)[1]\n\n def roll_to_business_day(self, date_tensor, roll_convention):\n \"\"\"Rolls the given dates to business dates according to given convention.\n\n Args:\n date_tensor: `DateTensor` of dates to roll from.\n roll_convention: BusinessDayConvention. 
Determines how to roll a date that\n falls on a holiday.\n\n Returns:\n The resulting `DateTensor`.\n \"\"\"\n if roll_convention == constants.BusinessDayConvention.NONE:\n return date_tensor\n ordinals = dt.convert_to_date_tensor(date_tensor).ordinal()\n biz_days, is_bizday = self._to_biz_space(ordinals)\n biz_days_rolled = self._apply_roll_biz_space(date_tensor, biz_days,\n is_bizday, roll_convention)\n return dt.from_ordinals(self._from_biz_space(biz_days_rolled))\n\n def _apply_roll_biz_space(self, date_tensor, biz_days, is_bizday,\n roll_convention):\n \"\"\"Applies roll in business day space.\"\"\"\n if roll_convention == constants.BusinessDayConvention.NONE:\n # If no business convention is specified, return the current business\n # day.\n return biz_days\n\n if roll_convention == constants.BusinessDayConvention.FOLLOWING:\n return tf.where(is_bizday, biz_days, biz_days + 1)\n\n if roll_convention == constants.BusinessDayConvention.PRECEDING:\n return biz_days\n\n if roll_convention == constants.BusinessDayConvention.MODIFIED_FOLLOWING:\n maybe_prev_biz_day = biz_days\n maybe_next_biz_day = tf.where(is_bizday, biz_days, biz_days + 1)\n maybe_next_biz_ordinal = self._from_biz_space(maybe_next_biz_day)\n take_previous = tf.not_equal(\n _get_month(maybe_next_biz_ordinal), date_tensor.month())\n return tf.where(take_previous, maybe_prev_biz_day, maybe_next_biz_day)\n\n if roll_convention == constants.BusinessDayConvention.MODIFIED_PRECEDING:\n maybe_prev_biz_day = biz_days\n maybe_next_biz_day = tf.where(is_bizday, biz_days, biz_days + 1)\n maybe_prev_biz_ordinal = self._from_biz_space(maybe_prev_biz_day)\n take_next = tf.not_equal(\n _get_month(maybe_prev_biz_ordinal), date_tensor.month())\n return tf.where(take_next, maybe_next_biz_day, maybe_prev_biz_day)\n\n raise ValueError('Unsupported roll convention: {}'.format(roll_convention))\n\n def add_period_and_roll(self,\n date_tensor,\n period_tensor,\n roll_convention=constants.BusinessDayConvention.NONE):\n \"\"\"Adds given periods to given dates and rolls to business days.\n\n The original dates are not rolled prior to addition.\n\n Args:\n date_tensor: `DateTensor` of dates to add to.\n period_tensor: PeriodTensor broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. Determines how to roll a date that\n falls on a holiday.\n\n Returns:\n The resulting `DateTensor`.\n \"\"\"\n return self.roll_to_business_day(date_tensor + period_tensor,\n roll_convention)\n\n def add_business_days(self,\n date_tensor,\n num_days,\n roll_convention=constants.BusinessDayConvention.NONE):\n \"\"\"Adds given number of business days to given dates.\n\n Note that this is different from calling `add_period_and_roll` with\n PeriodType.DAY. For example, adding 5 business days to Monday gives the next\n Monday (unless there are holidays on this week or next Monday). Adding 5\n days and rolling means landing on Saturday and then rolling either to next\n Monday or to Friday of the same week, depending on the roll convention.\n\n If any of the dates in `date_tensor` are not business days, they will be\n rolled to business days before doing the addition. If `roll_convention` is\n `NONE`, and any dates are not business days, an exception is raised.\n\n Args:\n date_tensor: `DateTensor` of dates to advance from.\n num_days: Tensor of int32 type broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. 
Determines how to roll a date that\n falls on a holiday.\n\n Returns:\n The resulting `DateTensor`.\n \"\"\"\n control_deps = []\n biz_days, is_bizday = self._to_biz_space(\n dt.convert_to_date_tensor(date_tensor).ordinal())\n if roll_convention == constants.BusinessDayConvention.NONE:\n control_deps.append(\n tf.debugging.assert_equal(\n is_bizday,\n True,\n message='Non business starting day with no roll convention.'))\n\n with tf.compat.v1.control_dependencies(control_deps):\n biz_days_rolled = self._apply_roll_biz_space(date_tensor, biz_days,\n is_bizday, roll_convention)\n return dt.from_ordinals(\n self._from_biz_space(biz_days_rolled + num_days))\n\n def subtract_period_and_roll(\n self,\n date_tensor,\n period_tensor,\n roll_convention=constants.BusinessDayConvention.NONE):\n \"\"\"Subtracts given periods from given dates and rolls to business days.\n\n The original dates are not rolled prior to subtraction.\n\n Args:\n date_tensor: `DateTensor` of dates to subtract from.\n period_tensor: PeriodTensor broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. Determines how to roll a date that\n falls on a holiday.\n\n Returns:\n The resulting `DateTensor`.\n \"\"\"\n minus_period_tensor = periods.PeriodTensor(-period_tensor.quantity(),\n period_tensor.period_type())\n return self.add_period_and_roll(date_tensor, minus_period_tensor,\n roll_convention)\n\n def subtract_business_days(\n self,\n date_tensor,\n num_days,\n roll_convention=constants.BusinessDayConvention.NONE):\n \"\"\"Subtracts given number of business days from given dates.\n\n Note that this is different from calling `subtract_period_and_roll` with\n PeriodType.DAY. For example, subtracting 5 business days from Friday gives\n the previous Friday (unless there are holidays on this week or previous\n Friday). Subtracting 5 days and rolling means landing on Sunday and then\n rolling either to Monday or to Friday, depending on the roll convention.\n\n If any of the dates in `date_tensor` are not business days, they will be\n rolled to business days before doing the subtraction. If `roll_convention`\n is `NONE`, and any dates are not business days, an exception is raised.\n\n Args:\n date_tensor: `DateTensor` of dates to advance from.\n num_days: Tensor of int32 type broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. Determines how to roll a date that\n falls on a holiday.\n\n Returns:\n The resulting `DateTensor`.\n \"\"\"\n return self.add_business_days(date_tensor, -num_days, roll_convention)\n\n def business_days_in_period(self, date_tensor, period_tensor):\n \"\"\"Calculates number of business days in a period.\n\n Includes the dates in `date_tensor`, but excludes final dates resulting from\n addition of `period_tensor`.\n\n Args:\n date_tensor: `DateTensor` of starting dates.\n period_tensor: PeriodTensor, should be broadcastable to `date_tensor`.\n\n Returns:\n An int32 Tensor with the number of business days in given periods that\n start at given dates.\n\n \"\"\"\n return self.business_days_between(date_tensor, date_tensor + period_tensor)\n\n def business_days_between(self, from_dates, to_dates):\n \"\"\"Calculates number of business days between pairs of dates.\n\n For each pair, the initial date is included in the difference, and the final\n date is excluded. 
If the final date is the same or earlier than the initial\n date, zero is returned.\n\n Args:\n from_dates: `DateTensor` of initial dates.\n to_dates: `DateTensor` of final dates, should be broadcastable to\n `from_dates`.\n\n Returns:\n An int32 Tensor with the number of business days between the\n corresponding pairs of dates.\n \"\"\"\n from_biz, from_is_bizday = self._to_biz_space(\n dt.convert_to_date_tensor(from_dates).ordinal())\n to_biz, to_is_bizday = self._to_biz_space(\n dt.convert_to_date_tensor(to_dates).ordinal())\n from_biz = tf.where(from_is_bizday, from_biz, from_biz + 1)\n to_biz = tf.where(to_is_bizday, to_biz, to_biz + 1)\n return tf.math.maximum(to_biz - from_biz, 0)\n\n\ndef _get_month(ordinals):\n return du.ordinal_to_year_month_day(ordinals)[1]\n",
"# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the `utils` module.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\nfrom tf_quant_finance.models import utils\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass UtilsTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('SinglePrecision', np.float32),\n ('DoublePrecision', np.float64),\n )\n def test_sobol_numbers_generation(self, dtype):\n \"\"\"Sobol random dtype results in the correct draws.\"\"\"\n num_draws = tf.constant(2, dtype=tf.int32)\n steps_num = tf.constant(3, dtype=tf.int32)\n num_samples = tf.constant(4, dtype=tf.int32)\n random_type = tff.math.random.RandomType.SOBOL\n skip = 10\n samples = utils.generate_mc_normal_draws(\n num_normal_draws=num_draws, num_time_steps=steps_num,\n num_sample_paths=num_samples, random_type=random_type,\n dtype=dtype, skip=skip)\n expected_samples = [[[0.8871465, 0.48877636],\n [-0.8871465, -0.48877636],\n [0.48877636, 0.8871465],\n [-0.15731068, 0.15731068]],\n [[0.8871465, -1.5341204],\n [1.5341204, -0.15731068],\n [-0.15731068, 1.5341204],\n [-0.8871465, 0.48877636]],\n [[-0.15731068, 1.5341204],\n [0.15731068, -0.48877636],\n [-1.5341204, 0.8871465],\n [0.8871465, -1.5341204]]]\n self.assertAllClose(samples, expected_samples, rtol=1e-5, atol=1e-5)\n\n @parameterized.named_parameters(\n ('SinglePrecision', np.float32),\n ('DoublePrecision', np.float64),\n )\n def test_maybe_update_along_axis(self, dtype):\n \"\"\"Tests that the values are updated correctly.\"\"\"\n tensor = tf.ones([5, 4, 3, 2], dtype=dtype)\n new_tensor = tf.zeros([5, 4, 1, 2], dtype=dtype)\n @tf.function\n def maybe_update_along_axis(do_update):\n return utils.maybe_update_along_axis(\n tensor=tensor, new_tensor=new_tensor, axis=1, ind=2,\n do_update=do_update)\n updated_tensor = maybe_update_along_axis(True)\n with self.subTest(name='Shape'):\n self.assertEqual(updated_tensor.shape, tensor.shape)\n with self.subTest(name='UpdatedVals'):\n self.assertAllEqual(updated_tensor[:, 2, :, :],\n tf.zeros_like(updated_tensor[:, 2, :, :]))\n with self.subTest(name='NotUpdatedVals'):\n self.assertAllEqual(updated_tensor[:, 1, :, :],\n tf.ones_like(updated_tensor[:, 2, :, :]))\n with self.subTest(name='DoNotUpdateVals'):\n not_updated_tensor = maybe_update_along_axis(False)\n self.assertAllEqual(not_updated_tensor, tensor)\n\n def test_block_diagonal_to_dense(self):\n matrices = [[[1.0, 0.1], [0.1, 1.0]],\n [[1.0, 0.3, 0.2],\n [0.3, 1.0, 0.5],\n [0.2, 0.5, 1.0]], [[1.0]]]\n dense = utils.block_diagonal_to_dense(*matrices)\n expected_result = [[1.0, 0.1, 0.0, 0.0, 0.0, 0.0],\n [0.1, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.3, 0.2, 0.0],\n [0.0, 0.0, 0.3, 1.0, 0.5, 0.0],\n [0.0, 0.0, 0.2, 0.5, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]\n 
self.assertAllClose(dense, expected_result, rtol=1e-5, atol=1e-5)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v2.test.main",
"numpy.var",
"tensorflow.compat.v2.random.set_seed",
"tensorflow.compat.v2.ones",
"numpy.mean",
"numpy.corrcoef",
"numpy.array"
],
[
"tensorflow.compat.v2.compat.v2.name_scope",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.math.segment_prod",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.segment_sum"
],
[
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.compat.v1.control_dependencies",
"tensorflow.compat.v2.where"
],
[
"tensorflow.compat.v2.debugging.assert_equal",
"tensorflow.compat.v2.compat.v1.control_dependencies",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.maximum"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
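The HolidayCalendar code in the row above counts business days by rolling each date into a business-day index and differencing the indices, with the initial date included and the final date excluded. As a plain-Python illustration of those documented semantics (not the tf_quant_finance API; the helper names and the weekend-only calendar are assumptions), a minimal sketch:

```python
# Minimal sketch, assuming a weekend-only calendar; helper names are hypothetical.
import datetime

HOLIDAYS = frozenset()  # assumption: no holidays beyond Saturday/Sunday


def is_business_day(day: datetime.date) -> bool:
    # weekday() returns 5 for Saturday and 6 for Sunday.
    return day.weekday() < 5 and day not in HOLIDAYS


def business_days_between(from_date: datetime.date, to_date: datetime.date) -> int:
    # Initial date included, final date excluded; returns 0 when to_date is
    # not later than from_date, mirroring the docstring semantics above.
    count = 0
    day = from_date
    while day < to_date:
        if is_business_day(day):
            count += 1
        day += datetime.timedelta(days=1)
    return count


# Thu 2020-03-19 .. Wed 2020-03-25: Thu, Fri, Mon, Tue are counted -> prints 4.
print(business_days_between(datetime.date(2020, 3, 19), datetime.date(2020, 3, 25)))
```

The library code avoids this day-by-day loop: its `_to_biz_space` mapping precomputes a cumulative business-day index, so the difference becomes a lookup and subtraction that vectorizes over whole tensors of dates.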
liupeng678/AudioEmotionRcognition | [
"2cd6261a84788d9d2b08fc1204a262ad6a33a7bb"
] | [
"lstm.py"
] | [
"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport librosa\nimport librosa.display\nimport IPython\nfrom IPython.display import Audio\nfrom IPython.display import Image\nimport matplotlib.pyplot as plt\n\nEMOTIONS = {1:'neutral', 2:'calm', 3:'happy', 4:'sad', 5:'angry', 6:'fear', 7:'disgust', 0:'surprise'} # surprise je promenjen sa 8 na 0\nDATA_PATH = '/home/liupeng/Desktop/paperFour/data/'\nSAMPLE_RATE = 48000\n\ny = []\n\ndata = pd.DataFrame(columns=['Emotion', 'Emotion intensity', 'Gender','Path'])\nfor dirname, _, filenames in os.walk(DATA_PATH):\n for filename in filenames:\n file_path = os.path.join('/kaggle/input/',dirname, filename)\n identifiers = filename.split('.')[0].split('-')\n emotion = (int(identifiers[2]))\n if emotion == 8: # promeni surprise sa 8 na 0\n emotion = 0\n if int(identifiers[3]) == 1:\n emotion_intensity = 'normal' \n else:\n emotion_intensity = 'strong'\n if int(identifiers[6])%2 == 0:\n gender = 'female'\n else:\n gender = 'male'\n y.append(emotion)\n data = data.append({\"Emotion\": emotion,\n \"Emotion intensity\": emotion_intensity,\n \"Gender\": gender,\n \"Path\": file_path\n },\n ignore_index = True\n )\n# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session\n\n\n\n\ndef getMELspectrogram(audio, sample_rate):\n mel_spec = librosa.feature.melspectrogram(y=audio,\n sr=sample_rate,\n n_fft=1024,\n win_length = 512,\n window='hamming',\n hop_length = 256,\n n_mels=128,\n fmax=sample_rate/2\n )\n mel_spec_db = librosa.power_to_db(mel_spec, ref=np.max)\n return mel_spec_db\n\n# test function\naudio, sample_rate = librosa.load(data.loc[0,'Path'], duration=3, offset=0.5,sr=SAMPLE_RATE)\nsignal = np.zeros((int(SAMPLE_RATE*3,)))\nsignal[:len(audio)] = audio\nmel_spectrogram = getMELspectrogram(signal, SAMPLE_RATE)\nlibrosa.display.specshow(mel_spectrogram, y_axis='mel', x_axis='time')\nprint('MEL spectrogram shape: ',mel_spectrogram.shape)\n\n\n\nmel_spectrograms = []\nsignals = []\nfor i, file_path in enumerate(data.Path):\n audio, sample_rate = librosa.load(file_path, duration=3, offset=0.5, sr=SAMPLE_RATE)\n signal = np.zeros((int(SAMPLE_RATE*3,)))\n signal[:len(audio)] = audio\n signals.append(signal)\n mel_spectrogram = getMELspectrogram(signal, sample_rate=SAMPLE_RATE)\n mel_spectrograms.append(mel_spectrogram)\n print(\"\\r Processed {}/{} files\".format(i,len(data)),end='')\n\n\n\ndef addAWGN(signal, num_bits=16, augmented_num=1, snr_low=15, snr_high=30): \n signal_len = len(signal)\n # Generate White Gaussian noise\n noise = np.random.normal(size=(augmented_num, signal_len))\n # Normalize signal and noise\n norm_constant = 2.0**(num_bits-1)\n signal_norm = signal / norm_constant\n noise_norm = noise / norm_constant\n # Compute signal and noise power\n s_power = np.sum(signal_norm ** 2) / signal_len\n n_power = np.sum(noise_norm ** 2, axis=1) / signal_len\n # Random SNR: Uniform [15, 30] in dB\n target_snr = np.random.randint(snr_low, snr_high)\n # Compute K (covariance matrix) for each noise \n K = np.sqrt((s_power / n_power) * 10 ** (- target_snr / 10))\n K = np.ones((signal_len, augmented_num)) * K \n # 
Generate noisy signal\n return signal + K.T * noise\n\n\n\nfor i,signal in enumerate(signals):\n augmented_signals = addAWGN(signal)\n for j in range(augmented_signals.shape[0]):\n mel_spectrogram = getMELspectrogram(augmented_signals[j,:], sample_rate=SAMPLE_RATE)\n mel_spectrograms.append(mel_spectrogram)\n data = data.append(data.iloc[i], ignore_index=True)\n #print(data.iloc[i])\n # print(y[i])\n y.append(y[i])\n print(\"\\r Processed {}/{} files\".format(i,len(signals)),end='')\n\n\nX = np.stack(mel_spectrograms,axis=0)\nX = np.expand_dims(X,1)\nX = X.swapaxes(1,3)\nX = X.swapaxes(1,2)\n\n\n\nshape2 = 128\nshape1 = 563\n\nX = np.reshape(X,(X.shape[0],shape2,shape1))\n\nfrom keras.utils import np_utils, Sequence\ny = np.array(y)\ny = np.expand_dims(y,1)\ny = np_utils.to_categorical(y, 8)\nprint('Shape of data: ',X.shape)\nprint('Shape of data: ',y.shape)\n\nnp.savez(\"./data1.npz\", X=X, y=y)\n# np.save(\"filename.npy\",a)\n# b = np.load(\"filename.npy\")\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(\n X, y, test_size=0.2, random_state=2)\n\n\n\n\n\n\n\n#print(Y_test)\n\nimport librosa\nimport librosa.display\nimport pandas as pd\nimport numpy as np\nfrom glob import glob\nimport os\nfrom tqdm import tqdm_notebook as tqdm\nimport traceback\nimport cv2\nimport sklearn\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib as plot\nimport math\nfrom imgaug import augmenters as iaa\nfrom tensorflow.python.keras.layers import *\nfrom tensorflow.python.keras import Sequential,Model\nfrom tensorflow.keras import optimizers as opts\nfrom tensorflow.python.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\nfrom tensorflow.python.keras import regularizers\nfrom keras.utils import np_utils, Sequence\nfrom sklearn.metrics import confusion_matrix\n\n\nimport keras\nfrom keras.optimizers import *\nfrom keras.losses import categorical_crossentropy\nfrom classification_models.keras import Classifiers\nfrom efficientnet.keras import EfficientNetB4\n\nfrom keras.layers import *\nfrom keras import backend as K\n\nemotion_len = 8\ndef backend_reshape(x):\n return K.reshape(x, ( 64,281, 128))\n\ninputs = Input(shape= [shape2, shape1])\n\nx = Conv1D(32, 4, padding=\"same\", activation='relu')(inputs)\nx = Conv1D(64, 4, padding=\"same\", activation='relu')(x)\nx = Conv1D(128, 2, padding=\"same\", activation='relu')(x)\nx = MaxPooling1D(pool_size= (3))(x)\n\n# print(x.shape)\n# x = Flatten()(x)\n# x = Reshape((-1,-1))(x)\nx = LSTM(64,dropout=0.2,recurrent_dropout=0.2,return_sequences=False)(x)\n\nx = Dense(128,activation='relu')(x)\nx = Dense(emotion_len,activation='softmax')(x)\n\n\n\nmodel = Model(inputs= inputs, outputs= x)\n\n\n#model.summary()\nmodel.compile(loss=categorical_crossentropy,\n optimizer=RMSprop(), metrics=['accuracy'])\n\nhistory = model.fit(X_train, Y_train, batch_size=32, epochs=50,\n validation_data=(X_test, Y_test), verbose=1, shuffle=True)\n\n\nloss, acc = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test loss:', loss)\nprint('Test accuracy:', acc)"
] | [
[
"numpy.expand_dims",
"numpy.savez",
"numpy.sqrt",
"numpy.reshape",
"tensorflow.python.keras.Model",
"sklearn.model_selection.train_test_split",
"numpy.stack",
"pandas.DataFrame",
"numpy.ones",
"numpy.random.normal",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"2.5",
"2.8",
"2.10"
]
}
] |
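The `addAWGN` helper in the lstm.py row above scales unit-variance Gaussian noise so that the signal-to-noise ratio matches a randomly drawn target in dB; dividing both signal and noise by `2.0**(num_bits-1)` cancels in the power ratio, so only the relative powers matter. A small self-contained sketch of that SNR arithmetic (the synthetic tone and variable names are assumptions for illustration, not taken from the repository):

```python
# Sketch of the SNR scaling used above: K = sqrt((P_signal / P_noise) * 10**(-SNR/10)).
import numpy as np

rng = np.random.default_rng(0)
sample_rate = 48000
signal = np.sin(2 * np.pi * 440 * np.arange(sample_rate) / sample_rate)  # 1 s, 440 Hz tone
noise = rng.normal(size=signal.shape)

target_snr_db = 20.0
s_power = np.mean(signal ** 2)
n_power = np.mean(noise ** 2)
k = np.sqrt((s_power / n_power) * 10 ** (-target_snr_db / 10))  # noise gain
noisy = signal + k * noise

# The achieved SNR equals the target by construction.
achieved_snr_db = 10 * np.log10(s_power / np.mean((k * noise) ** 2))
print(round(achieved_snr_db, 2))  # 20.0
```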
santhoshkumarvs/tensorflow | [
"5581b91ada226f1ec20f55cd6423853072b2813c",
"5581b91ada226f1ec20f55cd6423853072b2813c"
] | [
"tensorflow/python/training/tracking/data_structures_test.py",
"tensorflow/python/eager/function_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\n\nimport numpy\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.keras.layers import normalization\nfrom tensorflow.python.layers import core as non_keras_core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util\n\n\nclass HasList(training.Model):\n\n def __init__(self):\n super(HasList, self).__init__()\n self.layer_list = data_structures.List([core.Dense(3)])\n self.layer_list.append(core.Dense(4))\n self.layer_list.extend(\n [core.Dense(5),\n core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])\n self.layer_list += [\n core.Dense(7, bias_regularizer=math_ops.reduce_sum),\n core.Dense(8)\n ]\n self.layer_list += (\n data_structures.List([core.Dense(9)]) + data_structures.List(\n [core.Dense(10)]))\n self.layer_list.extend(\n data_structures.List(\n list([core.Dense(11)]) + [core.Dense(12)]))\n self.layers_with_updates = data_structures.List(\n (normalization.BatchNormalization(),))\n\n def call(self, x):\n aggregation = 0.\n for l in self.layer_list:\n x = l(x)\n aggregation += math_ops.reduce_sum(x)\n bn, = self.layers_with_updates\n return bn(x) / aggregation\n\n\nclass ListTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_v1_only(\"b/120545219\")\n def testTracking(self):\n model = HasList()\n output = model(array_ops.ones([32, 2]))\n self.assertAllEqual([32, 12], output.shape)\n self.assertEqual(11, len(model.layers))\n self.assertEqual(10, len(model.layer_list.layers))\n six.assertCountEqual(\n self,\n model.layers,\n model.layer_list.layers + model.layers_with_updates)\n for index in range(10):\n self.assertEqual(3 + index, model.layer_list.layers[index].units)\n self.assertEqual(2, len(model._checkpoint_dependencies))\n self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)\n self.assertIs(model.layers_with_updates,\n model._checkpoint_dependencies[1].ref)\n self.assertEqual(\n 10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))\n self.evaluate([v.initializer for v in model.variables])\n self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))\n save_path = 
os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))\n model.load_weights(save_path)\n self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],\n self.evaluate(model.variables[0]))\n v = variables.Variable(1.)\n model.var_list = [v]\n self.assertIn(v, model.variables)\n self.assertIn(v, model.trainable_variables)\n self.assertNotIn(v, model.non_trainable_variables)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testUpdatesForwarded(self):\n with context.graph_mode():\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertGreater(len(model.layers_with_updates[0].updates), 0)\n self.assertEqual(set(model.layers_with_updates[0].updates),\n set(model.updates))\n\n with context.eager_mode():\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertEqual(0, len(model.updates))\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_v1_only(\"b/120545219\")\n def testLossesForwarded(self):\n model = HasList()\n model_input = array_ops.ones([32, 2])\n model(model_input)\n self.assertEqual(2, len(model.losses))\n\n def testModelContainersCompareEqual(self):\n class HasEqualContainers(training.Model):\n\n def __init__(self):\n super(HasEqualContainers, self).__init__()\n self.l1 = []\n self.l2 = []\n\n model = HasEqualContainers()\n first_layer = HasEqualContainers()\n model.l1.append(first_layer)\n second_layer = HasEqualContainers()\n model.l2.append(second_layer)\n self.assertEqual([first_layer, second_layer], model.layers)\n\n def testNotTrackable(self):\n class NotTrackable(object):\n pass\n\n with self.assertRaises(ValueError):\n data_structures.List([NotTrackable()])\n\n def testCallNotImplemented(self):\n with self.assertRaisesRegexp(TypeError, \"not callable\"):\n data_structures.List()(1.)\n\n def testNoPop(self):\n with self.assertRaises(AttributeError):\n data_structures.List().pop()\n\n @test_util.run_in_graph_and_eager_modes\n def testTensorConversion(self):\n\n class ListToTensor(training.Model):\n\n def __init__(self):\n super(ListToTensor, self).__init__()\n self.l = [1., 2., 3.]\n\n self.assertAllEqual(\n [1., 2., 3.],\n self.evaluate(constant_op.constant(ListToTensor().l)))\n\n self.assertAllEqual(\n [1., 2., 3.],\n self.evaluate(array_ops.pack(ListToTensor().l)))\n\n def testNesting(self):\n with context.graph_mode():\n inner = data_structures.List()\n outer = data_structures.List([inner])\n inner.append(non_keras_core.Dense(1))\n inner[0](array_ops.ones([2, 3]))\n self.assertEqual(2, len(outer.variables))\n self.assertIsInstance(\n outer.variables[0],\n resource_variable_ops.ResourceVariable)\n\n def testNonLayerVariables(self):\n v = resource_variable_ops.ResourceVariable([1.])\n l = data_structures.List([v])\n self.assertTrue(l.trainable)\n self.assertEqual([], l.layers)\n self.assertEqual([v], l.variables)\n self.assertEqual([v], l.trainable_weights)\n self.assertEqual([], l.non_trainable_variables)\n l.trainable = False\n self.assertEqual([v], l.variables)\n self.assertEqual([], l.trainable_variables)\n self.assertEqual([v], l.non_trainable_variables)\n l.trainable = True\n v2 = resource_variable_ops.ResourceVariable(1., trainable=False)\n l.append(v2)\n self.assertEqual([v, v2], l.weights)\n self.assertEqual([v], l.trainable_weights)\n self.assertEqual([v2], l.non_trainable_weights)\n\n def testCopy(self):\n v1 = resource_variable_ops.ResourceVariable(1.)\n v2 = resource_variable_ops.ResourceVariable(1.)\n v3 
= resource_variable_ops.ResourceVariable(1.)\n\n l1 = data_structures.List([v1, v2])\n l2 = l1.copy()\n l2.append(v3)\n self.assertEqual(list(l1), [v1, v2])\n self.assertEqual(list(l2), [v1, v2, v3])\n\n def testSlicing(self):\n v1 = resource_variable_ops.ResourceVariable(1.)\n v2 = resource_variable_ops.ResourceVariable(1.)\n v3 = resource_variable_ops.ResourceVariable(1.)\n v4 = resource_variable_ops.ResourceVariable(1.)\n\n l = data_structures.List([v1, v2, v3, v4])\n self.assertEqual(l[1:], [v2, v3, v4])\n self.assertEqual(l[1:-1], [v2, v3])\n self.assertEqual(l[:-1], [v1, v2, v3])\n\n def testHash(self):\n has_sequences = set([data_structures.List(),\n data_structures.List()])\n self.assertEqual(2, len(has_sequences))\n self.assertNotIn(data_structures.List(), has_sequences)\n\n def testIMul_zero(self):\n l = data_structures.List([])\n with self.assertRaisesRegexp(ValueError, \"List only supports append\"):\n l *= 0\n\n def testIMul(self):\n v = resource_variable_ops.ResourceVariable(1.)\n l = data_structures.List([v])\n l *= 2\n self.assertEqual(list(l), [v] * 2)\n\n def testMul(self):\n v = resource_variable_ops.ResourceVariable(1.)\n l = data_structures.List([v, v, v])\n self.assertEqual(list(l * 2), [v, v, v] * 2)\n\n def testRMul(self):\n v = resource_variable_ops.ResourceVariable(1.)\n l = data_structures.List([v, v, v])\n self.assertEqual(list(2 * l), [v, v, v] * 2)\n\n\nclass ListWrapperTest(test.TestCase):\n\n IGNORED = (\"__new__\", \"__init__\", \"__subclasshook__\", \"__getattribute__\")\n\n def test_overrides_all_list_methods(self):\n not_overridden = []\n\n for name in dir(list):\n if name in ListWrapperTest.IGNORED:\n continue\n\n list_method = getattr(list, name)\n\n if not callable(list_method):\n continue\n\n object_method = getattr(object, name, None)\n if object_method is not None and object_method == list_method:\n # Skip methods that aren't overridden from object.\n continue\n\n if list_method == getattr(data_structures._ListWrapper, name):\n not_overridden.append(name)\n\n if not_overridden:\n self.fail(\"_ListWrapper does not override %s\" % (not_overridden))\n\n def testListWrapperBasic(self):\n # _ListWrapper, unlike List, compares like the built-in list type (since it\n # is used to automatically replace lists).\n a = tracking.AutoTrackable()\n b = tracking.AutoTrackable()\n self.assertEqual([a, a],\n [a, a])\n self.assertEqual(data_structures._ListWrapper([a, a]),\n data_structures._ListWrapper([a, a]))\n self.assertEqual([a, a],\n data_structures._ListWrapper([a, a]))\n self.assertEqual(data_structures._ListWrapper([a, a]),\n [a, a])\n self.assertNotEqual([a, a],\n [b, a])\n self.assertNotEqual(data_structures._ListWrapper([a, a]),\n data_structures._ListWrapper([b, a]))\n self.assertNotEqual([a, a],\n data_structures._ListWrapper([b, a]))\n self.assertLess([a], [a, b])\n self.assertLess(data_structures._ListWrapper([a]),\n data_structures._ListWrapper([a, b]))\n self.assertLessEqual([a], [a, b])\n self.assertLessEqual(data_structures._ListWrapper([a]),\n data_structures._ListWrapper([a, b]))\n self.assertGreater([a, b], [a])\n self.assertGreater(data_structures._ListWrapper([a, b]),\n data_structures._ListWrapper([a]))\n self.assertGreaterEqual([a, b], [a])\n self.assertGreaterEqual(data_structures._ListWrapper([a, b]),\n data_structures._ListWrapper([a]))\n self.assertEqual([a], data_structures._ListWrapper([a]))\n self.assertEqual([a], list(data_structures.List([a])))\n self.assertEqual([a, a], data_structures._ListWrapper([a]) + [a])\n 
self.assertEqual([a, a], [a] + data_structures._ListWrapper([a]))\n self.assertIsInstance(data_structures._ListWrapper([a]), list)\n\n def testAcceptsNonTrackableContent(self):\n l = data_structures._ListWrapper([1, 2, 3])\n self.assertEqual(l, [1, 2, 3])\n\n def testWrapperChangesList(self):\n l = []\n l_wrapper = data_structures._ListWrapper(l)\n l_wrapper.append(1)\n self.assertEqual([1], l)\n\n def testListChangesWrapper(self):\n l = []\n l_wrapper = data_structures._ListWrapper(l)\n l.append(1)\n self.assertEqual([1], l_wrapper)\n\n def testLayerCollectionWithExternalMutation(self):\n l = []\n l_wrapper = data_structures._ListWrapper(l)\n layer = core.Dense(1)\n l.append(layer)\n self.assertEqual([layer], l_wrapper.layers)\n\n def testNotHashable(self):\n with self.assertRaises(TypeError):\n hash(data_structures._ListWrapper())\n\n def testDelItem(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n del l[0]\n self.assertEqual(l, [2, 3, 4])\n self.assertUnableToSave(l, \"Unable to save .*__delitem__\")\n\n def testDelSlice(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n del l[2:3]\n self.assertEqual(l, [1, 2, 4])\n self.assertUnableToSave(l, \"Unable to save .*__delslice__\")\n\n def testSetSlice_canSaveForNonTrackableItems(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n l[:] = 2, 8, 9, 0\n self.assertEqual(l, [2, 8, 9, 0])\n l._maybe_initialize_trackable() # pylint: disable=protected-access\n self.assertEqual(len(l._checkpoint_dependencies), 0) # pylint: disable=protected-access\n\n def testSetSlice_cannotSaveIfTrackableModified(self):\n v1 = resource_variable_ops.ResourceVariable(1.)\n v2 = resource_variable_ops.ResourceVariable(1.)\n l = data_structures._ListWrapper([1, 2, v1, v2])\n l[:] = 2, 8, 9, v2\n self.assertEqual(l, [2, 8, 9, v2])\n self.assertUnableToSave(l, \"Unable to save .*__setslice__\")\n\n def testSetSlice_truncate(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n l[:] = []\n self.assertEqual(l, [])\n\n def testSetSlice_extend(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n l[2:] = 1, 2, 3, 4\n self.assertEqual(l, [1, 2, 1, 2, 3, 4])\n\n def testIMulNegative(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n l *= -1\n self.assertEqual(l, [1, 2, 3, 4] * -1)\n self.assertUnableToSave(l, \"Unable to save\")\n\n def testIMulPositive(self):\n v = variables.Variable(1.)\n l = data_structures._ListWrapper([1, 2, 3, 4, v])\n self.assertEqual([(\"4\", v)], l._checkpoint_dependencies)\n root = util.Checkpoint(l=l)\n prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n path = root.save(prefix)\n v.assign(5.)\n l *= 2\n self.assertEqual(l, [1, 2, 3, 4, v, 1, 2, 3, 4, v])\n self.assertEqual([(\"4\", v), (\"9\", v)], l._checkpoint_dependencies)\n root.restore(path)\n self.assertAllClose(1., v.numpy())\n\n def testSort(self):\n l = data_structures._ListWrapper([1, 2, 3, 4])\n l.sort()\n self.assertEqual(l, [1, 2, 3, 4])\n # Regardless of being a no-op for the input list, we still refuse to save.\n # This is intentional since otherwise we would end up with a hard to debug\n # case for users (e.g. 
sometimes sort on a ListWrapper is trackable and\n # other times it is not).\n self.assertUnableToSave(l, \"Unable to save .*sort\")\n\n def assertUnableToSave(self, l, msg):\n l._maybe_initialize_trackable() # pylint: disable=protected-access\n with self.assertRaisesRegexp(ValueError, msg):\n return l._checkpoint_dependencies # pylint: disable=protected-access\n\n\nclass HasMapping(training.Model):\n\n def __init__(self):\n super(HasMapping, self).__init__()\n self.layer_dict = data_structures.Mapping(output=core.Dense(7))\n self.layer_dict[\"norm\"] = data_structures.List()\n self.layer_dict[\"dense\"] = data_structures.List()\n self.layer_dict[\"dense\"].extend(\n [core.Dense(5),\n core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])\n self.layer_dict[\"norm\"].append(\n normalization.BatchNormalization())\n self.layer_dict[\"norm\"].append(\n normalization.BatchNormalization())\n\n def call(self, x):\n aggregation = 0.\n for norm, dense in zip(self.layer_dict[\"norm\"], self.layer_dict[\"dense\"]):\n x = norm(dense(x))\n aggregation += math_ops.reduce_sum(x)\n return self.layer_dict[\"output\"](x) / aggregation\n\n\nclass MappingTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_v1_only(\"b/120545219\")\n def testTracking(self):\n model = HasMapping()\n output = model(array_ops.ones([32, 2]))\n self.assertAllEqual([32, 7], output.shape)\n self.assertEqual(5, len(model.layers))\n six.assertCountEqual(self, model.layers, model.layer_dict.layers)\n self.assertEqual(1, len(model._checkpoint_dependencies))\n self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)\n self.evaluate([v.initializer for v in model.variables])\n test_var = model.layer_dict[\"output\"].kernel\n self.evaluate(test_var.assign(array_ops.ones([6, 7])))\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n self.evaluate(test_var.assign(array_ops.zeros([6, 7])))\n model.load_weights(save_path)\n self.assertAllEqual(numpy.ones([6, 7]),\n self.evaluate(test_var))\n\n def testNoOverwrite(self):\n mapping = data_structures.Mapping()\n original = data_structures.List()\n mapping[\"a\"] = original\n with self.assertRaises(ValueError):\n mapping[\"a\"] = data_structures.List()\n self.assertIs(original, mapping[\"a\"])\n with self.assertRaises(AttributeError):\n del mapping[\"a\"]\n mapping.update(b=data_structures.Mapping())\n with self.assertRaises(ValueError):\n mapping.update({\"b\": data_structures.Mapping()})\n\n def testNonStringKeys(self):\n mapping = data_structures.Mapping()\n with self.assertRaises(TypeError):\n mapping[1] = data_structures.List()\n\n def testLayerCollectionWithExternalMutation(self):\n d = {}\n root = tracking.AutoTrackable()\n root.wrapper = d\n self.assertEqual([], root.wrapper.layers)\n self.assertEqual([], root.wrapper.trainable_weights)\n layer1 = core.Dense(1)\n layer2 = core.Dense(1)\n d[\"a\"] = layer1\n d[\"b\"] = layer2\n self.assertEqual([layer1, layer2], root.wrapper.layers)\n # The layers have still not created variables\n self.assertEqual([], root.wrapper.trainable_weights)\n\n def testHashing(self):\n has_mappings = set([data_structures.Mapping(),\n data_structures.Mapping()])\n self.assertEqual(2, len(has_mappings))\n self.assertNotIn(data_structures.Mapping(), has_mappings)\n # In contrast to Mapping, dict wrappers are not hashable\n a = tracking.AutoTrackable()\n a.d = {}\n self.assertEqual({}, a.d)\n self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison\n 
self.assertNotEqual({1: 2}, a.d)\n with self.assertRaisesRegexp(TypeError, \"unhashable\"):\n set([a.d])\n\n def testDictWrapperBadKeys(self):\n a = tracking.AutoTrackable()\n a.d = {}\n a.d[1] = data_structures.List()\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"non-string key\"):\n model.save_weights(save_path)\n\n def testDictWrapperNoDependency(self):\n a = tracking.AutoTrackable()\n a.d = data_structures.NoDependency({})\n a.d[1] = [3]\n self.assertEqual([a], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testNonStringKeyNotTrackableValue(self):\n a = tracking.AutoTrackable()\n a.d = {}\n a.d[\"a\"] = [3]\n a.d[1] = data_structures.NoDependency([3])\n self.assertEqual([a, a.d, a.d[\"a\"]], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testNonAppendNotTrackable(self):\n # Non-append mutations (deleting or overwriting values) are OK when the\n # values aren't tracked.\n a = tracking.AutoTrackable()\n a.d = {}\n a.d[\"a\"] = [3]\n a.d[1] = 3\n a.d[1] = 2\n self.assertEqual(2, a.d[1])\n del a.d[1]\n a.d[2] = data_structures.NoDependency(tracking.AutoTrackable())\n second = tracking.AutoTrackable()\n a.d[2] = data_structures.NoDependency(second)\n self.assertIs(second, a.d[2])\n self.assertEqual([a, a.d, a.d[\"a\"]], util.list_objects(a))\n model = training.Model()\n model.sub = a\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n model.save_weights(save_path)\n model.load_weights(save_path)\n\n def testDelNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = []\n del model.d[\"a\"]\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testPopNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = []\n model.d.pop(\"a\")\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testExternalModificationNoSave(self):\n model = training.Model()\n external_reference = {}\n model.d = external_reference\n external_reference[\"a\"] = []\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"modified outside the wrapper\"):\n model.save_weights(save_path)\n\n def testOverwriteNoSave(self):\n model = training.Model()\n model.d = {}\n model.d[\"a\"] = {}\n model.d[\"a\"] = {}\n save_path = os.path.join(self.get_temp_dir(), \"ckpt\")\n with self.assertRaisesRegexp(ValueError, \"overwritten or deleted\"):\n model.save_weights(save_path)\n\n def testIter(self):\n model = training.Model()\n model.d = {1: 3}\n model.d[1] = 3\n self.assertEqual([1], list(model.d))\n new_dict = {}\n # This update() is super tricky. If the dict wrapper subclasses dict,\n # CPython will access its storage directly instead of calling any\n # methods/properties on the object. 
So the options are either not to\n # subclass dict (in which case update will call normal iter methods, but the\n # object won't pass isinstance checks) or to subclass dict and keep that\n # storage updated (no shadowing all its methods like _ListWrapper).\n new_dict.update(model.d)\n self.assertEqual({1: 3}, new_dict)\n\n def testListShallowCopy(self):\n root = tracking.AutoTrackable()\n orig_list = [[1.]]\n root.a = orig_list\n copied = copy.copy(root.a)\n self.assertAllEqual([[1.]], copied)\n self.assertIsNot(root.a, copied)\n self.assertIs(root.a[0], copied[0])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_list.append(1.)\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.copy(root.a))\n\n def testListDeepCopy(self):\n root = tracking.AutoTrackable()\n orig_list = [[1.]]\n root.a = orig_list\n copied = copy.deepcopy(root.a)\n self.assertAllEqual([[1.]], copied)\n self.assertIsNot(root.a, copied)\n self.assertIsNot(root.a[0], copied[0])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_list.append(1.)\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.deepcopy(root.a))\n\n def testDictShallowCopy(self):\n root = tracking.AutoTrackable()\n orig_dict = {\"a\": [1.]}\n root.a = orig_dict\n copied = copy.copy(root.a)\n self.assertAllEqual([1.], copied[\"a\"])\n self.assertIsNot(root.a, copied)\n self.assertIs(root.a[\"a\"], copied[\"a\"])\n\n copied = root.a.copy()\n self.assertAllEqual([1.], copied[\"a\"])\n self.assertIsNot(root.a, copied)\n self.assertIs(root.a[\"a\"], copied[\"a\"])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_dict[\"b\"] = []\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.copy(root.a))\n\n def testDictDeepCopy(self):\n root = tracking.AutoTrackable()\n orig_dict = {\"a\": [1.]}\n root.a = orig_dict\n copied = copy.deepcopy(root.a)\n self.assertAllEqual([1.], copied[\"a\"])\n self.assertIsNot(root.a, copied)\n self.assertIsNot(root.a[\"a\"], copied[\"a\"])\n\n # Dirtiness should be inherited\n util.list_objects(root.a)\n orig_dict[\"b\"] = []\n with self.assertRaises(ValueError):\n util.list_objects(root.a)\n with self.assertRaises(ValueError):\n util.list_objects(copy.deepcopy(root.a))\n\n def testShallowCopyTrackable(self):\n original = tracking.AutoTrackable()\n original_sub = tracking.AutoTrackable()\n original.a = [[1.]]\n original.b = {\"a\": original_sub}\n shallow_copied = copy.copy(original)\n self.assertIs(original_sub, shallow_copied.b[\"a\"])\n self.assertIsNot(original, shallow_copied)\n self.assertEqual([[1.]], shallow_copied.a)\n shallow_deps = util.list_objects(shallow_copied)\n self.assertIn(shallow_copied.a, shallow_deps)\n self.assertIn(shallow_copied.b, shallow_deps)\n self.assertIn(shallow_copied.b[\"a\"], shallow_deps)\n\n def testDeepCopyTrackable(self):\n original = tracking.AutoTrackable()\n original_sub = tracking.AutoTrackable()\n original.a = [[1.]]\n original.b = {\"a\": original_sub}\n deep_copied = copy.deepcopy(original)\n self.assertIsNot(original, deep_copied)\n self.assertIsNot(original_sub, deep_copied.b[\"a\"])\n self.assertEqual([[1.]], deep_copied.a)\n self.assertIsInstance(deep_copied.b[\"a\"], tracking.AutoTrackable)\n deps = util.list_objects(deep_copied)\n self.assertIn(deep_copied.a, deps)\n self.assertIn(deep_copied.b, 
deps)\n self.assertIn(deep_copied.b[\"a\"], deps)\n self.assertNotIn(original_sub, deps)\n\n def testConstructableFromSequence(self):\n result = data_structures._DictWrapper([(1, 2), (3, 4)])\n self.assertIsInstance(result, dict)\n self.assertEqual({1: 2, 3: 4}, result)\n\n def testListAddOrder(self):\n self.assertEqual([1., 2.],\n data_structures._ListWrapper([1.])\n + data_structures._ListWrapper([2.]))\n self.assertEqual([1., 2.],\n data_structures._ListWrapper([1.])\n + [2.])\n self.assertEqual([1., 2.],\n [1.]\n + data_structures._ListWrapper([2.]))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport itertools\nfrom multiprocessing.pool import ThreadPool\nimport sys\nimport weakref\n\nfrom absl.testing import parameterized\nimport numpy\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import function as tf_function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.engine import training as keras_training\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.keras.optimizer_v2 import adam\nfrom tensorflow.python.layers import convolutional\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.ops import gen_random_ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import list_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import training_ops\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\n\n\ndef total_function_cache(defined):\n # pylint: disable=protected-access\n return (set(defined._function_cache.primary)\n | set(defined._function_cache.arg_relaxed))\n # pylint: enable=protected-access\n\n\nclass MiniModel(keras_training.Model):\n \"\"\"Minimal model for mnist.\n\n Useful for testing and debugging on slow TPU simulators.\n \"\"\"\n\n def __init__(self):\n super(MiniModel, self).__init__(name='')\n self.fc = keras.layers.Dense(1, name='fc', 
kernel_initializer='ones',\n bias_initializer='ones')\n\n def call(self, inputs, training=True):\n return self.fc(inputs)\n\n\nclass DefunnedMiniModel(MiniModel):\n\n @function.defun\n def call(self, inputs, training=True):\n return super(DefunnedMiniModel, self).call(inputs, training=training)\n\n\nclass FunctionTest(test.TestCase, parameterized.TestCase):\n\n def testBasic(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n sq = matmul(t, t, transpose_a=True)\n sq2 = matmul(sq, t, transpose_a=True)\n self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])\n self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])\n\n def testVariable(self):\n v1 = variables.Variable(1.0)\n add = def_function.function(lambda x, v: x + v1 + v)\n v2 = variables.Variable(1.0)\n x = constant_op.constant(1.0)\n r = add(x, v2)\n self.assertEqual(3.0, self.evaluate(r))\n\n def testExternalControlDependency(self):\n with ops.Graph().as_default(), self.test_session():\n v = variables.Variable(1.0)\n v.initializer.run()\n\n op = v.assign_add(1.0)\n\n @function.defun\n def f():\n with ops.control_dependencies([op]):\n return 1.0\n\n self.evaluate(f())\n self.assertAllEqual(self.evaluate(v), 2.0)\n\n def testInputShapeFunctionRelaxation(self):\n unknown_dim = [False]\n\n @function.defun\n def func(a):\n if a._shape_tuple()[0] is None:\n unknown_dim[0] = True\n return a + 1\n\n func(constant_op.constant([]))\n self.assertFalse(unknown_dim[0])\n self.assertLen(total_function_cache(func), 1)\n\n func(constant_op.constant([1.0]))\n self.assertFalse(unknown_dim[0])\n self.assertLen(total_function_cache(func), 2)\n\n func(constant_op.constant([1.0, 2.0]))\n self.assertTrue(unknown_dim[0])\n self.assertLen(total_function_cache(func), 2)\n\n def testCaptureNonTrainableVariable(self):\n\n v = variables.Variable(1.0, trainable=False)\n\n @def_function.function\n def f():\n return v + 1\n\n c = f.get_concrete_function()\n self.assertEqual(len(list(c.graph.variables)), 1) # pylint: disable=g-generic-assert\n\n def testNestedInputShapeFunctionRelaxation(self):\n unknown_dim = [False]\n\n @function.defun\n def func(a_, b_=None):\n del a_ # Only used to check which cache is used.\n self.assertEqual(b_[0]._shape_tuple(), ())\n if b_[1]._shape_tuple()[0] is None:\n unknown_dim[0] = True\n return b_[0] + 1\n\n a = 'hi'\n b0 = constant_op.constant(1.0)\n func(a, b_=[b0, constant_op.constant([])])\n self.assertFalse(unknown_dim[0])\n self.assertLen(total_function_cache(func), 1)\n\n func(a, b_=[b0, constant_op.constant([1.0])])\n self.assertFalse(unknown_dim[0])\n self.assertLen(total_function_cache(func), 2)\n\n func(a, b_=[b0, constant_op.constant([1.0, 1.0])])\n self.assertTrue(unknown_dim[0])\n self.assertLen(total_function_cache(func), 2)\n\n unknown_dim[0] = False\n\n # Now do the same except with a new a which is not a tensor; this should\n # change the cache key.\n a = 'bye'\n func(a, b_=[b0, constant_op.constant([])])\n self.assertFalse(unknown_dim[0])\n self.assertLen(total_function_cache(func), 3)\n\n # Since we already marked a cache miss for a function with the same\n # non-input signatures, here we will immediately start relaxing shapes.\n func(a, b_=[b0, constant_op.constant([1.0])])\n self.assertTrue(unknown_dim[0])\n self.assertLen(total_function_cache(func), 3)\n\n def testFunctionRelaxationLosesInnerDimWithKerasLayer(self):\n layer = keras.layers.Dense(1)\n fn = 
def_function.function()(layer)\n\n with self.captureWritesToStream(sys.stderr) as printed:\n fn(array_ops.ones((3, 2)))\n self.assertNotIn('ValueError', printed.contents())\n with self.captureWritesToStream(sys.stderr) as printed:\n # Use batch size 2 to trigger a second cache miss on the shape.\n fn(array_ops.ones((2, 2)))\n self.assertNotIn('ValueError', printed.contents())\n\n # Shape relaxation passes TensorShape([None, None]), which causes layer\n # matmul to fail, due to incompatible dims. What would have been a graph\n # build time error (layer would complain about the inner dim being 4).\n with self.captureWritesToStream(sys.stderr) as printed:\n with self.assertRaisesRegexp(errors.InvalidArgumentError, r'MatMul'):\n fn(array_ops.ones((3, 4)))\n\n def testNestedShapeFunctionRelaxation(self):\n\n got_shape = [None]\n\n # The inner function will go through shape relaxation because the shapes it\n # receives will be [1], [2], [3], ...\n @def_function.function\n def bar(x_shape):\n got_shape[0] = x_shape._shape_tuple()\n return x_shape\n\n # The outer function will not go through shape relaxation because the shapes\n # it receives will be [1], [[1]], [[[1]]], ...\n @def_function.function\n def foo(ones):\n return bar(array_ops.shape(ones))\n\n for rank in range(1, 6):\n x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))\n self.assertAllEqual(x_shape, [1] * rank)\n if rank < 3:\n self.assertEqual(got_shape[0], (rank,))\n else:\n self.assertEqual(got_shape[0], (None,))\n\n def testNoHash(self):\n\n @def_function.function()\n def f(_):\n return 1.0\n\n with self.assertRaisesRegexp(TypeError, 'set'):\n f(set([]))\n\n def testFuncName(self):\n\n @function.defun_with_attributes(attributes={'func_name': 'multiply'})\n def add(x, y):\n _ = x * y\n return x + y\n\n @function.defun\n def add_2(x, y):\n _ = x * y\n return x + y\n\n self.assertEqual(add._name, 'multiply')\n self.assertEqual(add_2._name, 'add_2')\n\n def testBasicGraphMode(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n @def_function.function\n def sq(a):\n return matmul(a, a)\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n out = sq(t)\n self.assertAllEqual(out, math_ops.matmul(t, t).numpy())\n\n def testNestedInputsGraphMode(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n pair = collections.namedtuple('pair', ['a', 'b'])\n\n @def_function.function\n def a_times_b(inputs):\n return matmul(inputs.a['a'], inputs.b['b'])\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n\n out = a_times_b(pair({'a': t}, {'b': t}))\n self.assertAllEqual(out, math_ops.matmul(t, t).numpy())\n\n def testNestedOutputsGraphMode(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n pair = collections.namedtuple('pair', ['a', 'b'])\n\n @def_function.function()\n def pairs_mul(pair_a, pair_b):\n return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))\n\n a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])\n b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])\n\n out = pairs_mul(pair(a, b), pair(b, a))\n expected = pair(math_ops.matmul(a, b).numpy(),\n math_ops.matmul(b, a).numpy())\n self.assertAllClose(out, expected)\n\n def testGraphEagerIsolation(self):\n\n @function.defun\n def f():\n self.v = variables.Variable(1.0)\n return self.v.read_value()\n\n self.assertAllEqual(f(), 1.0)\n\n 
with ops.Graph().as_default():\n self.assertEqual(f().shape, ())\n\n def testBasicGraphFunction(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n @def_function.function\n def sq(a):\n return matmul(a, a)\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n\n sq_op = sq.get_concrete_function(t)\n self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))\n out = sq_op(t)\n self.assertAllEqual(out, math_ops.matmul(t, t).numpy())\n\n def testInputSpecGraphFunction(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n @def_function.function\n def sq(a):\n return matmul(a, a)\n\n sq_op = sq.get_concrete_function(\n tensor_spec.TensorSpec((None, None), dtypes.float32))\n self.assertEqual([None, None], sq_op.output_shapes.as_list())\n\n t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n out1 = sq_op(t1)\n self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())\n\n t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n out2 = sq_op(t2)\n self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())\n\n def testNestedInputSpecGraphFunction(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n @def_function.function\n def sq(mats):\n ((a, b),) = mats\n return matmul(a, b)\n\n with self.assertRaisesRegexp(ValueError, \"two arguments named 'mats'\"):\n sq.get_concrete_function(\n [(tensor_spec.TensorSpec((None, None), dtypes.float32),\n tensor_spec.TensorSpec((None, None), dtypes.float32))])\n sq_op = sq.get_concrete_function(\n [(tensor_spec.TensorSpec((None, None), dtypes.float32,\n name='first_mat'),\n tensor_spec.TensorSpec((None, None), dtypes.float32,\n name='second_mat'))])\n self.assertEqual([None, None], sq_op.output_shapes.as_list())\n\n t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])\n with self.assertRaisesRegexp(\n TypeError, 'bound to Tensors within nested structures'):\n sq_op(t1, t2)\n out = sq_op(first_mat=t1, second_mat=t2)\n self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())\n\n def testExecutingStatelessDefunConcurrently(self):\n\n @def_function.function\n def stateless(x):\n return math_ops.multiply(2.0, x)\n\n pool = ThreadPool()\n inputs = [constant_op.constant(1.0 * x) for x in range(100)]\n outputs = [float(out) for out in pool.map(stateless, inputs)]\n expected = [float(2.0 * x) for x in inputs]\n self.assertSequenceEqual(outputs, expected)\n\n def testExecutingManyStatelessDefunsConcurrently(self):\n\n @def_function.function\n def stateless(x):\n del x\n return math_ops.multiply(2.0, 2.0)\n\n pool = ThreadPool()\n # `pool.map` below instantiates 100 functions, one for each object.\n outputs = [\n float(out)\n for out in pool.map(stateless, [object() for _ in range(100)])\n ]\n expected = [4.0] * 100\n self.assertSequenceEqual(outputs, expected)\n\n def testExecutingStatefulDefunConcurrently(self):\n\n v = resource_variable_ops.ResourceVariable(1.0)\n\n @def_function.function\n def stateful(x):\n v.assign(x)\n\n pool = ThreadPool()\n inputs = [constant_op.constant(0.0)] * 100\n pool.map(stateful, inputs)\n self.assertEqual(float(v.read_value()), 0.0)\n\n def testExecutingManyStatefulDefunsConcurrently(self):\n\n v = resource_variable_ops.ResourceVariable(1.0)\n\n @def_function.function\n def stateful(x):\n del x\n return v.assign(0.0)\n\n pool = ThreadPool()\n 
# `pool.map` below instantiates 100 functions, one for each object.\n pool.map(stateful, [object() for _ in range(100)])\n self.assertEqual(float(v.read_value()), 0.0)\n\n def disabled_testRandomSeed(self):\n\n @def_function.function\n def f():\n return random_ops.random_normal(())\n\n random_seed.set_random_seed(1)\n x = f()\n self.assertNotEqual(x, f())\n random_seed.set_random_seed(1)\n self.assertAllEqual(f(), x)\n\n def testNestedInputsGraphFunction(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n pair = collections.namedtuple('pair', ['a', 'b'])\n\n @def_function.function\n def a_times_b(inputs):\n return matmul(inputs.a['a'], inputs.b['b'])\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n sq_op = a_times_b.get_concrete_function(\n pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')),\n dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b'))))\n self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))\n out = sq_op(a=t, b=t)\n self.assertAllEqual(out, math_ops.matmul(t, t).numpy())\n\n def testNestedOutputGraphFunction(self):\n # TODO(b/121134877): Remove the autograph override.\n matmul = def_function.function(math_ops.matmul, autograph=False)\n\n @def_function.function\n def sq(a):\n return (matmul(a, a), {'b': constant_op.constant(1.0)})\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n\n sq_op = sq.get_concrete_function(t)\n self.assertEqual(sq_op.output_shapes,\n (tensor_shape.TensorShape([2, 2]),\n {'b': tensor_shape.TensorShape([])}))\n self.assertEqual(sq_op.output_dtypes,\n (dtypes.float32, {'b': dtypes.float32}))\n (a, b) = sq_op(t)\n self.assertAllEqual(a, math_ops.matmul(t, t).numpy())\n self.assertAllEqual(b['b'].numpy(), 1.0)\n\n def testGraphFunctionNoneOutput(self):\n @def_function.function\n def fn(unused_a, unused_b):\n return None\n\n x = constant_op.constant(1)\n fn_op = fn.get_concrete_function(x, x)\n self.assertEqual(fn_op.output_dtypes, None)\n self.assertEqual(fn_op.output_shapes, None)\n self.assertAllEqual(fn_op(x, x), None)\n\n def testDefunNumpyArraysConvertedToTensors(self):\n\n def f(x):\n self.assertIsInstance(x, ops.Tensor)\n return x\n\n x = random_ops.random_uniform([2, 2]).numpy()\n defined = function.defun(f)\n defined(x)\n self.assertLen(total_function_cache(defined), 1)\n\n x = random_ops.random_uniform([2, 2]).numpy()\n defined(x)\n # A NumPy array with different values but the same shape and dtype\n # shouldn't trigger another function definition.\n self.assertLen(total_function_cache(defined), 1)\n\n # Test that the numpy array is properly an argument to the graph function.\n self.assertEqual(1., defined(numpy.ones([])).numpy())\n self.assertEqual(0., defined(numpy.zeros([])).numpy())\n self.assertEqual(1., defined(array_ops.ones([])).numpy())\n self.assertEqual(0., defined(array_ops.zeros([])).numpy())\n\n def testDefunCapturedInt32(self):\n x = constant_op.constant(1, dtype=dtypes.int32)\n\n @def_function.function\n def add_int32s():\n return x + x\n\n self.assertEqual(2, int(add_int32s()))\n\n def testDefunReadVariable(self):\n v = resource_variable_ops.ResourceVariable(1.0)\n\n @def_function.function\n def f():\n return v.read_value()\n\n self.assertEqual(1.0, float(f()))\n\n def testDefunAssignAddVariable(self):\n v = resource_variable_ops.ResourceVariable(1.0)\n x = constant_op.constant(2.0)\n\n @def_function.function\n def test_assign_add():\n v.assign_add(x)\n return v.read_value()\n\n self.assertEqual(3.0, 
float(test_assign_add()))\n\n @test_util.run_in_graph_and_eager_modes\n def testTensorInitializationInFunctionRaisesError(self):\n error_msg = ('Tensor-typed variable initializers must either be '\n 'wrapped in an init_scope or callable.*')\n\n @def_function.function\n def tensor_init():\n with self.assertRaisesRegexp(ValueError, error_msg):\n resource_variable_ops.ResourceVariable(constant_op.constant(2.0))\n\n tensor_init()\n\n @test_util.run_in_graph_and_eager_modes\n def testCallableTensorInitializationInFunction(self):\n\n @def_function.function\n def tensor_init():\n self.v = resource_variable_ops.ResourceVariable(\n lambda: constant_op.constant(2.0))\n return self.v.read_value()\n\n value = tensor_init()\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(self.evaluate(value), 2.0)\n\n @test_util.also_run_as_tf_function\n def testInitScopeTensorInitializationInFunction(self):\n\n @def_function.function\n def tensor_init():\n with ops.init_scope():\n const = constant_op.constant(2.0)\n # Note: this variable bypasses tf.function's variable creation\n # requirements by bypassing variable_creator_scope by using\n # ResourceVariable instead of Variable.\n self.v = resource_variable_ops.ResourceVariable(const)\n return self.v.read_value()\n\n value = tensor_init()\n self.assertAllEqual(value, 2.0)\n\n @test_util.run_in_graph_and_eager_modes\n def testGetConcreteFunctionCreatesVariables(self):\n\n v_holder = []\n\n @def_function.function\n def tensor_init():\n if not v_holder:\n v_holder.append(variables.Variable(5.))\n return v_holder[0].read_value()\n\n concrete = tensor_init.get_concrete_function()\n self.evaluate(variables.global_variables_initializer())\n self.assertAllEqual(5., self.evaluate(concrete()))\n self.assertAllEqual(5., self.evaluate(tensor_init()))\n\n def testFuncGraphCaptureByValue(self):\n v = variables.Variable(1.0)\n\n def trivial_function():\n return v.read_value()\n\n graph_function = function.Function(\n trivial_function, 'test', capture_by_value=True)\n\n self.assertAllEqual(graph_function(), 1.0)\n v.assign(2.0)\n self.assertAllEqual(graph_function(), 1.0)\n\n def testFuncGraphCaptureByValueNested(self):\n v = variables.Variable(1.0)\n\n def trivial_function():\n return control_flow_ops.cond(\n array_ops.placeholder_with_default(True, ()),\n v.read_value, v.read_value)\n\n graph_function = function.Function(\n trivial_function, 'test', capture_by_value=True)\n\n self.assertAllEqual(graph_function(), 1.0)\n v.assign(2.0)\n self.assertAllEqual(graph_function(), 1.0)\n\n def testDefunShapeInferenceWithCapturedResourceVariable(self):\n v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])\n\n def f():\n x = constant_op.constant([[1, 2], [3, 4]])\n out = math_ops.matmul(v, x)\n self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))\n # We do not return v directly since the tensor conversion function of\n # ResourceVariable returns the read value and not the resource itself.\n return v._handle\n\n compiled = def_function.function(f)\n var_handle = compiled()\n self.assertEqual(var_handle.dtype, dtypes.resource)\n self.assertEqual(var_handle.shape, tensor_shape.scalar())\n var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)\n self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))\n\n def testShapeInferenceForMoreSpecificInput(self):\n\n def f(a):\n return array_ops.reshape(a, [-1, 3])\n\n signature = [tensor_spec.TensorSpec(None, dtypes.float32)]\n compiled = 
def_function.function(f, input_signature=signature)\n\n @def_function.function\n def use_f():\n inputs = array_ops.zeros([10, 10, 3])\n self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)\n\n use_f()\n\n def testFuncListAttr(self):\n\n @function.defun\n def test_function(val):\n\n def fn1():\n return array_ops.ones([10])\n\n fn2 = lambda: array_ops.ones([10]) * 2\n\n def fn3(x=3):\n return array_ops.ones([10]) * x\n fn4 = functools.partial(fn3, x=4)\n fn5 = functools.partial(fn3, 5)\n\n return gen_functional_ops.case(val, [], [dtypes.float32],\n [function.defun(f).get_concrete_function()\n for f in (fn1, fn2, fn3, fn4, fn5)])\n\n ones = array_ops.ones([10])\n self.assertAllEqual([ones], test_function(0))\n self.assertAllEqual([ones * 2], test_function(1))\n self.assertAllEqual([ones * 3], test_function(2))\n self.assertAllEqual([ones * 4], test_function(3))\n self.assertAllEqual([ones * 5], test_function(4))\n self.assertAllEqual([ones * 5], test_function(22)) # default branch\n\n @test_util.enable_control_flow_v2\n def testVariableInLoopInFunction(self):\n\n @function.defun\n def test_function():\n\n def loop_test(_):\n return False\n\n def loop_body(_):\n return variable_scope.get_variable('a', shape=())\n\n return control_flow_ops.while_loop(loop_test, loop_body, [0.0])\n\n self.assertEqual(test_function().shape, [])\n\n def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):\n with context.graph_mode():\n v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])\n\n def f():\n x = constant_op.constant([[1, 2], [3, 4]])\n out = math_ops.matmul(v, x)\n self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))\n # We do not return v directly since the tensor conversion function of\n # ResourceVariable returns the read value and not the resource itself.\n return v._handle\n\n compiled = def_function.function(f)\n var_handle = compiled()\n self.assertEqual(var_handle.dtype, dtypes.resource)\n self.assertEqual(var_handle.shape, tensor_shape.scalar())\n var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)\n self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))\n\n def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):\n with context.graph_mode():\n v = variables.Variable([[1, 2], [3, 4]])\n\n def f():\n x = constant_op.constant([[1, 2], [3, 4]])\n out = math_ops.matmul(v, x)\n self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))\n\n # Check that shape inference works while creating the defun\n compiled = def_function.function(f)\n compiled()\n\n def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):\n with context.graph_mode():\n tensor_list = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))\n tensor_list = list_ops.tensor_list_push_back(tensor_list,\n constant_op.constant(1.0))\n tensor_list = list_ops.tensor_list_push_back(tensor_list,\n constant_op.constant(2.0))\n\n def f():\n tl, value = list_ops.tensor_list_pop_back(\n tensor_list, element_dtype=dtypes.float32)\n self.assertEqual(value.shape, tensor_shape.scalar())\n return tl\n\n compiled = def_function.function(f)\n output_tensor_list = compiled()\n _, value = list_ops.tensor_list_pop_back(\n output_tensor_list, element_dtype=dtypes.float32)\n self.assertEqual(value.shape, tensor_shape.scalar())\n\n @test_util.run_in_graph_and_eager_modes\n def testDefunForcesResourceVariables(self):\n\n def variable_creator():\n self.v = variables.Variable(0.0)\n return 
self.v.read_value()\n\n self.v = None\n defined = function.defun(variable_creator)\n defined() # Create the variable.\n self.assertIsInstance(\n self.v, resource_variable_ops.ResourceVariable)\n\n def testRunMetadata(self):\n\n @def_function.function\n def f(x):\n return x * x\n\n with ops.device('cpu:0'):\n context.enable_run_metadata()\n f(constant_op.constant(1.0))\n run_metadata = context.export_run_metadata()\n context.disable_run_metadata()\n step_stats = run_metadata.step_stats\n self.assertNotEmpty(step_stats.dev_stats)\n cpu_stats = step_stats.dev_stats[0]\n self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',\n cpu_stats.device)\n # Testing for at least 2 because the function call should generate at most\n # one entry in the step_stats; the ops inside function can generate\n # arbitrarily many (placeholders, return identities, etc, might be included\n # or not in the future, so shouldn't be tested for exactly.\n self.assertGreaterEqual(len(cpu_stats.node_stats), 2)\n self.assertLen(run_metadata.partition_graphs, 1)\n\n def testGraphModeCaptureVariable(self):\n with context.graph_mode(), self.cached_session():\n\n class HasAVar(object):\n\n def __init__(self):\n self.v = resource_variable_ops.ResourceVariable(1.0)\n\n def call(self):\n return self.v * 2\n\n o = HasAVar()\n self.evaluate(variables.global_variables_initializer())\n call = def_function.function(o.call)\n op = call()\n self.assertAllEqual(self.evaluate(op), 2.0)\n\n def testGraphModeManyFunctions(self):\n with ops.Graph().as_default(), self.cached_session():\n\n @def_function.function\n def f(x):\n return x * x\n\n @def_function.function\n def g(x):\n return f(x) + 1\n\n self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0)\n\n def testDict(self):\n\n @def_function.function\n def f(x):\n return {'name': x + 1}\n\n self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)\n\n def testTensorConversionWithDefun(self):\n\n @def_function.function\n def f(x):\n return math_ops.add(x, constant_op.constant(3))\n\n self.assertAllEqual(5, f(constant_op.constant(2)))\n\n def testTensorConversionCall(self):\n\n @def_function.function\n def f(x):\n return math_ops.add(x, constant_op.constant(3))\n\n @def_function.function\n def g(x):\n return f(f(x))\n\n self.assertAllEqual(8, g(constant_op.constant(2)))\n\n def testCallShape(self):\n\n @def_function.function\n def f(x):\n return x + 1\n\n @def_function.function\n def g(x):\n x = f(x)\n self.assertEqual(x.shape.as_list(), [])\n return None\n\n g(constant_op.constant(1.0))\n\n def testNestedDefunWithNoOutputAndTapedInput(self):\n three = resource_variable_ops.ResourceVariable(3.0, name='v')\n\n @def_function.function\n def f(x):\n # This function intentionally takes a taped variable as input,\n # but does not return any values\n math_ops.add(x, three)\n\n @def_function.function\n def g(x):\n y = math_ops.add(x, three)\n f(y)\n\n g(three)\n\n def testGatherResourceWithDefun(self):\n with ops.device('cpu:0'):\n v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])\n\n def sum_gather():\n return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))\n\n defined = def_function.function(sum_gather)\n self.assertAllEqual(sum_gather(), defined())\n\n def testReturningIndexedSlicesWithDefun(self):\n\n def validate(indexed_slice):\n @def_function.function\n def f():\n return indexed_slice\n\n output = f()\n self.assertIsInstance(output, ops.IndexedSlices)\n self.assertAllEqual(indexed_slice.values, output.values)\n self.assertAllEqual(indexed_slice.indices, 
output.indices)\n self.assertAllEqual(indexed_slice.dense_shape, output.dense_shape)\n\n self.assertEqual(\n f.get_concrete_function().output_shapes,\n indexed_slice.values.shape)\n\n arg = ops.IndexedSlices(\n values=constant_op.constant([1, 2]),\n indices=constant_op.constant([0, 1]),\n dense_shape=constant_op.constant([2]))\n validate(arg)\n\n arg = ops.IndexedSlices(\n values=constant_op.constant([1, 2]),\n indices=constant_op.constant([0, 1]),\n dense_shape=None)\n validate(arg)\n\n def testIndexedSliceAsArgumentWithDefun(self):\n\n @def_function.function\n def f(indexed_slice):\n return indexed_slice\n\n def validate(arg):\n output = f(arg)\n self.assertIsInstance(output, ops.IndexedSlices)\n self.assertAllEqual(arg.values, output.values)\n self.assertAllEqual(arg.indices, output.indices)\n self.assertAllEqual(arg.dense_shape, output.dense_shape)\n\n indexed_slice = ops.IndexedSlices(\n values=constant_op.constant([1]),\n indices=constant_op.constant([0]),\n dense_shape=constant_op.constant([1]))\n validate(indexed_slice)\n\n # Test that `f` works even when `dense_shape` is None.\n indexed_slice = ops.IndexedSlices(\n values=constant_op.constant([1]),\n indices=constant_op.constant([0]),\n dense_shape=None)\n validate(indexed_slice)\n\n @test_util.run_gpu_only\n def testFunctionOnDevice(self):\n x = constant_op.constant([1.]).gpu()\n # TODO(b/121134877): Remove the autograph override.\n f = def_function.function(math_ops.add, autograph=False)\n y = f(x, x).cpu()\n self.assertAllEqual(y, [2.])\n\n @test_util.run_gpu_only\n @test_util.run_in_graph_and_eager_modes\n def testFunctionWithResourcesOnDifferentDevices(self):\n with ops.device('/cpu:0'):\n v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])\n\n with ops.device('/gpu:0'):\n v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])\n\n def sum_gather():\n cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))\n gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))\n return cpu_result, gpu_result\n\n defined = function.defun(sum_gather)\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n expected = self.evaluate(sum_gather())\n self.assertAllEqual(expected, self.evaluate(defined()))\n\n @test_util.run_gpu_only\n @test_util.run_in_graph_and_eager_modes\n def testOpInFunctionWithConflictingResourceInputs(self):\n with ops.device('/cpu:0'):\n v_cpu = resource_variable_ops.ResourceVariable(\n [0.0, 1.0, 2.0], name='cpu')\n v_also_cpu = resource_variable_ops.ResourceVariable(\n [0.0, 1.0, 2.0], name='also_cpu')\n\n with ops.device('/gpu:0'):\n v_gpu = resource_variable_ops.ResourceVariable(\n [0.0, 1.0, 2.0], name='gpu')\n\n @def_function.function\n def resource_apply_adam():\n training_ops.resource_apply_adam(\n v_cpu.handle,\n v_gpu.handle,\n v_also_cpu.handle,\n 1.0, # beta1_power\n 1.0, # beta2_power\n 1.0, # learning_rate\n 1.0, # beta1\n 1.0, # beta2\n 1.0, # epsilon,\n [1.0, 1.0, 1.0], # grad\n False) # use_locking\n return None\n\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n 'Cannot place the graph because a reference or resource edge connects '\n 'colocation groups with incompatible assigned devices'):\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(resource_apply_adam())\n\n @test_util.run_gpu_only\n def testFunctionHandlesInputsOnDifferentDevices(self):\n # The Reshape op requires the shape tensor to be placed in host memory.\n # TODO(b/121134877): Remove 
the autograph override.\n reshape = def_function.function(array_ops.reshape, autograph=False)\n value = constant_op.constant([1., 2.]).gpu()\n shape = constant_op.constant([2, 1])\n reshaped = reshape(value, shape).cpu()\n self.assertAllEqual(reshaped, [[1], [2]])\n\n @test_util.run_gpu_only\n def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):\n # The Reshape op requires the shape tensor to be placed in host memory.\n # TODO(b/121134877): Remove the autograph override.\n reshape = def_function.function(array_ops.reshape, autograph=False)\n value = constant_op.constant([1., 2.])\n shape = constant_op.constant([2, 1]).gpu()\n reshape(value, shape) # No error is raised\n\n def testNoneOutput(self):\n\n @def_function.function\n def my_function(_):\n return None\n\n self.assertAllEqual(my_function(1), None)\n\n def testNestedFunctions(self):\n # TensorFlow function (which is what would be used in TensorFlow graph\n # construction).\n @tf_function.Defun(dtypes.int32, dtypes.int32)\n def add(a, b):\n return math_ops.add(a, b)\n\n @def_function.function\n def add_one(x):\n return add(x, 1)\n\n self.assertAllEqual(3, add_one(constant_op.constant(2)))\n\n def testVariableCaptureInNestedFunctions(self):\n v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)\n\n @def_function.function\n def inner_read():\n return v.read_value()\n\n @def_function.function\n def outer():\n return inner_read()\n\n self.assertEqual(1, int(outer()))\n\n def testReturnCapturedEagerTensor(self):\n t = constant_op.constant(1)\n\n @def_function.function\n def read():\n return t\n\n self.assertEqual(1, int(read()))\n\n def testReturnCapturedGraphTensor(self):\n with context.graph_mode(), self.cached_session():\n t = constant_op.constant(1)\n\n @def_function.function\n def read():\n return t\n\n self.assertEqual(1, int(self.evaluate(read())))\n\n def testSequenceInputs(self):\n # TODO(b/121134877): Remove the autograph override.\n clip_by_global_norm = def_function.function(\n clip_ops.clip_by_global_norm, autograph=False)\n t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]\n clipped_list, global_norm = clip_by_global_norm(t_list,\n constant_op.constant(.2))\n for t in clipped_list:\n self.assertIsInstance(t, ops.Tensor)\n self.assertIsInstance(global_norm, ops.Tensor)\n\n def testNestedSequenceInputs(self):\n\n def my_op(inputs):\n a, b, c = inputs\n e, f = b\n g, h = e\n return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c\n\n my_eager_op = def_function.function(my_op)\n ret = my_eager_op([\n constant_op.constant(1), [(constant_op.constant(2),\n constant_op.constant(3)),\n constant_op.constant(4)],\n constant_op.constant(5)\n ])\n self.assertLen(ret, 2)\n self.assertAllEqual(ret[0][0], 2)\n self.assertAllEqual(ret[0][1][0][0], 8)\n self.assertAllEqual(ret[0][1][0][1], 4)\n self.assertIsInstance(ret[0][1][0], tuple)\n self.assertAllEqual(ret[0][1][1], 6)\n self.assertAllEqual(ret[0][2], 10)\n self.assertAllEqual(ret[1], 15)\n\n def testVariableNamesRespectNameScopesWithDefun(self):\n @def_function.function\n def create_variable():\n with ops.name_scope('foo'):\n v = resource_variable_ops.ResourceVariable(0.0, name='bar')\n self.assertEqual(v.name, 'foo/bar:0')\n\n create_variable()\n\n def testVariableNamesRespectNameScopesWithDefunInGraph(self):\n with context.graph_mode():\n @def_function.function\n def create_variable():\n with ops.name_scope('foo'):\n v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')\n self.assertEqual(v.name, 'foo/bar:0')\n\n 
with ops.get_default_graph().as_default():\n create_variable()\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testLayerInDefun(self):\n conv = convolutional.Conv2D(\n filters=1,\n kernel_size=2,\n kernel_initializer=init_ops.ones_initializer(),\n bias_initializer=init_ops.zeros_initializer())\n\n @function.defun\n def model(x):\n return conv(x)\n\n x = array_ops.ones([1, 2, 2, 1])\n y = model(x)\n\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n\n self.assertAllClose([[[[4.0]]]], self.evaluate(y))\n\n # Variable lifting is somewhat different between defun/tf.function, so testing\n # device placement on both makes sense.\n @parameterized.named_parameters(\n dict(testcase_name='Defun',\n function_decorator=function.defun),\n dict(testcase_name='DefFunction',\n function_decorator=def_function.function))\n @test_util.run_in_graph_and_eager_modes\n def testVariablesPlacedOnOutsideDevice(self, function_decorator):\n\n class _Obj(object):\n\n def __init__(self):\n self.v = None\n\n @function_decorator\n def f(self):\n if self.v is None:\n self.v = variables.Variable(1.)\n return self.v + 1.\n\n has_device = _Obj()\n with ops.device('cpu:0'):\n has_device.f()\n self.assertIn('CPU', has_device.v.device)\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testDefunKerasModelCall(self):\n model = MiniModel()\n model.call = function.defun(model.call)\n\n x = array_ops.ones([1, 2])\n y = model(x)\n\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n\n self.assertAllEqual([[3.0]], self.evaluate(y))\n\n # Break the reference cycle between the MiniModel and the defun:\n # `MiniModel` --(through its `call` method)--> `Function`\n # `Function` --(instancemethod on `MiniModel`)--> `MiniModel`\n del model.call\n\n # Note: The ConfigProto below unfortunately only configures graph\n # construction. 
Eager's configuration is controlled in `__main__`.\n @test_util.run_in_graph_and_eager_modes(\n config=config_pb2.ConfigProto(device_count={'CPU': 4}))\n @test_util.run_v1_only('b/120545219')\n def testDeviceAnnotationsRespected(self):\n\n def multi_device_fn():\n with ops.device('/cpu:0'):\n s0 = test_ops.device_placement_op()\n with ops.device('/cpu:1'):\n s1 = test_ops.device_placement_op()\n with ops.device('/cpu:2'):\n s2 = test_ops.device_placement_op()\n s3 = test_ops.device_placement_op()\n return s0, s1, s2, s3\n\n defined = function.defun(multi_device_fn)\n outputs = self.evaluate(defined())\n self.assertLen(total_function_cache(defined), 1)\n self.assertIn(compat.as_bytes('CPU:0'), outputs[0])\n self.assertIn(compat.as_bytes('CPU:1'), outputs[1])\n self.assertIn(compat.as_bytes('CPU:2'), outputs[2])\n\n with ops.device('/cpu:3'):\n outputs = self.evaluate(defined())\n # All function definitions are agnostic to call site devices.\n self.assertLen(total_function_cache(defined), 1)\n self.assertIn(compat.as_bytes('CPU:0'), outputs[0])\n self.assertIn(compat.as_bytes('CPU:1'), outputs[1])\n self.assertIn(compat.as_bytes('CPU:2'), outputs[2])\n self.assertIn(compat.as_bytes('CPU:3'), outputs[3])\n\n with ops.device('/cpu:0'):\n outputs = self.evaluate(defined())\n self.assertLen(total_function_cache(defined), 1)\n self.assertIn(compat.as_bytes('CPU:0'), outputs[0])\n self.assertIn(compat.as_bytes('CPU:1'), outputs[1])\n self.assertIn(compat.as_bytes('CPU:2'), outputs[2])\n self.assertIn(compat.as_bytes('CPU:0'), outputs[3])\n\n @test_util.run_in_graph_and_eager_modes(\n config=config_pb2.ConfigProto(device_count={'CPU': 2}))\n @test_util.run_v1_only('b/120545219')\n def testCallingGraphFunctionOnDifferentDevice(self):\n\n def func():\n return constant_op.constant(0)\n\n defined = def_function.function(func)\n with ops.device('cpu:0'):\n cpu_graph_function = defined.get_concrete_function()\n\n with ops.device('cpu:0'):\n self.assertEqual(\n self.evaluate(cpu_graph_function()), self.evaluate(func()))\n\n with ops.device('cpu:1'):\n self.assertEqual(0., self.evaluate(cpu_graph_function()))\n\n with ops.device(None):\n self.assertEqual(0., self.evaluate(cpu_graph_function()))\n\n default_graph_function = defined.get_concrete_function()\n self.assertEqual(\n self.evaluate(default_graph_function()), self.evaluate(func()))\n\n with ops.device('cpu:1'):\n self.assertEqual(0., self.evaluate(default_graph_function()))\n\n @test_util.run_gpu_only\n @test_util.run_in_graph_and_eager_modes\n def testColocateWithRespected(self):\n # TODO(b/113291792): Use multiple CPUs instead of a GPU.\n with ops.device('cpu:0'):\n x = constant_op.constant(1.0)\n\n with ops.device('gpu:0'):\n y = constant_op.constant(1.0)\n\n @def_function.function\n def foo():\n return test_ops.device_placement_op()\n\n with ops.colocate_with(x):\n self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))\n\n with ops.colocate_with(y):\n self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))\n\n def testVariablesAreTracked(self):\n v = resource_variable_ops.ResourceVariable(1.0)\n\n def foo(x):\n return v * x\n\n defined = def_function.function(foo)\n\n x = constant_op.constant([1.0])\n self.assertEqual(1., self.evaluate(defined(x)))\n v.assign(2.)\n\n x = constant_op.constant([1.0, 2.0])\n self.assertAllEqual([2., 4.], self.evaluate(defined(x)))\n\n def testCacheObjectHashCollisions(self):\n\n class Foo(object):\n\n def __hash__(self):\n return 42\n\n def func(foo):\n del foo\n return\n\n defined = 
function.defun(func)\n defined(Foo())\n self.assertLen(total_function_cache(defined), 1)\n\n defined(Foo())\n self.assertLen(total_function_cache(defined), 2)\n\n def testCacheTensorDtypeCollision(self):\n\n def func(t):\n return t + t\n\n defined = function.defun(func)\n t = constant_op.constant([[1.0]], dtype=dtypes.complex64)\n defined(t)\n self.assertLen(total_function_cache(defined), 1)\n\n t = constant_op.constant([[1.0]], dtype=dtypes.complex128)\n defined(t)\n self.assertLen(total_function_cache(defined), 2)\n\n def testCacheTensorShapeCollision(self):\n\n def func(t):\n return t + t\n\n defined = function.defun(func)\n t = constant_op.constant([[1.0]], dtype=dtypes.complex64)\n defined(t)\n self.assertLen(total_function_cache(defined), 1)\n\n t = constant_op.constant([1.0], dtype=dtypes.complex64)\n defined(t)\n self.assertLen(total_function_cache(defined), 2)\n\n def testCacheTensorShapeDtypeCollision(self):\n\n def func(t):\n return t + t\n\n defined = function.defun(func)\n t = constant_op.constant([[1.0]], dtype=dtypes.complex64)\n defined(t)\n self.assertLen(total_function_cache(defined), 1)\n\n t = constant_op.constant([1.0], dtype=dtypes.complex128)\n defined(t)\n self.assertLen(total_function_cache(defined), 2)\n\n def testCacheTensorUnknownShapesCollision(self):\n\n def func(t):\n return t + t\n\n with context.graph_mode(), self.cached_session():\n defined = function.defun(func)\n\n p = array_ops.placeholder(dtype=dtypes.float32, shape=[])\n defined(p)\n self.assertLen(total_function_cache(defined), 1)\n\n p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])\n defined(p)\n self.assertLen(total_function_cache(defined), 2)\n\n p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])\n defined(p)\n # Gradual shape relaxation is performed; and the common shape between\n # [1] and [2] is one containing unknown dimensions.\n self.assertLen(total_function_cache(defined), 2)\n\n # pylint: disable=protected-access\n self.assertLen(defined._function_cache.arg_relaxed_shapes, 1)\n relaxed_shapes = (\n list(defined._function_cache.arg_relaxed_shapes.values())[0])\n self.assertEqual(len(relaxed_shapes), 1)\n relaxed_shape = relaxed_shapes[0]\n # pylint: enable=protected-access\n self.assertEqual(relaxed_shape.rank, 1)\n self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None)\n\n t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)\n defined(t)\n # Shape (3,) matches the relaxed shape TensorShape([None])\n self.assertLen(total_function_cache(defined), 2)\n\n def testPythonFunctionWithDefaultArgs(self):\n\n def func(foo, bar=1, baz=2):\n del foo\n del bar\n del baz\n return\n\n defined = function.defun(func)\n defined(0, baz=20)\n\n def cache_keys():\n \"\"\"Sanitizes cache keys of non-input metadata.\"\"\"\n return tuple(key[0] for key in total_function_cache(defined))\n\n # `True` corresponds to the fact that we're executing eagerly\n self.assertIn(('URRRu', (0, 1, 20)), cache_keys())\n\n defined(1) # bar=1, baz=2\n self.assertIn(('URRRu', (1, 1, 2)), cache_keys())\n\n # This matches the previous call.\n defined(foo=1)\n self.assertLen(total_function_cache(defined), 2)\n\n defined(1, 2, 3)\n self.assertLen(total_function_cache(defined), 3)\n self.assertIn(('URRRu', (1, 2, 3)), cache_keys())\n\n # This matches the previous call.\n defined(1, bar=2, baz=3)\n self.assertLen(total_function_cache(defined), 3)\n\n # This matches the previous call.\n defined(1, baz=3, bar=2)\n self.assertLen(total_function_cache(defined), 3)\n\n def 
testFunctoolsPartialUnwrappedCorrectly(self):\n\n def full_function(a, b, c=3):\n return a, b, c\n\n partial = functools.partial(full_function, 1, c=4)\n a, b, c = partial(2)\n\n defined = function.defun(partial)\n func_a, func_b, func_c = defined(2)\n self.assertEqual(func_a.numpy(), a)\n self.assertEqual(func_b.numpy(), b)\n self.assertEqual(func_c.numpy(), c)\n\n def testInputSignatureWithMatchingInputs(self):\n\n def foo(a):\n self.assertEqual(a.shape, (2,))\n return a\n\n signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]\n defined = function.defun(foo, input_signature=signature)\n a = array_ops.ones([2])\n self.assertAllEqual(a, defined(a))\n self.assertLen(total_function_cache(defined), 1)\n self.assertAllEqual(a, defined.get_concrete_function()(a))\n self.assertAllEqual(a, defined.get_concrete_function(a)(a))\n self.assertAllEqual(a, defined.get_concrete_function(\n tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a))\n self.assertLen(total_function_cache(defined), 1)\n\n def bar(a):\n self.assertEqual(a._shape_tuple(), (2, None))\n return a\n\n signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]\n defined = function.defun(bar, input_signature=signature)\n a = array_ops.ones([2, 1])\n out = defined(a)\n self.assertLen(total_function_cache(defined), 1)\n self.assertAllEqual(out, a)\n\n # Changing the second dimension shouldn't create a new function.\n b = array_ops.ones([2, 3])\n out = defined(b)\n self.assertLen(total_function_cache(defined), 1)\n self.assertAllEqual(out, b)\n\n def testInputSignatureWithCompatibleInputs(self):\n\n rank2_spec = tensor_spec.TensorSpec(shape=(None, None),\n dtype=dtypes.float32)\n\n @function.defun(input_signature=[rank2_spec])\n def func(a):\n self.assertEqual([None, None], a.shape.as_list())\n return array_ops.shape(a)\n\n self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))\n self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))\n\n with self.assertRaisesRegexp(ValueError, 'incompatible'):\n func([0.0, 1.0, 2.0]) # Wrong shape.\n\n with self.assertRaisesRegexp(ValueError, 'incompatible'):\n func([['wrong dtype']])\n\n def testNestedInputSignatures(self):\n\n def expected_foo(a, b):\n return [a, b]\n\n @function.defun(input_signature=[\n [tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,\n tensor_spec.TensorSpec((1,), dtypes.float32),\n ])\n def foo(a, b):\n self.assertEqual(a[0]._shape_tuple(), (2, None))\n self.assertEqual(a[1]._shape_tuple(), (2, None))\n self.assertEqual(b._shape_tuple(), (1,))\n return [a, b]\n\n a = array_ops.ones([2, 1])\n b = array_ops.ones([1])\n expected = expected_foo([a, a], b)\n out = foo([a, a], b)\n self.assertLen(total_function_cache(foo), 1)\n nest.assert_same_structure(out, expected)\n self.assertAllEqual(out[0][0], a)\n self.assertAllEqual(out[0][1], a)\n self.assertAllEqual(out[1], b)\n\n # Changing the unspecified dimensions shouldn't create a new function.\n a = array_ops.ones([2, 3])\n b = array_ops.ones([2, 5])\n c = array_ops.ones([1])\n expected = expected_foo([a, b], c)\n out = foo([a, b], c)\n self.assertLen(total_function_cache(foo), 1)\n nest.assert_same_structure(out, expected)\n self.assertAllEqual(out[0][0], a)\n self.assertAllEqual(out[0][1], b)\n self.assertAllEqual(out[1], c)\n\n # Passing compatible inputs should work.\n a = a.numpy().tolist()\n b = b.numpy().tolist()\n c = c.numpy().tolist()\n out = foo([a, b], c)\n self.assertLen(total_function_cache(foo), 1)\n nest.assert_same_structure(out, expected)\n self.assertAllEqual(out[0][0], a)\n 
self.assertAllEqual(out[0][1], b)\n self.assertAllEqual(out[1], c)\n\n def testNestedInputSignaturesWithDict(self):\n def expected_bar(a):\n return a\n\n @function.defun(input_signature=[{\n 'a': tensor_spec.TensorSpec((2, None), dtypes.float32),\n 'b': tensor_spec.TensorSpec((2, None), dtypes.float32),\n 'c': tensor_spec.TensorSpec((1,), dtypes.float32)}])\n def bar(a):\n self.assertEqual(a['a']._shape_tuple(), (2, None))\n self.assertEqual(a['b']._shape_tuple(), (2, None))\n self.assertEqual(a['c']._shape_tuple(), (1,))\n return a\n\n a = array_ops.ones([2, 3])\n b = array_ops.ones([1])\n inputs = {'a': a, 'b': a, 'c': b}\n expected = expected_bar(inputs)\n out = bar(inputs)\n nest.assert_same_structure(out, expected)\n self.assertAllEqual(out['a'], expected['a'])\n self.assertAllEqual(out['b'], expected['b'])\n self.assertAllEqual(out['c'], expected['c'])\n\n # Passing compatible inputs should work.\n a = a.numpy().tolist()\n b = b.numpy().tolist()\n inputs = {'a': a, 'b': a, 'c': b}\n out = bar(inputs)\n nest.assert_same_structure(out, expected)\n self.assertAllEqual(out['a'], expected['a'])\n self.assertAllEqual(out['b'], expected['b'])\n self.assertAllEqual(out['c'], expected['c'])\n\n def testInputSignatureMustBeSequenceOfTensorSpecs(self):\n\n def foo(a, b):\n del a\n del b\n\n # Signatures must consist exclusively of `TensorSpec` objects.\n signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]\n with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'):\n def_function.function(foo, input_signature=signature)\n\n # Signatures must be either lists or tuples on their outermost levels.\n signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}\n with self.assertRaisesRegexp(TypeError, 'input_signature must be either a '\n 'tuple or a list.*'):\n function.defun(foo, input_signature=signature)\n\n @test_util.run_in_graph_and_eager_modes\n def testInputsIncompatibleWithSignatureRaisesError(self):\n\n def foo(a):\n return a\n\n signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]\n defined = def_function.function(foo, input_signature=signature)\n\n # Invalid shapes.\n with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):\n defined(array_ops.ones([3]))\n\n with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):\n defined(array_ops.ones([2, 1]))\n\n # Wrong number of arguments.\n with self.assertRaisesRegexp(TypeError, 'Received 2 argument\\(s\\)'):\n defined(array_ops.ones([2]), array_ops.ones([2]))\n with self.assertRaisesRegexp(ValueError,\n 'Structure of Python function inputs.*'):\n defined()\n\n with self.assertRaisesRegexp(ValueError,\n 'inputs incompatible with input_signature'):\n defined.get_concrete_function(\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))\n\n def testInputsIncompatibleWithNestedSignatureRaisesError(self):\n\n def foo(a, b):\n return [a, b]\n\n signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2,\n [tensor_spec.TensorSpec((1,), dtypes.float32)] * 2]\n defined = function.defun(foo, input_signature=signature)\n a = array_ops.ones([1])\n\n with self.assertRaisesRegexp(ValueError,\n 'Structure of Python function inputs.*'):\n defined([a, a, a], [a])\n\n with self.assertRaisesRegexp(ValueError,\n 'Structure of Python function inputs.*'):\n defined([a], [a, a, a])\n defined([a, a], [a, a])\n\n def testUnderspecifiedInputSignature(self):\n @function.defun(input_signature=[\n tensor_spec.TensorSpec([], dtypes.float32),\n ])\n def foo(a, training=True):\n if 
training:\n return a\n else:\n return -1.0 * a\n\n x = constant_op.constant(1.0)\n with self.assertRaisesRegexp(TypeError, 'only pass arguments'):\n foo(x, training=True)\n\n with self.assertRaisesRegexp(TypeError, 'only pass arguments'):\n foo(x, training=False)\n\n self.assertAllEqual(x.numpy(), foo(x).numpy())\n\n def testInputSignatureWithPartialFunction(self):\n self.skipTest('b/124441704')\n def full_function(a, b, c=3.0):\n return a, b, c\n\n partial = functools.partial(full_function, 1, c=4)\n a, b, c = partial(2.0)\n signature = [tensor_spec.TensorSpec([], dtypes.float32)]\n defined = function.defun(partial, input_signature=signature)\n x = constant_op.constant(2.0)\n func_a, func_b, func_c = defined(x)\n self.assertEqual(func_a.numpy(), a)\n self.assertEqual(func_b.numpy(), b)\n self.assertEqual(func_c.numpy(), c)\n\n def testInputSignatureConversionWithDefaultArg(self):\n\n def foo(a, training=True):\n if training:\n return a\n else:\n return -1.0 * a\n\n signature = [\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([], dtypes.bool),\n ]\n defined = def_function.function(foo, input_signature=signature)\n a = constant_op.constant(1.0)\n self.assertAllEqual(a.numpy(), defined(a))\n self.assertAllEqual(a.numpy(), defined(a, training=True))\n self.assertAllEqual(-a.numpy(), defined(a, training=False))\n\n def testInputSignatureWithKeywordPositionalArgs(self):\n\n @function.defun(input_signature=[\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([], dtypes.int64)\n ])\n def foo(flt, integer):\n return flt, integer\n\n flt = constant_op.constant(1.0)\n integer = constant_op.constant(2, dtypes.int64)\n\n out1, out2 = foo(flt, integer)\n self.assertLen(total_function_cache(foo), 1)\n self.assertEqual(out1.numpy(), 1.0)\n self.assertEqual(out2.numpy(), 2)\n\n out1, out2 = foo(flt=flt, integer=integer)\n self.assertLen(total_function_cache(foo), 1)\n self.assertEqual(out1.numpy(), 1.0)\n self.assertEqual(out2.numpy(), 2)\n\n out1, out2 = foo(integer=integer, flt=flt)\n self.assertLen(total_function_cache(foo), 1)\n self.assertEqual(out1.numpy(), 1.0)\n self.assertEqual(out2.numpy(), 2)\n\n out1, out2 = foo(flt, integer=integer)\n self.assertLen(total_function_cache(foo), 1)\n self.assertEqual(out1.numpy(), 1.0)\n self.assertEqual(out2.numpy(), 2)\n\n def testInputSignatureWithKeywordArgsFails(self):\n\n def foo(a, **kwargs):\n del a\n del kwargs\n\n with self.assertRaisesRegexp(\n ValueError, 'Cannot define a TensorFlow function from a Python '\n 'function with keyword arguments when input_signature.*'):\n function.defun(\n foo,\n input_signature=[\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([], dtypes.int64)\n ])\n\n def testTensorKeywordArguments(self):\n\n def foo(a, b):\n del a\n return b\n\n defined = function.defun(foo)\n a = constant_op.constant(2.0)\n b = constant_op.constant([1.0, 2.0])\n one = defined(a, b)\n self.assertLen(total_function_cache(defined), 1)\n\n two = defined(a=a, b=b)\n self.assertLen(total_function_cache(defined), 1)\n\n three = defined(b=b, a=a)\n self.assertLen(total_function_cache(defined), 1)\n\n four = defined(a, b=b)\n self.assertLen(total_function_cache(defined), 1)\n\n # The next call corresponds to a new input signature, hence\n # we expect another function to be defined.\n five = defined(b, a)\n self.assertLen(total_function_cache(defined), 2)\n\n six = defined(a=b, b=a)\n self.assertLen(total_function_cache(defined), 2)\n\n seven = defined(b=a, a=b)\n 
self.assertLen(total_function_cache(defined), 2)\n\n self.assertAllEqual(one, [1.0, 2.0])\n self.assertAllEqual(two, [1.0, 2.0])\n self.assertAllEqual(three, [1.0, 2.0])\n self.assertAllEqual(four, [1.0, 2.0])\n self.assertAllEqual(five, 2.0)\n self.assertAllEqual(six, 2.0)\n self.assertAllEqual(seven, 2.0)\n\n def testDefuningInstanceMethod(self):\n\n integer = constant_op.constant(2, dtypes.int64)\n\n class Foo(object):\n\n def one(self, tensor):\n return tensor\n\n @def_function.function\n def two(self, tensor, other=integer):\n return self.one(tensor), other\n\n foo = Foo()\n t = constant_op.constant(1.0)\n one, two = foo.two(t)\n self.assertEqual(one.numpy(), 1.0)\n self.assertEqual(two.numpy(), 2)\n\n def testDefuningInstanceMethodWithDefaultArgument(self):\n\n integer = constant_op.constant(2, dtypes.int64)\n\n class Foo(object):\n\n @def_function.function\n def func(self, other=integer):\n return other\n\n foo = Foo()\n self.assertEqual(foo.func().numpy(), int(integer))\n\n def testPythonCallWithSideEffects(self):\n state = []\n\n @def_function.function\n def side_effecting_function():\n state.append(0)\n\n side_effecting_function()\n self.assertAllEqual(state, [0])\n\n # The second invocation should call the graph function, which shouldn't\n # trigger the list append.\n side_effecting_function()\n self.assertAllEqual(state, [0])\n\n # Whereas calling the python function directly should create a side-effect.\n side_effecting_function.python_function()\n self.assertAllEqual(state, [0, 0])\n\n def testFunctionWithNestedFunctionCallAndSideEffects(self):\n v1 = variables.Variable(1.0)\n v2 = variables.Variable(1.0)\n\n @def_function.function\n def add_one(a):\n a.assign_add(1.0)\n\n # Grappler will inline calls to `add_one` into the function body, we check\n # that all side-effects were executed.\n @def_function.function\n def side_effecting_function(a, b):\n add_one(a)\n add_one(b)\n return a + b\n\n result = side_effecting_function(v1, v2)\n self.assertEqual(result.numpy(), 4.0)\n\n def testFunctionWithExtraAttributes(self):\n @function.defun_with_attributes(attributes={'experimental_1': 'value1',\n 'experimental_2': 2})\n def matmul(x, y):\n return math_ops.matmul(x, y)\n\n def add(x, y):\n return math_ops.add(x, y)\n defun_add = function.defun_with_attributes(\n add, attributes={'experimental_3': True, 'experimental_4': 1.0})\n\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n sq = matmul(t, t)\n double = defun_add(t, t)\n self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])\n self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])\n\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n self.assertLen(graph._functions, 2)\n functions = list(graph._functions.values())\n self.assertRegexpMatches(\n functions[0].definition.signature.name, '.*matmul.*')\n attrs = functions[0].definition.attr\n self.assertLen(attrs, 2)\n self.assertEqual(attrs['experimental_1'].s, b'value1')\n self.assertEqual(attrs['experimental_2'].i, 2)\n\n self.assertRegexpMatches(\n functions[1].definition.signature.name, '.*add.*')\n attrs = functions[1].definition.attr\n self.assertLen(attrs, 2)\n self.assertEqual(attrs['experimental_3'].b, True)\n self.assertEqual(attrs['experimental_4'].f, 1.0)\n # pylint: enable=protected-access\n\n def testFunctionWithInvalidAttribute(self):\n @function.defun_with_attributes(attributes={'experimental_1': ['value1']})\n def add(x, y):\n return 
math_ops.add(x, y)\n\n with self.assertRaisesRegexp(ValueError,\n '.*Unsupported attribute type.*'):\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n add(t, t)\n\n def testRegisterFunction(self):\n\n @function.defun\n def add(x, y):\n return math_ops.add(x, y)\n\n def matmul(x, y):\n return math_ops.matmul(x, y)\n defun_matmul = function.defun(matmul)\n\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n function.register(defun_matmul, t, t)\n function.register(add, t, t)\n\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n self.assertLen(graph._functions, 6)\n # two sets of functions, each of them are (inference, forward, backward)\n functions = list(graph._functions.values())\n captured_function_names = [\n f.definition.signature.name for f in functions\n ]\n expected_func_name_regex = [\n '.*inference.*matmul.*',\n '.*forward.*matmul.*',\n '.*inference.*backward.*matmul.*',\n '.*inference.*add.*',\n '.*forward.*add.*',\n '.*inference.*backward.*add.*',\n ]\n for i in range(len(functions)):\n self.assertRegexpMatches(captured_function_names[i],\n expected_func_name_regex[i])\n\n # Check the forward and backward function has the correct attributes.\n self.assertEqual(\n functions[1].definition.attr['backward_function_name'].s,\n functions[2].name)\n self.assertEqual(\n functions[2].definition.attr['forward_function_name'].s,\n functions[1].name)\n\n self.assertEqual(\n functions[4].definition.attr['backward_function_name'].s,\n functions[5].name)\n self.assertEqual(\n functions[5].definition.attr['forward_function_name'].s,\n functions[4].name)\n\n sq = defun_matmul(t, t)\n double = add(t, t)\n self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])\n self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])\n # Make sure the pre registered function is used, and no other function\n # is added.\n self.assertLen(graph._functions, 6)\n functions = list(graph._functions.values())\n for i in range(len(functions)):\n self.assertEqual(captured_function_names[i],\n functions[i].definition.signature.name)\n\n @parameterized.named_parameters(\n dict(testcase_name='Defun',\n function_decorator=function.defun),\n dict(testcase_name='DefFunction',\n function_decorator=def_function.function))\n def testRegisterConcreteFunction(self, function_decorator):\n @function_decorator\n def py_add(x, y):\n return math_ops.add(x, y)\n\n py_add(array_ops.ones([]), array_ops.ones([]))\n add = py_add.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32))\n\n @function_decorator\n def py_composite(x, y):\n return x, add(x, y)\n\n py_composite(array_ops.ones([]), array_ops.ones([]))\n composite = py_composite.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32))\n\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n composite.add_to_graph(register_gradient_functions=True)\n\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n self.assertLen(graph._functions, 6)\n # two sets of functions, each of them are (inference, forward, backward)\n functions = list(graph._functions.values())\n captured_function_names = [\n f.definition.signature.name for f in 
functions\n ]\n expected_func_name_regex = [\n '.*inference.*py_composite.*',\n '.*inference.*py_add.*',\n '.*forward.*py_composite.*',\n '.*forward.*py_add.*',\n '.*inference.*backward.*py_composite.*',\n '.*inference.*backward.*py_add.*',\n ]\n for expected, found in zip(\n expected_func_name_regex,\n captured_function_names):\n self.assertRegexpMatches(found, expected)\n\n composite_t, composite_double = composite(t, t)\n double = add(t, t)\n self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))\n self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))\n self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))\n # Make sure the pre registered function is used, and no other function\n # is added.\n self.assertLen(graph._functions, 6)\n\n def testRegisterFunctionWithInputSignature(self):\n def matmul(x, y):\n return math_ops.matmul(x, y)\n defun_matmul = function.defun(\n matmul,\n input_signature=[\n tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)\n ])\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n function.register(defun_matmul, t, t)\n\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n self.assertLen(graph._functions, 3)\n\n # Test register function with cache, note inputs are ignored.\n function.register(defun_matmul)\n graph = ops.get_default_graph()\n self.assertLen(graph._functions, 3)\n\n def testRegisterFunctionWithCache(self):\n def matmul(x, y):\n return math_ops.matmul(x, y)\n defun_matmul = function.defun(matmul)\n\n with context.graph_mode(), self.cached_session():\n with ops.get_default_graph().as_default():\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])\n function.register(defun_matmul, t, t)\n function.register(defun_matmul, t2, t2)\n\n graph = ops.get_default_graph()\n # Only one function is registered since the input param are in same type\n # pylint: disable=protected-access\n self.assertLen(graph._functions, 3)\n\n def testCallingFunctionWithDifferentVariables(self):\n\n @function.defun\n def foo(v):\n v.assign_add(1.0)\n return v.read_value()\n\n v = resource_variable_ops.ResourceVariable(0.0)\n graph_function = foo.get_concrete_function(v)\n self.assertLen(graph_function.inputs, 1)\n self.assertEmpty(graph_function.captured_inputs)\n\n self.assertEqual(float(graph_function(v)), 1.0)\n self.assertEqual(float(graph_function(v)), 2.0)\n\n w = resource_variable_ops.ResourceVariable(0.0)\n\n @function.defun\n def bar(v):\n del v\n return constant_op.constant(1.0)\n\n graph_function = bar.get_concrete_function(v)\n self.assertEqual(float(graph_function(v)), 1.0)\n self.assertEqual(float(graph_function(w)), 1.0)\n\n def testCallingFunctionWithNonTensorsFails(self):\n\n @function.defun\n def foo(x):\n return x\n\n graph_function = foo.get_concrete_function(constant_op.constant(1.0))\n with self.assertRaisesRegexp(\n ValueError, 'All inputs to `ConcreteFunction`s must be Tensors;.*'):\n graph_function('Not a Tensor.')\n\n def testSwapImplementationWithGrapplerPlugin(self):\n # Set the min_graph_nodes to -1 since the graph in this test is too small,\n # and will be ignored by grappler if don't set this.\n rewrites = rewriter_config_pb2.RewriterConfig()\n rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON\n rewrites.min_graph_nodes = -1\n graph_options = 
config_pb2.GraphOptions(\n rewrite_options=rewrites, build_cost_model=1)\n config = config_pb2.ConfigProto(graph_options=graph_options)\n\n with context.graph_mode(), self.cached_session(\n config=config, graph=ops.Graph(), use_gpu=True):\n\n @function.defun_with_attributes(\n attributes={\n 'api_implements': 'random_boost',\n 'api_preferred_device': 'CPU'\n })\n def cpu_boost(x):\n return math_ops.add(x, 2.0)\n\n @function.defun_with_attributes(\n attributes={\n 'api_implements': 'random_boost',\n 'api_preferred_device': 'GPU'\n })\n def gpu_boost(x):\n return math_ops.add(x, 4.0)\n\n x = constant_op.constant(1.0)\n\n function.register(cpu_boost, x)\n y = gpu_boost(x)\n y_value = self.evaluate(y)\n\n if test.is_gpu_available():\n self.assertEqual(y_value, 5.0)\n else:\n # Grappler fallback to use the CPU impl even called with GPU function.\n self.assertEqual(y_value, 3.0)\n\n def testDefunFunctionSeparateGraphs(self):\n with context.graph_mode():\n\n @function.defun\n def add(x):\n return x + 5\n\n @function.defun\n def maybe_add(x, should_add):\n if should_add:\n return add(x)\n else:\n return x\n\n with ops.Graph().as_default():\n x = constant_op.constant(11)\n maybe_add(x, True)\n self.assertLen(total_function_cache(maybe_add), 1)\n self.assertLen(total_function_cache(add), 1)\n\n maybe_add(x, False)\n self.assertLen(total_function_cache(maybe_add), 2)\n self.assertLen(total_function_cache(add), 1)\n\n with ops.Graph().as_default():\n x = constant_op.constant(11)\n maybe_add(x, True)\n self.assertLen(total_function_cache(maybe_add), 3)\n self.assertLen(total_function_cache(add), 2)\n\n def testCacheKeyOverlappingShapes(self):\n @function.defun\n def defined(t):\n return t\n\n defined(array_ops.zeros([12, 1]))\n self.assertLen(total_function_cache(defined), 1)\n\n defined(array_ops.zeros([1, 21]))\n self.assertLen(total_function_cache(defined), 2)\n\n def testCacheKeyNestedLists(self):\n @function.defun\n def defined(l):\n return l\n\n a = constant_op.constant(1.)\n b = constant_op.constant(2.)\n c = constant_op.constant(3.)\n defined([[a], b, c])\n self.assertLen(total_function_cache(defined), 1)\n\n defined([[a, b], c])\n self.assertLen(total_function_cache(defined), 2)\n\n def testDecoratedMethod(self):\n m = DefunnedMiniModel()\n instance_call_one = m.call(array_ops.ones([1, 2]), training=True)\n instance_call_two = m.call(\n inputs=array_ops.ones([1, 2]), training=True)\n class_call = DefunnedMiniModel.call(m, array_ops.ones([1, 2]),\n training=True)\n self.assertAllEqual(instance_call_one, instance_call_two)\n self.assertAllEqual(instance_call_one, class_call)\n\n def testDecoratedMethodUniqueFunctionPerInstance(self):\n m = DefunnedMiniModel()\n n = DefunnedMiniModel()\n\n class_method_one = DefunnedMiniModel.call\n class_method_two = DefunnedMiniModel.call\n\n m_method_one = m.call\n m_method_two = m.call\n\n n_method_one = n.call\n n_method_two = n.call\n\n self.assertEqual(class_method_one, class_method_two)\n self.assertEqual(m_method_one, m_method_two)\n self.assertEqual(n_method_one, n_method_two)\n self.assertNotEqual(m.call, n.call)\n\n def testDecoratedMethodInspect(self):\n m = DefunnedMiniModel()\n fullargspec = tf_inspect.getfullargspec(m.call)\n self.assertIn('training', fullargspec.args)\n\n def testDecoratedMethodGetConcreteFunction(self):\n m = DefunnedMiniModel()\n instance_call_one = m.call.get_concrete_function(\n array_ops.ones([1, 2]), training=False)\n instance_call_two = m.call.get_concrete_function(\n inputs=array_ops.ones([1, 2]), training=False)\n 
self.assertAllEqual(instance_call_one(array_ops.ones([1, 2])),\n instance_call_two(array_ops.ones([1, 2])))\n\n # Also make sure get_concrete_function works on the class method\n DefunnedMiniModel.call.get_concrete_function(\n m, array_ops.ones([1, 2]), training=False)\n DefunnedMiniModel.call.get_concrete_function(\n m, inputs=array_ops.ones([1, 2]), training=True)\n\n def testFunctionModifiesInputList(self):\n # Tests on `list` methods that do in place modification, except `list.sort`\n # since it cannot even be \"defunned\" in the first place\n\n def get_list():\n return [constant_op.constant(0.), constant_op.constant(1.)]\n\n expected_msg = (\n 'Function to be traced should not modify structure of input '\n 'arguments. Check if your function has list and dictionary '\n 'operations that alter input arguments, '\n 'such as `list.pop`, `list.append`')\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def append(l):\n l.append(constant_op.constant(0.))\n\n append(get_list())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def extend(l):\n l.extend([constant_op.constant(0.)])\n\n extend(get_list())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def insert(l):\n l.insert(0, constant_op.constant(0.))\n\n insert(get_list())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def pop(l):\n l.pop()\n\n pop(get_list())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def reverse(l):\n l.reverse()\n\n reverse(get_list())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def remove(l):\n l.remove(l[0])\n\n remove(get_list())\n\n # `list.clear` is a method that is in Py3 but not Py2\n if sys.version.startswith('3'):\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def clear(l):\n l.clear()\n\n clear(get_list())\n\n # One last test for keyword arguments\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def kwdappend(**kwargs):\n l = kwargs['l']\n l.append(constant_op.constant(0.))\n\n kwdappend(l=get_list())\n\n def testFunctionModifiesInputDict(self):\n\n def get_dict():\n return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}\n\n expected_msg = (\n 'Function to be traced should not modify structure of input '\n 'arguments. 
Check if your function has list and dictionary '\n 'operations that alter input arguments, '\n 'such as `list.pop`, `list.append`')\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def clear(m):\n m.clear()\n\n clear(get_dict())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def pop(m):\n m.pop('t1')\n\n pop(get_dict())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def popitem(m):\n m.popitem()\n\n popitem(get_dict())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def update(m):\n m.update({'t1': constant_op.constant(3.)})\n\n update(get_dict())\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def setdefault(m):\n m.setdefault('t3', constant_op.constant(3.))\n\n setdefault(get_dict())\n\n def testFunctionModifiesInputNest(self):\n # Test on functions that modify structure of nested input arguments\n expected_msg = (\n 'Function to be traced should not modify structure of input '\n 'arguments. Check if your function has list and dictionary '\n 'operations that alter input arguments, '\n 'such as `list.pop`, `list.append`')\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n @def_function.function\n def modify(n):\n n[0]['t1'].append(constant_op.constant(1.))\n\n nested_input = [{\n 't1': [constant_op.constant(0.),\n constant_op.constant(1.)],\n },\n constant_op.constant(2.)]\n\n modify(nested_input)\n\n with self.assertRaisesRegexp(ValueError, expected_msg):\n\n # The flat list doesn't change whereas the true structure changes\n @def_function.function\n def modify_same_flat(n):\n n[0].append(n[1].pop(0))\n\n nested_input = [[constant_op.constant(0.)],\n [constant_op.constant(1.),\n constant_op.constant(2.)]]\n\n modify_same_flat(nested_input)\n\n def testDecoratedMethodVariableCleanup(self):\n m = DefunnedMiniModel()\n m(array_ops.ones([1, 2]))\n weak_variables = weakref.WeakSet(m.variables)\n self.assertLen(weak_variables, 2)\n del m\n self.assertEqual([], list(weak_variables))\n\n def testExecutorType(self):\n @function.defun\n def add_five(x):\n return x + 5\n\n self.assertEqual(\n 5,\n add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())\n\n with self.assertRaisesRegexp(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):\n with context.function_executor_type('NON_EXISTENT_EXECUTOR'):\n add_five(constant_op.constant(0, dtype=dtypes.int32))\n\n for executor_type in ('', 'DEFAULT', None):\n with context.function_executor_type(executor_type):\n self.assertAllEqual(\n 5,\n add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())\n\n @test_util.assert_no_garbage_created\n def testReferenceCycles(self):\n\n fn = function.defun(lambda x: 2. * x)\n\n fn(constant_op.constant(4.0))\n weak_fn = weakref.ref(fn)\n del fn\n # Tests that the weak reference we made to the function is now dead, which\n # means the object has been deleted. 
This should be true as long as the\n # function itself is not involved in a reference cycle.\n self.assertIs(None, weak_fn())\n\n def testFunctionStackInErrorMessage(self):\n if context.executing_eagerly():\n # TODO(b/122736651): Remove this skipTest once fixed.\n self.skipTest('Error interpolation is not working when function is '\n 'invoked without PartitionedCallOp.')\n\n @def_function.function()\n def fn3(x):\n return x + 2\n\n @def_function.function()\n def fn2(x):\n check_ops.assert_equal(fn3(x), 3)\n return 2\n\n @def_function.function()\n def fn(x):\n return fn2(x)\n\n with self.assertRaises(errors.InvalidArgumentError) as cm:\n fn(2)\n e = cm.exception\n self.assertIn('fn -> fn2', e.message)\n self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)\n self.assertNotIn('fn3', e.message)\n\n @test_util.run_gpu_only\n def testFunctionIsNotPinned(self):\n \"\"\"Tests that functions aren't pinned to the CPU by the eager runtime.\"\"\"\n seed1, seed2 = 79, 25\n shape = constant_op.constant([4, 7])\n dtype = dtypes.float32\n\n @def_function.function\n def func():\n with ops.device('GPU:0'):\n return gen_random_ops.random_standard_normal(\n shape, dtype=dtype, seed=seed1, seed2=seed2)\n\n with ops.device('GPU:0'):\n x = func()\n self.assertRegexpMatches(x.device, 'GPU')\n\n @test_util.run_in_graph_and_eager_modes\n def testShapeCaching(self):\n\n @function.defun\n def func(x):\n return array_ops.shape(x)\n\n @function.defun(\n input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)])\n def calls_func(x):\n return func(x)\n\n self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))\n self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))\n self.assertAllEqual(\n [3, 3],\n self.evaluate(calls_func(array_ops.zeros([3, 3]))))\n\n def testLimitedRetracing(self):\n trace_count = [0]\n @function.defun\n def func(x):\n trace_count[0] += 1\n return x\n\n for _ in range(50):\n func(constant_op.constant(3.))\n func(constant_op.constant(4.))\n func(constant_op.constant([[1., 2.]]))\n func(constant_op.constant([[]]))\n func(constant_op.constant([[3., 4.], [5., 6.]]))\n func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]]))\n # Tracing more than twice per input doesn't make sense.\n self.assertLess(trace_count[0], 13)\n\n def test_concrete_function_shape_mismatch(self):\n\n @def_function.function\n def f(argument_name):\n return argument_name + 1.\n\n f_concrete = f.get_concrete_function(constant_op.constant([1.]))\n\n # Calling a function from eager doesn't do any shape checking above what\n # kernels do while executing.\n self.assertAllEqual(\n [2., 3.],\n f_concrete(constant_op.constant([1., 2.])).numpy())\n\n @def_function.function\n def g():\n f_concrete(constant_op.constant([1., 2.]))\n\n with self.assertRaisesRegexp(ValueError, 'argument_name'):\n g()\n\n @test_util.run_in_graph_and_eager_modes\n def test_shape_inference_with_symbolic_shapes(self):\n\n @def_function.function\n def _uses_symbolic_shapes(w, x, y):\n x = array_ops.identity(x, name='name_collision')\n x = array_ops.transpose(x, [1, 0, 2])\n x_batch = array_ops.shape(x)[0]\n y_batch = array_ops.shape(y)[0]\n y *= w\n n = y_batch // x_batch\n return array_ops.reshape(y, [n, x_batch, -1])\n\n conc = _uses_symbolic_shapes.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32))\n\n @def_function.function\n def _call_concrete():\n c = constant_op.constant(1.)\n 
array_ops.identity(c, name='name_collision')\n output1 = conc(array_ops.ones([2]),\n array_ops.ones([5, 4, 2]),\n array_ops.ones([20, 2]))\n self.assertEqual([5, 4, 2], output1.shape)\n output2 = conc(array_ops.ones([3]),\n array_ops.ones([5, 4, 3]),\n array_ops.ones([40, 3]))\n self.assertEqual([10, 4, 3], output2.shape)\n return output1, output2\n\n output1, output2 = _call_concrete()\n self.assertEqual((5, 4, 2), self.evaluate(output1).shape)\n self.assertEqual((10, 4, 3), self.evaluate(output2).shape)\n\n\nclass MultiDeviceTest(test.TestCase, parameterized.TestCase):\n\n @test_util.run_gpu_only\n def testMultiDeviceOutput(self):\n \"\"\"Tests that functions can produce outputs on multiple devices.\"\"\"\n @function.defun\n def func(a, b, transpose_a):\n with ops.device('/device:CPU:0'):\n m1 = math_ops.matmul(a, b, transpose_a=transpose_a)\n with ops.device('/device:GPU:0'):\n m2 = math_ops.matmul(a, b, transpose_a=transpose_a)\n return m1, m2\n\n t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])\n m1, m2 = func(t, t, transpose_a=True)\n self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])\n self.assertRegexpMatches(m1.backing_device, 'CPU')\n self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])\n self.assertRegexpMatches(m2.backing_device, 'GPU')\n\n @test_util.run_gpu_only\n def testEmptyBody(self):\n @function.defun\n def func(a, b):\n return b, a\n\n with ops.device('/device:CPU:0'):\n a = constant_op.constant(3.0)\n with ops.device('/device:GPU:0'):\n b = constant_op.constant(5.0)\n\n m1, m2 = func(a, b)\n self.assertAllEqual(m1.numpy(), 5.0)\n self.assertRegexpMatches(m1.backing_device, 'GPU')\n self.assertAllEqual(m2.numpy(), 3.0)\n self.assertRegexpMatches(m2.backing_device, 'CPU')\n\n @test_util.run_gpu_only\n def testMultiDeviceInt32(self):\n \"\"\"Tests that multi-device functions can take and output INT32s.\n\n When an INT32 device tensor is fed into a function, it is copied to CPU\n by the eager runtime. The function sees all INT32 inputs on CPU.\n\n We set allocator attribute 'on_host' for INT32 outputs. They can be\n partitioned into the GPU component function, but will be allocated on\n CPU nevertheless.\n\n There is experimental support for `ints_on_device` in\n FunctionLibraryRuntime now. 
We can try that.\n\n \"\"\"\n with ops.device('/device:CPU:0'):\n int_cpu = constant_op.constant(3, dtype=dtypes.int32)\n resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)\n with ops.device('/device:GPU:0'):\n int_gpu = constant_op.constant(7, dtype=dtypes.int32)\n\n @function.defun\n def func(int_cpu, resource, int_gpu):\n with ops.device('/device:CPU:0'):\n m1 = int_cpu * resource + int_gpu\n with ops.device('/device:GPU:0'):\n # This computation will happen on GPU but m2 will be copied to CPU.\n m2 = int_gpu * resource + int_cpu + 1\n return m1, m2\n\n m1, m2 = func(int_cpu, resource, int_gpu)\n self.assertAllEqual(m1.numpy(), 22)\n self.assertRegexpMatches(m1.backing_device, 'CPU')\n self.assertAllEqual(m2.numpy(), 39)\n self.assertRegexpMatches(m2.backing_device, 'CPU')\n\n # flip arguments\n m1, m2 = func(int_gpu, resource, int_cpu)\n self.assertAllEqual(m1.numpy(), 38)\n self.assertRegexpMatches(m1.backing_device, 'CPU')\n self.assertAllEqual(m2.numpy(), 23)\n self.assertRegexpMatches(m2.backing_device, 'CPU')\n\n @test_util.run_gpu_only\n def testMultiDeviceColocateWith(self):\n \"\"\"Tests that function's outputs respect colocation constraints.\"\"\"\n @function.defun\n def func(a, b):\n with ops.colocate_with(a):\n ra = 2 * a\n with ops.colocate_with(b):\n rb = 3 * b\n return ra, rb\n\n devices = ['/device:CPU:0', '/device:GPU:0']\n for dev1, dev2 in itertools.product(devices, devices):\n with ops.device(dev1):\n a = constant_op.constant(1.0)\n with ops.device(dev2):\n b = constant_op.constant(10.0)\n\n ra, rb = func(a, b)\n self.assertEqual(ra.numpy(), 2.0)\n self.assertRegexpMatches(ra.backing_device, dev1)\n self.assertEqual(rb.numpy(), 30.0)\n self.assertRegexpMatches(rb.backing_device, dev2)\n\n @test_util.run_gpu_only\n def testMultiDeviceResources(self):\n with ops.device('/device:CPU:0'):\n c1 = resource_variable_ops.ResourceVariable(2.0)\n c2 = resource_variable_ops.ResourceVariable(7.0)\n with ops.device('/device:GPU:0'):\n g1 = resource_variable_ops.ResourceVariable(3.0)\n g2 = resource_variable_ops.ResourceVariable(5.0)\n\n @function.defun\n def func(resource1, resource2):\n with ops.device('/device:CPU:0'):\n result1 = resource1 * g2\n with ops.device('/device:GPU:0'):\n result2 = resource2 * c2\n return result1, result2\n\n r1, r2 = func(c1, g1)\n self.assertEqual(r1.numpy(), 10.0)\n self.assertRegexpMatches(r1.backing_device, 'CPU')\n self.assertEqual(r2.numpy(), 21.0)\n self.assertRegexpMatches(r2.backing_device, 'GPU')\n\n # Call with flipped inputs. 
Check that we look at resource's\n # device and reinstantiates the function when inputs' devices change.\n r1, r2 = func(g1, c1)\n self.assertEqual(r1.numpy(), 15.0)\n self.assertRegexpMatches(r1.backing_device, 'CPU')\n self.assertEqual(r2.numpy(), 14.0)\n self.assertRegexpMatches(r2.backing_device, 'GPU')\n\n @test_util.run_gpu_only\n def testOutputResources(self):\n with ops.device('/device:CPU:0'):\n c1 = resource_variable_ops.ResourceVariable(2.0)\n with ops.device('/device:GPU:0'):\n g1 = resource_variable_ops.ResourceVariable(3.0)\n\n @function.defun\n def func(resource1, resource2):\n with ops.device('/device:CPU:0'):\n result1 = resource1 * 5\n with ops.device('/device:GPU:0'):\n result2 = resource2 * 7\n return result1, resource1.handle, result2, resource2.handle\n\n r1, res1, r2, res2 = func(c1, g1)\n self.assertEqual(r1.numpy(), 10.0)\n self.assertRegexpMatches(r1.backing_device, 'CPU')\n self.assertEqual(r2.numpy(), 21.0)\n self.assertRegexpMatches(r2.backing_device, 'GPU')\n\n def check_handle(handle, expected_value):\n self.assertRegexpMatches(handle.backing_device, 'CPU')\n tensor = gen_resource_variable_ops.read_variable_op(\n handle, dtypes.float32)\n self.assertEqual(tensor.numpy(), expected_value)\n\n # Check that handles returned from functions are on CPU and an op using\n # the resource handle is correctly placed on the device backing the\n # resource.\n check_handle(res1, 2.0)\n check_handle(res2, 3.0)\n\n # Call with flipped inputs to make sure the same the function is\n # reinstantiated and eager runtime does not mess up the device assignment\n # for ops consuming handles returned from defuns.\n r1, res1, r2, res2 = func(g1, c1)\n self.assertEqual(r1.numpy(), 15.0)\n self.assertRegexpMatches(r1.backing_device, 'CPU')\n self.assertEqual(r2.numpy(), 14.0)\n self.assertRegexpMatches(r2.backing_device, 'GPU')\n check_handle(res1, 3.0)\n check_handle(res2, 2.0)\n\n @test_util.run_gpu_only\n def testComplexInputOutputDevicePattern(self):\n \"\"\"Tests input/output mapping logic in partitioning.\"\"\"\n with ops.device('/device:CPU:0'):\n rc0 = resource_variable_ops.ResourceVariable(2.0)\n rc1 = resource_variable_ops.ResourceVariable(3.0)\n cc0 = constant_op.constant(5.0)\n cc1 = constant_op.constant(7.0)\n with ops.device('/device:GPU:0'):\n rg0 = resource_variable_ops.ResourceVariable(11.0)\n rg1 = resource_variable_ops.ResourceVariable(13.0)\n cg0 = constant_op.constant(17.0)\n cg1 = constant_op.constant(19.0)\n\n # Make sure tensors are on expected devices.\n for tensor in [cc0, cc1]:\n self.assertRegexpMatches(tensor.backing_device, 'CPU:0')\n for tensor in [cg0, cg1]:\n self.assertRegexpMatches(tensor.backing_device, 'GPU:0')\n\n @function.defun\n def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):\n with ops.device('/device:CPU:0'):\n m1 = rc0 * cg0\n with ops.device('/device:GPU:0'):\n m2 = rg0 * cc0\n\n with ops.device('/device:CPU:0'):\n r1 = 1000.0 * m2 + rc1 * cg1\n with ops.device('/device:GPU:0'):\n r2 = 1000.0 * m1 + rg1 * cc1\n\n return r1, r2, m2, m1\n\n r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)\n self.assertRegexpMatches(m1.backing_device, 'CPU')\n self.assertRegexpMatches(r1.backing_device, 'CPU')\n self.assertRegexpMatches(m2.backing_device, 'GPU')\n self.assertRegexpMatches(r2.backing_device, 'GPU')\n self.assertEqual(m1.numpy(), 34.0)\n self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)\n self.assertEqual(m2.numpy(), 55.0)\n self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0)\n\n @test_util.run_gpu_only\n def 
testArgumentPrunning(self):\n \"\"\"Tests functions taking unnecessary arguments.\"\"\"\n with ops.device('/device:CPU:0'):\n c1 = constant_op.constant(5.0)\n c2 = constant_op.constant(7.0)\n\n with ops.device('/device:GPU:0'):\n g1 = constant_op.constant(11.0)\n g2 = constant_op.constant(13.0)\n g3 = constant_op.constant(17.0)\n\n @function.defun\n def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument\n # arguments g1 and g2 are unused and can be pruned by grappler.\n return c1 * g3 * c2\n\n result = func(g1, g2, c1, g3, c2)\n self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0)\n\n def testNestedCallWatchedVariables(self):\n\n v = variables.Variable(4.)\n\n @def_function.function\n def f():\n return v ** 2.\n\n with backprop.GradientTape() as tape:\n f()\n\n self.assertEqual((v,), tape.watched_variables())\n\n @def_function.function\n def g():\n return f()\n\n with backprop.GradientTape() as tape:\n g()\n\n self.assertEqual((v,), tape.watched_variables())\n\n # f() can rely on the variable being read during its trace. g() checks that\n # variables from a function which knows about them are recorded on the\n # tape. h() tests that functions forward knowledge of variables to callers.\n\n @def_function.function\n def h():\n return g()\n\n with backprop.GradientTape() as tape:\n h()\n\n self.assertEqual((v,), tape.watched_variables())\n\n def testStandardTrainingLoopInFunction(self):\n layer = core.Dense(2)\n dataset = (\n dataset_ops.DatasetV2.from_tensors(\n (array_ops.ones([784]), array_ops.ones([], dtypes.int32)))\n .map(lambda x, y: (x, y))\n .repeat(10)\n .batch(32))\n optimizer = adam.Adam()\n\n @def_function.function\n def train():\n for x, y in dataset:\n with backprop.GradientTape() as tape:\n out = layer(x)\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n logits=out, labels=y))\n layer_variables = layer.trainable_variables\n gradients = tape.gradient(loss, layer_variables)\n optimizer.apply_gradients(zip(gradients, layer_variables))\n\n train()\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution(\n config=config_pb2.ConfigProto(device_count={'CPU': 4}))\n test.main()\n"
] | [
[
"tensorflow.python.training.tracking.util.list_objects",
"tensorflow.python.keras.layers.normalization.BatchNormalization",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.training.tracking.data_structures._DictWrapper",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.training.tracking.data_structures._ListWrapper",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.eager.test.main",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.python.training.tracking.data_structures.NoDependency",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.training.tracking.data_structures.Mapping",
"tensorflow.python.training.tracking.data_structures.List",
"numpy.ones",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.keras.engine.training.Model"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.eager.context.enable_run_metadata",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.core.protobuf.config_pb2.GraphOptions",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.eager.function.register",
"tensorflow.python.training.training_ops.resource_apply_adam",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.init_ops.ones_initializer",
"tensorflow.python.keras.optimizer_v2.adam.Adam",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.eager.function.Function",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.framework.ops.control_dependencies",
"numpy.zeros",
"tensorflow.python.eager.function.defun_with_attributes",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.eager.function.defun",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.list_ops.tensor_list_pop_back",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.eager.context.export_run_metadata",
"tensorflow.python.framework.test_ops.device_placement_op",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.eager.context.disable_run_metadata",
"tensorflow.python.ops.resource_variable_ops.read_variable_op",
"numpy.array",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.gen_resource_variable_ops.read_variable_op",
"tensorflow.python.eager.context.function_executor_type",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.gen_random_ops.random_standard_normal",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.util.compat.as_bytes",
"numpy.ones",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
dykuang/Unsupervised-brain-leision-segmentation | [
"c83462db3cebcf8af357fc42d1a2592b67eace9b"
] | [
"sources/architecture.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 23 16:17:32 2019\n\n@author: dykua\n\narchitectures for the network\n\"\"\"\nfrom keras.layers import Input, Conv2D, Conv2DTranspose, Reshape, Lambda, MaxPooling2D, UpSampling2D, Dropout, concatenate, multiply, add, BatchNormalization, PReLU, GaussianNoise, ZeroPadding2D\nfrom keras.models import Model\nfrom Mylayers import ClusteringLayer_ver2, Feature_split\nimport tensorflow as tf\n\n\ndef encoder_block(x, dim_list):\n y = Conv2D(dim_list[0], kernel_size=3, strides=1, activation='relu', padding='same')(x)\n y = Conv2D(dim_list[0], kernel_size=3, strides=2, activation='relu')(y)\n for dim in dim_list[1:-1]:\n y = Conv2D(dim, kernel_size=3, strides=1, activation='relu', padding='same')(y)\n y = Conv2D(dim, kernel_size=3, strides=2, activation='relu')(y)\n \n y = Conv2D(dim_list[-1], kernel_size=1, strides=1, activation='relu', padding='same')(y) # emebding layer\n return y\n\ndef decoder_block(x, dim_list):\n y = Conv2DTranspose(dim_list[0], kernel_size=3, strides=2,activation='relu')(x)\n y = Conv2D(dim_list[0], kernel_size=3, strides=1, activation='relu', padding='same')(y)\n for dim in dim_list[1:-1]:\n y = Conv2DTranspose(dim, kernel_size=3, strides=2,activation='relu')(y)\n y = Conv2D(dim, kernel_size=3, strides=1, activation='relu', padding='same')(y)\n \n y = Conv2D(dim_list[-1], kernel_size=1, strides=1,activation='relu', padding='same')(y) # output layer\n return y\n\ndef create_encoder(inputs, dim_list):\n output = encoder_block(inputs, dim_list)\n return Model(inputs, output)\n\ndef create_decoder(inputs, dim_list):\n output = decoder_block(inputs, dim_list)\n return Model(inputs, output)\n\n#def make_cluster(inputs, filter_func = lambda x: 1/(1+tf.exp(-10*(x-0.5))), n_clusters, name='clustering'): \n# clusters = ClusteringLayer_ver2(n_clusters, filter_func, name)(inputs)\n# return clusters\n\ndef build_whole_model(inputs, en_dim_list, de_dim_list, n_clusters, filter_func = lambda x: 1/(1+tf.exp(-10*(x-0.5)))):\n encoder = create_encoder(inputs, en_dim_list)\n feature = encoder(inputs) # end of encoder\n \n feature_reshaped = Reshape( (feature.shape[1] * feature.shape[2], en_dim_list[-1]) )(feature) # Did not specify batch size explicitly in Reshape layers\n CLayer = ClusteringLayer_ver2(n_clusters, filter_func, name='clustering')\n x_clusters_reshaped = CLayer(feature_reshaped) \n x_clusters = Reshape((feature.shape[1], feature.shape[2], n_clusters))(x_clusters_reshaped) # end of clustering\n \n x_splited=Feature_split(en_dim_list[-1], n_clusters)([feature, x_clusters]) # feature splitted according to clusters\n \n decoder_input = Input((feature.shape[1], feature.shape[2], en_dim_list[-1]))\n decoder = create_decoder(decoder_input, de_dim_list)\n decoded = decoder(feature) # end of decoder\n \n Pred_label=[]\n for i in range(n_clusters):\n Pred_label.append(decoder(x_splited[i])) \n Squeezed = Lambda(lambda x: tf.squeeze(tf.stack(x,axis=-1), axis=-2))\n \n \n AE = Model(inputs, decoded)\n feature_map = Model(inputs, x_clusters_reshaped)\n mask_map = Model(inputs, Squeezed(Pred_label))\n whole_model = Model(inputs, [AE.output, feature_map.output, mask_map.output])\n \n return AE, feature_map, mask_map, whole_model\n \ndef unet_CL(n_clusters, filter_func = lambda x: 1/(1+tf.exp(-10*(x-0.5))), pretrained_weights = None,input_size = (256,256,1)):\n inputs = Input(input_size)\n conv1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv1 = Conv2D(32, 3, activation = 'relu', 
padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n# conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n# drop4 = Dropout(0.25)(conv4)\n# pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n#\n# conv5 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n# conv5 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n# drop5 = Dropout(0.25)(conv5)\n# \n# up6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n# merge6 = concatenate([drop4,up6], axis = 3)\n# conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n# conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv4))\n merge7 = concatenate([conv3,up7], axis = 3)\n conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8], axis = 3)\n conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n up9 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = concatenate([conv1,up9], axis = 3)\n conv9 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n# conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation = 'relu')(conv9)\n\n # segmentation branch\n feature_reshaped = Reshape( (conv4.shape[1] * conv4.shape[2], 256) )(conv4) # Did not specify batch size explicitly in Reshape layers\n CLayer = ClusteringLayer_ver2(n_clusters, filter_func, name='clustering')\n x_clusters_reshaped = CLayer(feature_reshaped) \n x_clusters = Reshape((conv4.shape[1], conv4.shape[2], n_clusters))(x_clusters_reshaped) # end of clustering\n \n x_splited=Feature_split(conv4.shape[3], n_clusters)([conv4, x_clusters]) # feature splitted according to clusters\n \n decoder_input = Input((conv4.shape[1], conv4.shape[2], conv4.shape[3]))\n decoder = Model([inputs, decoder_input], conv10)\n \n Pred_label=[]\n for i in 
range(n_clusters):\n Pred_label.append(decoder([inputs, x_splited[i]])) \n Squeezed = Lambda(lambda x: tf.squeeze(tf.stack(x,axis=-1), axis=-2))\n \n #models\n encoder = Model(inputs, conv4)\n feature = encoder(inputs)\n decoded = decoder([inputs, feature])\n AE = Model(inputs, decoded)\n feature_map = Model(inputs, x_clusters_reshaped)\n mask_map = Model(inputs, Squeezed(Pred_label))\n whole_model = Model(inputs, [AE.output, feature_map.output, mask_map.output])\n \n #model.summary()\n\n if(pretrained_weights):\n AE.load_weights(pretrained_weights[0])\n whole_model.get_layer(name='clustering').set_weights(pretrained_weights[1])\n return AE, encoder, feature_map, mask_map, whole_model \n \ndef unet_AE(input_size = (256,256,1), pretrained_weights = None):\n inputs = Input(input_size)\n conv1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n \n# conv4 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n# drop4 = Dropout(0.25)(conv4)\n# drop4 = conv4\n# pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n#\n# conv5 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n# conv5 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n## drop5 = Dropout(0.25)(conv5)\n# drop5 = conv5\n \n# up6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv4))\n# merge6 = concatenate([conv4,up6], axis = 3)\n# conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n# conv6 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv4))\n merge7 = concatenate([conv3,up7], axis = 3)\n conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8], axis = 3)\n conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n up9 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = concatenate([conv1,up9], axis = 3)\n conv9 = Conv2D(32, 3, activation = 'relu', padding = 'same', 
kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n# conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation = 'relu')(conv9)\n\n AE = Model(inputs, conv10)\n \n #model.summary()\n\n if(pretrained_weights):\n AE.load_weights(pretrained_weights[0])\n \n return AE \n\ndef build_model_2(n_clusters, num_start = 16, pretrained_weights = None,input_size = (256,256,1)):\n inputs = Input(input_size, name='input--encoder')\n conv1 = Conv2D(num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv1 = Conv2D(num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n \n conv2 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n \n conv3 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n \n\n up4 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv3))\n merge4 = concatenate([conv2,up4], axis = 3)\n conv5 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge4)\n conv5 = Conv2D(2*num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n\n up6 = Conv2D(num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv5))\n merge6 = concatenate([conv1,up6], axis = 3)\n conv7_0 = Conv2D(num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n \n encoder = Model(inputs, conv7_0) # where to put output of the encoder? 
merge6, conv7_0, conv7_1\n input_de = Input( (input_size[0], input_size[1], num_start) , name = 'input--decoder')\n\n conv7_1 = Conv2D(num_start, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_de) \n conv8 = Conv2D(3, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7_1)\n conv9 = Conv2D(input_size[2], 1, activation = 'relu')(conv8)\n \n decoder = Model(input_de, conv9)\n \n feature = encoder(inputs) \n decoded = decoder(feature)\n \n feature_reshaped = Reshape( (feature.shape[1] * feature.shape[2], num_start) )(feature) # Did not specify batch size explicitly in Reshape layers\n CLayer = ClusteringLayer_ver2(n_clusters, name='clustering') \n x_clusters_reshaped = CLayer(feature_reshaped) \n\n AE = Model(inputs, decoded)\n feature_map = Model(inputs, x_clusters_reshaped)\n \n return AE, feature_map\n\n\ndef build_model(input_size = (256,256,1), en_spec = [8,16,16], de_spec=[8,4], n_features = 8, n_clusters=3):\n inputs = Input(input_size, name='input--encoder')\n memo = []\n aug_input = GaussianNoise(0.05)(inputs) \n \n conv = Conv2D(en_spec[0], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(aug_input)\n conv = Conv2D(en_spec[0], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n pool = MaxPooling2D(pool_size=(2, 2))(conv)\n memo.append(conv)\n \n for num in en_spec[1:-1]:\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool)\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n pool = MaxPooling2D(pool_size=(2, 2))(conv)\n memo.append(conv)\n \n conv = Conv2D(en_spec[-1], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool)\n conv = Conv2D(en_spec[-1], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n for i, num in enumerate(en_spec[-2::-1]):\n up = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv))\n merge = concatenate([memo[-i-1],up], axis = 3)\n #merge = add([memo[-i-1],up])\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge)\n # conv = multiply([conv, memo[-i-1]])\n if i== (len(en_spec) - 2 ):\n conv = Conv2D(n_features, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n else:\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n # conv = BatchNormalization()(conv)\n# conv = PReLU(shared_axes=(1,2))(conv)\n\n encoder = Model(inputs, conv) # where to put output of the encoder? 
\n input_de = Input( (input_size[0], input_size[1], n_features) , name = 'input--decoder')\n\n conv_de = Conv2D(de_spec[0], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_de) \n if len(de_spec) > 1:\n for num in de_spec[1:]:\n conv_de = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv_de)\n \n conv_de = Conv2D(input_size[2], 1, activation = 'relu')(conv_de)\n decoder = Model(input_de, conv_de)\n feature = encoder(inputs) \n decoded = decoder(feature)\n feature_reshaped = Reshape( (feature.shape[1] * feature.shape[2], n_features) )(feature) # Did not specify batch size explicitly in Reshape layers\n CLayer = ClusteringLayer_ver2(n_clusters, name='clustering') \n x_clusters_reshaped = CLayer(feature_reshaped) \n AE = Model(inputs, decoded)\n feature_map = Model(inputs, x_clusters_reshaped)\n \n return AE, feature_map \n\n\ndef build_model_3(input_size = (256,256,1), en_spec = [8,16,16], de_spec=[8,4], n_features = 8, n_clusters=3):\n inputs = Input(input_size, name='input--encoder')\n memo = []\n aug_input = GaussianNoise(0.05)(inputs) \n \n conv = Conv2D(en_spec[0], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(aug_input)\n memo.append(conv)\n conv = Conv2D(en_spec[0], 3, strides=2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n \n for num in en_spec[1:-1]:\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n memo.append(conv)\n conv = Conv2D(num, 3, strides=2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n \n conv = Conv2D(en_spec[-1], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n for i, num in enumerate(en_spec[-2::-1]):\n up = Conv2DTranspose(num, 3, strides=2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n# if up.shape[1] == memo[-i-1].shape[1]: # shape is (?,?,?,int) for up?\n merge = concatenate([memo[-i-1],up], axis = 3)\n\n #merge = add([memo[-i-1],up])\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge)\n # conv = multiply([conv, memo[-i-1]])\n if i== (len(en_spec) - 2 ):\n conv = Conv2D(n_features, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n else:\n conv = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv)\n \n # conv = BatchNormalization()(conv)\n# conv = PReLU(shared_axes=(1,2))(conv)\n\n encoder = Model(inputs, conv) # where to put output of the encoder? 
\n input_de = Input( (input_size[0], input_size[1], n_features) , name = 'input--decoder')\n\n conv_de = Conv2D(de_spec[0], 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_de) \n# if len(de_spec) > 1:\n# for num in de_spec[1:]:\n# conv_de = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv_de)\n \n for num in de_spec:\n conv_de_1 = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv_de) \n conv_de_1 = Conv2D(num, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv_de_1) \n conv_de = add([conv_de, conv_de_1])\n \n conv_de = Conv2D(input_size[2], 1, activation = 'relu')(conv_de)\n decoder = Model(input_de, conv_de)\n \n feature = encoder(inputs) \n\n decoded = decoder(feature)\n feature_reshaped = Reshape( (encoder.output_shape[1] * encoder.output_shape[2], n_features) )(feature) # Did not specify batch size explicitly in Reshape layers\n CLayer = ClusteringLayer_ver2(n_clusters, name='clustering') \n x_clusters_reshaped = CLayer(feature_reshaped) \n AE = Model(inputs, decoded)\n feature_map = Model(inputs, x_clusters_reshaped)\n \n return AE, feature_map \n"
] | [
[
"tensorflow.stack",
"tensorflow.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
abfariah/flatland-project | [
"eae13d1d37820514db05d30fd668ed4f4413c33f"
] | [
"example_submission/policy.py"
] | [
"import copy\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PolicyNetwork(nn.Module):\n\n def __init__(self, state_size, action_size, hidsize1=128, hidsize2=128):\n super(PolicyNetwork, self).__init__()\n\n self.fc1 = nn.Linear(state_size, hidsize1)\n self.fc2 = nn.Linear(hidsize1, hidsize2)\n self.fc3 = nn.Linear(hidsize2, action_size)\n\n def forward(self, x):\n\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x\n\n\nclass NeuroevoPolicy:\n \"\"\"A static policy network to be optimized by evolution\"\"\"\n\n def __init__(self, state_size, action_size):\n\n self.state_size = state_size\n self.action_size = action_size\n self.hidsize = 32\n self.device = torch.device(\"cpu\")\n self.model = PolicyNetwork(state_size, action_size,\n hidsize1=self.hidsize, hidsize2=self.hidsize).to(self.device)\n self.model = self.model.to(self.device).double()\n\n def act(self, state):\n state = torch.from_numpy(state).double().unsqueeze(0).to(self.device)\n self.model.eval()\n with torch.no_grad():\n action_values = self.model(state)\n return np.argmax(action_values.cpu().data.numpy())\n\n def set_params(self, params):\n if np.isnan(params).any():\n raise\n a = torch.tensor(params, device=self.device)\n torch.nn.utils.vector_to_parameters(a, self.model.parameters())\n self.model = self.model.to(self.device).double()\n\n def get_params(self):\n with torch.no_grad():\n params = self.model.parameters()\n vec = torch.nn.utils.parameters_to_vector(params)\n return vec.cpu().double().numpy()\n\n def save(self, filename):\n torch.save(self.model.state_dict(), filename)\n\n def load(self, filename):\n if os.path.exists(filename):\n self.model.load_state_dict(torch.load(filename))\n self.model = self.model.to(self.device).double()\n\n def test(self):\n self.act(np.array([[0] * self.state_size]))\n"
] | [
[
"torch.load",
"numpy.isnan",
"torch.nn.utils.parameters_to_vector",
"torch.from_numpy",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.device",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Magician6174/manipulation | [
"5e386e3a5bb2414693a72f7a4a606a7fd4188045"
] | [
"manipulation/exercises/clutter/test_grasp_candidate.py"
] | [
"import unittest\nimport timeout_decorator\nfrom gradescope_utils.autograder_utils.decorators import weight\nimport numpy as np\nimport os\nimport open3d as o3d\n\nfrom pydrake.all import RigidTransform\n\n# Store X_lst_target as global for testing all the functions\n# yapf: disable\nX_lst_target = np.array([ # noqa\n [[-0.209311, -0.977586, +0.022690, +0.022568],\n [+0.966542, -0.210354, -0.146791, +0.023823],\n [+0.148274, -0.008794, +0.988907, +0.082323],\n [+0.000000, +0.000000, +0.000000, +1.000000]],\n [[-0.731169, +0.159814, +0.663214, -0.007744],\n [-0.580146, -0.657145, -0.481239, +0.032702],\n [+0.358919, -0.736627, +0.573199, +0.144046],\n [+0.000000, +0.000000, +0.000000, +1.000000]],\n [[-0.350573, -0.936270, +0.022282, +0.018658],\n [+0.931311, -0.351029, -0.097156, +0.034710],\n [+0.098786, -0.013308, +0.995020, +0.106885],\n [+0.000000, +0.000000, +0.000000, +1.000000]],\n [[-0.843675, +0.525630, -0.109206, -0.015267],\n [-0.468279, -0.820000, -0.329111, +0.043170],\n [-0.262540, -0.226524, +0.937955, +0.045414],\n [+0.000000, +0.000000, +0.000000, +1.000000]]])\n# yapf: enable\n\ntest_indices = [10137, 21584, 7259, 32081]\n\n\nclass TestGraspCandidate(unittest.TestCase):\n\n def __init__(self, test_name, notebook_locals):\n super().__init__(test_name)\n self.notebook_locals = notebook_locals\n\n @weight(4)\n @timeout_decorator.timeout(10.)\n def test_darboux_frame(self):\n \"\"\"Test compute_darboux_frame\"\"\"\n pcd = self.notebook_locals[\"pcd\"]\n kdtree = o3d.geometry.KDTreeFlann(pcd)\n f = self.notebook_locals[\"compute_darboux_frame\"]\n\n X_lst_eval = []\n\n np.random.seed(11)\n for i in range(4):\n index = test_indices[i]\n RT = f(index, pcd, kdtree)\n X_lst_eval.append(RT.GetAsMatrix4())\n\n X_lst_eval = np.asarray(X_lst_eval)\n\n self.assertLessEqual(np.linalg.norm(X_lst_target - X_lst_eval), 1e-4,\n \"The Darboux frame is not correct\")\n\n index = 5\n RT = f(index, pcd, kdtree)\n\n X_lst_order_eval = RT.GetAsMatrix4()\n\n # yapf: disable\n X_lst_order_target = np.array([ # noqa\n [+0.036684, -0.880547, -0.472537, +0.008844],\n [+0.937533, +0.194023, -0.288768, -0.002408],\n [+0.345957, -0.432426, +0.832659, +0.191187],\n [+0.000000, +0.000000, +0.000000, +1.000000]])\n # yapf: enable\n\n self.assertLessEqual(\n np.linalg.norm(X_lst_order_eval - X_lst_order_target), 1e-4,\n \"Did you forget to sort the eigenvalues, \"\n \"or handle improper rotations?\")\n\n @weight(4)\n @timeout_decorator.timeout(10.)\n def test_minimum_distance(self):\n \"\"\"Test find_minimum_distance\"\"\"\n pcd = self.notebook_locals[\"pcd\"]\n f = self.notebook_locals[\"find_minimum_distance\"]\n\n # The following should return nan\n for i in [0, 2]:\n dist, X_new = f(pcd, RigidTransform(X_lst_target[i]))\n self.assertTrue(\n np.isnan(dist), \"There is no value of y that results in \"\n \"no collision in the grid, but dist is not nan\")\n self.assertTrue(\n isinstance(X_new, type(None)),\n \"There is no value of y that results in \"\n \"no collision in the grid, but X_WGnew is\"\n \"not None.\")\n\n # yapf: disable\n dist_new_target = np.array([ # noqa\n 0.0035799752,\n 0.0008069168])\n\n X_new_target = np.array([ # noqa\n [[-0.73117, +0.15981, +0.66321, -0.01573],\n [-0.58015, -0.65715, -0.48124, +0.06556],\n [+0.35892, -0.73663, +0.57320, +0.18088],\n [+0.00000, +0.00000, +0.00000, +1.00000]],\n [[-0.84368, +0.52563, -0.10921, -0.03571],\n [-0.46828, -0.82000, -0.32911, +0.07506],\n [-0.26254, -0.22652, +0.93796, +0.05422],\n [+0.00000, +0.00000, +0.00000, +1.00000]]])\n # yapf: 
enable\n\n dist_new_eval = []\n X_new_eval = []\n # The following should return numbers.\n for i in [1, 3]:\n dist, X_new = f(pcd, RigidTransform(X_lst_target[i]))\n self.assertTrue(\n not np.isnan(dist),\n \"There is a valid value of y that results in \"\n \"no collision in the grid, but dist is nan\")\n self.assertTrue(\n not isinstance(X_new, type(None)),\n \"There is a valid value of y that results in no \"\n \"collision in the grid, but X_WGnew is None.\")\n dist_new_eval.append(dist)\n X_new_eval.append(X_new.GetAsMatrix4())\n\n dist_new_eval = np.array(dist_new_eval)\n X_new_eval = np.array(X_new_eval)\n\n self.assertLessEqual(np.linalg.norm(dist_new_target - dist_new_eval),\n 1e-5, \"The returned distance is not correct.\")\n self.assertLessEqual(np.linalg.norm(X_new_target - X_new_eval), 1e-4,\n \"The returned transform is not correct.\")\n\n @weight(4)\n @timeout_decorator.timeout(60.)\n def test_candidate_grasps(self):\n \"\"\"Test compute_candidate_grasps\"\"\"\n pcd = self.notebook_locals[\"pcd\"]\n f = self.notebook_locals[\"compute_candidate_grasps\"]\n\n # yapf: disable\n X_lst_target = np.array([ # noqa\n [[-0.86670, +0.49867, -0.01296, -0.04684],\n [-0.49881, -0.86662, +0.01232, +0.07370],\n [-0.00508, +0.01714, +0.99984, +0.01943],\n [+0.00000, +0.00000, +0.00000, +1.00000]],\n [[+0.52811, -0.84916, +0.00468, +0.06930],\n [+0.83829, +0.52222, +0.15671, -0.04796],\n [-0.13552, -0.07884, +0.98763, +0.10482],\n [+0.00000, +0.00000, +0.00000, +1.00000]],\n [[-0.90546, +0.38488, +0.17889, -0.03838],\n [-0.40438, -0.65434, -0.63899, +0.05335],\n [-0.12889, -0.65092, +0.74812, +0.18382],\n [+0.00000, +0.00000, +0.00000, +1.00000]]])\n # yapf: enable\n\n grasp_candidates = f(pcd, candidate_num=3, random_seed=5)\n\n self.assertTrue(\n len(grasp_candidates) == 3,\n \"Length of returned array is not correct.\")\n\n X_lst_eval = []\n for i in range(len(grasp_candidates)):\n X_lst_eval.append(grasp_candidates[i].GetAsMatrix4())\n X_lst_eval = np.array(X_lst_eval)\n\n self.assertLessEqual(np.linalg.norm(X_lst_target - X_lst_eval), 1e-4,\n \"The returned grasp candidates are not correct.\")\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SiriusKY/SceneTextDetector | [
"88bf3cf12e2ee0887f6b1795f25d4c1e5217b665"
] | [
"tools/eval/icdar_2019_ArT.py"
] | [
"import os\nfrom tqdm import tqdm\nfrom mmdet.apis import init_detector, inference_detector\nimport numpy as np\nimport torch\nimport mmcv\nimport cv2\nimport json\nimport PIL\n\n\ntestset_dir = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/test_task13'\noutput_dir = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/output/preds'\n\nmodel_name = 'solo_r50_fpn_1x_coco'\nconfig_file = 'work_dirs/'+model_name+'/'+model_name+'.py'\ncheckpoint_file = 'work_dirs/'+model_name+'/latest.pth'\nprint(f'inferencing using model: {checkpoint_file}')\nmodel = init_detector(config_file, checkpoint_file, device='cuda:0')\nscore_thr = 0.3\n\nprint('start inference')\nfor image in tqdm(os.listdir(testset_dir)):\n image_path = os.path.join(testset_dir, image)\n\n try:\n im = PIL.Image.open(image_path)\n im.close()\n except PIL.Image.DecompressionBombError:\n print(f'skip: {image_path}')\n continue\n\n image_index = image.split('.')[0].split('_')[1]\n\n result = inference_detector(model, image_path)\n torch.cuda.empty_cache()\n\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n if isinstance(segm_result, tuple):\n segm_result = segm_result[0] # ms rcnn\n else:\n bbox_result, segm_result = result, None\n bboxes = np.vstack(bbox_result)\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n preds = []\n if segm_result is not None and len(labels) > 0: # non empty\n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > score_thr)[0]\n np.random.seed(42)\n for i in inds:\n i = int(i)\n sg = segms[i]\n if isinstance(sg, torch.Tensor):\n sg = sg.detach().cpu().numpy()\n mask = sg.astype(np.uint8)\n mask *= 255\n contours, _ = cv2.findContours(\n mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n points = [[float(point[0][0]), float(point[0][1])]\n for point in contours[0]]\n if len(points) < 3:\n continue\n points.reverse() # convert to clock-wise\n confidence = bboxes[i][-1]\n preds.append({\n 'points': points,\n 'confidence': float(confidence)\n })\n\n output_file = os.path.join(output_dir, image_index+'.json')\n with open(output_file, 'w')as f:\n json.dump(preds, f)\n\nprint('collecting results')\nsubmit = dict()\nsubmit_file = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/output/submit.json'\nfor pred in tqdm(os.listdir(output_dir)):\n pred_path = os.path.join(output_dir, pred)\n image_index = pred.split('.')[0]\n with open(pred_path, 'r')as f:\n result = json.load(f)\n submit['res_'+image_index] = result\n\n# skip image\nsubmit['res_3102'] = [{\n 'points': [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],\n 'confidence':0.0\n}]\n\nwith open(submit_file, 'w')as f:\n json.dump(submit, f)\n"
] | [
[
"numpy.random.seed",
"torch.cuda.empty_cache",
"numpy.full",
"numpy.concatenate",
"numpy.where",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rishabh1694/pytorch-deepsets-contrastive | [
"94ce648692528ceaafe1310575db8a43cdb68f4f"
] | [
"src/deepsets/experiments.py"
] | [
"import numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom tensorboardX import SummaryWriter\r\nfrom torch import optim\r\nfrom torch.autograd import Variable\r\nfrom tqdm import tqdm\r\n\r\nfrom .datasets import MNISTSummation, MNIST_TRANSFORM\r\nfrom .networks import InvariantModel, SmallMNISTCNNPhi, SmallRho\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom IPython import embed\r\n\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.manifold import TSNE\r\n\r\n\r\n\r\n\r\nclass SumOfDigits(object):\r\n def __init__(self, lr=1e-3, wd=5e-3, batch_size=32, temp=0.5, k=10, length=10, dataset_len=1000):\r\n self.lr = lr\r\n self.wd = wd\r\n self.batch_size = batch_size\r\n self.temp = temp\r\n self.k = k\r\n self.c = 10\r\n self.length = length\r\n self.dataset_len = dataset_len\r\n # self.train_db = MNISTSummation(min_len=2, max_len=10, dataset_len=100000, train=True, transform=MNIST_TRANSFORM)\r\n # self.test_db = MNISTSummation(min_len=5, max_len=50, dataset_len=100000, train=False, transform=MNIST_TRANSFORM)\r\n\r\n self.train_db = MNISTSummation(min_len=self.length, max_len=self.length, dataset_len=self.dataset_len, train=True, transform=MNIST_TRANSFORM)\r\n self.train_loader = DataLoader(self.train_db, batch_size=self.batch_size, shuffle=True, num_workers=16, pin_memory=True,\r\n drop_last=True)\r\n self.memory_db = MNISTSummation(min_len=self.length, max_len=self.length, dataset_len=self.dataset_len, train=True, transform=MNIST_TRANSFORM)\r\n self.memory_data_loader = DataLoader(self.memory_db, batch_size=self.batch_size, shuffle=False, num_workers=16, pin_memory=True,\r\n drop_last=True)\r\n self.test_db = MNISTSummation(min_len=self.length, max_len=self.length, dataset_len=self.dataset_len, train=False, transform=MNIST_TRANSFORM)\r\n self.test_data_loader = DataLoader(self.test_db, batch_size=self.batch_size, shuffle=False, num_workers=16, pin_memory=True)\r\n\r\n self.the_phi = SmallMNISTCNNPhi()\r\n # self.the_rho = SmallRho(input_size=10, output_size=1)\r\n self.the_rho = SmallRho(input_size=10, output_size=10)\r\n\r\n self.model = InvariantModel(phi=self.the_phi, rho=self.the_rho, length=self.length)\r\n if torch.cuda.is_available():\r\n self.model.cuda()\r\n\r\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.wd)\r\n\r\n self.summary_writer = SummaryWriter(\r\n log_dir='/home/souri/temp/deepsets/exp-lr_%1.5f-wd_%1.5f/' % (self.lr, self.wd))\r\n\r\n def train_1_epoch(self, epoch_num: int = 0):\r\n self.model.train()\r\n total_loss, total_num, train_bar = 0.0, 0, tqdm(self.train_loader)\r\n # for i in tqdm(range(len(self.train_db))):\r\n for x1, x2, target in train_bar:\r\n if torch.cuda.is_available():\r\n x1, x2, target = x1.cuda(), x2.cuda(), target.cuda()\r\n\r\n x1, x2, target = Variable(x1), Variable(x2), Variable(target)\r\n\r\n self.optimizer.zero_grad()\r\n feat1, out1 = self.model.forward(x1)\r\n feat2, out2 = self.model.forward(x2)\r\n\r\n # [2*B, D]\r\n out = torch.cat([out1, out2], dim=0)\r\n # [2*B, 2*B]\r\n sim_matrix = torch.exp(torch.mm(out, out.t().contiguous()) / self.temp)\r\n mask = (torch.ones_like(sim_matrix) - torch.eye(2 * self.batch_size, device=sim_matrix.device)).bool()\r\n # [2*B, 2*B-1]\r\n sim_matrix = sim_matrix.masked_select(mask).view(2 * self.batch_size, -1)\r\n\r\n # # the_loss = F.mse_loss(pred, target)\r\n # # print(pred, target)\r\n # the_loss = F.cross_entropy(pred, target)\r\n\r\n # compute loss\r\n pos_sim = torch.exp(torch.sum(out1 * out2, dim=-1) / 
self.temp)\r\n # [2*B]\r\n pos_sim = torch.cat([pos_sim, pos_sim], dim=0)\r\n loss = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()\r\n\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n total_num += self.batch_size\r\n total_loss += loss.item() * self.batch_size\r\n\r\n self.summary_writer.add_scalar('train_loss', total_loss / total_num, epoch_num)\r\n\r\n def evaluate(self):\r\n self.model.eval()\r\n total_top1, total_top5, total_num, feature_bank, targets = 0.0, 0.0, 0, [], []\r\n with torch.no_grad():\r\n # generate feature bank\r\n for data, _, target in tqdm(self.memory_data_loader, desc='Feature extracting'):\r\n feature, out = self.model(data)\r\n feature_bank.append(feature)\r\n targets.append(target)\r\n\r\n # [D, N]\r\n feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()\r\n # [N]\r\n # feature_labels = torch.tensor(targets, device=feature_bank.device)\r\n feature_labels = torch.cat(targets, dim=0).t().contiguous().flatten()\r\n # loop test data to predict the label by weighted knn search\r\n test_bar = tqdm(self.test_data_loader)\r\n for data, _, target in test_bar:\r\n feature, out = self.model(data)\r\n\r\n total_num += data.size(0)\r\n # compute cos similarity between each feature vector and feature bank ---> [B, N]\r\n # print(feature.shape, feature_bank.shape)\r\n sim_matrix = torch.mm(feature, feature_bank)\r\n # [B, K]\r\n sim_weight, sim_indices = sim_matrix.topk(k=self.k, dim=-1)\r\n # [B, K]\r\n sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)\r\n sim_weight = (sim_weight / self.temp).exp()\r\n\r\n # counts for each class\r\n one_hot_label = torch.zeros(data.size(0) * self.k, self.c, device=sim_labels.device)\r\n # [B*K, C]\r\n one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)\r\n # weighted score ---> [B, C]\r\n pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, self.c) * sim_weight.unsqueeze(dim=-1), dim=1)\r\n\r\n pred_labels = pred_scores.argsort(dim=-1, descending=True)\r\n total_top1 += torch.sum((pred_labels[:, :1] == target).any(dim=-1).float()).item()\r\n total_top5 += torch.sum((pred_labels[:, :5] == target).any(dim=-1).float()).item()\r\n test_bar.set_description('Acc@1:{:.2f}% Acc@5:{:.2f}%'\r\n .format(total_top1 / total_num * 100, total_top5 / total_num * 100))\r\n\r\n print(total_top1 / total_num * 100, total_top5 / total_num * 100)\r\n self.tSNE_vis(feature_labels,feature_bank,save_tag=\"tsne\",save_figure=True)\r\n\r\n def tSNE_vis(self,\r\n targets,\r\n features,\r\n save_tag=\"\",\r\n save_figure=False,\r\n feats_in_plot=50,\r\n ):\r\n \"\"\"Plots the feature quality by the means of t-SNE\r\n Args:\r\n df: Dataframe\r\n features: Training instances\r\n class_labels: labels (strings)\r\n save_tag: title of plot to save\r\n Prints & Saves:\r\n t-SNE plot of 250 instances of each class\r\n \"\"\"\r\n class_colours = [\"green\", \"gray\", \"brown\", \"blue\", \"red\", \"black\", \"yellow\", \"orange\", \"pink\", \"violet\"]\r\n class_instances = {}\r\n class_labels = np.arange(0,10)\r\n for i in class_labels:\r\n class_instances[i] = (targets == i).sum()\r\n\r\n tsne_m = TSNE(n_jobs=8, random_state=42)\r\n X_embedded = tsne_m.fit_transform(features.t())\r\n\r\n fig = plt.figure(figsize=(6, 6))\r\n lr = 150\r\n p = 50\r\n index = 0\r\n # PLOT\r\n for (label, colour, c_i) in zip(\r\n class_labels, class_colours, class_instances\r\n ):\r\n # indexes = self.random_indexes(\r\n # index, index + class_instances[label], feats_in_plot\r\n # )\r\n 
idx = (targets == label).nonzero().flatten()\r\n indexes = np.random.choice(idx, size=feats_in_plot, replace=False)\r\n plt.scatter(X_embedded[indexes, 0], X_embedded[indexes, 1], c=colour)\r\n index += class_instances[label]\r\n\r\n fig.legend(\r\n bbox_to_anchor=(0.075, 0.061),\r\n loc=\"lower left\",\r\n ncol=1,\r\n labels=class_labels,\r\n )\r\n if save_figure:\r\n plt.savefig(\r\n \"../figures/\" + save_tag + \".png\", bbox_inches=\"tight\",\r\n )\r\n\r\n def random_indexes(self, a, b, feats_in_plot):\r\n \"\"\"Support function for tSNE_vis\r\n Args:\r\n a: start index\r\n b: end index\r\n feats_in_plot: # of featuers to be plotted per class\r\n Returns:\r\n Random list of feats_in_plot indexes between a and b\r\n \"\"\"\r\n randomList = []\r\n # Set a length of the list to feats_in_plot\r\n for i in range(feats_in_plot):\r\n # any random numbers from a to b\r\n randomList.append(random.randint(a, b - 1))\r\n\r\n return randomList\r\n\r\n # self.model.eval()\r\n # totals = [0] * 51\r\n # corrects = [0] * 51\r\n\r\n # for i in tqdm(range(len(self.test_db))):\r\n # x1, x2, target = self.test_db.__getitem__(i)\r\n\r\n # item_size = x.shape[0]\r\n\r\n # if torch.cuda.is_available():\r\n # x = x.cuda()\r\n\r\n # pred = self.model.forward(Variable(x)).data\r\n\r\n # if torch.cuda.is_available():\r\n # pred = pred.cpu().numpy().flatten()\r\n\r\n # # pred = int(round(float(pred[0])))\r\n # pred = int(round(float(torch.argmax(pred))))\r\n # target = int(round(float(target.numpy()[0])))\r\n # # print(pred,target)\r\n\r\n # totals[item_size] += 1\r\n\r\n # if pred == target:\r\n # corrects[item_size] += 1\r\n\r\n # totals = np.array(totals)\r\n # corrects = np.array(corrects)\r\n # print(corrects,totals)\r\n # print(corrects / totals)\r\n"
] | [
[
"torch.mm",
"matplotlib.pyplot.scatter",
"torch.cat",
"numpy.random.choice",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.sum",
"matplotlib.pyplot.savefig",
"torch.eye",
"torch.autograd.Variable",
"sklearn.manifold.TSNE",
"torch.no_grad",
"torch.cuda.is_available",
"torch.ones_like",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
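For reference, the training loop in src/deepsets/experiments.py above computes an NT-Xent (normalized temperature-scaled cross entropy) contrastive objective from two batch views before the weighted k-NN evaluation. A minimal standalone sketch of that same computation, assuming out1 and out2 are [B, D] feature tensors; the helper name nt_xent_loss is hypothetical and not taken from the repo:

import torch

def nt_xent_loss(out1: torch.Tensor, out2: torch.Tensor, temp: float = 0.5) -> torch.Tensor:
    # Concatenate both views of the batch: [2B, D]
    b = out1.size(0)
    out = torch.cat([out1, out2], dim=0)
    # Temperature-scaled pairwise similarity matrix: [2B, 2B]
    sim = torch.exp(torch.mm(out, out.t().contiguous()) / temp)
    # Mask out self-similarities on the diagonal: [2B, 2B-1]
    mask = (torch.ones_like(sim) - torch.eye(2 * b, device=sim.device)).bool()
    sim = sim.masked_select(mask).view(2 * b, -1)
    # Positive-pair similarities, duplicated so each view counts once: [2B]
    pos = torch.exp(torch.sum(out1 * out2, dim=-1) / temp)
    pos = torch.cat([pos, pos], dim=0)
    # Mean negative log ratio of positive similarity to all non-self similarities.
    return (-torch.log(pos / sim.sum(dim=-1))).mean()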
Karapyon/apli_env | [
"c6004a43e646a85bd2ccba2249254c1bab9a7709"
] | [
"ml/tests/test_trainer.py"
] | [
"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\nimport unittest\nimport shutil\nimport numpy as np\nfrom ml.model import NumberRecognizeNN\nfrom ml.data_processor import DataProcessor\nfrom ml.trainer import Trainer\nfrom ml.resource import Resource\n\n\nclass TestTrainer(unittest.TestCase):\n TEST_DIR = \"\"\n\n @classmethod\n def setUpClass(cls):\n path = os.path.join(os.path.dirname(__file__), \"./test_trainer\")\n if not os.path.isdir(path):\n os.mkdir(path)\n cls.TEST_DIR = path\n\n @classmethod\n def tearDownClass(cls):\n if os.path.isdir(cls.TEST_DIR):\n shutil.rmtree(cls.TEST_DIR)\n\n def test_train(self):\n model = NumberRecognizeNN(Resource.INPUT_SIZE, Resource.OUTPUT_SIZE)\n r = Resource(self.TEST_DIR)\n trainer = Trainer(model, r)\n dp = DataProcessor()\n data, target = r.load_training_data()\n print(\"Test Train the model\")\n trainer.train(data, target, epoch=5)\n\n def test_baseline(self):\n from sklearn.svm import SVC\n from sklearn.metrics import accuracy_score\n r = Resource(self.TEST_DIR)\n dp = DataProcessor()\n data, target = r.load_training_data()\n dp.set_normalization_params(data)\n f_data, f_target = dp.format_x(data), dp.format_y(target)\n\n test_size = 200\n model = SVC()\n model.fit(f_data[:-test_size], f_target[:-test_size])\n\n predicted = model.predict(f_data[-test_size:])\n teacher = f_target[-test_size:]\n score = accuracy_score(teacher, predicted)\n print(\"Baseline score is {}\".format(score))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n"
] | [
[
"sklearn.svm.SVC",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
truetqy/lesion_det_dual_att | [
"5a5a77dd7f3aa195a2f5b84169822eb32c396d65",
"5a5a77dd7f3aa195a2f5b84169822eb32c396d65",
"5a5a77dd7f3aa195a2f5b84169822eb32c396d65"
] | [
"rcnn/dataset/DeepLesion.py",
"rcnn/processing/bbox_regression.py",
"rcnn/fio/rpn.py"
] | [
"\"\"\"\r\nDeepLesion database\r\n\"\"\"\r\n\r\nimport cPickle\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\n# from scipy.io import loadmat\r\nimport csv\r\nimport sys\r\n\r\nfrom ..logger import logger\r\nfrom imdb import IMDB\r\nfrom rcnn.config import config, default\r\n\r\nDEBUG = False\r\n\r\n\r\nclass DeepLesion(IMDB):\r\n def __init__(self, image_set, devkit_path):\r\n \"\"\"\r\n fill basic information to initialize imdb\r\n \"\"\"\r\n # year, image_set = image_set.split('_')\r\n super(DeepLesion, self).__init__('DeepLesion', image_set, devkit_path, devkit_path) # set self.name\r\n # self.year = year\r\n self.devkit_path = devkit_path\r\n self.data_path = os.path.join(devkit_path)\r\n\r\n self.classes = ['__background__', # always index 0\r\n 'lesion']\r\n self.num_classes = len(self.classes)\r\n self.loadinfo(os.path.join(self.devkit_path, default.groundtruth_file))\r\n self.image_set_index = self.load_image_set_index()\r\n self.num_images = len(self.image_set_index)\r\n logger.info('%s num_images %d' % (self.name, self.num_images))\r\n\r\n def loadinfo(self, path):\r\n # load annotations and meta-info from DL_info.csv\r\n info = []\r\n with open(path, 'rb') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader:\r\n filename = row[0] # replace the last _ in filename with / or \\\r\n idx = filename.rindex('_')\r\n row[0] = filename[:idx] + os.sep + filename[idx+1:]\r\n info.append(row)\r\n info = info[1:]\r\n\r\n # the information not used in this project are commented\r\n self.filenames = np.array([row[0] for row in info])\r\n # self.patient_idx = np.array([int(row[1]) for row in info])\r\n # self.study_idx = np.array([int(row[2]) for row in info])\r\n # self.series_idx = np.array([int(row[3]) for row in info])\r\n self.slice_idx = np.array([int(row[4]) for row in info])\r\n # self.d_coordinate = np.array([[float(x) for x in row[5].split(',')] for row in info])\r\n # self.d_coordinate -= 1\r\n self.boxes = np.array([[float(x) for x in row[6].split(',')] for row in info])\r\n self.boxes -= 1 # coordinates in info file start from 1\r\n # self.diameter = np.array([[float(x) for x in row[7].split(',')] for row in info])\r\n # self.norm_location = np.array([[float(x) for x in row[8].split(',')] for row in info])\r\n # self.type = np.array([int(row[9]) for row in info])\r\n self.noisy = np.array([int(row[10]) > 0 for row in info])\r\n # self.slice_range = np.array([[int(x) for x in row[11].split(',')] for row in info])\r\n self.spacing3D = np.array([[float(x) for x in row[12].split(',')] for row in info])\r\n self.spacing = self.spacing3D[:, 0]\r\n self.slice_intv = self.spacing3D[:, 2] # slice intervals\r\n # self.image_size = np.array([[int(x) for x in row[13].split(',')] for row in info])\r\n # self.DICOM_window = np.array([[float(x) for x in row[14].split(',')] for row in info])\r\n # self.gender = np.array([row[15] for row in info])\r\n # self.age = np.array([float(row[16]) for row in info]) # may be NaN\r\n self.train_val_test = np.array([int(row[17]) for row in info])\r\n\r\n\r\n def load_image_set_index(self):\r\n \"\"\"\r\n find out which indexes correspond to given image set (train or val)\r\n :return:\r\n \"\"\"\r\n # image_set_index_file = os.path.join(self.data_path, 'ImageSets', self.image_set + '.txt')\r\n # assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)\r\n # with open(image_set_index_file) as f:\r\n # image_set_index = [x.strip() for x in f.readlines()]\r\n\r\n set_list = ['train', 'val', 'test']\r\n 
index = set_list.index(self.image_set)\r\n image_set_index = self.filenames[self.train_val_test == index + 1]\r\n image_set_index = np.unique(image_set_index)\r\n return image_set_index\r\n\r\n def image_path_from_index(self, index):\r\n \"\"\"\r\n given image index, find out full path\r\n :param index: index of a specific image\r\n :return: full path of this image\r\n \"\"\"\r\n image_file = os.path.join(self.data_path, 'Images_16bit', index)\r\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file\r\n\r\n def gt_roidb(self):\r\n \"\"\"\r\n return ground truth image regions database\r\n :return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']\r\n \"\"\"\r\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\r\n use_cache = default.use_roidb_cache\r\n if use_cache and os.path.exists(cache_file):\r\n with open(cache_file, 'rb') as fid:\r\n roidb = cPickle.load(fid)\r\n logger.info('%s gt roidb loaded from %s' % (self.name, cache_file))\r\n else:\r\n logger.info('loading gt roidb from %s ...', os.path.join(self.devkit_path, default.groundtruth_file))\r\n roidb = [self._load_annotation(filename) for filename in self.image_set_index]\r\n with open(cache_file, 'wb') as fid:\r\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\r\n logger.info('%s wrote gt roidb to %s' % (self.name, cache_file))\r\n\r\n return roidb\r\n\r\n def _load_annotation(self, filename):\r\n \"\"\"\r\n Load annotations from .mat file.\r\n \"\"\"\r\n idx = np.where(self.filenames == filename)[0] # there may be multiple boxes (lesions) in a image\r\n assert idx.shape[0] >= 1, \"The groundtruth file contains no entry of %s!\" % (filename)\r\n boxes = self.boxes[idx, :]\r\n i = idx[0]\r\n slice_no = self.slice_idx[i]\r\n slice_intv = self.slice_intv[i]\r\n spacing = self.spacing[i]\r\n noisy = self.noisy[idx]\r\n\r\n num_boxes = boxes.shape[0]\r\n gt_classes = np.ones((num_boxes,), dtype=np.int32) # we only have one class: lesion\r\n\r\n return {'boxes': boxes,\r\n 'gt_classes': gt_classes,\r\n 'image': filename,\r\n 'slice_no': slice_no,\r\n 'spacing': spacing,\r\n 'slice_intv': slice_intv,\r\n 'noisy': noisy,\r\n 'flipped': False}\r\n",
"\"\"\"\r\nThis file has functions about generating bounding box regression targets\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom ..logger import logger\r\nfrom bbox_transform import bbox_overlaps, bbox_transform\r\nfrom rcnn.config import config\r\n\r\n\r\ndef compute_bbox_regression_targets(rois, overlaps, labels):\r\n \"\"\"\r\n given rois, overlaps, gt labels, compute bounding box regression targets\r\n :param rois: roidb[i]['boxes'] k * 4\r\n :param overlaps: roidb[i]['max_overlaps'] k * 1\r\n :param labels: roidb[i]['max_classes'] k * 1\r\n :return: targets[i][class, dx, dy, dw, dh] k * 5\r\n \"\"\"\r\n # Ensure ROIs are floats\r\n rois = rois.astype(np.float, copy=False)\r\n\r\n # Sanity check\r\n if len(rois) != len(overlaps):\r\n logger.warning('bbox regression: len(rois) != len(overlaps)')\r\n\r\n # Indices of ground-truth ROIs\r\n gt_inds = np.where(overlaps == 1)[0]\r\n if len(gt_inds) == 0:\r\n logger.warning('bbox regression: len(gt_inds) == 0')\r\n\r\n # Indices of examples for which we try to make predictions\r\n ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]\r\n\r\n # Get IoU overlap between each ex ROI and gt ROI\r\n ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])\r\n\r\n # Find which gt ROI each ex ROI has max overlap with:\r\n # this will be the ex ROI's gt target\r\n gt_assignment = ex_gt_overlaps.argmax(axis=1)\r\n gt_rois = rois[gt_inds[gt_assignment], :]\r\n ex_rois = rois[ex_inds, :]\r\n\r\n targets = np.zeros((rois.shape[0], 5), dtype=np.float32)\r\n targets[ex_inds, 0] = labels[ex_inds]\r\n targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)\r\n return targets\r\n\r\n\r\ndef add_bbox_regression_targets(roidb):\r\n \"\"\"\r\n given roidb, add ['bbox_targets'] and normalize bounding box regression targets\r\n :param roidb: roidb to be processed. 
must have gone through imdb.prepare_roidb\r\n :return: means, std variances of targets\r\n \"\"\"\r\n logger.info('bbox regression: add bounding box regression targets')\r\n assert len(roidb) > 0\r\n assert 'max_classes' in roidb[0]\r\n\r\n num_images = len(roidb)\r\n num_classes = roidb[0]['gt_overlaps'].shape[1]\r\n for im_i in range(num_images):\r\n rois = roidb[im_i]['boxes']\r\n max_overlaps = roidb[im_i]['max_overlaps']\r\n max_classes = roidb[im_i]['max_classes']\r\n roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)\r\n\r\n if config.TRAIN.BBOX_NORMALIZE_TARGETS:\r\n assert config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED, \"not sure if no precomuted is correct\"\r\n if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:\r\n # use fixed / precomputed means and stds instead of empirical values\r\n means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))\r\n stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))\r\n else:\r\n # compute mean, std values\r\n class_counts = np.zeros((num_classes, 1)) + 1e-14\r\n sums = np.zeros((num_classes, 4))\r\n squared_sums = np.zeros((num_classes, 4))\r\n for im_i in range(num_images):\r\n targets = roidb[im_i]['bbox_targets']\r\n for cls in range(1, num_classes):\r\n cls_indexes = np.where(targets[:, 0] == cls)[0]\r\n if cls_indexes.size > 0:\r\n class_counts[cls] += cls_indexes.size\r\n sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)\r\n squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)\r\n\r\n means = sums / class_counts\r\n # var(x) = E(x^2) - E(x)^2\r\n stds = np.sqrt(squared_sums / class_counts - means ** 2)\r\n\r\n # normalized targets\r\n for im_i in range(num_images):\r\n targets = roidb[im_i]['bbox_targets']\r\n for cls in range(1, num_classes):\r\n cls_indexes = np.where(targets[:, 0] == cls)[0]\r\n roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]\r\n roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]\r\n\r\n return means.ravel(), stds.ravel()\r\n\r\n\r\ndef expand_bbox_regression_targets(bbox_targets_data, num_classes):\r\n \"\"\"\r\n expand from 5 to 4 * num_classes; only the right class has non-zero bbox regression targets\r\n :param bbox_targets_data: [k * 5]\r\n :param num_classes: number of classes\r\n :return: bbox target processed [k * 4 num_classes]\r\n bbox_weights ! only foreground boxes have bbox regression computation!\r\n \"\"\"\r\n classes = bbox_targets_data[:, 0]\r\n bbox_targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)\r\n bbox_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\r\n indexes = np.where(classes > 0)[0]\r\n for index in indexes:\r\n cls = classes[index]\r\n start = int(4 * cls)\r\n end = start + 4\r\n bbox_targets[index, start:end] = bbox_targets_data[index, 1:]\r\n bbox_weights[index, start:end] = config.TRAIN.BBOX_WEIGHTS\r\n return bbox_targets, bbox_weights\r\n\r\n",
"\"\"\"\r\nRPN:\r\ndata =\r\n {'data': [num_images, c, h, w],\r\n 'im_info': [num_images, 4] (optional)}\r\nlabel =\r\n {'gt_boxes': [num_boxes, 5] (optional),\r\n 'label': [batch_size, 1] <- [batch_size, num_anchors, feat_height, feat_width],\r\n 'bbox_target': [batch_size, num_anchors, feat_height, feat_width],\r\n 'bbox_weight': [batch_size, num_anchors, feat_height, feat_width]}\r\n\"\"\"\r\n\r\nimport logging\r\nimport numpy as np\r\nimport numpy.random as npr\r\n# from rcnn.utils.timer import Timer\r\n\r\nfrom ..logger import logger\r\nfrom ..config import config\r\nfrom .image import get_image, tensor_vstack\r\nfrom ..processing.generate_anchor import generate_anchors\r\nfrom ..processing.bbox_transform import bbox_overlaps, bbox_transform\r\n\r\n\r\ndef get_rpn_testbatch(roidb):\r\n \"\"\"\r\n return a dict of testbatch\r\n :param roidb: ['image', 'flipped']\r\n :return: data, label, im_info\r\n \"\"\"\r\n assert len(roidb) == 1, 'Single batch only'\r\n imgs, roidb = get_image(roidb)\r\n im_array = imgs[0]\r\n im_info = np.array([roidb[0]['im_info']], dtype=np.float32)\r\n\r\n data = {'data': im_array,\r\n 'im_info': im_info}\r\n label = {'gt_boxes': roidb[0]['boxes']}\r\n\r\n return data, label, im_info, roidb[0]['image'], roidb[0]['crop']\r\n\r\n\r\ndef get_rpn_batch(roidbs):\r\n \"\"\"\r\n prototype for rpn batch: data, im_info, gt_boxes\r\n :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']\r\n :return: data, label\r\n \"\"\"\r\n # assert len(roidb) == 1, 'Single batch only'\r\n imgs, roidbs = get_image(roidbs)\r\n im_info = np.vstack([r['im_info'] for r in roidbs])\r\n\r\n # gt boxes: (x1, y1, x2, y2, cls)\r\n gt_boxes_all = []\r\n for r in roidbs:\r\n if r['gt_classes'].size > 0:\r\n gt_inds = np.where(r['gt_classes'] != 0)[0]\r\n gt_boxes = np.empty((r['boxes'].shape[0], 5), dtype=np.float32)\r\n gt_boxes[:, 0:4] = r['boxes'][gt_inds, :]\r\n gt_boxes[:, 4] = r['gt_classes'][gt_inds]\r\n else:\r\n gt_boxes = np.empty((0, 5), dtype=np.float32)\r\n gt_boxes_all.append(gt_boxes[np.newaxis, :, :])\r\n\r\n data = {'data': tensor_vstack(imgs),\r\n 'im_info': im_info}\r\n label = {'gt_boxes': tensor_vstack(gt_boxes_all)}\r\n # print data['data'].shape\r\n\r\n return data, label\r\n\r\n\r\ndef assign_anchor(feat_shape, gt_boxes, im_info, feat_stride=16,\r\n scales=(8, 16, 32), ratios=(0.5, 1, 2), allowed_border=0):\r\n \"\"\"\r\n assign ground truth boxes to anchor positions\r\n :param feat_shape: infer output shape\r\n :param gt_boxes: assign ground truth\r\n :param im_info: filter out anchors overlapped with edges\r\n :param feat_stride: anchor position step\r\n :param scales: used to generate anchors, affects num_anchors (per location)\r\n :param ratios: aspect ratios of generated anchors\r\n :param allowed_border: filter out anchors with edge overlap > allowed_border\r\n :return: dict of label\r\n 'label': of shape (batch_size, 1) <- (batch_size, num_anchors, feat_height, feat_width)\r\n 'bbox_target': of shape (batch_size, num_anchors * 4, feat_height, feat_width)\r\n 'bbox_inside_weight': *todo* mark the assigned anchors\r\n 'bbox_outside_weight': used to normalize the bbox_loss, all weights sums to RPN_POSITIVE_WEIGHT\r\n \"\"\"\r\n def _unmap(data, count, inds, fill=0):\r\n \"\"\"\" unmap a subset inds of data into original data of size count \"\"\"\r\n if len(data.shape) == 1:\r\n ret = np.empty((count,), dtype=np.float32)\r\n ret.fill(fill)\r\n ret[inds] = data\r\n else:\r\n ret = np.empty((count,) + data.shape[1:], dtype=np.float32)\r\n 
ret.fill(fill)\r\n ret[inds, :] = data\r\n return ret\r\n\r\n im_info = im_info[0]\r\n scales = np.array(scales, dtype=np.float32)\r\n base_anchors = generate_anchors(base_size=feat_stride, ratios=list(ratios), scales=scales)\r\n num_anchors = base_anchors.shape[0]\r\n feat_height, feat_width = feat_shape[-2:]\r\n\r\n logger.debug('anchors: %s' % base_anchors)\r\n logger.debug('anchor shapes: %s' % np.hstack((base_anchors[:, 2::4] - base_anchors[:, 0::4],\r\n base_anchors[:, 3::4] - base_anchors[:, 1::4])))\r\n logger.debug('im_info %s' % im_info)\r\n logger.debug('height %d width %d' % (feat_height, feat_width))\r\n logger.debug('gt_boxes shape %s' % np.array(gt_boxes.shape))\r\n logger.debug('gt_boxes %s' % gt_boxes)\r\n\r\n # 1. generate proposals from bbox deltas and shifted anchors\r\n shift_x = np.arange(0, feat_width) * feat_stride\r\n shift_y = np.arange(0, feat_height) * feat_stride\r\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\r\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()\r\n # add A anchors (1, A, 4) to\r\n # cell K shifts (K, 1, 4) to get\r\n # shift anchors (K, A, 4)\r\n # reshape to (K*A, 4) shifted anchors\r\n A = num_anchors\r\n K = shifts.shape[0]\r\n all_anchors = base_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))\r\n all_anchors = all_anchors.reshape((K * A, 4))\r\n total_anchors = int(K * A)\r\n\r\n # only keep anchors inside the image\r\n inds_inside = np.where((all_anchors[:, 0] >= -allowed_border) &\r\n (all_anchors[:, 1] >= -allowed_border) &\r\n (all_anchors[:, 2] < im_info[1] + allowed_border) &\r\n (all_anchors[:, 3] < im_info[0] + allowed_border))[0]\r\n logger.debug('total_anchors %d' % total_anchors)\r\n logger.debug('inds_inside %d' % len(inds_inside))\r\n\r\n # keep only inside anchors\r\n anchors = all_anchors[inds_inside, :]\r\n logger.debug('anchors shape %s' % np.array(anchors.shape))\r\n\r\n # label: 1 is positive, 0 is negative, -1 is dont care\r\n labels = np.empty((len(inds_inside),), dtype=np.float32)\r\n labels.fill(-1)\r\n\r\n if gt_boxes.size > 0:\r\n # overlap between the anchors and the gt boxes\r\n # overlaps (ex, gt)\r\n overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))\r\n argmax_overlaps = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\r\n gt_argmax_overlaps = overlaps.argmax(axis=0)\r\n gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]\r\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\r\n\r\n if not config.TRAIN.RPN_CLOBBER_POSITIVES:\r\n # assign bg labels first so that positive labels can clobber them\r\n labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\r\n\r\n # fg label: for each gt, anchor with highest overlap\r\n labels[gt_argmax_overlaps] = 1\r\n\r\n # fg label: above threshold IoU\r\n labels[max_overlaps >= config.TRAIN.RPN_POSITIVE_OVERLAP] = 1\r\n\r\n if config.TRAIN.RPN_CLOBBER_POSITIVES:\r\n # assign bg labels last so that negative labels can clobber positives\r\n labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\r\n else:\r\n labels[:] = 0\r\n\r\n # subsample positive labels if we have too many\r\n num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)\r\n fg_inds = np.where(labels == 1)[0]\r\n if len(fg_inds) > num_fg:\r\n disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)\r\n # if logger.level == logging.INFO:\r\n # disable_inds = 
fg_inds[:(len(fg_inds) - num_fg)]\r\n labels[disable_inds] = -1\r\n\r\n # subsample negative labels if we have too many\r\n num_bg = config.TRAIN.RPN_BATCH_SIZE - np.sum(labels == 1)\r\n bg_inds = np.where(labels == 0)[0]\r\n if len(bg_inds) > num_bg:\r\n disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)\r\n # if logger.level == logging.INFO:\r\n # disable_inds = bg_inds[:(len(bg_inds) - num_bg)]\r\n labels[disable_inds] = -1\r\n\r\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\r\n if gt_boxes.size > 0:\r\n bbox_targets[:] = bbox_transform(anchors, gt_boxes[argmax_overlaps, :4])\r\n\r\n bbox_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\r\n bbox_weights[labels == 1, :] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)\r\n\r\n if logger.level == logging.DEBUG:\r\n _sums = bbox_targets[labels == 1, :].sum(axis=0)\r\n _squared_sums = (bbox_targets[labels == 1, :] ** 2).sum(axis=0)\r\n _counts = np.sum(labels == 1)\r\n means = _sums / (_counts + 1e-14)\r\n stds = np.sqrt(_squared_sums / _counts - means ** 2)\r\n logger.debug('means %s' % means)\r\n logger.debug('stdevs %s' % stds)\r\n\r\n # map up to original set of anchors\r\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\r\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\r\n bbox_weights = _unmap(bbox_weights, total_anchors, inds_inside, fill=0)\r\n\r\n if logger.level == logging.DEBUG:\r\n if gt_boxes.size > 0:\r\n logger.debug('rpn: max max_overlaps %f' % np.max(max_overlaps))\r\n logger.debug('rpn: num_positives %f' % np.sum(labels == 1))\r\n logger.debug('rpn: num_negatives %f' % np.sum(labels == 0))\r\n _fg_sum = np.sum(labels == 1)\r\n _bg_sum = np.sum(labels == 0)\r\n _count = 1\r\n logger.debug('rpn: num_positive avg %f' % (_fg_sum / _count))\r\n logger.debug('rpn: num_negative avg %f' % (_bg_sum / _count))\r\n\r\n labels = labels.reshape((1, feat_height, feat_width, A)).transpose(0, 3, 1, 2)\r\n labels = labels.reshape((1, A * feat_height * feat_width))\r\n bbox_targets = bbox_targets.reshape((1, feat_height, feat_width, A * 4)).transpose(0, 3, 1, 2)\r\n bbox_weights = bbox_weights.reshape((1, feat_height, feat_width, A * 4)).transpose((0, 3, 1, 2))\r\n\r\n label = {'label': labels,\r\n 'bbox_target': bbox_targets,\r\n 'bbox_weight': bbox_weights}\r\n return label\r\n"
] | [
[
"numpy.ones",
"numpy.array",
"numpy.where",
"numpy.unique"
],
[
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.hstack",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.arange",
"numpy.empty",
"numpy.max",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
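The compute_bbox_regression_targets and assign_anchor code in the row above delegates the box-delta computation to an imported bbox_transform helper that is not included in this row. A minimal NumPy sketch of the usual Faster R-CNN parameterization (dx, dy, dw, dh) that such a helper typically implements, assuming boxes are [x1, y1, x2, y2] arrays; the exact convention used by the repo (including the +1.0 pixel offset) is an assumption here, and the function name is illustrative:

import numpy as np

def bbox_transform_sketch(ex_rois: np.ndarray, gt_rois: np.ndarray) -> np.ndarray:
    # Widths, heights and centers of the example (anchor/proposal) boxes.
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h
    # Widths, heights and centers of the matched ground-truth boxes.
    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h
    # Normalized center offsets and log-scale size ratios: one (dx, dy, dw, dh) row per box.
    dx = (gt_cx - ex_cx) / ex_w
    dy = (gt_cy - ex_cy) / ex_h
    dw = np.log(gt_w / ex_w)
    dh = np.log(gt_h / ex_h)
    return np.vstack((dx, dy, dw, dh)).transpose()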
tinkerNamedFerro/5head.biz | [
"413da41f03e3b3f0ea0d86c0b56a8203867c7c3b"
] | [
"app/dash/crossfilter_example.py"
] | [
"from dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport plotly.graph_objs as go\nimport pandas as pd\n\nfrom .dash import Dash\n\ndf = pd.read_csv(\"app/data/indicators.csv\")\navailable_indicators = df[\"Indicator Name\"].unique()\n\napp_layout = dbc.Container(\n [\n html.H1(\"Crossfilter example\"),\n html.Hr(),\n dbc.Row(\n [\n dbc.Col(\n [\n dbc.Card(\n [\n dcc.Dropdown(\n id=\"crossfilter-xaxis-column\",\n options=[\n {\"label\": i, \"value\": i} for i in available_indicators\n ],\n value=\"Fertility rate, total (births per woman)\",\n ),\n # dbc.FormGroup(\n # [\n # dbc.RadioItems(\n # id=\"crossfilter-xaxis-type\",\n # inline=True,\n # options=[\n # {\"label\": i, \"value\": i} for i in [\"Linear\", \"Log\"]\n # ],\n # value=\"Linear\",\n # labelStyle={\"display\": \"inline-block\"},\n # ),\n\n # ],\n # className=\"p-2\"\n # ),\n ]\n ),\n dbc.Card(\n [\n dcc.Graph(\n id=\"crossfilter-indicator-scatter\",\n hoverData={\"points\": [{\"customdata\": \"Japan\"}]},\n )\n ],\n # style={\"width\": \"49%\", \"display\": \"inline-block\", \"padding\": \"0 20\"},\n ),\n dbc.Card(\n dcc.Slider(\n id=\"crossfilter-year--slider\",\n min=df[\"Year\"].min(),\n max=df[\"Year\"].max(),\n value=df[\"Year\"].max(),\n step=None,\n marks={str(year): str(year) for year in df[\"Year\"].unique()},\n ),\n className=\"pt-2\"\n # style={\"width\": \"49%\", \"padding\": \"0px 20px 20px 20px\"},\n ),\n ],\n md=6,\n ),\n dbc.Col(\n [\n dbc.Card(\n [\n dcc.Dropdown(\n id=\"crossfilter-yaxis-column\",\n options=[\n {\"label\": i, \"value\": i} for i in available_indicators\n ],\n value=\"Life expectancy at birth, total (years)\",\n ),\n # dbc.FormGroup(\n # [\n # dbc.RadioItems(\n # id=\"crossfilter-yaxis-type\",\n # inline=True,\n # options=[\n # {\"label\": i, \"value\": i} for i in [\"Linear\", \"Log\"]\n # ],\n # value=\"Linear\",\n # labelStyle={\"display\": \"inline-block\"},\n # ),\n # ],\n # className=\"p-2\"\n # ),\n ]\n ),\n dbc.Card(\n [dcc.Graph(id=\"x-time-series\"), dcc.Graph(id=\"y-time-series\"), ],\n # style={\"display\": \"inline-block\", \"width\": \"49%\"},\n ),\n ],\n md=6,\n ),\n ],\n ),\n ],\n fluid=True,\n)\n\n\ndef update_graph(\n xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type, year_value\n):\n dff = df[df[\"Year\"] == year_value]\n\n return {\n \"data\": [\n go.Scatter(\n x=dff[dff[\"Indicator Name\"] == xaxis_column_name][\"Value\"],\n y=dff[dff[\"Indicator Name\"] == yaxis_column_name][\"Value\"],\n text=dff[dff[\"Indicator Name\"] == yaxis_column_name][\"Country Name\"],\n customdata=dff[dff[\"Indicator Name\"] == yaxis_column_name][\n \"Country Name\"\n ],\n mode=\"markers\",\n marker={\n \"size\": 15,\n \"opacity\": 0.5,\n \"line\": {\"width\": 0.5, \"color\": \"white\"},\n },\n )\n ],\n \"layout\": go.Layout(\n xaxis={\n \"title\": xaxis_column_name,\n \"type\": \"linear\" if xaxis_type == \"Linear\" else \"log\",\n },\n yaxis={\n \"title\": yaxis_column_name,\n \"type\": \"linear\" if yaxis_type == \"Linear\" else \"log\",\n },\n # margin={\"l\": 40, \"b\": 30, \"t\": 10, \"r\": 0},\n margin={\"l\": 80, \"b\": 60, \"t\": 20, \"r\": 20},\n height=450,\n hovermode=\"closest\",\n ),\n }\n\n\ndef create_time_series(dff, axis_type, title):\n return {\n \"data\": [go.Scatter(x=dff[\"Year\"], y=dff[\"Value\"], mode=\"lines+markers\")],\n \"layout\": {\n \"height\": 225,\n # \"margin\": {\"l\": 20, \"b\": 30, \"r\": 10, \"t\": 10},\n \"margin\": {\"l\": 40, \"b\": 
60, \"r\": 20, \"t\": 20},\n \"annotations\": [\n {\n \"x\": 0,\n \"y\": 0.85,\n \"xanchor\": \"left\",\n \"yanchor\": \"bottom\",\n \"xref\": \"paper\",\n \"yref\": \"paper\",\n \"showarrow\": False,\n \"align\": \"left\",\n \"bgcolor\": \"rgba(255, 255, 255, 0.5)\",\n \"text\": title,\n }\n ],\n \"yaxis\": {\"type\": \"linear\" if axis_type == \"Linear\" else \"log\"},\n \"xaxis\": {\"showgrid\": False},\n },\n }\n\n\ndef update_y_timeseries(hoverData, xaxis_column_name, axis_type):\n country_name = hoverData[\"points\"][0][\"customdata\"]\n dff = df[df[\"Country Name\"] == country_name]\n dff = dff[dff[\"Indicator Name\"] == xaxis_column_name]\n title = \"<b>{}</b><br>{}\".format(country_name, xaxis_column_name)\n return create_time_series(dff, axis_type, title)\n\n\ndef update_x_timeseries(hoverData, yaxis_column_name, axis_type):\n dff = df[df[\"Country Name\"] == hoverData[\"points\"][0][\"customdata\"]]\n dff = dff[dff[\"Indicator Name\"] == yaxis_column_name]\n return create_time_series(dff, axis_type, yaxis_column_name)\n\n\ndef init_callbacks(dash_app):\n dash_app.callback(\n Output(\"crossfilter-indicator-scatter\", \"figure\"),\n [\n Input(\"crossfilter-xaxis-column\", \"value\"),\n Input(\"crossfilter-yaxis-column\", \"value\"),\n Input(\"crossfilter-xaxis-type\", \"value\"),\n Input(\"crossfilter-yaxis-type\", \"value\"),\n Input(\"crossfilter-year--slider\", \"value\"),\n ],\n )(update_graph)\n\n dash_app.callback(\n Output(\"x-time-series\", \"figure\"),\n [\n Input(\"crossfilter-indicator-scatter\", \"hoverData\"),\n Input(\"crossfilter-xaxis-column\", \"value\"),\n Input(\"crossfilter-xaxis-type\", \"value\"),\n ],\n )(update_y_timeseries)\n\n dash_app.callback(\n Output(\"y-time-series\", \"figure\"),\n [\n Input(\"crossfilter-indicator-scatter\", \"hoverData\"),\n Input(\"crossfilter-yaxis-column\", \"value\"),\n Input(\"crossfilter-yaxis-type\", \"value\"),\n ],\n )(update_x_timeseries)\n\n return dash_app\n\n\ndef init_dash(server):\n \"\"\"Create a Plotly Dash dashboard.\"\"\"\n dash_app = Dash(server=server, routes_pathname_prefix=\"/crossfilter-example/\", )\n\n # create dash layout\n dash_app.layout = app_layout\n\n # initialize callbacks\n init_callbacks(dash_app)\n\n return dash_app.server\n\n\nif __name__ == \"__main__\":\n app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n init_callbacks(app)\n app.run_server(debug=True, port=8080)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhaoguangxiang/OFA | [
"cc1719df2713f0a046f34acb0afd8782e08ea6be",
"cc1719df2713f0a046f34acb0afd8782e08ea6be",
"cc1719df2713f0a046f34acb0afd8782e08ea6be",
"cc1719df2713f0a046f34acb0afd8782e08ea6be",
"cc1719df2713f0a046f34acb0afd8782e08ea6be",
"cc1719df2713f0a046f34acb0afd8782e08ea6be"
] | [
"models/clip/clip.py",
"fairseq/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py",
"fairseq/fairseq/options.py",
"fairseq/fairseq/models/text_to_speech/tts_transformer.py",
"fairseq/fairseq/criterions/fastspeech2_loss.py",
"fairseq/fairseq/data/iterators.py"
] | [
"import hashlib\nimport os\nimport urllib\nimport warnings\nfrom typing import Any, Union, List\nfrom pkg_resources import packaging\n\nimport torch\nfrom PIL import Image\nfrom torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\nfrom tqdm import tqdm\n\nfrom .model import build_model\nfrom .simple_tokenizer import SimpleTokenizer as _Tokenizer\n\ntry:\n from torchvision.transforms import InterpolationMode\n BICUBIC = InterpolationMode.BICUBIC\nexcept ImportError:\n BICUBIC = Image.BICUBIC\n\n\nif packaging.version.parse(torch.__version__) < packaging.version.parse(\"1.7.1\"):\n warnings.warn(\"PyTorch version 1.7.1 or higher is recommended\")\n\n\n__all__ = [\"available_models\", \"load\", \"tokenize\"]\n_tokenizer = _Tokenizer()\n\n_MODELS = {\n \"RN50\": \"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt\",\n \"RN101\": \"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt\",\n \"RN50x4\": \"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt\",\n \"RN50x16\": \"https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt\",\n \"ViT-B/32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\",\n \"ViT-B/16\": \"https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt\",\n}\n\n\ndef _download(url: str, root: str):\n os.makedirs(root, exist_ok=True)\n filename = os.path.basename(url)\n\n expected_sha256 = url.split(\"/\")[-2]\n download_target = os.path.join(root, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if hashlib.sha256(open(download_target, \"rb\").read()).hexdigest() == expected_sha256:\n return download_target\n else:\n warnings.warn(f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\")\n\n with urllib.request.urlopen(url) as source, open(download_target, \"wb\") as output:\n with tqdm(total=int(source.info().get(\"Content-Length\")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:\n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if hashlib.sha256(open(download_target, \"rb\").read()).hexdigest() != expected_sha256:\n raise RuntimeError(f\"Model has been downloaded but the SHA256 checksum does not not match\")\n\n return download_target\n\n\ndef _convert_image_to_rgb(image):\n return image.convert(\"RGB\")\n\n\ndef _transform(n_px):\n return Compose([\n Resize(n_px, interpolation=BICUBIC),\n CenterCrop(n_px),\n _convert_image_to_rgb,\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])\n\n\ndef available_models() -> List[str]:\n \"\"\"Returns the names of available CLIP models\"\"\"\n return list(_MODELS.keys())\n\n\ndef load(name: str, device: Union[str, torch.device] = \"cuda\" if torch.cuda.is_available() else \"cpu\", jit: bool = False, download_root: str = None):\n \"\"\"Load a CLIP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to 
a model checkpoint containing the state_dict\n\n device : Union[str, torch.device]\n The device to put the loaded model\n\n jit : bool\n Whether to load the optimized JIT model or more hackable non-JIT model (default).\n\n download_root: str\n path to download the model files; by default, it uses \"~/.cache/clip\"\n\n Returns\n -------\n model : torch.nn.Module\n The CLIP model\n\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if name in _MODELS:\n model_path = _download(_MODELS[name], download_root or os.path.expanduser(\"~/.cache/clip\"))\n elif os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(f\"Model {name} not found; available models = {available_models()}\")\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(f\"File {model_path} is not a JIT archive. Loading as a state dict instead\")\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n model = build_model(state_dict or model.state_dict()).to(device)\n if str(device) == \"cpu\":\n model.float()\n return model, _transform(model.visual.input_resolution)\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 on CPU\n if str(device) == \"cpu\":\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n\n model.float()\n\n return model, _transform(model.input_resolution.item())\n\n\ndef tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:\n \"\"\"\n Returns the tokenized representation of given input string(s)\n\n Parameters\n ----------\n texts : Union[str, List[str]]\n An input string or a list of input strings to tokenize\n\n context_length : int\n The context length to use; all CLIP models use 77 as the context length\n\n truncate: bool\n Whether to truncate the text in case its encoding is longer than the context 
length\n\n Returns\n -------\n A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]\n \"\"\"\n if isinstance(texts, str):\n texts = [texts]\n\n sot_token = _tokenizer.encoder[\"<|startoftext|>\"]\n eot_token = _tokenizer.encoder[\"<|endoftext|>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n if truncate:\n tokens = tokens[:context_length]\n tokens[-1] = eot_token\n else:\n raise RuntimeError(f\"Input {texts[i]} is too long for context length {context_length}\")\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import checkpoint_utils\nfrom fairseq import utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqDecoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.models.fairseq_encoder import EncoderOut\nfrom fairseq.models.speech_to_text import (\n TransformerDecoder,\n S2TTransformerEncoder,\n)\nfrom fairseq.models.transformer import TransformerEncoder\nfrom fairseq.modules import (\n TransformerEncoderLayer,\n GradMultiply,\n LayerNorm,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SpeechEoSEncoder(FairseqEncoder):\n def __init__(self, encoder, eos_num, feat_dim, adapter_type=\"None\", adapter_dim=0):\n super().__init__(None)\n self.encoder = encoder\n self.eos_num = eos_num # downsampling rate for speech input feature\n self.eos_emb = (\n nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True)\n if eos_num > 0\n else None\n )\n self.adapter = self.add_adapter(adapter_type, adapter_dim)\n\n def add_adapter(self, adapter_type, adapter_dim):\n def _make_identity(linear, eps=1e-5):\n assert isinstance(linear, nn.Linear)\n linear.weight.data.mul_(eps)\n linear.weight.data.fill_diagonal_(1.0)\n if linear.bias is not None:\n linear.bias.data.mul_(eps)\n\n adapter = None\n if adapter_type == \"Linear\":\n assert adapter_dim > 0\n adapter = nn.Sequential(\n nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim)\n )\n # initialize the adapter as identity matrix first\n _make_identity(adapter[0])\n\n elif adapter_type == \"MLP\":\n assert adapter_dim > 0\n # assume the model is pre-norm model\n adapter = nn.Sequential(\n nn.Linear(adapter_dim, 2 * adapter_dim),\n nn.ReLU(),\n nn.Linear(2 * adapter_dim, adapter_dim),\n LayerNorm(adapter_dim),\n )\n _make_identity(adapter[0])\n _make_identity(adapter[2])\n return adapter\n\n def add_eos(self, src_tokens, src_lengths):\n bsz, max_seq_len, fdim = src_tokens.size()\n if self.eos_num > 0:\n src_token_eos = torch.zeros(\n [bsz, max_seq_len + self.eos_num, fdim],\n dtype=src_tokens.dtype,\n device=src_tokens.device,\n )\n src_token_eos[:, :max_seq_len] = src_tokens\n for bi in range(bsz):\n src_token_eos[bi][\n src_lengths[bi] : src_lengths[bi] + self.eos_num\n ] = self.eos_emb.expand(self.eos_num, fdim)\n src_lengths = src_lengths + self.eos_num\n src_tokens = src_token_eos\n return src_tokens, src_lengths\n\n def apply_adapter(self, enc_out):\n if self.adapter is None:\n return enc_out\n rst = self.adapter(enc_out.encoder_out)\n if enc_out.encoder_padding_mask is not None:\n rst.masked_fill_(\n enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0\n )\n return EncoderOut(\n encoder_out=rst,\n encoder_padding_mask=enc_out.encoder_padding_mask,\n encoder_embedding=enc_out.encoder_embedding,\n encoder_states=enc_out.encoder_states,\n src_tokens=enc_out.src_tokens,\n src_lengths=enc_out.src_lengths,\n )\n\n def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):\n \"\"\"\n src_tokens: padded tensor (B, T, C * feat)\n src_lengths: tensor of original lengths of input utterances (B,)\n \"\"\"\n src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths)\n enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens)\n enc_out = self.apply_adapter(enc_out)\n return 
enc_out\n\n def reorder_encoder_out(self, encoder_out, new_order):\n return self.encoder.reorder_encoder_out(encoder_out, new_order)\n\n\nclass DualInputEncoder(FairseqEncoder):\n def __init__(\n self,\n args,\n spch_encoder,\n text_encoder,\n dictionary,\n cross_attentive_loss_before_last_layer=-1,\n ):\n super().__init__(dictionary)\n\n self.spch_encoder = spch_encoder\n self.text_encoder = text_encoder\n self.enc_grad_mult = args.enc_grad_mult\n self.cross_attentive_loss_before_last_layer = (\n cross_attentive_loss_before_last_layer\n )\n self.use_cross_attentive_loss = (\n False if cross_attentive_loss_before_last_layer <= -1 else True\n )\n self.enc2_along_grad_mult = args.enc2_along_grad_mult\n\n @classmethod\n def set_shared_layer(cls, share_level, src_layer, tgt_layer):\n \"\"\"\n share parameters from tgt_layer to src_layer\n share_level:\n 0: share everything\n 1: share everything but different model\n 2: share weight but not bias, layernorm\n \"\"\"\n if share_level == 0:\n return tgt_layer\n if isinstance(src_layer, nn.Linear):\n return tgt_layer\n if isinstance(src_layer, TransformerEncoderLayer):\n assert src_layer.embed_dim == tgt_layer.embed_dim\n assert src_layer.normalize_before == tgt_layer.normalize_before\n if share_level == 1:\n src_layer.fc1 = tgt_layer.fc1\n src_layer.fc2 = tgt_layer.fc2\n src_layer.self_attn = tgt_layer.self_attn\n src_layer.final_layer_norm = tgt_layer.final_layer_norm\n src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm\n src_layer.layernorm_embedding = tgt_layer.layernorm_embedding\n else:\n src_layer.fc1.weight = tgt_layer.fc1.weight\n src_layer.fc2.weight = tgt_layer.fc2.weight\n src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight\n src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight\n src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight\n src_layer.self_attn.out_proj.weight = (\n tgt_layer.self_attn.out_proj.weight\n )\n else:\n if share_level == 1:\n return tgt_layer\n return src_layer\n\n @classmethod\n def build_spch_encoder(cls, args):\n cfg = {\n \"input_feat_per_channel\": args.input_feat_per_channel,\n \"input_channels\": args.input_channels,\n \"conv_kernel_sizes\": args.conv_kernel_sizes,\n \"conv_channels\": args.conv_channels,\n \"encoder_embed_dim\": args.encoder_embed_dim,\n \"encoder_ffn_embed_dim\": args.encoder_ffn_embed_dim,\n \"encoder_layers\": args.speech_encoder_layers,\n \"encoder_layerdrop\": args.encoder_layerdrop,\n \"encoder_attention_heads\": args.encoder_attention_heads,\n \"max_source_positions\": args.max_source_positions,\n \"dropout\": args.dropout,\n \"encoder_normalize_before\": args.encoder_normalize_before,\n \"activation_dropout\": args.activation_dropout,\n \"attention_dropout\": args.attention_dropout,\n \"activation_fn\": args.activation_fn,\n \"layernorm_embedding\": args.layernorm_embedding,\n \"no_token_positional_embeddings\": args.no_token_positional_embeddings,\n \"no_scale_embedding\": args.no_scale_embedding,\n \"quant_noise_pq\": args.quant_noise_pq,\n \"encoder_freezing_updates\": 0,\n }\n model_args = namedtuple(\"args\", cfg.keys())(*cfg.values())\n spch_encoder = S2TTransformerEncoder(model_args)\n if args.add_speech_eos:\n spch_encoder = SpeechEoSEncoder(\n spch_encoder,\n 2 * len(args.conv_kernel_sizes.split(\",\")),\n args.input_feat_per_channel,\n adapter_type=getattr(args, \"speech_encoder_adapter_type\", \"None\"),\n adapter_dim=args.encoder_embed_dim,\n )\n return spch_encoder\n\n @classmethod\n def 
build_text_encoder(cls, args, src_dictionary, spch_encoder):\n if args.encoder_shared_layers > 0:\n mx_shared_layers = (\n args.speech_encoder_layers\n if args.speech_encoder_layers < args.text_encoder_layers\n else args.text_encoder_layers\n )\n args.encoder_shared_layers = (\n args.encoder_shared_layers\n if args.encoder_shared_layers <= mx_shared_layers\n else mx_shared_layers\n )\n cfg = {\n \"encoder_embed_dim\": args.encoder_text_embed_dim,\n \"encoder_ffn_embed_dim\": args.encoder_ffn_embed_dim,\n \"encoder_layers\": args.text_encoder_layers,\n \"encoder_layerdrop\": args.encoder_layerdrop,\n \"encoder_attention_heads\": args.encoder_attention_heads,\n \"encoder_learned_pos\": args.encoder_learned_pos,\n \"max_source_positions\": args.max_source_positions,\n \"dropout\": args.dropout,\n \"encoder_normalize_before\": args.encoder_normalize_before,\n \"activation_dropout\": args.activation_dropout,\n \"attention_dropout\": args.attention_dropout,\n \"activation_fn\": args.activation_fn,\n \"adaptive_input\": args.adaptive_input,\n \"no_token_positional_embeddings\": args.no_token_positional_embeddings,\n \"no_scale_embedding\": args.no_scale_embedding,\n \"quant_noise_pq\": args.quant_noise_pq,\n }\n model_args = namedtuple(\"args\", cfg.keys())(*cfg.values())\n enc_emb = nn.Embedding(\n len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad()\n )\n text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)\n if args.add_speech_eos:\n spch_encoder = spch_encoder.encoder\n if args.encoder_shared_layers > 0:\n text_encoder.layer_norm = cls.set_shared_layer(\n args.encoder_shared_layer_level,\n text_encoder.layer_norm,\n spch_encoder.layer_norm,\n )\n for i, ly in enumerate(\n spch_encoder.transformer_layers[-args.encoder_shared_layers :]\n ):\n ly_id = i + args.text_encoder_layers - args.encoder_shared_layers\n assert isinstance(text_encoder.layers[ly_id], type(ly))\n text_encoder.layers[ly_id] = cls.set_shared_layer(\n args.encoder_shared_layer_level,\n text_encoder.layers[ly_id],\n ly,\n )\n return text_encoder\n\n def mult_rst_grad(self, rst, ratio):\n assert isinstance(rst, dict) # instead of EncoderOut\n assert len(rst[\"encoder_out\"]) == 1\n rst[\"encoder_out\"][0] = GradMultiply.apply(rst[\"encoder_out\"][0], ratio)\n return rst\n\n def process_attentive_loss_states(self, rst, interstates):\n assert isinstance(rst, dict) # instead of EncoderOut\n rst[\"encoder_states\"] = interstates\n return rst\n\n def forward(\n self,\n src_tokens,\n src_lengths=None,\n src_txt_tokens=None,\n src_txt_lengths=None,\n **kwargs\n ):\n \"\"\"\n Args:\n src_tokens: padded tensor (B, T, C * feat)\n src_lengths: tensor of original lengths of input utterances (speech) (B,)\n src_txt_tokens: padded tensor (B, T)\n src_txt_lengths: tensor of original lengths of input utterances (text) (B,)\n \"\"\"\n # src_tokens only: inference\n # src_tokens, src_lengths: speech only training\n # src_txt_tokens, src_txt_lengths: text only training\n # all valid: speech + text training\n\n if src_tokens is None and src_txt_tokens is None:\n raise ValueError(\n \"src_tokens and src_txt_tokens cannot be None at the same time\"\n )\n ret1 = None\n ret2 = None\n return_all_hiddens = False\n if src_tokens is not None:\n if (\n self.use_cross_attentive_loss and src_txt_tokens is not None\n ): # remove self.training so we can get attn score during validation step\n return_all_hiddens = True\n ret1 = self.spch_encoder(\n src_tokens, src_lengths, return_all_hiddens=return_all_hiddens\n )\n\n if 
self.use_cross_attentive_loss and src_txt_tokens is not None:\n assert self.cross_attentive_loss_before_last_layer < len(\n ret1[\"encoder_states\"]\n )\n ret1 = self.process_attentive_loss_states(\n ret1,\n ret1[\"encoder_states\"][\n -self.cross_attentive_loss_before_last_layer - 1\n ],\n )\n\n if src_txt_tokens is not None:\n ret2 = self.text_encoder(\n src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens\n )\n if return_all_hiddens:\n if self.cross_attentive_loss_before_last_layer == len(\n self.text_encoder.layers\n ):\n text_embedding, _ = self.text_encoder.forward_embedding(\n src_txt_tokens\n )\n text_embedding = text_embedding.transpose(0, 1)\n ret2 = self.process_attentive_loss_states(ret2, text_embedding)\n else:\n assert self.cross_attentive_loss_before_last_layer < len(\n self.text_encoder.layers\n )\n ret2 = self.process_attentive_loss_states(\n ret2,\n ret2[\"encoder_states\"][\n -self.cross_attentive_loss_before_last_layer - 1\n ],\n )\n\n def merge_output(rst1, rst2):\n if rst1 is None:\n if not (self.enc2_along_grad_mult == 1.0 or self.training):\n rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult)\n return rst2\n if rst2 is None:\n return rst1\n if self.enc_grad_mult != 1.0 and self.training:\n rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult)\n rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult)\n rst = (rst1, rst2)\n return rst\n\n return merge_output(ret1, ret2)\n\n def reorder_encoder_out(self, encoder_out, new_order):\n assert self.training is False # used for inference only\n return self.spch_encoder.reorder_encoder_out(encoder_out, new_order)\n\n\n# TransformerMultiInputDecoder: take one or two encoder inputs\nclass TransformerMultiInputDecoder(FairseqDecoder):\n def __init__(\n self,\n dictionary,\n spch_decoder,\n text_decoder,\n compute_cross_attentive_loss=False,\n cross_attentive_loss_with_norm=True,\n cross_attentive_loss_reverse=False,\n ):\n\n super().__init__(dictionary)\n self.spch_decoder = spch_decoder\n self.text_decoder = text_decoder\n self.compute_cross_attentive_loss = compute_cross_attentive_loss\n self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm\n self.cross_attentive_loss_reverse = cross_attentive_loss_reverse\n\n @classmethod\n def share_spchdecoder(cls, task_args, text_decoder, spch_decoder):\n if task_args.decoder_shared_layer_level == 0:\n return text_decoder\n assert text_decoder.embed_tokens == spch_decoder.embed_tokens\n spch_decoder.project_in_dim = text_decoder.project_in_dim\n spch_decoder.embed_positions = text_decoder.embed_positions\n spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding\n spch_decoder.project_out_dim = text_decoder.project_out_dim\n spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax\n if task_args.decoder_shared_layer_level == 1:\n spch_decoder.output_projection = text_decoder.output_projection\n spch_decoder.layer_norm = text_decoder.layer_norm\n else: # 2\n spch_decoder.output_projection.weight = (\n text_decoder.output_projection.weight\n )\n for i, ly in enumerate(text_decoder.layers):\n sly = spch_decoder.layers[i]\n sly.self_attn = ly.self_attn\n sly.self_attn_layer_norm = ly.self_attn_layer_norm\n # sly.encoder_attn = ly.encoder_attn\n if (\n task_args.decoder_shared_layer_level == 1\n ): # share everything, but under different models\n sly.encoder_attn = ly.encoder_attn\n sly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm\n sly.fc1 = ly.fc1\n sly.fc2 = ly.fc2\n sly.final_layer_norm = ly.final_layer_norm\n else: # 
task_args.decoder_shared_layer_level == 2: #separated encoder_attn_layer_norm and bias\n sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight\n sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight\n sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight\n sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight\n sly.fc1.weight = ly.fc1.weight\n sly.fc2.weight = ly.fc2.weight\n\n return spch_decoder\n\n def cross_attentive_loss(\n self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6\n ):\n x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D\n y = student_states.transpose(0, 1)\n if self.cross_attentive_loss_with_norm:\n x = x / (x.norm(dim=2, keepdim=True) + eps)\n y = y / (y.norm(dim=2, keepdim=True) + eps)\n dim = x.size(-1)\n # lengths: batch X seqLen\n sim_scores_xy = torch.bmm(x, y.transpose(1, 2)) # batch X lenx X leny ]\n if y.dtype == torch.float16:\n sim_scores_xy = sim_scores_xy.float()\n y = y.float()\n x = x.float()\n if teacher_masking != []:\n assert len(teacher_masking) == 1\n sim_scores_xy = sim_scores_xy.masked_fill(\n teacher_masking[0].unsqueeze(-1), float(\"-inf\")\n )\n if student_masking != []:\n sim_scores_xy = sim_scores_xy.masked_fill(\n student_masking[0].unsqueeze(1), float(\"-inf\")\n )\n # do masking\n y_weights = utils.softmax(sim_scores_xy, dim=-1)\n if teacher_masking != []:\n y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)\n x_reconstruct_from_y = torch.bmm(y_weights, y)\n\n sim_scores_xx = torch.bmm(x, x.transpose(1, 2)) # batch X lenx X lenx ]\n x_weights = utils.softmax(sim_scores_xx, dim=-1)\n if teacher_masking != []:\n x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)\n\n # no gradient for teacher state\n x_reconstruct_from_x = torch.bmm(x_weights, x).detach()\n cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2)\n if teacher_masking != []:\n cost = cost.masked_fill(teacher_masking[0], 0)\n\n if not self.cross_attentive_loss_with_norm:\n cost = cost / dim\n return cost\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out,\n incremental_state=None,\n has_txt_input=False,\n **kwargs\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing. If there are\n two or more input during training, they will share the same prev_output_tokens\n encoder_out (tuple[Tensor]): output from the encoder, used for\n encoder-side attention. It will be tuple if there are more inputs, but a tensor\n if only one input\n incremental_state ([dict]): dictionary used for storing state during\n :ref:`Incremental decoding`. It is only valid for inference, only from single\n input\n Returns:\n tuple:\n - the last decoder layer's output of shape `(batch, tgt_len,\n vocab)`. 
If there are N inputs, batch will be N bigger than a single input\n - the last decoder layer's attention weights of shape `(batch,\n tgt_len, src_len)`\n \"\"\"\n assert not isinstance(encoder_out, EncoderOut)\n if isinstance(encoder_out, tuple): # training with mulitple input\n rst = []\n assert len(encoder_out) == 2\n for i, eo in enumerate(encoder_out):\n assert incremental_state is None\n if i == 0:\n rst.append(\n self.spch_decoder(prev_output_tokens, eo, incremental_state)\n )\n else:\n rst.append(\n self.text_decoder(prev_output_tokens, eo, incremental_state)\n )\n dec_out = torch.cat([r[0] for r in rst], dim=0)\n attn_cost = None\n if self.compute_cross_attentive_loss:\n assert isinstance(encoder_out[0], dict)\n if self.cross_attentive_loss_reverse:\n attn_cost = self.cross_attentive_loss(\n teacher_states=encoder_out[1][\"encoder_states\"], # text_states\n student_states=encoder_out[0][\"encoder_states\"], # spch_states\n teacher_masking=encoder_out[1][\"encoder_padding_mask\"],\n student_masking=encoder_out[0][\"encoder_padding_mask\"],\n )\n else:\n attn_cost = self.cross_attentive_loss(\n teacher_states=encoder_out[0][\"encoder_states\"], # spch_states\n student_states=encoder_out[1][\"encoder_states\"], # text_states\n teacher_masking=encoder_out[0][\"encoder_padding_mask\"],\n student_masking=encoder_out[1][\"encoder_padding_mask\"],\n )\n\n return (dec_out, {\"attn_cost\": attn_cost})\n else: # inference or training with one input\n if has_txt_input:\n return self.text_decoder(\n prev_output_tokens, encoder_out, incremental_state\n )\n return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state)\n\n\n# Note:\n# dual input transformer:\n# encoder: S2TTransformerEncoder for speech + TransformerEncoder for text\n# decoder: TransformerDecoder for text\n@register_model(\"dual_input_s2t_transformer\")\nclass DualInputS2TTransformerModel(FairseqEncoderDecoderModel):\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n self.num_updates = 0\n\n def max_positions(self):\n return None # it is provided in task\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # encoder 1: S2TTransformerEncoder for speech\n parser.add_argument(\n \"--conv-kernel-sizes\",\n type=str,\n metavar=\"N\",\n help=\"kernel sizes of Conv1d subsampling layers\",\n )\n parser.add_argument(\n \"--conv-channels\",\n type=int,\n metavar=\"N\",\n help=\"# of channels in Conv1d subsampling layers\",\n )\n parser.add_argument(\n \"--enc-output-dim\",\n type=int,\n metavar=\"N\",\n help=\"\"\"\n encoder output dimension, can be None. 
If specified, projecting the\n transformer output to the specified dimension\"\"\",\n )\n # standard Transformer\n parser.add_argument(\n \"--activation-fn\",\n type=str,\n default=\"relu\",\n choices=utils.get_available_activation_fns(),\n help=\"activation function to use\",\n )\n parser.add_argument(\n \"--dropout\", type=float, metavar=\"D\", help=\"dropout probability\"\n )\n parser.add_argument(\n \"--attention-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability for attention weights\",\n )\n parser.add_argument(\n \"--activation-dropout\",\n \"--relu-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability after activation in FFN.\",\n )\n parser.add_argument(\n \"--encoder-embed-dim\",\n type=int,\n metavar=\"N\",\n help=\"encoder embedding dimension\",\n )\n parser.add_argument(\n \"--encoder-text-embed-dim\",\n type=int,\n metavar=\"N\",\n help=\"encoder text embedding dimension\",\n )\n parser.add_argument(\n \"--encoder-ffn-embed-dim\",\n type=int,\n metavar=\"N\",\n help=\"encoder embedding dimension for FFN\",\n )\n parser.add_argument(\n \"--encoder-attention-heads\",\n type=int,\n metavar=\"N\",\n help=\"num encoder attention heads\",\n )\n parser.add_argument(\n \"--decoder-embed-dim\",\n type=int,\n metavar=\"N\",\n help=\"decoder embedding dimension\",\n )\n parser.add_argument(\n \"--decoder-ffn-embed-dim\",\n type=int,\n metavar=\"N\",\n help=\"decoder embedding dimension for FFN\",\n )\n parser.add_argument(\n \"--decoder-layers\", type=int, metavar=\"N\", help=\"num decoder layers\"\n )\n parser.add_argument(\n \"--decoder-attention-heads\",\n type=int,\n metavar=\"N\",\n help=\"num decoder attention heads\",\n )\n parser.add_argument(\n \"--layernorm-embedding\",\n action=\"store_true\",\n help=\"add layernorm to embedding\",\n )\n parser.add_argument(\n \"--no-scale-embedding\",\n action=\"store_true\",\n help=\"if True, dont scale embeddings\",\n )\n # non-standard transformer parameters\n parser.add_argument(\n \"--speech-encoder-layers\",\n type=int,\n metavar=\"N\",\n help=\"num speech encoder layers\",\n )\n parser.add_argument(\n \"--text-encoder-layers\",\n type=int,\n metavar=\"N\",\n help=\"num text encoder layers\",\n )\n parser.add_argument(\n \"--encoder-shared-layers\",\n type=int,\n metavar=\"N\",\n help=\"num shared encoder layers\",\n )\n parser.add_argument(\n \"--encoder-shared-layer-level\",\n type=int,\n metavar=\"N\",\n default=0,\n choices=[0, 1, 2],\n help=\"share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm\",\n )\n\n parser.add_argument(\n \"--decoder-shared-layer-level\",\n default=0,\n choices=[0, 1, 2],\n type=int,\n metavar=\"N\",\n help=\"0: share everything; 1: share everything with different model 2: no share layer_norm and bias\",\n )\n ###\n parser.add_argument(\n \"--text-input-cost-ratio\",\n type=float,\n default=1.0,\n metavar=\"V\",\n help=\"text input cost ratio relative to speech input cost\",\n )\n parser.add_argument(\n \"--init-scale\",\n type=float,\n default=1.0,\n metavar=\"V\",\n help=\"scale the initial weight by given factor\",\n )\n parser.add_argument(\n \"--enc-grad-mult\",\n type=float,\n metavar=\"V\",\n default=1.0,\n help=\"multiply enc1 and enc2 gradient by V\",\n )\n parser.add_argument(\n \"--enc2-along-grad-mult\",\n type=float,\n metavar=\"V\",\n default=1.0,\n help=\"multiply enc2 gradient by V if only enc2 is used\",\n )\n parser.add_argument(\n \"--load-pretrain-encoder\",\n type=str,\n default=\"\",\n 
metavar=\"EXPR\",\n help=\"\"\" path to the pretrained encoder \"\"\",\n )\n parser.add_argument(\n \"--load-pretrain-speech-encoder\",\n type=str,\n default=\"\",\n metavar=\"EXPR\",\n help=\"\"\" path to the pretrained speech encoder \"\"\",\n )\n parser.add_argument(\n \"--load-pretrain-text-encoder\",\n type=str,\n default=\"\",\n metavar=\"EXPR\",\n help=\"\"\" path to the pretrained text encoder \"\"\",\n )\n parser.add_argument(\n \"--load-pretrain-text-encoder-last\",\n type=str,\n default=\"\",\n metavar=\"EXPR\",\n help=\"\"\" path to the pretrained text encoder \"\"\",\n )\n parser.add_argument(\n \"--load-pretrain-decoder\",\n type=str,\n metavar=\"EXPR\",\n default=\"\",\n help=\"\"\" path to the pretrained encoder \"\"\",\n )\n parser.add_argument(\n \"--add-speech-eos\",\n action=\"store_true\",\n help=\"add eos token at the end of input feature\",\n )\n parser.add_argument(\n \"--speech-encoder-adapter-type\",\n type=str,\n metavar=\"EXPR\",\n default=\"None\",\n choices=[\"None\", \"Linear\", \"MLP\"],\n help=\"add speech encoder adapter\",\n )\n\n @classmethod\n def build_encoder(cls, args, task):\n spch_encoder = DualInputEncoder.build_spch_encoder(args)\n text_encoder = DualInputEncoder.build_text_encoder(\n args, task.src_dict, spch_encoder\n )\n cross_attentive_loss_before_last_layer = (\n 0 if getattr(args, \"attentive_cost_regularization\", 0.0) > 0.0 else -1\n )\n encoder = DualInputEncoder(\n args,\n spch_encoder,\n text_encoder,\n task.src_dict,\n cross_attentive_loss_before_last_layer,\n )\n if args.init_scale != 1.0:\n with torch.no_grad():\n for param in encoder.parameters():\n param.data.mul_(args.init_scale)\n if args.load_pretrain_text_encoder != \"\":\n checkpoint_utils.load_pretrained_component_from_model(\n text_encoder, args.load_pretrain_text_encoder\n )\n if args.load_pretrain_speech_encoder != \"\":\n if hasattr(spch_encoder, \"encoder\"):\n checkpoint_utils.load_pretrained_component_from_model(\n spch_encoder.encoder, args.load_pretrain_speech_encoder\n )\n else:\n checkpoint_utils.load_pretrained_component_from_model(\n spch_encoder, args.load_pretrain_speech_encoder\n )\n if (\n args.load_pretrain_text_encoder_last != \"\"\n ): # if share encoder, speech encoder parameters will be used.\n # It provides a chance to use pre-trained mt encoder instead\n checkpoint_utils.load_pretrained_component_from_model(\n text_encoder, args.load_pretrain_text_encoder_last\n )\n\n if args.load_pretrain_encoder != \"\":\n checkpoint_utils.load_pretrained_component_from_model(\n encoder, args.load_pretrain_encoder\n )\n return encoder\n\n @classmethod\n def build_decoder(cls, args, task):\n dec_cfg = {\n \"decoder_layerdrop\": args.decoder_layerdrop,\n \"share_decoder_input_output_embed\": args.share_decoder_input_output_embed,\n \"decoder_embed_dim\": args.decoder_embed_dim,\n \"max_target_positions\": args.max_target_positions,\n \"dropout\": args.dropout,\n \"encoder_learned_pos\": args.encoder_learned_pos,\n \"decoder_learned_pos\": args.decoder_learned_pos,\n \"layernorm_embedding\": args.layernorm_embedding,\n \"decoder_normalize_before\": args.decoder_normalize_before,\n \"activation_dropout\": args.activation_dropout,\n \"attention_dropout\": args.attention_dropout,\n \"decoder_ffn_embed_dim\": args.decoder_ffn_embed_dim,\n \"decoder_layers\": args.decoder_layers,\n \"decoder_attention_heads\": args.decoder_attention_heads,\n \"decoder_output_dim\": args.decoder_embed_dim,\n \"no_scale_embedding\": args.no_scale_embedding,\n \"adaptive_input\": 
args.adaptive_input,\n \"quant_noise_pq\": args.quant_noise_pq,\n \"adaptive_softmax_cutoff\": args.adaptive_softmax_cutoff,\n \"tie_adaptive_weights\": args.tie_adaptive_weights,\n \"no_token_positional_embeddings\": args.no_token_positional_embeddings,\n }\n dec_cfg = namedtuple(\"args\", dec_cfg.keys())(*dec_cfg.values())\n dec_emb = nn.Embedding(\n len(task.target_dictionary),\n args.decoder_embed_dim,\n task.target_dictionary.pad(),\n )\n compute_cross_attentive_loss = (\n True if getattr(args, \"attentive_cost_regularization\", 0.0) > 0.0 else False\n )\n cross_attentive_loss_without_norm = getattr(\n args, \"attentive_cost_without_normalize\", False\n )\n cross_attentive_loss_reverse = (\n False # getattr(args, \"attentive_cost_reverse\", False)\n )\n\n text_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)\n spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)\n spch_decoder = TransformerMultiInputDecoder.share_spchdecoder(\n args, text_decoder, spch_decoder\n )\n decoder = TransformerMultiInputDecoder(\n dictionary=task.target_dictionary,\n spch_decoder=spch_decoder,\n text_decoder=text_decoder,\n compute_cross_attentive_loss=compute_cross_attentive_loss,\n cross_attentive_loss_with_norm=True\n if not cross_attentive_loss_without_norm\n else False,\n cross_attentive_loss_reverse=cross_attentive_loss_reverse,\n )\n if args.init_scale != 1.0:\n with torch.no_grad():\n for param in decoder.parameters():\n param.data.mul_(args.init_scale)\n if args.load_pretrain_decoder != \"\":\n try:\n checkpoint_utils.load_pretrained_component_from_model(\n decoder, args.load_pretrain_decoder\n )\n except RuntimeError:\n checkpoint_utils.load_pretrained_component_from_model(\n decoder.text_decoder, args.load_pretrain_decoder\n )\n if args.decoder_shared_layer_level > 0:\n checkpoint_utils.load_pretrained_component_from_model(\n decoder.spch_decoder, args.load_pretrain_decoder\n )\n\n return decoder\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n # make sure that all args are properly defaulted\n # (in case there are any new ones)\n dualinputs2ttransformer_base(args)\n\n encoder = cls.build_encoder(args, task)\n decoder = cls.build_decoder(args, task)\n return cls(encoder, decoder)\n\n def get_normalized_probs(self, net_output, log_probs, sample=None):\n # net_output['encoder_out'] is a (B, T, D) tensor\n lprobs = super().get_normalized_probs(net_output, log_probs, sample)\n lprobs.batch_first = True\n return lprobs\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n super().set_num_updates(num_updates)\n self.num_updates = num_updates\n\n def forward(\n self,\n src_tokens,\n src_lengths,\n prev_output_tokens,\n use_encoder_outputs=False,\n src_txt_tokens=None,\n src_txt_lengths=None,\n mode=\"sup_speech\",\n **kwargs\n ):\n \"\"\"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. 
Then, feed the\n encoder output and previous decoder outputs (i.e., teacher forcing) to\n the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n mode = 'sup_speech' or 'text'\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n if mode == \"text\":\n assert src_txt_tokens is None\n src_txt_tokens = src_tokens\n src_txt_lengths = src_lengths\n src_tokens = None\n src_lengths = None\n encoder_out = self.encoder(\n src_tokens,\n src_lengths=src_lengths,\n src_txt_tokens=src_txt_tokens,\n src_txt_lengths=src_txt_lengths,\n **kwargs\n )\n has_txt_input = True if src_txt_tokens is not None else False\n decoder_out = self.decoder(\n prev_output_tokens,\n encoder_out=encoder_out,\n has_txt_input=has_txt_input,\n **kwargs\n )\n if use_encoder_outputs:\n return decoder_out, encoder_out\n return decoder_out\n\n\n@register_model_architecture(\n \"dual_input_s2t_transformer\", \"dualinputs2ttransformer_base\"\n)\ndef dualinputs2ttransformer_base(args):\n args.encoder_freezing_updates = getattr(args, \"encoder_freezing_updates\", 0)\n # Convolutional subsampler\n args.input_feat_per_channel = getattr(args, \"input_feat_per_channel\", 80)\n args.conv_kernel_sizes = getattr(args, \"conv_kernel_sizes\", \"5,5\")\n args.conv_channels = getattr(args, \"conv_channels\", 1024)\n # Transformer\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_text_embed_dim = getattr(\n args, \"encoder_text_embed_dim\", args.encoder_embed_dim\n )\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", True)\n args.encoder_layerdrop = getattr(args, \"encoder_layerdrop\", 0)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", True)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.attention_dropout = getattr(args, \"attention_dropout\", args.dropout)\n args.activation_dropout = getattr(args, \"activation_dropout\", args.dropout)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = getattr(args, \"adaptive_softmax_dropout\", 0)\n args.tie_adaptive_weights = getattr(args, \"tie_adaptive_weights\", False)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n 
args.decoder_layerdrop = getattr(args, \"decoder_layerdrop\", 0.0)\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.layernorm_embedding = getattr(args, \"layernorm_embedding\", False)\n args.no_scale_embedding = getattr(args, \"no_scale_embedding\", False)\n args.quant_noise_pq = getattr(args, \"quant_noise_pq\", 0)\n\n args.speech_encoder_layers = getattr(args, \"speech_encoder_layers\", 10)\n args.text_encoder_layers = getattr(args, \"text_encoder_layers\", 6)\n args.encoder_shared_layers = getattr(args, \"encoder_shared_layers\", 0)\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n\n args.add_speech_eos = getattr(args, \"add_speech_eos\", False)\n\n\n@register_model_architecture(\"dual_input_s2t_transformer\", \"dualinputs2ttransformer_s\")\ndef dualinputs2ttransformer_s(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 256)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 256 * 4)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.speech_encoder_layers = getattr(args, \"speech_encoder_layers\", 7)\n args.text_encoder_layers = getattr(args, \"text_encoder_layers\", 7)\n args.decoder_layers = getattr(args, \"decoder_layers\", 7)\n dualinputs2ttransformer_base(args)\n\n\n@register_model_architecture(\"dual_input_s2t_transformer\", \"dualinputs2ttransformer_m\")\ndef dualinputs2ttransformer_m(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 512 * 4)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.dropout = getattr(args, \"dropout\", 0.15)\n args.speech_encoder_layers = getattr(args, \"speech_encoder_layers\", 10)\n args.text_encoder_layers = getattr(args, \"text_encoder_layers\", 6)\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n dualinputs2ttransformer_base(args)\n\n\n@register_model_architecture(\"dual_input_s2t_transformer\", \"dualinputs2ttransformer_b\")\ndef dualinputs2ttransformer_b(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 768)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 768 * 4)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 12)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 12)\n args.dropout = getattr(args, \"dropout\", 0.15)\n args.speech_encoder_layers = getattr(args, \"speech_encoder_layers\", 12)\n args.text_encoder_layers = getattr(args, \"text_encoder_layers\", 6)\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n dualinputs2ttransformer_base(args)\n\n\n@register_model_architecture(\"dual_input_s2t_transformer\", \"dualinputs2ttransformer_l\")\ndef dualinputs2ttransformer_l(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 1024)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 1024 * 4)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 16)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 16)\n args.dropout = getattr(args, \"dropout\", 0.2)\n args.speech_encoder_layers = getattr(args, \"speech_encoder_layers\", 12)\n args.text_encoder_layers = getattr(args, 
\"text_encoder_layers\", 6)\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n dualinputs2ttransformer_base(args)\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nfrom pathlib import Path\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom fairseq import utils\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\nfrom fairseq.dataclass.configs import (\n CheckpointConfig,\n CommonConfig,\n CommonEvalConfig,\n DatasetConfig,\n DistributedTrainingConfig,\n EvalLMConfig,\n GenerationConfig,\n InteractiveConfig,\n OptimizationConfig,\n EMAConfig,\n)\nfrom fairseq.dataclass.utils import gen_parser_from_dataclass\n\n# this import is for backward compatibility\nfrom fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa\n\n\ndef get_preprocessing_parser(default_task=\"translation\"):\n parser = get_parser(\"Preprocessing\", default_task)\n add_preprocess_args(parser)\n return parser\n\n\ndef get_training_parser(default_task=\"translation\"):\n parser = get_parser(\"Trainer\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser)\n add_model_args(parser)\n add_optimization_args(parser)\n add_checkpoint_args(parser)\n add_ema_args(parser)\n return parser\n\n\ndef get_generation_parser(interactive=False, default_task=\"translation\"):\n parser = get_parser(\"Generation\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser)\n add_generation_args(parser)\n add_checkpoint_args(parser)\n if interactive:\n add_interactive_args(parser)\n return parser\n\n\ndef get_speech_generation_parser(default_task=\"text_to_speech\"):\n parser = get_parser(\"Speech Generation\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_speech_generation_args(parser)\n return parser\n\n\ndef get_interactive_generation_parser(default_task=\"translation\"):\n return get_generation_parser(interactive=True, default_task=default_task)\n\n\ndef get_eval_lm_parser(default_task=\"language_modeling\"):\n parser = get_parser(\"Evaluate Language Model\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_eval_lm_args(parser)\n return parser\n\n\ndef get_validation_parser(default_task=None):\n parser = get_parser(\"Validation\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser, default_world_size=1)\n group = parser.add_argument_group(\"Evaluation\")\n gen_parser_from_dataclass(group, CommonEvalConfig())\n return parser\n\n\ndef parse_args_and_arch(\n parser: argparse.ArgumentParser,\n input_args: List[str] = None,\n parse_known: bool = False,\n suppress_defaults: bool = False,\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,\n):\n \"\"\"\n Args:\n parser (ArgumentParser): the parser\n input_args (List[str]): strings to parse, defaults to sys.argv\n parse_known (bool): only parse known arguments, similar to\n `ArgumentParser.parse_known_args`\n suppress_defaults (bool): parse while ignoring all default values\n modify_parser (Optional[Callable[[ArgumentParser], None]]):\n function to modify the parser, e.g., to set default values\n \"\"\"\n if suppress_defaults:\n # Parse args without any default values. 
This requires us to parse\n # twice, once to identify all the necessary task/model args, and a second\n # time with all defaults set to None.\n args = parse_args_and_arch(\n parser,\n input_args=input_args,\n parse_known=parse_known,\n suppress_defaults=False,\n )\n suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])\n suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})\n args = suppressed_parser.parse_args(input_args)\n return argparse.Namespace(\n **{k: v for k, v in vars(args).items() if v is not None}\n )\n\n from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY\n\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args(input_args)\n utils.import_user_module(usr_args)\n\n if modify_parser is not None:\n modify_parser(parser)\n\n # The parser doesn't know about model/criterion/optimizer-specific args, so\n # we parse twice. First we parse the model/criterion/optimizer, then we\n # parse a second time after adding the *-specific arguments.\n # If input_args is given, we will parse those args instead of sys.argv.\n args, _ = parser.parse_known_args(input_args)\n\n # Add model-specific args to parser.\n if hasattr(args, \"arch\"):\n model_specific_group = parser.add_argument_group(\n \"Model-specific configuration\",\n # Only include attributes which are explicitly given as command-line\n # arguments or which have default values.\n argument_default=argparse.SUPPRESS,\n )\n if args.arch in ARCH_MODEL_REGISTRY:\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n elif args.arch in MODEL_REGISTRY:\n MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n else:\n raise RuntimeError()\n\n if hasattr(args, \"task\"):\n from fairseq.tasks import TASK_REGISTRY\n\n TASK_REGISTRY[args.task].add_args(parser)\n if getattr(args, \"use_bmuf\", False):\n # hack to support extra args for block distributed data parallelism\n from fairseq.optim.bmuf import FairseqBMUF\n\n FairseqBMUF.add_args(parser)\n\n # Add *-specific args to parser.\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n choice = getattr(args, registry_name, None)\n if choice is not None:\n cls = REGISTRY[\"registry\"][choice]\n if hasattr(cls, \"add_args\"):\n cls.add_args(parser)\n elif hasattr(cls, \"__dataclass\"):\n gen_parser_from_dataclass(parser, cls.__dataclass())\n\n # Modify the parser a second time, since defaults may have been reset\n if modify_parser is not None:\n modify_parser(parser)\n\n # Parse a second time.\n if parse_known:\n args, extra = parser.parse_known_args(input_args)\n else:\n args = parser.parse_args(input_args)\n extra = None\n # Post-process args.\n if (\n hasattr(args, \"batch_size_valid\") and args.batch_size_valid is None\n ) or not hasattr(args, \"batch_size_valid\"):\n args.batch_size_valid = args.batch_size\n if hasattr(args, \"max_tokens_valid\") and args.max_tokens_valid is None:\n args.max_tokens_valid = args.max_tokens\n if getattr(args, \"memory_efficient_fp16\", False):\n args.fp16 = True\n if getattr(args, \"memory_efficient_bf16\", False):\n args.bf16 = True\n args.tpu = getattr(args, \"tpu\", False)\n args.bf16 = getattr(args, \"bf16\", False)\n if args.bf16:\n args.tpu = True\n if 
args.tpu and args.fp16:\n raise ValueError(\"Cannot combine --fp16 and --tpu, use --bf16 on TPUs\")\n\n if getattr(args, \"seed\", None) is None:\n args.seed = 1 # default seed for training\n args.no_seed_provided = True\n else:\n args.no_seed_provided = False\n\n # Apply architecture configuration.\n if hasattr(args, \"arch\") and args.arch in ARCH_CONFIG_REGISTRY:\n ARCH_CONFIG_REGISTRY[args.arch](args)\n\n if parse_known:\n return args, extra\n else:\n return args\n\n\ndef get_parser(desc, default_task=\"translation\"):\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args()\n utils.import_user_module(usr_args)\n\n parser = argparse.ArgumentParser(allow_abbrev=False)\n gen_parser_from_dataclass(parser, CommonConfig())\n\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n parser.add_argument(\n \"--\" + registry_name.replace(\"_\", \"-\"),\n default=REGISTRY[\"default\"],\n choices=REGISTRY[\"registry\"].keys(),\n )\n\n # Task definitions can be found under fairseq/tasks/\n from fairseq.tasks import TASK_REGISTRY\n\n parser.add_argument(\n \"--task\",\n metavar=\"TASK\",\n default=default_task,\n choices=TASK_REGISTRY.keys(),\n help=\"task\",\n )\n # fmt: on\n return parser\n\n\ndef add_preprocess_args(parser):\n group = parser.add_argument_group(\"Preprocessing\")\n # fmt: off\n group.add_argument(\"-s\", \"--source-lang\", default=None, metavar=\"SRC\",\n help=\"source language\")\n group.add_argument(\"-t\", \"--target-lang\", default=None, metavar=\"TARGET\",\n help=\"target language\")\n group.add_argument(\"--trainpref\", metavar=\"FP\", default=None,\n help=\"train file prefix (also used to build dictionaries)\")\n group.add_argument(\"--validpref\", metavar=\"FP\", default=None,\n help=\"comma separated, valid file prefixes \"\n \"(words missing from train set are replaced with <unk>)\")\n group.add_argument(\"--testpref\", metavar=\"FP\", default=None,\n help=\"comma separated, test file prefixes \"\n \"(words missing from train set are replaced with <unk>)\")\n group.add_argument(\"--align-suffix\", metavar=\"FP\", default=None,\n help=\"alignment file suffix\")\n group.add_argument(\"--destdir\", metavar=\"DIR\", default=\"data-bin\",\n help=\"destination dir\")\n group.add_argument(\"--thresholdtgt\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--thresholdsrc\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--tgtdict\", metavar=\"FP\",\n help=\"reuse given target dictionary\")\n group.add_argument(\"--srcdict\", metavar=\"FP\",\n help=\"reuse given source dictionary\")\n group.add_argument(\"--nwordstgt\", metavar=\"N\", default=-1, type=int,\n help=\"number of target words to retain\")\n group.add_argument(\"--nwordssrc\", metavar=\"N\", default=-1, type=int,\n help=\"number of source words to retain\")\n group.add_argument(\"--alignfile\", metavar=\"ALIGN\", default=None,\n help=\"an alignment file (optional)\")\n parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n 
group.add_argument(\"--joined-dictionary\", action=\"store_true\",\n help=\"Generate joined dictionary\")\n group.add_argument(\"--only-source\", action=\"store_true\",\n help=\"Only process the source language\")\n group.add_argument(\"--padding-factor\", metavar=\"N\", default=8, type=int,\n help=\"Pad dictionary size to be multiple of N\")\n group.add_argument(\"--workers\", metavar=\"N\", default=1, type=int,\n help=\"number of parallel workers\")\n group.add_argument(\"--dict-only\", action='store_true',\n help=\"if true, only builds a dictionary and then exits\")\n # fmt: on\n return parser\n\n\ndef add_dataset_args(parser, train=False, gen=False):\n group = parser.add_argument_group(\"dataset_data_loading\")\n gen_parser_from_dataclass(group, DatasetConfig())\n # fmt: on\n return group\n\n\ndef add_distributed_training_args(parser, default_world_size=None):\n group = parser.add_argument_group(\"distributed_training\")\n if default_world_size is None:\n default_world_size = max(1, torch.cuda.device_count())\n gen_parser_from_dataclass(\n group, DistributedTrainingConfig(distributed_world_size=default_world_size)\n )\n return group\n\n\ndef add_optimization_args(parser):\n group = parser.add_argument_group(\"optimization\")\n # fmt: off\n gen_parser_from_dataclass(group, OptimizationConfig())\n # fmt: on\n return group\n\n\ndef add_checkpoint_args(parser):\n group = parser.add_argument_group(\"checkpoint\")\n # fmt: off\n gen_parser_from_dataclass(group, CheckpointConfig())\n # fmt: on\n return group\n\n\ndef add_common_eval_args(group):\n gen_parser_from_dataclass(group, CommonEvalConfig())\n\n\ndef add_eval_lm_args(parser):\n group = parser.add_argument_group(\"LM Evaluation\")\n add_common_eval_args(group)\n gen_parser_from_dataclass(group, EvalLMConfig())\n\n\ndef add_generation_args(parser):\n group = parser.add_argument_group(\"Generation\")\n add_common_eval_args(group)\n gen_parser_from_dataclass(group, GenerationConfig())\n return group\n\n\ndef add_speech_generation_args(parser):\n group = parser.add_argument_group(\"Speech Generation\")\n add_common_eval_args(group) # NOTE: remove_bpe is not needed\n # fmt: off\n group.add_argument('--eos_prob_threshold', default=0.5, type=float,\n help='terminate when eos probability exceeds this')\n # fmt: on\n return group\n\n\ndef add_interactive_args(parser):\n group = parser.add_argument_group(\"Interactive\")\n gen_parser_from_dataclass(group, InteractiveConfig())\n\n\ndef add_model_args(parser):\n group = parser.add_argument_group(\"Model configuration\")\n # fmt: off\n\n # Model definitions can be found under fairseq/models/\n #\n # The model architecture can be specified in several ways.\n # In increasing order of priority:\n # 1) model defaults (lowest priority)\n # 2) --arch argument\n # 3) --encoder/decoder-* arguments (highest priority)\n from fairseq.models import ARCH_MODEL_REGISTRY\n group.add_argument('--arch', '-a', metavar='ARCH',\n choices=ARCH_MODEL_REGISTRY.keys(),\n help='model architecture')\n # fmt: on\n return group\n\n\ndef get_args(\n data: Union[str, Path],\n task: str = \"translation\",\n arch: str = \"transformer\",\n **overrides\n):\n parser = get_training_parser(task)\n args = parse_args_and_arch(parser, [str(data), \"--task\", task, \"--arch\", arch])\n\n for k, v in overrides.items():\n setattr(args, k, v)\n\n return args\n\n\ndef add_ema_args(parser):\n group = parser.add_argument_group(\"EMA configuration\")\n gen_parser_from_dataclass(group, EMAConfig())\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import List, Optional\n\nimport torch\nfrom torch import nn\n\nfrom fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel,\n FairseqIncrementalDecoder, register_model,\n register_model_architecture)\nfrom fairseq.modules import (\n TransformerEncoderLayer, TransformerDecoderLayer\n)\nfrom fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet\nfrom fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout\nfrom fairseq.data.data_utils import lengths_to_padding_mask\nfrom fairseq import utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef encoder_init(m):\n if isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain(\"relu\"))\n\n\ndef Embedding(num_embeddings, embedding_dim):\n m = nn.Embedding(num_embeddings, embedding_dim)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n return m\n\n\nclass TTSTransformerEncoder(FairseqEncoder):\n def __init__(self, args, src_dict, embed_speaker):\n super().__init__(src_dict)\n self.padding_idx = src_dict.pad()\n self.embed_speaker = embed_speaker\n self.spk_emb_proj = None\n if embed_speaker is not None:\n self.spk_emb_proj = nn.Linear(\n args.encoder_embed_dim + args.speaker_embed_dim,\n args.encoder_embed_dim\n )\n\n self.dropout_module = FairseqDropout(\n p=args.dropout, module_name=self.__class__.__name__\n )\n self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim,\n padding_idx=self.padding_idx)\n assert(args.encoder_conv_kernel_size % 2 == 1)\n self.prenet = nn.ModuleList(\n nn.Sequential(\n nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim,\n kernel_size=args.encoder_conv_kernel_size,\n padding=((args.encoder_conv_kernel_size - 1) // 2)),\n nn.BatchNorm1d(args.encoder_embed_dim),\n nn.ReLU(),\n nn.Dropout(args.encoder_dropout),\n )\n for _ in range(args.encoder_conv_layers)\n )\n self.prenet_proj = nn.Linear(\n args.encoder_embed_dim, args.encoder_embed_dim\n )\n self.embed_positions = PositionalEmbedding(\n args.max_source_positions, args.encoder_embed_dim, self.padding_idx\n )\n self.pos_emb_alpha = nn.Parameter(torch.ones(1))\n\n self.transformer_layers = nn.ModuleList(\n TransformerEncoderLayer(args)\n for _ in range(args.encoder_transformer_layers)\n )\n if args.encoder_normalize_before:\n self.layer_norm = LayerNorm(args.encoder_embed_dim)\n else:\n self.layer_norm = None\n\n self.apply(encoder_init)\n\n def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):\n x = self.embed_tokens(src_tokens)\n x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T\n for conv in self.prenet:\n x = conv(x)\n x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C\n x = self.prenet_proj(x)\n\n padding_mask = src_tokens.eq(self.padding_idx)\n positions = self.embed_positions(padding_mask)\n x += self.pos_emb_alpha * positions\n x = self.dropout_module(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n for layer in self.transformer_layers:\n x = layer(x, padding_mask)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n if self.embed_speaker is not None:\n seq_len, bsz, _ = x.size()\n emb = self.embed_speaker(speaker).transpose(0, 1)\n emb = emb.expand(seq_len, bsz, -1)\n x = self.spk_emb_proj(torch.cat([x, emb], dim=2))\n\n return {\n \"encoder_out\": [x], # T x B x C\n \"encoder_padding_mask\": 
[padding_mask] if padding_mask.any() else [], # B x T\n \"encoder_embedding\": [], # B x T x C\n \"encoder_states\": [], # List[T x B x C]\n \"src_tokens\": [],\n \"src_lengths\": [],\n }\n\n\ndef decoder_init(m):\n if isinstance(m, torch.nn.Conv1d):\n nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain(\"tanh\"))\n\n\nclass TTSTransformerDecoder(FairseqIncrementalDecoder):\n def __init__(self, args, src_dict):\n super().__init__(None)\n self._future_mask = torch.empty(0)\n\n self.args = args\n self.padding_idx = src_dict.pad()\n self.n_frames_per_step = args.n_frames_per_step\n self.out_dim = args.output_frame_dim * args.n_frames_per_step\n\n self.dropout_module = FairseqDropout(\n args.dropout, module_name=self.__class__.__name__\n )\n self.embed_positions = PositionalEmbedding(\n args.max_target_positions, args.decoder_embed_dim, self.padding_idx\n )\n self.pos_emb_alpha = nn.Parameter(torch.ones(1))\n self.prenet = nn.Sequential(\n Prenet(self.out_dim, args.prenet_layers, args.prenet_dim,\n args.prenet_dropout),\n nn.Linear(args.prenet_dim, args.decoder_embed_dim),\n )\n\n self.n_transformer_layers = args.decoder_transformer_layers\n self.transformer_layers = nn.ModuleList(\n TransformerDecoderLayer(args)\n for _ in range(self.n_transformer_layers)\n )\n if args.decoder_normalize_before:\n self.layer_norm = LayerNorm(args.decoder_embed_dim)\n else:\n self.layer_norm = None\n\n self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)\n self.eos_proj = nn.Linear(args.decoder_embed_dim, 1)\n\n self.postnet = Postnet(self.out_dim, args.postnet_conv_dim,\n args.postnet_conv_kernel_size,\n args.postnet_layers, args.postnet_dropout)\n\n self.ctc_proj = None\n if getattr(args, \"ctc_weight\", 0.) > 0.:\n self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))\n\n self.apply(decoder_init)\n\n def extract_features(\n self, prev_outputs, encoder_out=None, incremental_state=None,\n target_lengths=None, speaker=None, **kwargs\n ):\n alignment_layer = self.n_transformer_layers - 1\n self_attn_padding_mask = lengths_to_padding_mask(target_lengths)\n positions = self.embed_positions(\n self_attn_padding_mask, incremental_state=incremental_state\n )\n\n if incremental_state is not None:\n prev_outputs = prev_outputs[:, -1:, :]\n self_attn_padding_mask = self_attn_padding_mask[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n x = self.prenet(prev_outputs)\n x += self.pos_emb_alpha * positions\n x = self.dropout_module(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n if not self_attn_padding_mask.any():\n self_attn_padding_mask = None\n\n attn: Optional[torch.Tensor] = None\n inner_states: List[Optional[torch.Tensor]] = [x]\n for idx, transformer_layer in enumerate(self.transformer_layers):\n if incremental_state is None:\n self_attn_mask = self.buffered_future_mask(x)\n else:\n self_attn_mask = None\n\n x, layer_attn, _ = transformer_layer(\n x,\n encoder_out[\"encoder_out\"][0]\n if (encoder_out is not None and len(encoder_out[\"encoder_out\"]) > 0)\n else None,\n encoder_out[\"encoder_padding_mask\"][0]\n if (\n encoder_out is not None\n and len(encoder_out[\"encoder_padding_mask\"]) > 0\n )\n else None,\n incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=bool((idx == alignment_layer)),\n need_head_weights=bool((idx == alignment_layer)),\n )\n inner_states.append(x)\n if layer_attn is not None and idx == alignment_layer:\n attn = layer_attn.float().to(x)\n\n if attn is not None:\n # 
average probabilities over heads, transpose to\n # (B, src_len, tgt_len)\n attn = attn.mean(dim=0).transpose(2, 1)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n return x, {\"attn\": attn, \"inner_states\": inner_states}\n\n def forward(self, prev_output_tokens, encoder_out=None,\n incremental_state=None, target_lengths=None, speaker=None,\n **kwargs):\n x, extra = self.extract_features(\n prev_output_tokens, encoder_out=encoder_out,\n incremental_state=incremental_state, target_lengths=target_lengths,\n speaker=speaker, **kwargs\n )\n attn = extra[\"attn\"]\n feat_out = self.feat_proj(x)\n bsz, seq_len, _ = x.size()\n eos_out = self.eos_proj(x)\n post_feat_out = feat_out + self.postnet(feat_out)\n return post_feat_out, eos_out, {\"attn\": attn, \"feature_out\": feat_out}\n\n def get_normalized_probs(self, net_output, log_probs, sample):\n logits = self.ctc_proj(net_output[2][\"feature_out\"])\n if log_probs:\n return utils.log_softmax(logits.float(), dim=-1)\n else:\n return utils.softmax(logits.float(), dim=-1)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.\n if (\n self._future_mask.size(0) == 0\n or (not self._future_mask.device == tensor.device)\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1\n )\n self._future_mask = self._future_mask.to(tensor)\n return self._future_mask[:dim, :dim]\n\n\n@register_model(\"tts_transformer\")\nclass TTSTransformerModel(FairseqEncoderDecoderModel):\n \"\"\"\n Implementation for https://arxiv.org/pdf/1809.08895.pdf\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--dropout\", type=float)\n parser.add_argument(\"--output-frame-dim\", type=int)\n parser.add_argument(\"--speaker-embed-dim\", type=int)\n # encoder prenet\n parser.add_argument(\"--encoder-dropout\", type=float)\n parser.add_argument(\"--encoder-conv-layers\", type=int)\n parser.add_argument(\"--encoder-conv-kernel-size\", type=int)\n # encoder transformer layers\n parser.add_argument(\"--encoder-transformer-layers\", type=int)\n parser.add_argument(\"--encoder-embed-dim\", type=int)\n parser.add_argument(\"--encoder-ffn-embed-dim\", type=int)\n parser.add_argument(\"--encoder-normalize-before\", action=\"store_true\")\n parser.add_argument(\"--encoder-attention-heads\", type=int)\n parser.add_argument(\"--attention-dropout\", type=float)\n parser.add_argument(\"--activation-dropout\", \"--relu-dropout\", type=float)\n parser.add_argument(\"--activation-fn\", type=str, default=\"relu\")\n # decoder prenet\n parser.add_argument(\"--prenet-dropout\", type=float)\n parser.add_argument(\"--prenet-layers\", type=int)\n parser.add_argument(\"--prenet-dim\", type=int)\n # decoder postnet\n parser.add_argument(\"--postnet-dropout\", type=float)\n parser.add_argument(\"--postnet-layers\", type=int)\n parser.add_argument(\"--postnet-conv-dim\", type=int)\n parser.add_argument(\"--postnet-conv-kernel-size\", type=int)\n # decoder transformer layers\n parser.add_argument(\"--decoder-transformer-layers\", type=int)\n parser.add_argument(\"--decoder-embed-dim\", type=int)\n parser.add_argument(\"--decoder-ffn-embed-dim\", type=int)\n parser.add_argument(\"--decoder-normalize-before\", action=\"store_true\")\n parser.add_argument(\"--decoder-attention-heads\", type=int)\n\n def __init__(self, *args, **kwargs):\n 
super().__init__(*args, **kwargs)\n self._num_updates = 0\n\n @classmethod\n def build_model(cls, args, task):\n embed_speaker = task.get_speaker_embeddings(args)\n encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker)\n decoder = TTSTransformerDecoder(args, task.src_dict)\n return cls(encoder, decoder)\n\n def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):\n return self.encoder(src_tokens, src_lengths=src_lengths,\n speaker=speaker, **kwargs)\n\n def set_num_updates(self, num_updates):\n super().set_num_updates(num_updates)\n self._num_updates = num_updates\n\n\n@register_model_architecture(\"tts_transformer\", \"tts_transformer\")\ndef base_architecture(args):\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.output_frame_dim = getattr(args, \"output_frame_dim\", 80)\n args.speaker_embed_dim = getattr(args, \"speaker_embed_dim\", 64)\n # encoder prenet\n args.encoder_dropout = getattr(args, \"encoder_dropout\", 0.5)\n args.encoder_conv_layers = getattr(args, \"encoder_conv_layers\", 3)\n args.encoder_conv_kernel_size = getattr(args, \"encoder_conv_kernel_size\", 5)\n # encoder transformer layers\n args.encoder_transformer_layers = getattr(args, \"encoder_transformer_layers\", 6)\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 4 * args.encoder_embed_dim)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n # decoder prenet\n args.prenet_dropout = getattr(args, \"prenet_dropout\", 0.5)\n args.prenet_layers = getattr(args, \"prenet_layers\", 2)\n args.prenet_dim = getattr(args, \"prenet_dim\", 256)\n # decoder postnet\n args.postnet_dropout = getattr(args, \"postnet_dropout\", 0.5)\n args.postnet_layers = getattr(args, \"postnet_layers\", 5)\n args.postnet_conv_dim = getattr(args, \"postnet_conv_dim\", 512)\n args.postnet_conv_kernel_size = getattr(args, \"postnet_conv_kernel_size\", 5)\n # decoder transformer layers\n args.decoder_transformer_layers = getattr(args, \"decoder_transformer_layers\", 6)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 512)\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 4 * args.decoder_embed_dim)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n",
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom typing import List, Dict, Any\nfrom dataclasses import dataclass, field\n\nimport torch\nimport torch.nn.functional as F\n\nfrom fairseq import metrics, utils\nfrom fairseq.criterions import FairseqCriterion, register_criterion\nfrom fairseq.dataclass import FairseqDataclass\nfrom fairseq.data.data_utils import lengths_to_mask\nfrom fairseq.models.fairseq_model import FairseqEncoderModel\n\n\n@dataclass\nclass FastSpeech2CriterionConfig(FairseqDataclass):\n ctc_weight: float = field(\n default=0.0, metadata={\"help\": \"weight for CTC loss\"}\n )\n\n\n@register_criterion(\"fastspeech2\", dataclass=FastSpeech2CriterionConfig)\nclass FastSpeech2Loss(FairseqCriterion):\n def __init__(self, task, ctc_weight):\n super().__init__(task)\n self.ctc_weight = ctc_weight\n\n def forward(self, model: FairseqEncoderModel, sample, reduction=\"mean\"):\n src_tokens = sample[\"net_input\"][\"src_tokens\"]\n src_lens = sample[\"net_input\"][\"src_lengths\"]\n tgt_lens = sample[\"target_lengths\"]\n _feat_out, _, log_dur_out, pitch_out, energy_out = model(\n src_tokens=src_tokens,\n src_lengths=src_lens,\n prev_output_tokens=sample[\"net_input\"][\"prev_output_tokens\"],\n incremental_state=None,\n target_lengths=tgt_lens,\n speaker=sample[\"speaker\"],\n durations=sample[\"durations\"],\n pitches=sample[\"pitches\"],\n energies=sample[\"energies\"]\n )\n\n src_mask = lengths_to_mask(sample[\"net_input\"][\"src_lengths\"])\n tgt_mask = lengths_to_mask(sample[\"target_lengths\"])\n\n pitches, energies = sample[\"pitches\"], sample[\"energies\"]\n pitch_out, pitches = pitch_out[src_mask], pitches[src_mask]\n energy_out, energies = energy_out[src_mask], energies[src_mask]\n\n feat_out, feat = _feat_out[tgt_mask], sample[\"target\"][tgt_mask]\n l1_loss = F.l1_loss(feat_out, feat, reduction=reduction)\n\n pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction)\n energy_loss = F.mse_loss(energy_out, energies, reduction=reduction)\n\n log_dur_out = log_dur_out[src_mask]\n dur = sample[\"durations\"].float()\n dur = dur.half() if log_dur_out.type().endswith(\".HalfTensor\") else dur\n log_dur = torch.log(dur + 1)[src_mask]\n dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction)\n\n ctc_loss = torch.tensor(0.).type_as(l1_loss)\n if self.ctc_weight > 0.:\n lprobs = model.get_normalized_probs((_feat_out,), log_probs=True)\n lprobs = lprobs.transpose(0, 1) # T x B x C\n src_mask = lengths_to_mask(src_lens)\n src_tokens_flat = src_tokens.masked_select(src_mask)\n ctc_loss = F.ctc_loss(\n lprobs, src_tokens_flat, tgt_lens, src_lens,\n reduction=reduction, zero_infinity=True\n ) * self.ctc_weight\n\n loss = l1_loss + dur_loss + pitch_loss + energy_loss + ctc_loss\n\n sample_size = sample[\"nsentences\"]\n logging_output = {\n \"loss\": utils.item(loss.data),\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"nsentences\"],\n \"sample_size\": sample_size,\n \"l1_loss\": utils.item(l1_loss.data),\n \"dur_loss\": utils.item(dur_loss.data),\n \"pitch_loss\": utils.item(pitch_loss.data),\n \"energy_loss\": utils.item(energy_loss.data),\n \"ctc_loss\": utils.item(ctc_loss.data),\n }\n return loss, sample_size, logging_output\n\n @classmethod\n def reduce_metrics(cls, logging_outputs: List[Dict[str, 
Any]]) -> None:\n        ns = [log.get(\"sample_size\", 0) for log in logging_outputs]\n        ntot = sum(ns)\n        ws = [n / (ntot + 1e-8) for n in ns]\n        for key in [\n            \"loss\", \"l1_loss\", \"dur_loss\", \"pitch_loss\", \"energy_loss\",\n            \"ctc_loss\"\n        ]:\n            vals = [log.get(key, 0) for log in logging_outputs]\n            val = sum(val * w for val, w in zip(vals, ws))\n            metrics.log_scalar(key, val, ntot, round=3)\n        metrics.log_scalar(\"sample_size\", ntot, len(logging_outputs))\n\n        # inference metrics\n        if \"targ_frames\" not in logging_outputs[0]:\n            return\n        n = sum(log.get(\"targ_frames\", 0) for log in logging_outputs)\n        for key, new_key in [\n            (\"mcd_loss\", \"mcd_loss\"),\n            (\"pred_frames\", \"pred_ratio\"),\n            (\"nins\", \"ins_rate\"),\n            (\"ndel\", \"del_rate\"),\n        ]:\n            val = sum(log.get(key, 0) for log in logging_outputs)\n            metrics.log_scalar(new_key, val / n, n, round=3)\n\n    @staticmethod\n    def logging_outputs_can_be_summed() -> bool:\n        return False\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport math\nimport operator\nimport os\nimport queue\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport torch\nfrom fairseq.data import data_utils\n\n\nlogger = logging.getLogger(__name__)\n\n# Object used by _background_consumer to signal the source is exhausted\n# to the main thread.\n_sentinel = object()\n\n\nclass CountingIterator(object):\n    \"\"\"Wrapper around an iterable that maintains the iteration count.\n\n    Args:\n        iterable (iterable): iterable to wrap\n        start (int): starting iteration count. Note that this doesn't\n            actually advance the iterator.\n        total (int): override the iterator length returned by ``__len__``.\n            This can be used to truncate *iterator*.\n\n    Attributes:\n        n (int): number of elements consumed from this iterator\n    \"\"\"\n\n    def __init__(self, iterable, start=None, total=None):\n        self._itr = iter(iterable)\n        self.n = start or getattr(iterable, \"n\", 0)\n        self.total = total or self.n + len(iterable)\n\n    def __len__(self):\n        return self.total\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if not self.has_next():\n            raise StopIteration\n        try:\n            x = next(self._itr)\n        except StopIteration:\n            raise IndexError(f\"Iterator expected to have length {self.total}, \"\n                             f\"but exhausted at position {self.n}.\")\n        self.n += 1\n        return x\n\n    def has_next(self):\n        \"\"\"Whether the iterator has more elements to consume.\"\"\"\n        return self.n < self.total\n\n    def skip(self, n):\n        \"\"\"Fast-forward the iterator by skipping n elements.\"\"\"\n        for _ in range(n):\n            next(self)\n        return self\n\n    def take(self, n):\n        \"\"\"Truncate the iterator to n elements at most.\"\"\"\n        self.total = min(self.total, n)\n        # Propagate this change to the underlying iterator\n        if hasattr(self._itr, \"take\"):\n            self._itr.take(max(n - self.n, 0))\n        return self\n\n\nclass EpochBatchIterating(object):\n    def __len__(self) -> int:\n        raise NotImplementedError\n\n    @property\n    def next_epoch_idx(self):\n        raise NotImplementedError\n\n    def next_epoch_itr(\n        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True\n    ):\n        \"\"\"Return a new iterator over the dataset.\n\n        Args:\n            shuffle (bool, optional): shuffle batches before returning the\n                iterator (default: True).\n            fix_batches_to_gpus (bool, optional): ensure that batches are always\n                allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n set_dataset_epoch (bool, optional): update the wrapped Dataset with\n the new epoch number (default: True).\n \"\"\"\n raise NotImplementedError\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n raise NotImplementedError\n\n @property\n def iterations_in_epoch(self) -> int:\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n raise NotImplementedError\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n raise NotImplementedError\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n raise NotImplementedError\n\n @property\n def first_batch(self):\n return \"DUMMY\"\n\n\nclass StreamingEpochBatchIterator(EpochBatchIterating):\n \"\"\"A steaming-style iterator over a :class:`torch.utils.data.IterableDataset`.\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset from which to load the data\n max_sentences: batch size\n collate_fn (callable): merges a list of samples to form a mini-batch\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 1).\n buffer_size (int, optional): the number of batches to keep ready in the\n queue. Helps speeding up dataloading. When buffer_size is zero, the\n default torch.utils.data.DataLoader preloading is used.\n timeout (int, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative (default: ``0``).\n \"\"\"\n\n def __init__(\n self,\n dataset,\n max_sentences=1,\n collate_fn=None,\n epoch=1,\n num_workers=0,\n buffer_size=0,\n timeout=0,\n ):\n assert isinstance(dataset, torch.utils.data.IterableDataset)\n self.dataset = dataset\n self.max_sentences = max_sentences\n self.collate_fn = collate_fn\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self.num_workers = num_workers\n # This upper limit here is to prevent people from abusing this feature\n # in a shared computing environment.\n self.buffer_size = min(buffer_size, 20)\n self.timeout = timeout\n\n self._current_epoch_iterator = None\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._current_epoch_iterator is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(\n self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True\n ):\n self.epoch = self.next_epoch_idx\n if set_dataset_epoch and hasattr(self.dataset, \"set_epoch\"):\n self.dataset.set_epoch(self.epoch)\n self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)\n return self._current_epoch_iterator\n\n def end_of_epoch(self) -> bool:\n return not self._current_epoch_iterator.has_next()\n\n @property\n def iterations_in_epoch(self) -> int:\n if self._current_epoch_iterator is not None:\n return self._current_epoch_iterator.n\n return 0\n\n def state_dict(self):\n return {\n \"epoch\": self.epoch,\n }\n\n def load_state_dict(self, state_dict):\n self.epoch = state_dict[\"epoch\"]\n\n def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):\n if self.num_workers > 0:\n os.environ[\"PYTHONWARNINGS\"] = \"ignore:semaphore_tracker:UserWarning\"\n\n # Create data loader\n 
worker_init_fn = getattr(self.dataset, \"worker_init_fn\", None)\n itr = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=self.max_sentences,\n collate_fn=self.collate_fn,\n num_workers=self.num_workers,\n timeout=self.timeout,\n worker_init_fn=worker_init_fn,\n pin_memory=True,\n )\n\n # Wrap with a BufferedIterator if needed\n if self.buffer_size > 0:\n itr = BufferedIterator(self.buffer_size, itr)\n\n # Wrap with CountingIterator\n itr = CountingIterator(itr, start=offset)\n\n return itr\n\n\nclass EpochBatchIterator(EpochBatchIterating):\n \"\"\"A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.\n\n Compared to :class:`torch.utils.data.DataLoader`, this iterator:\n\n - can be reused across multiple epochs with the :func:`next_epoch_itr`\n method (optionally shuffled between epochs)\n - can be serialized/deserialized with the :func:`state_dict` and\n :func:`load_state_dict` methods\n - supports sharding with the *num_shards* and *shard_id* arguments\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset from which to load the data\n collate_fn (callable): merges a list of samples to form a mini-batch\n batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of\n indices, or a callable to create such an iterator (~torch.utils.data.Sampler).\n A callable batch_sampler will be called for each epoch to enable per epoch dynamic\n batch iterators defined by this callable batch_sampler.\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 1).\n buffer_size (int, optional): the number of batches to keep ready in the\n queue. Helps speeding up dataloading. When buffer_size is zero, the\n default torch.utils.data.DataLoader preloading is used.\n timeout (int, optional): if positive, the timeout value for collecting a batch\n from workers. 
Should always be non-negative (default: ``0``).\n disable_shuffling (bool, optional): force disable shuffling\n (default: ``False``).\n \"\"\"\n\n def __init__(\n self,\n dataset,\n collate_fn,\n batch_sampler,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=1,\n buffer_size=0,\n timeout=0,\n disable_shuffling=False,\n ):\n assert isinstance(dataset, torch.utils.data.Dataset)\n self.dataset = dataset\n self.collate_fn = collate_fn\n self.batch_sampler = batch_sampler\n self._frozen_batches = (\n tuple(batch_sampler) if not callable(batch_sampler) else None\n )\n self.seed = seed\n self.num_shards = num_shards\n self.shard_id = shard_id\n self.num_workers = num_workers\n # This upper limit here is to prevent people from abusing this feature\n # in a shared computing environment.\n self.buffer_size = min(buffer_size, 20)\n self.timeout = timeout\n self.disable_shuffling = disable_shuffling\n\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self.shuffle = not disable_shuffling\n self._cur_epoch_itr = None\n self._next_epoch_itr = None\n self._supports_prefetch = getattr(dataset, \"supports_prefetch\", False)\n\n @property\n def frozen_batches(self):\n if self._frozen_batches is None:\n self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))\n return self._frozen_batches\n\n @property\n def first_batch(self):\n if len(self.frozen_batches) == 0:\n raise Exception(\n \"The dataset is empty. This could indicate \"\n \"that all elements in the dataset have been skipped. \"\n \"Try increasing the max number of allowed tokens or using \"\n \"a larger dataset.\"\n )\n\n if getattr(self.dataset, \"supports_fetch_outside_dataloader\", True):\n return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])\n else:\n return \"DUMMY\"\n\n def __len__(self):\n return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))\n\n @property\n def n(self):\n return self.iterations_in_epoch\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._next_epoch_itr is not None:\n return self.epoch\n elif self._cur_epoch_itr is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(\n self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True\n ):\n \"\"\"Return a new iterator over the dataset.\n\n Args:\n shuffle (bool, optional): shuffle batches before returning the\n iterator (default: True).\n fix_batches_to_gpus (bool, optional): ensure that batches are always\n allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n set_dataset_epoch (bool, optional): update the wrapped Dataset with\n the new epoch number (default: True).\n \"\"\"\n if self.disable_shuffling:\n shuffle = False\n prev_epoch = self.epoch\n self.epoch = self.next_epoch_idx\n if set_dataset_epoch and hasattr(self.dataset, \"set_epoch\"):\n self.dataset.set_epoch(self.epoch)\n if self._next_epoch_itr is not None:\n self._cur_epoch_itr = self._next_epoch_itr\n self._next_epoch_itr = None\n else:\n if callable(self.batch_sampler) and prev_epoch != self.epoch:\n # reset _frozen_batches to refresh the next epoch\n self._frozen_batches = None\n self._cur_epoch_itr = self._get_iterator_for_epoch(\n self.epoch,\n shuffle,\n fix_batches_to_gpus=fix_batches_to_gpus,\n )\n self.shuffle = shuffle\n return self._cur_epoch_itr\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n return not self._cur_epoch_itr.has_next()\n\n @property\n def iterations_in_epoch(self):\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.n\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.n\n return 0\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n if self.end_of_epoch():\n epoch = self.epoch + 1\n iter_in_epoch = 0\n else:\n epoch = self.epoch\n iter_in_epoch = self.iterations_in_epoch\n return {\n \"version\": 2,\n \"epoch\": epoch,\n \"iterations_in_epoch\": iter_in_epoch,\n \"shuffle\": self.shuffle,\n }\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n self.epoch = state_dict[\"epoch\"]\n itr_pos = state_dict.get(\"iterations_in_epoch\", 0)\n version = state_dict.get(\"version\", 1)\n if itr_pos > 0:\n # fast-forward epoch iterator\n self._next_epoch_itr = self._get_iterator_for_epoch(\n self.epoch,\n shuffle=state_dict.get(\"shuffle\", True),\n offset=itr_pos,\n )\n if self._next_epoch_itr is None:\n if version == 1:\n # legacy behavior: we finished the epoch, increment epoch counter\n self.epoch += 1\n else:\n raise RuntimeError(\n \"Cannot resume training due to dataloader mismatch, please \"\n \"report this to the fairseq developers. 
You can relaunch \"\n \"training with `--reset-dataloader` and it should work.\"\n )\n else:\n self._next_epoch_itr = None\n\n def _get_iterator_for_epoch(\n self, epoch, shuffle, fix_batches_to_gpus=False, offset=0\n ):\n def shuffle_batches(batches, seed):\n with data_utils.numpy_seed(seed):\n np.random.shuffle(batches)\n return batches\n\n if self._supports_prefetch:\n batches = self.frozen_batches\n\n if shuffle and not fix_batches_to_gpus:\n batches = shuffle_batches(list(batches), self.seed + epoch)\n\n batches = list(\n ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])\n )\n self.dataset.prefetch([i for s in batches for i in s])\n\n if shuffle and fix_batches_to_gpus:\n batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)\n else:\n if shuffle:\n batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)\n else:\n batches = self.frozen_batches\n batches = list(\n ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])\n )\n\n if offset > 0 and offset >= len(batches):\n return None\n\n if self.num_workers > 0:\n os.environ[\"PYTHONWARNINGS\"] = \"ignore:semaphore_tracker:UserWarning\"\n\n # Create data loader\n itr = torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.collate_fn,\n batch_sampler=batches[offset:],\n num_workers=self.num_workers,\n timeout=self.timeout,\n pin_memory=True,\n )\n\n # Wrap with a BufferedIterator if needed\n if self.buffer_size > 0:\n itr = BufferedIterator(self.buffer_size, itr)\n\n # Wrap with CountingIterator\n itr = CountingIterator(itr, start=offset)\n return itr\n\n\nclass GroupedIterator(CountingIterator):\n \"\"\"Wrapper around an iterable that returns groups (chunks) of items.\n\n Args:\n iterable (iterable): iterable to wrap\n chunk_size (int): size of each chunk\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, chunk_size):\n itr = _chunk_iterator(iterable, chunk_size)\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, \"n\", 0) / float(chunk_size))),\n total=int(math.ceil(len(iterable) / float(chunk_size))),\n )\n self.chunk_size = chunk_size\n\n\ndef _chunk_iterator(itr, chunk_size):\n chunk = []\n for x in itr:\n chunk.append(x)\n if len(chunk) == chunk_size:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk\n\n\nclass ShardedIterator(CountingIterator):\n \"\"\"A sharded wrapper around an iterable, padded to length.\n\n Args:\n iterable (iterable): iterable to wrap\n num_shards (int): number of shards to split the iterable into\n shard_id (int): which shard to iterator over\n fill_value (Any, optional): padding value when the iterable doesn't\n evenly divide *num_shards* (default: None).\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, num_shards, shard_id, fill_value=None):\n if shard_id < 0 or shard_id >= num_shards:\n raise ValueError(\"shard_id must be between 0 and num_shards\")\n sharded_len = int(math.ceil(len(iterable) / float(num_shards)))\n itr = map(\n operator.itemgetter(1),\n itertools.zip_longest(\n range(sharded_len),\n itertools.islice(iterable, shard_id, len(iterable), num_shards),\n fillvalue=fill_value,\n ),\n )\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, \"n\", 0) / float(num_shards))),\n total=sharded_len,\n )\n\n\nclass BackgroundConsumer(Thread):\n def __init__(self, queue, source, max_len, cuda_device):\n Thread.__init__(self)\n\n self._queue = queue\n 
self._source = source\n self._max_len = max_len\n self.count = 0\n self.cuda_device = cuda_device\n\n def run(self):\n # set_device to avoid creation of GPU0 context when using pin_memory\n if self.cuda_device is not None:\n torch.cuda.set_device(self.cuda_device)\n\n try:\n for item in self._source:\n self._queue.put(item)\n\n # Stop if we reached the maximum length\n self.count += 1\n if self._max_len is not None and self.count >= self._max_len:\n break\n\n # Signal the consumer we are done.\n self._queue.put(_sentinel)\n except Exception as e:\n self._queue.put(e)\n\n\nclass BufferedIterator(object):\n def __init__(self, size, iterable):\n self._queue = queue.Queue(size)\n self._iterable = iterable\n self._consumer = None\n\n self.start_time = time.time()\n self.warning_time = None\n\n self.total = len(iterable)\n\n def _create_consumer(self):\n self._consumer = BackgroundConsumer(\n self._queue,\n self._iterable,\n self.total,\n torch.cuda.current_device() if torch.cuda.is_available() else None\n )\n self._consumer.daemon = True\n self._consumer.start()\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return self.total\n\n def take(self, n):\n self.total = min(self.total, n)\n # Propagate this change to the underlying iterator\n if hasattr(self._iterable, \"take\"):\n self._iterable.take(n)\n return self\n\n def __next__(self):\n # Create consumer if not created yet\n if self._consumer is None:\n self._create_consumer()\n\n # Notify the user if there is a data loading bottleneck\n if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):\n if time.time() - self.start_time > 5 * 60:\n if (\n self.warning_time is None\n or time.time() - self.warning_time > 15 * 60\n ):\n logger.debug(\n \"Data loading buffer is empty or nearly empty. This may \"\n \"indicate a data loading bottleneck, and increasing the \"\n \"number of workers (--num-workers) may help.\"\n )\n self.warning_time = time.time()\n\n # Get next example\n item = self._queue.get(True)\n if isinstance(item, Exception):\n raise item\n if item is _sentinel:\n raise StopIteration()\n return item\n\nclass GroupedEpochBatchIterator(EpochBatchIterator):\n \"\"\"Grouped version of EpochBatchIterator\n It takes several samplers from different datasets.\n Each epoch shuffle the dataset wise sampler individually with different\n random seed. The those sub samplers are combined with into\n one big samplers with deterministic permutation to mix batches from\n different datasets. 
It will act like EpochBatchIterator but make sure\n 1) data from one data set each time\n 2) for different workers, they use the same order to fetch the data\n so they will use data from the same dataset everytime\n mult_rate is used for update_freq > 1 case where we want to make sure update_freq\n mini-batches come from same source\n \"\"\"\n\n def __init__(\n self,\n dataset,\n collate_fn,\n batch_samplers,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=0,\n mult_rate=1,\n buffer_size=0,\n ):\n super().__init__(\n dataset,\n collate_fn,\n batch_samplers,\n seed,\n num_shards,\n shard_id,\n num_workers,\n epoch,\n buffer_size,\n )\n # level 0: sub-samplers 1: batch_idx 2: batches\n self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers])\n self.step_size = mult_rate * num_shards\n\n self.lengths = [\n (len(x) // self.step_size) * self.step_size for x in self.frozen_batches\n ]\n\n def __len__(self):\n return sum(self.lengths)\n\n @property\n def first_batch(self):\n if len(self.frozen_batches) == 0:\n raise Exception(\n \"The dataset is empty. This could indicate \"\n \"that all elements in the dataset have been skipped. \"\n \"Try increasing the max number of allowed tokens or using \"\n \"a larger dataset.\"\n )\n\n if self.dataset.supports_fetch_outside_dataloader:\n return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]])\n else:\n return \"DUMMY\"\n\n def _get_iterator_for_epoch(\n self, epoch, shuffle, fix_batches_to_gpus=False, offset=0\n ):\n def shuffle_batches(batches, seed):\n with data_utils.numpy_seed(seed):\n np.random.shuffle(batches)\n return batches\n\n def return_full_batches(batch_sets, seed, shuffle):\n if shuffle:\n batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets]\n\n batch_sets = [\n batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets))\n ]\n batches = list(itertools.chain.from_iterable(batch_sets))\n\n if shuffle:\n with data_utils.numpy_seed(seed):\n idx = np.random.permutation(len(batches) // self.step_size)\n if len(idx) * self.step_size != len(batches):\n raise ValueError(\n \"ERROR: %d %d %d %d\"\n % (len(idx), self.step_size, len(batches), self.shard_id),\n \":\".join([\"%d\" % x for x in self.lengths]),\n )\n mini_shards = [\n batches[i * self.step_size : (i + 1) * self.step_size]\n for i in idx\n ]\n batches = list(itertools.chain.from_iterable(mini_shards))\n\n return batches\n\n if self._supports_prefetch:\n raise NotImplementedError(\"To be implemented\")\n else:\n batches = return_full_batches(\n self.frozen_batches, self.seed + epoch, shuffle\n )\n batches = list(\n ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])\n )\n\n if offset > 0 and offset >= len(batches):\n return None\n\n if self.num_workers > 0:\n os.environ[\"PYTHONWARNINGS\"] = \"ignore:semaphore_tracker:UserWarning\"\n\n itr = torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.collate_fn,\n batch_sampler=batches[offset:],\n num_workers=self.num_workers,\n )\n if self.buffer_size > 0:\n itr = BufferedIterator(self.buffer_size, itr)\n\n return CountingIterator(itr, start=offset)\n"
] | [
[
"torch.jit.load",
"torch.ones",
"torch.load",
"torch.tensor",
"torch.cuda.is_available",
"torch.device"
],
[
"torch.cat",
"torch.zeros",
"torch.nn.Linear",
"torch.no_grad",
"torch.bmm",
"torch.nn.ReLU"
],
[
"torch.cuda.device_count"
],
[
"torch.nn.init.calculate_gain",
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.ones",
"torch.empty",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.Conv1d",
"torch.nn.ReLU"
],
[
"torch.nn.functional.l1_loss",
"torch.tensor",
"torch.nn.functional.mse_loss",
"torch.log",
"torch.nn.functional.ctc_loss"
],
[
"torch.cuda.set_device",
"torch.cuda.current_device",
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FelixVi/Bedrock | [
"82072341902048e5b37022512909d209efb243d6"
] | [
"projects/common/get_raw_adcs.py"
] | [
"import time\nimport struct\nfrom banyan_ch_find import banyan_ch_find\nimport numpy\nimport datetime\nimport sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../submodules/FEED/src/python\"))\n\n# Grab the start time early, so things like\n# python get_raw_adcs.py | tee `date \"+%Y%m%d_%H%M%S\"`.log\n# will usually get a timestamp that matches\nstart_time = datetime.datetime.now()\n\n\ndef get_raw_adcs_run(dev, filewritepath='raw_adcs_', mask=\"0xff\", npt_wish=0, count=10, save_data=True, verbose=False):\n\n b_status = dev.reg_read([('banyan_status')])[0]\n npt = 1 << ((b_status >> 24) & 0x3F)\n if npt == 1:\n print(\"aborting since hardware module not present\")\n sys.exit(2)\n mask_int = int(mask, 0)\n # npt_wish only works correctly if mask is 0xff\n if npt_wish and npt_wish < npt and mask_int == 0xff:\n npt = npt_wish\n print(\"npt = %d\" % npt)\n\n dev.reg_write([('banyan_mask', mask_int)])\n chans = banyan_ch_find(mask_int)\n print(chans, 8/len(chans))\n nptx = int(npt*8/len(chans))\n theta = numpy.array(range(nptx))*7*2*numpy.pi/33\n basis = numpy.vstack((numpy.cos(theta), numpy.sin(theta), theta*0+1)).T\n chan_txt = \"column assignment for banyan_mask 0x%2.2x: \" % mask_int + \" \".join([\"%d\" % x for x in chans])\n\n header = ''\n filename = ''\n\n for run_n in range(count):\n print(run_n)\n (block, timestamp) = collect_adcs(dev, npt, len(chans))\n nblock = numpy.array(block).transpose()\n coeffzs = []\n for jx in range(len(chans) if verbose else 0):\n fit = numpy.linalg.lstsq(basis, nblock.T[jx], rcond=-1)\n coeff = fit[0]\n coeffz = coeff[0]+1j*coeff[1]\n print_dbfs = numpy.log10(abs(coeffz)/32768.0)*20\n tup = jx, abs(coeffz), print_dbfs, numpy.angle(coeffz)*180/numpy.pi\n print(\"analysis %d %7.1f %7.2f dBFS %7.2f degrees\" % tup)\n coeffzs += [coeffz]\n if verbose and len(chans) == 2:\n diff1 = (numpy.angle(coeffzs[1]) - numpy.angle(coeffzs[0]))*180/numpy.pi\n if diff1 > 180:\n diff1 -= 360\n if diff1 < -180:\n diff1 += 360\n print(\"difference %6.2f\" % diff1)\n if save_data is True:\n # ISO 8601 2016-06-02T16:06:14Z\n datetimestr = datetime.datetime.utcnow().isoformat()+\"Z \"+str(timestamp)\n header = \"\\n\".join([datetimestr, chan_txt])\n\n data_dir = start_time.strftime(filewritepath + '%Y%m%d_%H%M%S')\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n filename = data_dir + '/raw_z_%2.2d' % (run_n)\n numpy.savetxt(filename, nblock, fmt=\"%d\", header=header)\n return header, filename, block\n\n\ndef pair_ram_prc(prc, addr, count):\n foo = prc.reg_read_alist(range(addr, addr+count))\n uuu = [struct.unpack('!hh', x[2]) for x in foo]\n ram1 = [x[1] for x in uuu]\n ram2 = [x[0] for x in uuu]\n return [ram1, ram2]\n\n\ndef reshape_buffer(buf, astep, npt):\n # TODO: This copy can be avoided if leep/raw.py spits out numpy arrays,\n # which we should look into\n data = numpy.array(buf)\n # Read the upper and lower part of the banyan buffer\n p1, p2 = (data & 0xffff).astype('int16'), ((data >> 16) & 0xffff).astype('int16')\n out = numpy.empty((8, 2*astep))\n # Interleave p1 and p2\n out[::2, :] = p1.reshape(4, 2*astep)\n out[1::2, :] = p2.reshape(4, 2*astep)\n out = out[:, :npt]\n return out\n\n\ndef gen_test_data(npt):\n return numpy.hstack([numpy.ones(npt).astype(numpy.int32) * (i+1) for i in range(8)])\n\n\ndef collect(dev, npt, print_minmax=True, allow_clk_frozen=False):\n dev.reg_write([('rawadc_trig', 1)])\n timestamp, minmax = slow_chain_readout(dev)\n if print_minmax:\n print(\" \".join([\"%d\" % x for x in minmax]), \"%.8f\" % 
(timestamp*14/1320.0e6))\n while True:\n time.sleep(0.002)\n status = dev.reg_read(['banyan_status', 'clk_status_out'])\n b_status = status[0]\n clk_status = status[1]\n # print \"%8.8x\"%b_status\n if not (b_status & 0x80000000):\n break\n # See logic for clk_status_r in digitizer_config.v, and associated comments.\n # The allow_clk_frozen feature is needed because collect() is called by zest_setup.py\n # as part of the data transfer verification process.\n if not (clk_status == 2 or allow_clk_frozen and clk_status == 1):\n print('Loss of clock detected! Rerun \"zest_setup.py -r\" to recover. Disaster, aborting!')\n exit(3)\n astep = 1 << ((b_status >> 24) & 0x3F)\n\n # TODO: The I/O call here takes twice as long, as leep/raw.py is not aware of the banyan, dual\n # read option. The old collect_prc takes advantage of that but not leep.\n full_buffer, = dev.reg_read([('banyan_data')])\n # full_buffer = gen_test_data(npt) # For debugging\n return reshape_buffer(full_buffer, astep, npt), timestamp\n\n\ndef collect_prc(prc, npt, print_minmax=True, allow_clk_frozen=False):\n prc.reg_write([{'rawadc_trig': 1}])\n (timestamp, minmax) = prc.slow_chain_readout()\n if print_minmax:\n print(\" \".join([\"%d\" % x for x in minmax]), \"%.8f\" % (timestamp*14/1320.0e6))\n while True:\n time.sleep(0.002)\n status = prc.reg_read_value(['banyan_status', 'clk_status_out'])\n b_status = status[0]\n clk_status = status[1]\n # print \"%8.8x\"%b_status\n if not (b_status & 0x80000000):\n break\n # See logic for clk_status_r in digitizer_config.v, and associated comments.\n # The allow_clk_frozen feature is needed because collect() is called by zest_setup.py\n # as part of the data transfer verification process.\n if not (clk_status == 2 or allow_clk_frozen and clk_status == 1):\n print('Loss of clock detected! Rerun \"zest_setup.py -r\" to recover. 
Disaster, aborting!')\n exit(3)\n astep = 1 << ((b_status >> 24) & 0x3F)\n addr_wave0 = prc.get_read_address('banyan_data')\n value = []\n for ix in range(0, 8, 2):\n value.extend(pair_ram_prc(prc, addr_wave0+ix*astep, npt))\n return (value, timestamp)\n\n\ndef collect_adcs(dev, npt, nchans, print_minmax=True):\n '''\n nchans must be the result of len(banyan_ch_find())\n '''\n value, timestamp = collect(dev, npt, print_minmax)\n # value holds 8 raw RAM blocks\n # block will have these assembled into ADC channels\n mult = 8//nchans\n block = []\n for ix in range(nchans):\n ch_data = value[ix*mult:(ix+1)*mult].reshape(mult*npt)\n block.append(ch_data)\n return block, timestamp\n\n\ndef process_adcs(dev, npt, mask_int): # ,block,timestamp):\n chans = banyan_ch_find(mask_int)\n nptx = int(npt*(8/len(chans)))\n theta = numpy.array(range(nptx))*7*2*numpy.pi/33\n basis = numpy.vstack((numpy.cos(theta), numpy.sin(theta), theta*0+1)).T\n (block, timestamp) = collect_adcs(dev, npt, len(chans), print_minmax=False)\n nblock = numpy.array(block).transpose()\n # print 'len(chans)',len(chans),type(block),len(block),len(block[0]),nblock.T.shape\n result = []\n phase0 = 0\n for ichan in range(len(chans)):\n fit = numpy.linalg.lstsq(basis, nblock.T[ichan], rcond=-1)\n coeffz = fit[0][0]+1j*fit[0][1]\n phase0 = numpy.angle(coeffz)*180/numpy.pi if ichan == 0 else phase0\n # print phase0\n result.extend([abs(coeffz), (numpy.angle(coeffz)*180/numpy.pi-phase0) % 360])\n return result\n\n\ndef slow_chain_unpack(readlist):\n nums = [256*readlist[ix]+readlist[ix+1] for ix in range(0, 32, 2)]\n nums = [x if x < 32768 else x-65536 for x in nums]\n timestamp = 0\n for ix in range(8):\n timestamp = timestamp*256 + readlist[41-ix]\n timestamp = timestamp/32 # integer number of 1320/14 MHz adc_clk cycles\n # ignore old_tag and new_tag for now\n return (timestamp, nums) # nums is 16-long list of minmax values\n\n\ndef slow_chain_readout(dev):\n readlist = dev.reg_read(42*[('slow_chain_out')])\n return slow_chain_unpack(readlist)\n\n\ndef usage():\n print(\"python get_raw_adcs_.py -a leep://192.168.21.12 -m 0xff -n 1 -c 8192\")\n\n\nif __name__ == \"__main__\":\n\n from argparse import ArgumentParser\n\n parser = ArgumentParser(description=\"Banyan Spurs: Legacy logic on waveform acquisition and logging\")\n\n parser.add_argument('-a', '--address', dest=\"dev_addr\", default=None,\n help='Device URL (leep://<IP> or ca://<PREFIX>)')\n parser.add_argument('-D', '--dir', dest='filewritepath', default=\"raw_adcs_\",\n help='Log/data directory prefix (can include path)')\n parser.add_argument('-m', '--mask', dest=\"mask\", default=\"0xff\",\n help='Channel mask')\n parser.add_argument('-n', '--npt', dest=\"npt_wish\", default=0, type=int,\n help='Number of points per acquisition')\n parser.add_argument('-c', '--count', dest=\"count\", default=10, type=int,\n help='Number of acquisitions')\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,\n help='Verbose mode')\n\n args = parser.parse_args()\n\n print(\"get_raw_adcs: collect and save Banyan waveforms\")\n\n import leep\n\n print(\"Raw ADC acquisition\")\n print('Carrier board URL %s' % args.dev_addr)\n dev = leep.open(args.dev_addr, instance=[])\n\n get_raw_adcs_run(dev, filewritepath=args.filewritepath, mask=args.mask,\n npt_wish=args.npt_wish, count=args.count, verbose=args.verbose)\n print(\"Done\")\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.linalg.lstsq",
"numpy.savetxt",
"numpy.angle",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
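The Bedrock `get_raw_adcs.py` entry above extracts tone amplitude and phase per ADC channel by least-squares fitting each record against a cos/sin/constant basis (`numpy.linalg.lstsq`, `numpy.angle` from its API list). A minimal sketch of that fit on synthetic data, assuming an illustrative record length, amplitude, and noise level (only the 7*2*pi/33 tone spacing is taken from the listing):

import numpy as np

npt = 1024                                            # record length (assumed for illustration)
theta = np.arange(npt) * 7 * 2 * np.pi / 33           # tone phase per sample, as in the listing
basis = np.vstack((np.cos(theta), np.sin(theta), np.ones(npt))).T

# Hypothetical test signal: amplitude 1000 counts, phase 30 degrees, small additive noise
signal = 1000 * np.cos(theta - np.deg2rad(30)) + np.random.normal(0.0, 1.0, npt)

coeff, *_ = np.linalg.lstsq(basis, signal, rcond=-1)  # [cos, sin, dc] coefficients
coeffz = coeff[0] + 1j * coeff[1]                     # complex tone amplitude
print("amplitude %7.1f  phase %7.2f degrees" % (abs(coeffz), np.angle(coeffz) * 180 / np.pi))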
AlmondDust/Assignment3-Final-AIFO | [
"b006b2090a7b597fde7f92e9d9fbf204bc3c993e"
] | [
"aifo_simulation/java-code/analysis/analyze_1s.py"
] | [
"import numpy as np\nimport csv\nimport sys\nimport os\n\n\n##################################\n# Setup\n#\n\nprint(\"NetBench python analysis tool v0.01\")\n\n# Usage print\ndef print_usage():\n print(\"Usage: python analyze.py /path/to/run/folder\")\n\n# Check length of arguments\nif len(sys.argv) != 2:\n print(\"Number of arguments must be exactly two: analyze.py and /path/to/run/folder.\")\n print_usage()\n exit()\n\n# Check run folder path given as first argument\nrun_folder_path = sys.argv[1]\nif not os.path.isdir(run_folder_path):\n print(\"The run folder path does not exist: \" + run_folder_path)\n print_usage()\n exit()\n\n# Create analysis folder\nanalysis_folder_path = run_folder_path + '/analysis_1s'\nif not os.path.exists(analysis_folder_path):\n os.makedirs(analysis_folder_path)\n\n\n##################################\n# Analyze flow completion\n#\ndef analyze_flow_completion():\n with open(run_folder_path + '/flow_completion.csv.log') as file:\n reader = csv.reader(file)\n\n # To enable preliminary read to determine size:\n # data = list(reader)\n # row_count = len(data)\n\n # Column lists\n flow_ids = []\n source_ids = []\n target_ids = []\n sent_bytes = []\n total_size_bytes = []\n start_time = []\n end_time = []\n duration = []\n completed = []\n\n print(\"Reading in flow completion log file...\")\n\n # Read in column lists\n for row in reader:\n st = float(row[5])\n\n if st >= 500000000 and st < 1500000000:\n flow_ids.append(float(row[0]))\n source_ids.append(float(row[1]))\n target_ids.append(float(row[2]))\n sent_bytes.append(float(row[3]))\n total_size_bytes.append(float(row[4]))\n start_time.append(float(row[5]))\n end_time.append(float(row[6]))\n duration.append(float(row[7]))\n completed.append(row[8] == 'TRUE')\n if len(row) != 9:\n print(\"Invalid row: \", row)\n exit()\n\n print(\"Calculating statistics...\")\n\n statistics = {\n 'general_num_flows': len(flow_ids),\n 'general_num_unique_sources': len(set(source_ids)),\n 'general_num_unique_targets': len(set(target_ids)),\n 'general_flow_size_bytes_mean': np.mean(total_size_bytes),\n 'general_flow_size_bytes_std': np.std(total_size_bytes)\n }\n\n range_low = [-1, -1, -1, 100000, 2434900, 1000000, 10000000]\n range_high = [-1, 100000, 2434900, -1, -1, -1, -1]\n range_name = [\"all\", \"less_100KB\", \"less_2.4349MB\", \"geq_100KB\", \"geq_2.4349MB\", \"geq_1MB\", \"geq_10MB\"]\n range_completed_duration = [[], [], [], [], [], [], []]\n range_completed_throughput = [[], [], [], [], [], [], []]\n range_num_finished_flows = [0, 0, 0, 0, 0, 0, 0]\n range_num_unfinished_flows = [0, 0, 0, 0, 0, 0, 0]\n range_low_eq = [0, 0, 0, 1, 1, 1, 1,]\n range_high_eq = [0, 0, 0, 1, 1, 1, 1,]\n\n\n # Go over all flows\n for i in range(0, len(flow_ids)):\n\n # Range-specific\n for j in range(0, len(range_name)):\n if (\n (range_low[j] == -1 or (range_low_eq[j] == 0 and total_size_bytes[i] > range_low[j]) or (range_low_eq[j] == 1 and total_size_bytes[i] >= range_low[j])) and\n (range_high[j] == -1 or (range_high_eq[j] == 0 and total_size_bytes[i] < range_high[j]) or (range_high_eq[j] == 1 and total_size_bytes[i] <= range_high[j]))\n ):\n if completed[i]:\n range_num_finished_flows[j] += 1\n range_completed_duration[j].append(duration[i])\n range_completed_throughput[j].append(total_size_bytes[i] * 8 / duration[i])\n\n else:\n range_num_unfinished_flows[j] += 1\n\n # Ranges statistics\n for j in range(0, len(range_name)):\n\n # Number of finished flows\n statistics[range_name[j] + '_num_flows'] = range_num_finished_flows[j] + 
range_num_unfinished_flows[j]\n statistics[range_name[j] + '_num_finished_flows'] = range_num_finished_flows[j]\n statistics[range_name[j] + '_num_unfinished_flows'] = range_num_unfinished_flows[j]\n total = (range_num_finished_flows[j] + range_num_unfinished_flows[j])\n if range_num_finished_flows[j] != 0:\n statistics[range_name[j] + '_flows_completed_fraction'] = float(range_num_finished_flows[j]) / float(total)\n statistics[range_name[j] + '_mean_fct_ns'] = np.mean(range_completed_duration[j])\n statistics[range_name[j] + '_median_fct_ns'] = np.median(range_completed_duration[j])\n statistics[range_name[j] + '_99th_fct_ns'] = np.percentile(range_completed_duration[j], 99)\n statistics[range_name[j] + '_99.9th_fct_ns'] = np.percentile(range_completed_duration[j], 99.9)\n statistics[range_name[j] + '_mean_fct_ms'] = statistics[range_name[j] + '_mean_fct_ns'] / 1000000\n statistics[range_name[j] + '_median_fct_ms'] = statistics[range_name[j] + '_median_fct_ns'] / 1000000\n statistics[range_name[j] + '_99th_fct_ms'] = statistics[range_name[j] + '_99th_fct_ns'] / 1000000\n statistics[range_name[j] + '_99.9th_fct_ms'] = statistics[range_name[j] + '_99.9th_fct_ns'] / 1000000\n statistics[range_name[j] + '_throughput_mean_Gbps'] = np.mean(range_completed_throughput[j])\n statistics[range_name[j] + '_throughput_median_Gbps'] = np.median(range_completed_throughput[j])\n statistics[range_name[j] + '_throughput_99th_Gbps'] = np.percentile(range_completed_throughput[j], 99)\n statistics[range_name[j] + '_throughput_99.9th_Gbps'] = np.percentile(range_completed_throughput[j], 99.9)\n statistics[range_name[j] + '_throughput_1th_Gbps'] = np.percentile(range_completed_throughput[j], 1)\n statistics[range_name[j] + '_throughput_0.1th_Gbps'] = np.percentile(range_completed_throughput[j], 0.1)\n else:\n statistics[range_name[j] + '_flows_completed_fraction'] = 0\n\n # Print raw results\n print('Writing to result file flow_completion.statistics...')\n with open(analysis_folder_path + '/flow_completion.statistics', 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n\n##################################\n# Analyze port utilization\n#\ndef analyze_port_utilization():\n with open(run_folder_path + '/port_utilization.csv.log') as file:\n reader = csv.reader(file)\n\n # Column lists\n source_ids = []\n target_ids = []\n attached_to_server = []\n utilized_ns = []\n utilization = []\n utilization_server_ports = []\n utilization_non_server_ports = []\n num_server_port_zero = 0\n num_non_server_port_zero = 0\n\n print(\"Reading in port utilization log file...\")\n\n # Read in column lists\n for row in reader:\n source_ids.append(float(row[0]))\n target_ids.append(float(row[1]))\n attached_to_server.append(row[2] == 'Y')\n utilized_ns.append(float(row[3]))\n utilization.append(float(row[4]))\n if row[2] == 'Y':\n utilization_server_ports.append(float(row[4]))\n if float(row[4]) == 0:\n num_server_port_zero += 1\n else:\n utilization_non_server_ports.append(float(row[4]))\n if float(row[4]) == 0:\n num_non_server_port_zero += 1\n\n if len(row) != 5:\n print(\"Invalid row: \", row)\n exit()\n\n print(\"Calculating statistics...\")\n\n # General statistics (there is always a server port)\n statistics = {\n\n 'all_port_num': len(source_ids),\n 'all_port_unique_sources': len(set(source_ids)),\n 'all_port_unique_targets': len(set(target_ids)),\n 'all_port_mean_utilization': np.mean(utilization),\n 'all_port_median_utilization': 
np.median(utilization),\n 'all_port_std_utilization': np.std(utilization),\n 'all_port_99th_utilization': np.percentile(utilization, 99),\n 'all_port_99.9th_utilization': np.percentile(utilization, 99.9),\n\n 'server_port_num': len(utilization_server_ports),\n 'server_port_zero_num': num_server_port_zero,\n 'server_port_mean_utilization': np.mean(utilization_server_ports),\n 'server_port_median_utilization': np.median(utilization_server_ports),\n 'server_port_std_utilization': np.std(utilization_server_ports),\n 'server_port_99th_utilization': np.percentile(utilization_server_ports, 99),\n 'server_port_99.9th_utilization': np.percentile(utilization_server_ports, 99.9)\n\n }\n\n # Only print non-server port statistics if they exist\n statistics['non_server_port_num'] = len(utilization_non_server_ports)\n if len(utilization_non_server_ports) > 0:\n statistics['non_server_ports_zero_num'] = num_non_server_port_zero\n statistics['non_server_port_mean_utilization'] = np.mean(utilization_non_server_ports)\n statistics['non_server_port_median_utilization'] = np.median(utilization_non_server_ports)\n statistics['non_server_port_std_utilization'] = np.std(utilization_non_server_ports)\n statistics['non_server_port_99th_utilization'] = np.percentile(utilization_non_server_ports, 99)\n statistics['non_server_port_99.9th_utilization'] = np.percentile(utilization_non_server_ports, 99.9)\n\n # Print raw results\n print('Writing to result file port_utilization.statistics...')\n with open(analysis_folder_path + '/port_utilization.statistics', 'w+') as outfile:\n for key, value in sorted(statistics.items()):\n outfile.write(str(key) + \"=\" + str(value) + \"\\n\")\n\n# Call analysis functions\nanalyze_flow_completion()\nanalyze_port_utilization()\n"
] | [
[
"numpy.std",
"numpy.mean",
"numpy.median",
"numpy.percentile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
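The `analyze_1s.py` row above reduces per-flow completion times to mean/median/std and high-percentile statistics with the numpy calls listed for it. A minimal sketch of that reduction over a hypothetical list of completed-flow durations (the values are invented for illustration):

import numpy as np

durations_ns = [1.2e6, 3.4e6, 2.2e6, 9.9e6, 4.1e6]    # hypothetical flow completion times (ns)

statistics = {
    'mean_fct_ns': np.mean(durations_ns),
    'median_fct_ns': np.median(durations_ns),
    'std_fct_ns': np.std(durations_ns),
    '99th_fct_ns': np.percentile(durations_ns, 99),
    '99.9th_fct_ns': np.percentile(durations_ns, 99.9),
}
for key, value in sorted(statistics.items()):          # same key=value dump style as the script
    print(str(key) + '=' + str(value))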
ezekial4/atomic_neu | [
"2d7c8b9e587dd0e6076b37cb1f07560a488bca50"
] | [
"atomic/tests/test_electron_cooling.py"
] | [
"import unittest\nimport atomic_neu.atomic as atomic\nimport numpy as np\n\nclass TestElectronCooling(unittest.TestCase):\n def setUp(self):\n ad = atomic.element('li')\n eq = atomic.CollRadEquilibrium(ad)\n\n self.temperature = np.logspace(0, 3, 50)\n self.electron_density = 1e19\n # y is a FractionalAbundance object.\n y = eq.ionisation_stage_distribution(self.temperature,\n self.electron_density)\n self.elc = atomic.ElectronCooling(y, neutral_fraction=1e-2)\n\n def test_keys(self):\n \"\"\"Makes sure ElectronCooling has all the right keys\"\"\"\n expected = ['ionisation', 'recombination',\n 'cx_power', 'line_power',\n 'continuum_power', 'rad_total',\n 'total']\n result = self.elc.power.keys()\n self.assertCountEqual(expected, result)\n\n def test_rad_total(self):\n \"\"\"Tests that rad_total is what I think it is.\"\"\"\n p = self.elc.power\n expected = p['rad_total']\n result = p['line_power'] + p['cx_power'] + p['continuum_power']\n np.testing.assert_allclose(expected, result)\n\n def test_equilbrium(self):\n \"\"\"Test that ionisation and recombination powers are opposite.\n\n Hence, total = rad_total.\n \"\"\"\n ion = self.elc.power['ionisation']\n negrecomb = -self.elc.power['recombination']\n total = self.elc.power['total']\n rad_total = self.elc.power['rad_total']\n np.testing.assert_allclose(ion, negrecomb)\n np.testing.assert_allclose(total, rad_total)\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.logspace",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TransLinkForecasting/activitysim | [
"3df695bd2bf921aa46d296a09889b68087b8c911"
] | [
"activitysim/examples/example_multiple_zone/marin_work_tour_mode_choice_data.py"
] | [
"\n# marin tvpb example data processing\n# Ben Stabler, [email protected], 09/17/20\n\nimport pandas as pd\nimport openmatrix as omx\n\n# command to run the underdevelopment example\n# python simulation.py -c configs_3_zone_marin -d data_3_marin -o output_3_marin\n\n# data processing at c:\\projects\\activitysim\\marin\n\n# 1 - fix skim names, put time periods at end and make all names unique\n\ntime_periods = [\"AM\", \"EA\", \"EV\", \"MD\", \"PM\"]\nfor tp in time_periods:\n taz_file = omx.open_file('HWYSKM' + tp + '_taz.omx')\n taz_file_rename = omx.open_file('HWYSKM' + tp + '_taz_rename.omx', 'w')\n for mat_name in taz_file.list_matrices():\n taz_file_rename[mat_name + \"__\" + tp] = taz_file[mat_name][:]\n print(mat_name + \"__\" + tp)\n taz_file.close()\n taz_file_rename.close()\n\nfor tp in time_periods:\n for skim_set in [\"SET1\", \"SET2\", \"SET3\"]:\n tap_file = omx.open_file('transit_skims_' + tp + '_' + skim_set + '.omx')\n tap_file_rename = omx.open_file('transit_skims_' + tp + '_' + skim_set + '_rename.omx', 'w')\n for mat_name in tap_file.list_matrices():\n tap_file_rename[mat_name + \"_\" + skim_set + \"__\" + tp] = tap_file[mat_name][:]\n print(mat_name + '_' + skim_set + \"__\" + tp)\n tap_file.close()\n tap_file_rename.close()\n\n# 2 - nearby skims need headers\n\nmaz_tap_walk = pd.read_csv(\"2015_test_2019_02_13_Part3/skims/ped_distance_maz_tap.txt\", header=None)\nmaz_maz_walk = pd.read_csv(\"2015_test_2019_02_13_Part3/skims/ped_distance_maz_maz.txt\", header=None)\nmaz_maz_bike = pd.read_csv(\"2015_test_2019_02_13_Part3/skims/bike_distance_maz_maz.txt\", header=None)\n\nmaz_tap_walk.columns = [\"MAZ\", \"TAP\", \"TAP\", \"WALK_TRANSIT_GEN_COST\", \"WALK_TRANSIT_DIST\"]\nmaz_maz_walk.columns = [\"OMAZ\", \"DMAZ\", \"DMAZ\", \"WALK_GEN_COST\", \"WALK_DIST\"]\nmaz_maz_bike.columns = [\"OMAZ\", \"DMAZ\", \"DMAZ\", \"BIKE_GEN_COST\", \"BIKE_DIST\"]\n\nmaz_tap_walk[\"WALK_TRANSIT_DIST\"] = maz_tap_walk[\"WALK_TRANSIT_DIST\"] / 5280 # miles\nmaz_maz_walk[\"WALK_DIST\"] = maz_maz_walk[\"WALK_DIST\"] / 5280 # miles\nmaz_maz_bike[\"BIKE_DIST\"] = maz_maz_bike[\"BIKE_DIST\"] / 5280 # miles\n\nmaz_tap_walk[[\"MAZ\", \"TAP\", \"WALK_TRANSIT_DIST\"]].to_csv(\"maz_tap_walk.csv\", index=False)\nmaz_maz_walk[[\"OMAZ\", \"DMAZ\", \"WALK_DIST\"]].to_csv(\"maz_maz_walk.csv\", index=False)\nmaz_maz_bike[[\"OMAZ\", \"DMAZ\", \"BIKE_DIST\"]].to_csv(\"maz_maz_bike.csv\", index=False)\n\n# 3 - maz data\n\nmazs = pd.read_csv(\"2015_test_2019_02_13_Part2/landuse/maz_data_withDensity.csv\")\npcost = pd.read_csv(\"2015_test_2019_02_13/ctramp_output/mgraParkingCost.csv\")\n\nmazs = pd.concat([mazs, pcost], axis=1)\nmazs = mazs.fillna(0)\n\ntazs = pd.read_csv(\"2015_test_2019_02_13_Part2/landuse/taz_data.csv\")\ntazs = tazs.set_index(\"TAZ\", drop=False)\n\nmazs[\"TERMINALTIME\"] = tazs[\"TERMINALTIME\"].loc[mazs[\"TAZ\"]].tolist()\n\nmazs[\"zone_id\"] = mazs[\"MAZ\"]\nmazs[\"county_id\"] = mazs[\"CountyID\"]\nmazs = mazs.set_index(\"zone_id\", drop=False)\n\nmazs.to_csv(\"maz_data_asim.csv\", index=False)\n\n# 4 - accessibility data\n\naccess = pd.read_csv(\"2015_test_2019_02_13/ctramp_output/accessibilities.csv\")\naccess = access.drop([0])\naccess[\"zone_id\"] = access[\"mgra\"]\naccess = access.set_index(\"zone_id\", drop=False)\naccess.to_csv(\"access.csv\", index=False)\n\n# 5 - maz to tap drive data\n\ntaz_tap_drive = pd.read_csv(\"2015_test_2019_02_13_Part3/skims/drive_maz_taz_tap.csv\")\n\ntaz_tap_drive = taz_tap_drive.pivot_table(index=[\"FTAZ\", \"TTAP\"], values=['DTIME', 'DDIST', 
\"WDIST\"], fill_value=0)\n\ntaz_tap_drive.columns = list(map(\"\".join, taz_tap_drive.columns))\ntaz_tap_drive = taz_tap_drive.reset_index()\ntaz_tap_drive = taz_tap_drive.set_index(\"FTAZ\")\ntaz_tap_drive[\"TAP\"] = taz_tap_drive[\"TTAP\"]\n\ntaz_tap_drive = pd.merge(mazs[[\"MAZ\", \"TAZ\"]], taz_tap_drive, left_on=['TAZ'], right_on=['FTAZ'])\ntaz_tap_drive[[\"MAZ\", \"TAP\", \"DDIST\", \"DTIME\", \"WDIST\"]].to_csv(\"maz_taz_tap_drive.csv\", index=False)\n\n# 6 - tours file, we just need work tours\n\nitour = pd.read_csv(\"2015_test_2019_02_13/ctramp_output/indivTourData_3.csv\")\nwork_tours = itour[itour[\"tour_purpose\"] == \"Work\"]\n\nwork_tours[\"tour_id\"] = range(1, len(work_tours)+1)\nwork_tours[\"household_id\"] = work_tours[\"hh_id\"]\nwork_tours = work_tours.set_index(\"tour_id\", drop=False)\n\nwork_tours[\"destination\"] = work_tours[\"dest_mgra\"]\n\nwork_tours[\"start\"] = work_tours[\"start_period\"]\nwork_tours[\"end\"] = work_tours[\"end_period\"]\nwork_tours[\"tour_type\"] = \"work\"\n\nwork_tours.to_csv(\"work_tours.csv\", index=False)\n\n# 7 - households\n\nhouseholds = pd.read_csv(\"2015_test_2019_02_13_Part2/popsyn/households.csv\")\nhouseholds[\"household_id\"] = households[\"HHID\"]\nhouseholds[\"home_zone_id\"] = households[\"MAZ\"]\nhouseholds = households.set_index(\"household_id\", drop=False)\n\nhouseholds.to_csv(\"households_asim.csv\", index=False)\n\n# 8 - persons\n\npersons = pd.read_csv(\"2015_test_2019_02_13_Part2/popsyn/persons.csv\")\npersons[\"person_id\"] = persons[\"PERID\"]\npersons[\"household_id\"] = persons[\"HHID\"]\npersons = persons.set_index(\"person_id\", drop=False)\n\npersons_output = pd.read_csv(\"2015_test_2019_02_13/ctramp_output/personData_3.csv\")\npersons_output = persons_output.set_index(\"person_id\", drop=False)\npersons[\"type\"] = persons_output[\"type\"].loc[persons.index]\npersons[\"value_of_time\"] = persons_output[\"value_of_time\"].loc[persons.index]\npersons[\"is_university\"] = persons[\"type\"] == \"University student\"\npersons[\"fp_choice\"] = persons_output[\"fp_choice\"]\n\npersons.to_csv(\"persons_asim.csv\", index=False)\n\n# 9 - replace existing pipeline tables for restart for now\n\n# run simple three zone example and get output pipeline and then replace tables before tour mode choice\npipeline = pd.io.pytables.HDFStore('pipeline.h5')\npipeline.keys()\n\npipeline['/accessibility/compute_accessibility'] = access # index zone_id\npipeline['/households/joint_tour_frequency'] = households # index household_id\npipeline['/persons/non_mandatory_tour_frequency'] = persons # index person_id\npipeline['/land_use/initialize_landuse'] = mazs # index zone_id\npipeline['/tours/non_mandatory_tour_scheduling'] = work_tours # index tour_id\n\npipeline.close()\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.io.pytables.HDFStore",
"pandas.merge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
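The Marin tour-mode-choice row above finishes by opening an ActivitySim pipeline HDF5 file and overwriting checkpointed tables before a restart. A minimal sketch of that pattern with pandas' HDFStore, using a hypothetical store path, table key, and replacement frame (none of these names are guaranteed to match a real pipeline):

import pandas as pd

# Hypothetical replacement table; the real script assembles it from CTRAMP outputs
households = pd.DataFrame({'household_id': [1, 2], 'home_zone_id': [10, 20]})
households = households.set_index('household_id', drop=False)

store = pd.HDFStore('pipeline.h5')                      # path assumed for illustration
store['/households/joint_tour_frequency'] = households  # overwrite the checkpointed table
store.close()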
Idein/chainer-hand-pose | [
"a47e0c61c4fea3369db566eea3d539d1c9398bf7",
"a47e0c61c4fea3369db566eea3d539d1c9398bf7",
"a47e0c61c4fea3369db566eea3d539d1c9398bf7",
"a47e0c61c4fea3369db566eea3d539d1c9398bf7",
"a47e0c61c4fea3369db566eea3d539d1c9398bf7"
] | [
"experiments/notebooks/data_visualizer.py",
"src/demo/hand_pose_utils.py",
"src/pose/models/network_base.py",
"src/pose/demo_rgb.py",
"src/pose/utils.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Dataset Visualizer\n\n# +\nimport sys\n\n# Yeah darkside\nsys.path.append(\"../../src\")\nsys.path.append(\"../../src/pose/hand_dataset\")\n\n# +\nimport copy\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom chainer.dataset import DatasetMixin\nimport chainercv\nfrom chainercv.links.model.ssd import random_distort\nimport numpy as np\n\nfrom pose.hand_dataset.geometry_utils import crop_domain2d, crop_domain3d, calc_com\nfrom pose.hand_dataset.geometry_utils import flip_point_zyx, rotate_point_zyx, rotate_point_vu\n\n\ndef load_dataset(dataset_type, visualize=True, iterate_all=False):\n import os\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n enable_rgb = True\n enable_depth = True\n debug = True\n if dataset_type == \"fhad\":\n dataset_dir = os.path.expanduser(\"~/dataset/fhad\")\n import fhad_dataset as x_dataset\n from fhad_dataset import get_fhad_dataset as get_dataset\n elif dataset_type == \"stb\":\n dataset_dir = os.path.expanduser(\"~/dataset/stb\")\n import stb_dataset as x_dataset\n from stb_dataset import get_stb_dataset as get_dataset\n elif dataset_type == \"rhd\":\n dataset_dir = os.path.expanduser(\"~/dataset/RHD_published_v2\")\n import rhd_dataset as x_dataset\n from rhd_dataset import get_rhd_dataset as get_dataset\n debug = False\n elif dataset_type == \"msra15\":\n dataset_dir = os.path.expanduser(\"~/dataset/cvpr15_MSRAHandGestureDB\")\n import msra15_dataset as x_dataset\n from msra15_dataset import get_msra15_dataset as get_dataset\n enable_rgb = False\n elif dataset_type == \"nyu\":\n dataset_dir = os.path.expanduser(\"~/dataset/nyu_hand_dataset_v2\")\n import nyu_dataset as x_dataset\n from nyu_dataset import get_nyu_dataset as get_dataset\n enable_rgb = False\n elif dataset_type == \"synth\":\n dataset_dir = os.path.expanduser(\"~/dataset/SynthHands_Release\")\n import synth_dataset as x_dataset\n from synth_dataset import get_synth_dataset as get_dataset\n elif dataset_type == \"ganerated\":\n dataset_dir = os.path.expanduser(\"~/dataset/GANeratedHands_Release\")\n import ganerated_dataset as x_dataset\n from ganerated_dataset import get_ganerated_dataset as get_dataset\n enable_depth = False\n debug = True\n elif dataset_type == \"multiview\":\n dataset_dir = os.path.expanduser(\"~/dataset/multiview_hand\")\n import multiview_dataset as x_dataset\n from multiview_dataset import get_multiview_dataset as get_dataset\n enable_depth = False\n debug = True\n elif dataset_type == \"handdb\":\n dataset_dir = os.path.expanduser(\"~/dataset/handdb_dataset\")\n import handdb_dataset as x_dataset\n from handdb_dataset import get_handdb_dataset as get_dataset\n enable_depth = False\n debug = False\n else:\n NotImplementedError(\"dataset_type={} is not yet\".format(dataset_type))\n param = {\n \"cube\": np.array([200, 200, 200], dtype=np.int),\n \"imsize\": np.array([224, 224], dtype=np.int),\n \"use_rgb\": enable_rgb,\n \"use_depth\": enable_depth,\n \"enable_x_flip\": False,\n \"enable_y_flip\": False,\n \"angle_range\": range(-90,90),\n \"oscillation\": {\n \"do_oscillate\": True,\n \"scale_range\": np.arange(1.25, 1.27, 0.01),\n \"shift_range\": np.arange(-0.01, 0.02, 0.01),\n },\n }\n\n logger.info(\"get dataset\")\n dataset = get_dataset(dataset_dir, param=param, debug=debug, 
mode=\"train\")\n logger.info(\"done get dataset\")\n return dataset, x_dataset, param\n\ndataset,x_dataset,hand_param = load_dataset(\"fhad\")\n\n# +\ncurrent_idx=0\n\ndef vis(idx):\n global current_idx\n current_idx = idx\n color_map = x_dataset.COLOR_MAP\n keypoint_names = x_dataset.KEYPOINT_NAMES\n edges = x_dataset.EDGES\n enable_rgb = hand_param[\"use_rgb\"] \n enable_depth = False\n if enable_rgb and enable_depth:\n from pose.visualizations import visualize_both\n visualize_both(dataset, keypoint_names, edges, color_map)\n elif enable_rgb:\n from pose.visualizations import visualize_rgb\n visualize_rgb(dataset, keypoint_names, edges, color_map,idx)\n elif enable_depth:\n from visualizations import visualize_depth\n visualize_depth(dataset, keypoint_names, edges, color_map)\n else:\n pass\n\nfrom ipywidgets import interact \n\nsample=np.random.choice(range(len(dataset)),100)\ninteract(vis,idx=sample)\n# -\n\n# # visualize transformed dataset\n\n# +\nfrom collections import defaultdict\n\nfrom chainer.datasets import TransformDataset\nfrom pose.models.selector import select_model\nfrom pose.hand_dataset import common_dataset\n\nconfig=defaultdict(dict)\nconfig[\"model\"][\"name\"]=\"ganerated\"\nhand_param[\"inH\"]=224\nhand_param[\"inW\"]=224\nhand_param[\"inC\"]=3\nhand_param[\"n_joints\"]=common_dataset.NUM_KEYPOINTS\nhand_param[\"edges\"] = common_dataset.EDGES\nmodel = select_model(config,hand_param)\ntransform_dataset=TransformDataset(dataset,model.encode)\n\n# +\nprint(current_idx)\n\nrgb, hm, intermediate3d, rgb_joint = transform_dataset.get_example(current_idx)\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfig=plt.figure()\nax=fig.add_subplot(121)\nax.imshow(np.max(hm,axis=0))\nax2=fig.add_subplot(122,projection=\"3d\")\nax2.scatter(*rgb_joint[:,::-1].transpose())\n",
"import itertools\n\nimport numpy as np\n\nfrom transforms import flip_point\n# Decimal Code (R,G,B)\n_BASE_COLOR = {\n \"RED\": (255, 0, 0),\n \"GREEN\": (0, 255, 0),\n \"BLUE\": (0, 0, 255),\n \"YELLOW\": (255, 255, 0),\n \"CYAN\": (0, 255, 255),\n \"MAGENTA\": (255, 0, 255),\n}\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\n\"\"\"\nCoordinates are ordered as (z, y, x) in 3 dimensional space\nIt is compatible with ChainerCV project\nSee:\nhttps://github.com/chainer/chainercv#data-conventions\n\"\"\"\nDATA_CONVENTION = \"ZYX\"\n\nNUM_KEYPOINTS = 21\nSTANDARD_KEYPOINT_NAMES = [\n \"root\",\n \"thumb_mcp\",\n \"thumb_pip\",\n \"thumb_dip\",\n \"thumb_tip\",\n \"index_mcp\",\n \"index_pip\",\n \"index_dip\",\n \"index_tip\",\n \"middle_mcp\",\n \"middle_pip\",\n \"middle_dip\",\n \"middle_tip\",\n \"ring_mcp\",\n \"ring_pip\",\n \"ring_dip\",\n \"ring_tip\",\n \"little_mcp\",\n \"little_pip\",\n \"little_dip\",\n \"little_tip\",\n]\n\nassert len(STANDARD_KEYPOINT_NAMES) == NUM_KEYPOINTS\n\n\ndef make_keypoint_converter(root, fingers, parts, sep=\"_\"):\n converter = {\"root\": root}\n for f, std_f in zip(fingers, [\"thumb\", \"index\", \"middle\", \"ring\", \"little\"]):\n for p, std_p in zip(parts, [\"mcp\", \"pip\", \"dip\", \"tip\"]):\n converter[\"_\".join([std_f, std_p])] = sep.join([f, p])\n return converter\n\n\nBASE_COLOR = {\n \"root\": (50, 50, 50),\n \"thumb\": _BASE_COLOR[\"MAGENTA\"],\n \"index\": _BASE_COLOR[\"BLUE\"],\n \"middle\": _BASE_COLOR[\"GREEN\"],\n \"ring\": _BASE_COLOR[\"YELLOW\"],\n \"little\": _BASE_COLOR[\"RED\"],\n}\n\nCOLOR_MAP = {\"root\": BASE_COLOR[\"root\"]}\n\nEDGE_NAMES = []\nfor f in [\"thumb\", \"index\", \"middle\", \"ring\", \"little\"]:\n for s, t in pairwise([\"root\", \"mcp\", \"pip\", \"dip\", \"tip\"]):\n color = BASE_COLOR[f]\n if s == \"root\":\n t = \"_\".join([f, t])\n else:\n s = \"_\".join([f, s])\n t = \"_\".join([f, t])\n EDGE_NAMES.append([s, t])\n COLOR_MAP[s, t] = color\n COLOR_MAP[t] = color\n\nEDGES = [[STANDARD_KEYPOINT_NAMES.index(s), STANDARD_KEYPOINT_NAMES.index(t)]\n for s, t in EDGE_NAMES]\n\nfor s, t in EDGE_NAMES:\n COLOR_MAP[\n STANDARD_KEYPOINT_NAMES.index(s),\n STANDARD_KEYPOINT_NAMES.index(t)\n ] = COLOR_MAP[s, t]\n COLOR_MAP[STANDARD_KEYPOINT_NAMES.index(s)] = COLOR_MAP[s]\n COLOR_MAP[STANDARD_KEYPOINT_NAMES.index(t)] = COLOR_MAP[t]\n# convert value as np.array\nCOLOR_MAP = {k: np.array(v) for k, v in COLOR_MAP.items()}\n\n\ndef normalize_rgb(rgb):\n return (rgb / 255.) - 0.5\n\n\ndef denormalize_rgb(rgb):\n return 255. * (rgb + 0.5)\n\n\ndef format_kp_proj(point, outH, outW, offsetH=0, offsetW=0, x_flip=False, y_flip=False):\n vmin = np.min(point[:, 0])\n umin = np.min(point[:, 1])\n vmax = np.max(point[:, 0])\n umax = np.max(point[:, 1])\n ulen = vmax - vmin\n vlen = umax - umin\n scale = min(outH, outW) / max(ulen, vlen)\n offset = np.array([vmin, umin])\n point = scale * (point - offset)\n point = flip_point(\n point[np.newaxis],\n (outH, outW),\n x_flip=x_flip,\n y_flip=y_flip,\n ).squeeze(axis=0)\n point = point + np.array([offsetH, offsetW])\n return point\n",
"import chainer\nimport numpy as np\nfrom chainer import initializers\nimport chainer.functions as F\nimport chainer.links as L\n\n\nclass Convolution2d(chainer.Chain):\n \"\"\"\n convert pose_estimation.network_base.convolution2d written in tensorflow.contrib.slim\n into Chainer implementation\n \"\"\"\n\n def __init__(self, in_channels, out_channels, ksize=3, stride=1):\n super(Convolution2d, self).__init__()\n self.dtype = np.float32\n initialW = initializers.HeNormal(1 / np.sqrt(2), self.dtype)\n with self.init_scope():\n self.conv = L.Convolution2D(in_channels,\n out_channels,\n ksize=ksize,\n stride=stride,\n pad=ksize // 2,\n initialW=initialW,\n nobias=True)\n self.bn = L.BatchNormalization(out_channels,\n eps=0.001, decay=0.9997)\n\n def __call__(self, x):\n return F.clipped_relu(self.bn(self.conv(x)), 6.0)\n\n\nclass Conv1x1(chainer.Chain):\n def __init__(self, in_channels, out_channels, relu=True):\n super(Conv1x1, self).__init__()\n self.relu = relu\n with self.init_scope():\n self.conv = L.Convolution2D(\n in_channels,\n out_channels,\n ksize=1,\n stride=1,\n nobias=True\n )\n self.bn = L.BatchNormalization(\n out_channels,\n eps=0.001,\n use_gamma=False\n )\n\n def __call__(self, x):\n h = self.bn(self.conv(x))\n if self.relu:\n return F.relu(h)\n else:\n return h\n\n\nclass SeparableConv(chainer.Chain):\n \"\"\"\n convert pose_estimation.network_base.separable_conv written in tensorflow.contrib.slim\n into Chainer implementation\n \"\"\"\n\n def __init__(self, in_channels, out_channels, ksize=3, stride=1, relu=True):\n super(SeparableConv, self).__init__()\n self.relu = relu\n self.ksize = ksize\n self.stride = stride\n with self.init_scope():\n self.depthwise_conv = L.DepthwiseConvolution2D(\n in_channels=in_channels,\n channel_multiplier=1,\n ksize=ksize,\n pad=ksize // 2,\n stride=stride,\n nobias=True\n )\n self.pointwise_conv = L.Convolution2D(\n in_channels,\n out_channels,\n ksize=1,\n nobias=True\n )\n self.pointwise_bn = L.BatchNormalization(\n out_channels,\n eps=0.001,\n use_gamma=False\n )\n\n def __call__(self, x):\n h = self.depthwise_conv(x)\n h = self.pointwise_conv(h)\n h = self.pointwise_bn(h)\n if self.relu:\n h = F.relu(h)\n return h\n\n\nclass ExpandedConv(chainer.Chain):\n\n def __init__(self, expand_ratio, in_channels, out_channels, stride):\n super(ExpandedConv, self).__init__()\n ksize = 3\n self.dtype = np.float32\n self.expand_ratio = expand_ratio\n expanded_channels = int(in_channels * expand_ratio)\n initialW = initializers.HeNormal(1 / np.sqrt(2), self.dtype)\n with self.init_scope():\n if expand_ratio != 1:\n self.expand_conv = L.Convolution2D(in_channels,\n expanded_channels,\n ksize=1,\n initialW=initialW,\n nobias=True)\n self.expand_bn = L.BatchNormalization(expanded_channels,\n eps=0.001, decay=0.997)\n\n self.depthwise_conv = L.DepthwiseConvolution2D(expanded_channels,\n channel_multiplier=1,\n ksize=ksize,\n stride=stride,\n pad=ksize // 2,\n initialW=initialW,\n nobias=True)\n self.depthwise_bn = L.BatchNormalization(expanded_channels,\n eps=0.001, decay=0.9997)\n self.project_conv = L.Convolution2D(expanded_channels,\n out_channels,\n ksize=1,\n initialW=initialW,\n nobias=True)\n self.project_bn = L.BatchNormalization(out_channels,\n eps=0.001, decay=0.9997)\n\n def __call__(self, x):\n h = x\n if self.expand_ratio != 1:\n h = F.clipped_relu(self.expand_bn(self.expand_conv(h)), 6.0)\n h = F.clipped_relu(self.depthwise_bn(self.depthwise_conv(h)), 6.0)\n h = self.project_bn(self.project_conv(h))\n if h.shape == x.shape:\n return h + x\n 
else:\n return h\n",
"import argparse\nimport configparser\nimport logging\n\nlogger = logging.getLogger()\nimport os\n\nimport cv2\n\nimport chainer\nimport chainercv\n\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\nfrom model import HandPoseNetwork\nfrom selector import select_dataset\nfrom pose.visualizations import vis_pose\nfrom image_utils import normalize_rgb\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--trained\", type=str, default=\"trained\")\n parser.add_argument(\"--camera\", type=int, default=0)\n args = parser.parse_args()\n return args\n\n\ndef create_model(config, hand_param):\n model_name = config[\"model\"][\"name\"]\n logger.info(\"use {}\".format(model_name))\n if model_name == \"mv2\":\n model_param = {\n \"width_multiplier\": config.getfloat(model_name, \"width_multiplier\"),\n }\n elif model_name == \"resnet\":\n model_param = {\n \"n_layers\": config.getint(model_name, \"n_layers\")\n }\n elif model_name == \"deep_prior\":\n model_param = {}\n model = HandPoseNetwork(hand_param, model_name, model_param)\n return model\n\n\ndef main():\n args = parse_args()\n logging.basicConfig(level=logging.INFO)\n\n config = configparser.ConfigParser()\n\n path = os.path.expanduser(os.path.join(args.trained, \"src\", \"config.ini\"))\n logger.info(\"read {}\".format(path))\n config.read(path, 'UTF-8')\n\n logger.info(\"setup devices\")\n chainer.global_config.autotune = True\n chainer.config.cudnn_fast_batch_normalization = True\n\n dataset_type = config[\"dataset\"][\"type\"]\n use_rgb = config.getboolean(\"dataset\", \"use_rgb\")\n use_depth = config.getboolean(\"dataset\", \"use_depth\")\n assert use_rgb\n assert use_rgb ^ use_depth, \"XOR(use_rgb, use_depth) must be True\"\n hand_param = select_dataset(config, return_data=[\"hand_param\"])\n model_path = os.path.expanduser(os.path.join(args.trained, \"bestmodel.npz\"))\n\n logger.info(\"> restore model\")\n model = create_model(config, hand_param)\n logger.info(\"> model.device = {}\".format(model.device))\n chainer.serializers.load_npz(model_path, model)\n\n plot_direction = \"horizontal\"\n if plot_direction == \"horizontal\":\n space = (1, 2)\n figsize = (10, 5)\n else:\n space = (2, 1)\n figsize = (5, 10)\n\n fig = plt.figure(figsize=figsize)\n ax1 = fig.add_subplot(*space, 1)\n ax3 = fig.add_subplot(*space, 2, projection=\"3d\")\n\n color_map = hand_param[\"color_map\"]\n color = [color_map[k] for k in hand_param[\"keypoint_names\"]]\n edge_color = [color_map[s, t] for s, t in hand_param[\"edges\"]]\n pred_color = [[255, 255, 255] for k in hand_param[\"keypoint_names\"]]\n\n cap = cv2.VideoCapture(args.camera)\n if cap.isOpened() is False:\n print('Error opening video stream or file')\n exit(1)\n\n try:\n while cap.isOpened():\n # Wait for a coherent pair of frames: depth and color\n ret_val, image = cap.read()\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.transpose(2, 0, 1).astype(np.float32)\n _, cH, cW = image.shape\n sz = min(cH, cW)\n image = chainercv.transforms.center_crop(image, (sz, sz))\n image = chainercv.transforms.resize(image, (hand_param[\"inH\"], hand_param[\"inW\"]))\n pred_j = model.predict(np.expand_dims(normalize_rgb(image), axis=0))\n pred_j = pred_j.array.reshape(hand_param[\"n_joints\"], -1)\n dim = pred_j.shape[-1]\n if dim == 5:\n pred_3d = pred_j[:, :3]\n pred_2d = pred_j[:, 3:]\n pred_2d = pred_2d * np.array([[hand_param[\"inH\"], hand_param[\"inW\"]]])\n 
else:\n pred_3d = pred_j\n\n vis_pose(pred_2d, hand_param[\"edges\"], img=image,\n point_color=color, edge_color=pred_color, ax=ax1)\n if dim != 2:\n vis_pose(pred_3d, hand_param[\"edges\"], point_color=color, edge_color=edge_color, ax=ax3)\n # set layout\n for ax in [ax3]:\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n ax.view_init(-65, -90)\n\n fig.canvas.draw()\n buf = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)\n buf = buf.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n buf = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)\n # buf = cv2.resize(buf, (dW, dH))\n ax1.clear()\n ax3.clear()\n\n images = np.hstack((buf,))\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images)\n if cv2.waitKey(1) == 27:\n break\n cv2.waitKey(1)\n finally:\n print(\"Exit\")\n\n\nif __name__ == '__main__':\n main()\n",
"import glob\nfrom itertools import tee\nimport os\nimport random\nimport shutil\n\nimport chainer\nimport numpy as np\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\ndef remove_whitespace(string):\n return string.replace(\" \", '')\n\n\ndef parse_kwargs(args):\n if args == '':\n return {}\n\n kwargs = {}\n for arg in args.split(','):\n arg = remove_whitespace(arg)\n key, value = arg.split('=')\n kwargs[key] = value\n\n return kwargs\n\n\ndef parse_cube(cube, style=\"DHW\"):\n assert sorted(style) == ['D', 'H', 'W'], \"variable 'style' must contain D, H and W\"\n\n cube = remove_whitespace(cube)\n cubeW, cubeH, cubeD = list(map(int, cube.split('x')))\n order = {\n 'W': cubeW,\n 'H': cubeH,\n 'D': cubeD,\n }\n cube = np.array([order[w] for w in style])\n return cube\n\n\ndef parse_imsize(imsize, style=\"HW\"):\n imsize = remove_whitespace(imsize)\n imW, imH = list(map(int, imsize.split('x')))\n order = {\n 'W': imW,\n 'H': imH,\n }\n imsize = np.array([order[w] for w in style])\n return imsize\n\n\ndef setup_devices(ids):\n if ids == '':\n return {'main': -1}\n devices = parse_kwargs(ids)\n for key in devices:\n devices[key] = int(devices[key])\n return devices\n\n\ndef set_random_seed(devices, seed):\n random.seed(seed)\n np.random.seed(seed)\n for key, id in devices.items():\n if id < 0:\n break\n if key == 'main':\n chainer.cuda.get_device_from_id(id).use()\n chainer.cuda.cupy.random.seed(seed)\n\n\ndef save_files(result_dir):\n target_list = [\n [\"\", [\"*.py\", \"*.sh\", \"*.ini\"]],\n [\"pose\",[\"*.py\", \"*.sh\", \"*.ini\"]],\n [os.path.join(\"pose\",\"hand_dataset\"), [\"*.py\"]],\n [os.path.join(\"pose\",\"graphics\"), [\"*.py\"]],\n [os.path.join(\"pose\",\"models\"), [\"*.py\"]],\n [os.path.join(\"pose\",\"visualizations\"), [\"*.py\"]],\n ]\n for (folder, patterns) in target_list:\n result_src_dir = os.path.join(result_dir, \"src\", folder)\n if not os.path.exists(result_src_dir):\n os.makedirs(result_src_dir)\n file_list = []\n for ptn in patterns:\n file_list += glob.glob(os.path.join(folder,ptn))\n for file in file_list:\n shutil.copy(file, os.path.join(result_src_dir, os.path.basename(file)))\n"
] | [
[
"numpy.arange",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"numpy.max",
"numpy.array",
"numpy.min"
],
[
"numpy.sqrt"
],
[
"matplotlib.use",
"numpy.array",
"numpy.hstack",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
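
The `apis` and `possible_versions` columns above are per-file, in the same order as the code cells: the sublist with `numpy.min`, `numpy.max`, `numpy.array` belongs to the second code cell (the keypoint/color-map module), where those calls implement the scale-to-canvas step in `format_kp_proj`. A hedged, self-contained sketch of that shift-and-scale pattern — the sample points and canvas size below are invented for illustration and are not taken from the record:

import numpy as np

# Hypothetical 2D keypoints as (v, u) rows; the values are made up for illustration.
points = np.array([[12.0, 30.0], [48.0, 90.0], [20.0, 55.0]])
outH, outW = 224, 224

# Same idea as format_kp_proj: move the per-axis minimum to the origin, then
# scale so the keypoint bounding box fits inside the (outH, outW) canvas.
vmin, umin = np.min(points[:, 0]), np.min(points[:, 1])
vmax, umax = np.max(points[:, 0]), np.max(points[:, 1])
scale = min(outH, outW) / max(vmax - vmin, umax - umin)
scaled = scale * (points - np.array([vmin, umin]))
print(scaled)
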
halen48/MusicTransformer-Pytorch | [
"62f2f05d48ce7ab5b6c29a354c50b46914c08d70"
] | [
"generate.py"
] | [
"import torch\nimport torch.nn as nn\nimport os\nimport random\n\nfrom third_party.midi_processor.processor import decode_midi, encode_midi\n\nfrom utilities.argument_funcs import parse_generate_args, print_generate_args\nfrom model.music_transformer import MusicTransformer\nfrom model.music_lstm import MusicLSTM\nfrom dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, process_midi\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\n\nfrom utilities.constants import *\nfrom utilities.device import get_device, use_cuda\n\n# main\ndef main():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Entry point. Generates music from a model specified by command line arguments\n ----------\n \"\"\"\n\n args = parse_generate_args()\n print_generate_args(args)\n\n if(args.force_cpu):\n use_cuda(False)\n print(\"WARNING: Forced CPU usage, expect model to perform slower\")\n print(\"\")\n\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Grabbing dataset if needed\n _, _, dataset = create_epiano_datasets(args.midi_root, args.num_prime, random_seq=False)\n\n # Can be None, an integer index to dataset, or a file path\n if(args.primer_file is None):\n f = str(random.randrange(len(dataset)))\n else:\n f = args.primer_file\n\n if(f.isdigit()):\n idx = int(f)\n primer, _ = dataset[idx]\n primer = primer.to(get_device())\n\n print(\"Using primer index:\", idx, \"(\", dataset.data_files[idx], \")\")\n\n else:\n raw_mid = encode_midi(f)\n if(len(raw_mid) == 0):\n print(\"Error: No midi messages in primer file:\", f)\n return\n\n primer, _ = process_midi(raw_mid, args.num_prime, random_seq=False)\n primer = torch.tensor(primer, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n print(\"Using primer file:\", f)\n\n if(args.model == \"lstm\"):\n model = MusicLSTM(input_size=args.input_size, \n layers=args.batch_size,\n dropout=args.dropout,\n hidden_cells=args.hidden_cells ).to(get_device())\n elif(args.model == \"transformer\"):\n model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,\n d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,\n max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())\n else:\n print(args.model,\" not supported\")\n\n model.load_state_dict(torch.load(args.model_weights))\n\n # Saving primer first\n f_path = os.path.join(args.output_dir, \"primer.mid\")\n decode_midi(primer[:args.num_prime].cpu().numpy(), file_path=f_path)\n \n # GENERATION\n model.eval()\n with torch.set_grad_enabled(False):\n if(args.beam > 0):\n print(\"BEAM:\", args.beam)\n beam_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=args.beam)\n\n f_path = os.path.join(args.output_dir, \"beam.mid\")\n decode_midi(beam_seq[0].cpu().numpy(), file_path=f_path)\n else:\n print(\"RAND DIST\")\n rand_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=0)\n\n f_path = os.path.join(args.output_dir, \"rand.mid\")\n decode_midi(rand_seq[0].cpu().numpy(), file_path=f_path)\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.set_grad_enabled",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
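
The generate.py record above lists exactly two extracted torch calls, `torch.set_grad_enabled` and `torch.load`: the script restores weights with `model.load_state_dict(torch.load(args.model_weights))` and wraps generation in `torch.set_grad_enabled(False)`. A minimal sketch of that load-then-generate pattern, using a hypothetical stand-in module and weight file rather than the repo's MusicTransformer/MusicLSTM classes:

import torch
import torch.nn as nn

# Stand-in model and weight path (both hypothetical), used only to show the
# torch.load / torch.set_grad_enabled pattern relied on by generate.py above.
model = nn.Linear(4, 4)
torch.save(model.state_dict(), "weights_demo.pt")

model.load_state_dict(torch.load("weights_demo.pt"))
model.eval()
with torch.set_grad_enabled(False):   # no-grad context around generation
    out = model(torch.zeros(1, 4))
print(out.shape)
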
caidevOficial/Python_Udemy_DataManipulation | [
"a35e6f798bfd3a8857d07ff457d7fb64fa72ab81",
"a35e6f798bfd3a8857d07ff457d7fb64fa72ab81"
] | [
"Numpy_Practice/Mathematics_For_DataScience/m_01_Array_Zeros.py",
"Pandas_Practice/p_09_DataFrame_Boolean_List.py"
] | [
"# MIT License\n\n# Copyright (c) 2022 [FacuFalcone]\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nfrom m_00_Common_Variables import *\n\ndef CreateZeroArray(size: int) -> np.ndarray:\n \"\"\"[summary]\\n\n Creates an array of zeros\n \n Args:\n size (int): [Size of the array]\n Returns:\n [np.ndarray]: [Array of zeros]\n \"\"\"\n return np.zeros(size)\n\n\nif __name__ == \"__main__\":\n print(CreateZeroArray(5))\n print(f'Int Type: {SetTypeOfArray(CreateZeroArray(5), \"int\")}')\n print(f'Float Type: {SetTypeOfArray(CreateZeroArray(5), \"float\")}')\n print(f'String Type: {SetTypeOfArray(CreateZeroArray(5), \"str\")}')\n print(f'Bool Type: {SetTypeOfArray(CreateZeroArray(5), \"bool\")}')\n print(f'Complex Type: {SetTypeOfArray(CreateZeroArray(5), \"complex\")}')\n print(f'Object Type: {SetTypeOfArray(CreateZeroArray(5), \"object\")}')\n print(f'Unicode Type: {SetTypeOfArray(CreateZeroArray(5), \"unicode\")}')\n print(f'Byte Type: {SetTypeOfArray(CreateZeroArray(5), \"byte\")}')\n print(f'Unsigned Int Type: {SetTypeOfArray(CreateZeroArray(5), \"uint\")}')\n ",
"# MIT License\n\n# Copyright (c) 2022 [FacuFalcone]\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport pandas as pd\n\n# ?## Path of the file.\nPATH = './Pandas_Practice/assets/sanctuary.csv'\nJSON_CONFIG_PATH = './Pandas_Practice/assets/sanctuary.json'\n\nconfig_json = pd.read_json(JSON_CONFIG_PATH)\n\nID = config_json['Schema']['id']\nNAME = config_json['Schema']['name']\nCOSMOS = config_json['Schema']['cosmos']\nARMOR = config_json['Schema']['armor']\nLEVEL = config_json['Schema']['level']\nPOWER_LV = config_json['Schema']['power_level']\n\n# ?## Reads the file and returns a DataFrame.\nzodiac_df = pd.read_csv(PATH)\n\n# ?## Assign a indexed DF with the bracket operator to create\n# ?## a new DataFrame with 12 rows.\nsmall_df = zodiac_df[0:12]\n\n# ?## Creates a boolean array with many items as the DataFrame's size.\npisces_row = [\n False, False, False, False, False, False, \n False, False, False, False, False, True\n]\n\n# ?## Pass and prints the rown that meets the condition.\nprint(small_df[pisces_row])"
] | [
[
"numpy.zeros"
],
[
"pandas.read_csv",
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
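
For the second file in the record above, boolean-list indexing is the whole point: `small_df[pisces_row]` keeps only the rows whose flag is True, and the list must be exactly as long as the frame being indexed. A hedged sketch of the same selection with an invented three-row frame (the real sanctuary.csv contents are not part of this record):

import pandas as pd

# Tiny inline frame standing in for the CSV rows; the contents are invented,
# only the boolean-list selection mirrors p_09_DataFrame_Boolean_List.py.
df = pd.DataFrame({"name": ["Aries", "Taurus", "Pisces"], "power_level": [90, 85, 99]})

mask = [False, False, True]   # one boolean per row, like pisces_row above
print(df[mask])               # keeps only the rows flagged True
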
DanielChen98/Dose-Normalization-Have-Effect-On-Domain-Adaptation | [
"99ac8072432491e350c5c24cc93aeb8b3b6ad752"
] | [
"inference.py"
] | [
"'''\nTraining script for CIFAR-10/100\nCopyright (c) Wei YANG, 2017\n'''\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport time\nimport random\nfrom dataloader import my_dataset\nfrom ngd import NGD\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport models.cifar as models\nimport copy \nfrom utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig\nfrom wd import SinkhornDistance\nfrom style_loss import style_loss\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10/100 Training')\n# Datasets\nparser.add_argument('-d', '--dataset', default='cifar10', type=str)\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n# Optimization options\nparser.add_argument('--epochs', default=1000, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n#### train test batch_size ####\nparser.add_argument('--train-batch', default=64, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--test-batch', default=64, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--drop', '--dropout', default=0, type=float,\n metavar='Dropout', help='Dropout ratio')\n#### Decrease learning ####\nparser.add_argument('--schedule', type=int, nargs='+', default=[650, 725],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n# Checkpoints\nparser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',\n help='path to save checkpoint (default: checkpoint)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n# Architecture\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet20',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('--depth', type=int, default=29, help='Model depth.')\nparser.add_argument('--block-name', type=str, default='BasicBlock',\n help='the building block for Resnet and Preresnet: BasicBlock, Bottleneck (default: Basicblock for cifar10/cifar100)')\nparser.add_argument('--cardinality', type=int, default=8, help='Model cardinality (group).')\nparser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 
4 -> 64, 8 -> 128, ...')\nparser.add_argument('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')\nparser.add_argument('--compressionRate', type=int, default=2, help='Compression Rate (theta) for DenseNet.')\n# Miscs\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n#Device options\nparser.add_argument('--gpu-id', default='0', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n# Validate dataset\nassert args.dataset == 'cifar10' or args.dataset == 'cifar100', 'Dataset can only be cifar10 or cifar100.'\n\n# Use CUDA\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\n\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nbest_acc = 0 # best test accuracy\n\ndef main():\n global best_acc \n\n transform_test = transforms.Compose([\n transforms.Resize([120,120]),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n \n path_test = 'blood_cells/TEST_vangogh'\n testset = my_dataset(transform_test, path_test)\n num_classes = 6\n \n testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers, drop_last=True)\n\n # Model\n print(\"==> creating model '{}'\".format(args.arch))\n if args.arch.startswith('resnext'):\n model = models.__dict__[args.arch](\n cardinality=args.cardinality,\n num_classes=num_classes,\n depth=args.depth,\n widen_factor=args.widen_factor,\n dropRate=args.drop,\n )\n elif args.arch.startswith('densenet'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n growthRate=args.growthRate,\n compressionRate=args.compressionRate,\n dropRate=args.drop,\n )\n elif args.arch.startswith('wrn'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n widen_factor=args.widen_factor,\n dropRate=args.drop,\n )\n elif args.arch.endswith('resnet'):\n model = models.__dict__[args.arch](\n num_classes=num_classes,\n depth=args.depth,\n block_name=args.block_name,\n )\n else:\n model = models.__dict__[args.arch](num_classes=num_classes)\n\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))\n criterion = nn.CrossEntropyLoss()\n #### optimizer ####\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # Resume\n title = 'cifar-10-' + args.arch\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n #args.checkpoint = os.path.dirname(args.checkpoint)\n checkpoint = torch.load(args.checkpoint)\n best_acc = checkpoint['best_acc']\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)\n print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))\n return\n\ndef test(testloader, model, criterion, epoch, use_cuda):\n global best_acc\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n \n \n # BN.train()\n model.train()\n for batch_idx, 
(inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)\n with torch.no_grad():\n outputs,_,_,_ = model(inputs)\n \n #print(\"~~~~~~~~~~~~~~FINISHED BN ADJUST~~~~~~~~~~~~~~~~~~~~~~~\")\n\n # switch to evaluate mode\n model.eval()\n \n end = time.time()\n bar = Bar('Processing', max=len(testloader))\n for batch_idx, (inputs, targets) in enumerate(testloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)\n with torch.no_grad():\n # compute output\n outputs,_,_,_ = model(inputs)\n loss = criterion(outputs, targets)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 2))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec1.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | TestLoss: {loss:.4f} | testtop1: {top1: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(testloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.nn.DataParallel",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
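
One detail of inference.py above that is easy to miss: `test()` first iterates over the test loader with the model left in `train()` mode (the "BN ADJUST" pass), so BatchNorm running statistics are re-estimated on the target-domain images before `model.eval()` is called. A minimal sketch of that step under stated assumptions — the toy network and random tensors below are placeholders, not the repo's ResNet variants or blood-cell data:

import torch
import torch.nn as nn

# Toy network with a BatchNorm layer and a fake "test loader" of random batches.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())
loader = [torch.randn(4, 3, 32, 32) for _ in range(3)]

model.train()                  # BN updates its running mean/var in this mode
with torch.no_grad():          # no gradients needed; only BN statistics change
    for x in loader:
        model(x)

model.eval()                   # evaluate with the adapted statistics
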
tsinggggg/pandas | [
"8dbb593d0c94107ec8b91a4723c40af537807ca4"
] | [
"pandas/core/frame.py"
] | [
"\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nfrom collections import abc\nimport datetime\nfrom io import StringIO\nimport itertools\nimport mmap\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AnyStr,\n Dict,\n FrozenSet,\n Hashable,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import algos as libalgos, lib, properties\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axes,\n Axis,\n ColspaceArgType,\n CompressionOptions,\n Dtype,\n FilePathOrBuffer,\n FloatFormatType,\n FormattersType,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n Label,\n Level,\n Renamer,\n StorageOptions,\n ValueKeyFunc,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n deprecate_kwarg,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_axis_style_args,\n validate_bool_kwarg,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.cast import (\n construct_1d_arraylike_from_scalar,\n find_common_type,\n infer_dtype_from_scalar,\n invalidate_string_dtypes,\n maybe_box_datetimelike,\n maybe_convert_platform,\n maybe_downcast_to_dtype,\n maybe_infer_to_datetimelike,\n maybe_unbox_datetimelike,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n infer_dtype_from_object,\n is_bool_dtype,\n is_dataclass,\n is_datetime64_any_dtype,\n is_dict_like,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_iterator,\n is_list_like,\n is_named_tuple,\n is_object_dtype,\n is_scalar,\n is_sequence,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import isna, notna\n\nfrom pandas.core import algorithms, common as com, generic, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import (\n aggregate,\n reconstruct_func,\n relabel_result,\n transform,\n)\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import Categorical, ExtensionArray\nfrom pandas.core.arrays.sparse import SparseFrameAccessor\nfrom pandas.core.construction import extract_array, sanitize_masked_array\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.indexes import base as ibase\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n PeriodIndex,\n ensure_index,\n ensure_index_from_sequences,\n)\nfrom pandas.core.indexes.multi import MultiIndex, maybe_droplevels\nfrom pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.internals.construction import (\n arrays_to_mgr,\n dataclasses_to_dicts,\n get_names_from_index,\n init_dict,\n init_ndarray,\n masked_rec_array_to_mgr,\n reorder_arrays,\n sanitize_index,\n to_arrays,\n)\nfrom pandas.core.reshape.melt import melt\nfrom pandas.core.series import 
Series\nfrom pandas.core.sorting import get_group_index, lexsort_indexer, nargsort\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.formats import console, format as fmt\nfrom pandas.io.formats.info import BaseInfo, DataFrameInfo\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n from pandas.io.formats.style import Styler\n\n# ---------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = {\n \"axes\": \"index, columns\",\n \"klass\": \"DataFrame\",\n \"axes_single_arg\": \"{0 or 'index', 1 or 'columns'}\",\n \"axis\": \"\"\"axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index': apply function to each column.\n If 1 or 'columns': apply function to each row.\"\"\",\n \"optional_by\": \"\"\"\n by : str or list of str\n Name or list of names to sort by.\n\n - if `axis` is 0 or `'index'` then `by` may contain index\n levels and/or column labels.\n - if `axis` is 1 or `'columns'` then `by` may contain column\n levels and/or index labels.\"\"\",\n \"optional_labels\": \"\"\"labels : array-like, optional\n New labels / index to conform the axis specified by 'axis' to.\"\"\",\n \"optional_axis\": \"\"\"axis : int or str, optional\n Axis to target. Can be either the axis name ('index', 'columns')\n or number (0, 1).\"\"\",\n}\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame or named Series objects with a database-style join.\n\nThe join is done on columns or indexes. If joining columns on\ncolumns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes\non indexes or indexes on a column or columns, the index will be passed on.\nWhen performing a cross merge, no column specifications to merge on are\nallowed.\n\nParameters\n----------%s\nright : DataFrame or named Series\n Object to merge with.\nhow : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'\n Type of merge to be performed.\n\n * left: use only keys from left frame, similar to a SQL left outer join;\n preserve key order.\n * right: use only keys from right frame, similar to a SQL right outer join;\n preserve key order.\n * outer: use union of keys from both frames, similar to a SQL full outer\n join; sort keys lexicographically.\n * inner: use intersection of keys from both frames, similar to a SQL inner\n join; preserve the order of the left keys.\n * cross: creates the cartesian product from both frames, preserves the order\n of the left keys.\n\n .. versionadded:: 1.2.0\n\non : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If `on` is None and not merging on indexes then this defaults\n to the intersection of the columns in both DataFrames.\nleft_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\nright_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\nleft_index : bool, default False\n Use the index from the left DataFrame as the join key(s). 
If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels.\nright_index : bool, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\nsort : bool, default False\n Sort the join keys lexicographically in the result DataFrame. If False,\n the order of the join keys depends on the join type (how keyword).\nsuffixes : list-like, default is (\"_x\", \"_y\")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\ncopy : bool, default True\n If False, avoid copy if possible.\nindicator : bool or str, default False\n If True, adds a column to the output DataFrame called \"_merge\" with\n information on the source of each row. The column can be given a different\n name by providing a string argument. The column will have a Categorical\n type with the value of \"left_only\" for observations whose merge key only\n appears in the left DataFrame, \"right_only\" for observations\n whose merge key only appears in the right DataFrame, and \"both\"\n if the observation's merge key is found in both DataFrames.\n\nvalidate : str, optional\n If specified, checks if merge is of specified type.\n\n * \"one_to_one\" or \"1:1\": check if merge keys are unique in both\n left and right datasets.\n * \"one_to_many\" or \"1:m\": check if merge keys are unique in left\n dataset.\n * \"many_to_one\" or \"m:1\": check if merge keys are unique in right\n dataset.\n * \"many_to_many\" or \"m:m\": allowed, but does not result in checks.\n\nReturns\n-------\nDataFrame\n A DataFrame of the two merged objects.\n\nSee Also\n--------\nmerge_ordered : Merge with optional filling/interpolation.\nmerge_asof : Merge on nearest keys.\nDataFrame.join : Similar method using indices.\n\nNotes\n-----\nSupport for specifying index levels as the `on`, `left_on`, and\n`right_on` parameters was added in version 0.23.0\nSupport for merging named Series objects was added in version 0.24.0\n\nExamples\n--------\n>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [1, 2, 3, 5]})\n>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [5, 6, 7, 8]})\n>>> df1\n lkey value\n0 foo 1\n1 bar 2\n2 baz 3\n3 foo 5\n>>> df2\n rkey value\n0 foo 5\n1 bar 6\n2 baz 7\n3 foo 8\n\nMerge df1 and df2 on the lkey and rkey columns. The value columns have\nthe default suffixes, _x and _y, appended.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2 with specified left and right suffixes\nappended to any overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey',\n... 
suffixes=('_left', '_right'))\n lkey value_left rkey value_right\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2, but raise an exception if the DataFrames have\nany overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))\nTraceback (most recent call last):\n...\nValueError: columns overlap but no suffix specified:\n Index(['value'], dtype='object')\n\n>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n>>> df1\n a b\n0 foo 1\n1 bar 2\n>>> df2\n a c\n0 foo 3\n1 baz 4\n\n>>> df1.merge(df2, how='inner', on='a')\n a b c\n0 foo 1 3\n\n>>> df1.merge(df2, how='left', on='a')\n a b c\n0 foo 1 3.0\n1 bar 2 NaN\n\n>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})\n>>> df2 = pd.DataFrame({'right': [7, 8]})\n>>> df1\n left\n0 foo\n1 bar\n>>> df2\n right\n0 7\n1 8\n\n>>> df1.merge(df2, how='cross')\n left right\n0 foo 7\n1 foo 8\n2 bar 7\n3 bar 8\n\"\"\"\n\n\n# -----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame, OpsMixin):\n \"\"\"\n Two-dimensional, size-mutable, potentially heterogeneous tabular data.\n\n Data structure also contains labeled axes (rows and columns).\n Arithmetic operations align on both row and column labels. Can be\n thought of as a dict-like container for Series objects. The primary\n pandas data structure.\n\n Parameters\n ----------\n data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame\n Dict can contain Series, arrays, constants, dataclass or list-like objects. If\n data is a dict, column order follows insertion-order.\n\n .. versionchanged:: 0.25.0\n If data is a list of dicts, column order follows insertion-order.\n\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer.\n copy : bool, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input.\n\n See Also\n --------\n DataFrame.from_records : Constructor from tuples, also record arrays.\n DataFrame.from_dict : From dicts of Series, arrays, or dicts.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n read_table : Read general delimited file into DataFrame.\n read_clipboard : Read text from clipboard into DataFrame.\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = pd.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n ... 
columns=['a', 'b', 'c'])\n >>> df2\n a b c\n 0 1 2 3\n 1 4 5 6\n 2 7 8 9\n\n Constructing DataFrame from dataclass:\n\n >>> from dataclasses import make_dataclass\n >>> Point = make_dataclass(\"Point\", [(\"x\", int), (\"y\", int)])\n >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])\n x y\n 0 0 0\n 1 0 3\n 2 2 3\n \"\"\"\n\n _internal_names_set = {\"columns\", \"index\"} | NDFrame._internal_names_set\n _typ = \"dataframe\"\n _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)\n\n @property\n def _constructor(self) -> Type[DataFrame]:\n return DataFrame\n\n _constructor_sliced: Type[Series] = Series\n _hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])\n _accessors: Set[str] = {\"sparse\"}\n\n @property\n def _constructor_expanddim(self):\n # GH#31549 raising NotImplementedError on a property causes trouble\n # for `inspect`\n def constructor(*args, **kwargs):\n raise NotImplementedError(\"Not supported for DataFrames!\")\n\n return constructor\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index: Optional[Axes] = None,\n columns: Optional[Axes] = None,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n ):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._mgr\n\n if isinstance(data, BlockManager):\n if index is None and columns is None and dtype is None and copy is False:\n # GH#33357 fastpath\n NDFrame.__init__(self, data)\n return\n\n mgr = self._init_mgr(\n data, axes={\"index\": index, \"columns\": columns}, dtype=dtype, copy=copy\n )\n\n elif isinstance(data, dict):\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)\n\n # a masked array\n else:\n data = sanitize_masked_array(data)\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = {k: data[k] for k in data_columns}\n if columns is None:\n columns = data_columns\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, \"name\", None) is not None:\n mgr = init_dict({data.name: data}, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n # For data is list-like, or Iterable (will consume into list)\n elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):\n if not isinstance(data, (abc.Sequence, ExtensionArray)):\n data = list(data)\n if len(data) > 0:\n if is_dataclass(data[0]):\n data = dataclasses_to_dicts(data)\n if is_list_like(data[0]) and getattr(data[0], \"ndim\", 1) == 1:\n if is_named_tuple(data[0]) and columns is None:\n columns = data[0]._fields\n arrays, columns = to_arrays(data, columns, dtype=dtype)\n columns = ensure_index(columns)\n\n # set the index\n if index is None:\n if isinstance(data[0], Series):\n index = get_names_from_index(data)\n elif isinstance(data[0], Categorical):\n index = ibase.default_index(len(data[0]))\n else:\n index = ibase.default_index(len(data))\n\n mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n else:\n mgr = init_dict({}, index, columns, 
dtype=dtype)\n # For data is scalar\n else:\n if index is None or columns is None:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n if not dtype:\n dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)\n\n # For data is a scalar extension dtype\n if is_extension_array_dtype(dtype):\n\n values = [\n construct_1d_arraylike_from_scalar(data, len(index), dtype)\n for _ in range(len(columns))\n ]\n mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)\n else:\n if dtype.kind in [\"m\", \"M\"]:\n data = maybe_unbox_datetimelike(data, dtype)\n\n # Attempt to coerce to a numpy array\n try:\n arr = np.array(data, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n\n if arr.ndim != 0:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n shape = (len(index), len(columns))\n values = np.full(shape, arr)\n\n mgr = init_ndarray(\n values, index, columns, dtype=values.dtype, copy=False\n )\n\n NDFrame.__init__(self, mgr)\n\n # ----------------------------------------------------------------------\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],\n dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n See Also\n --------\n ndarray.shape : Tuple of array dimensions.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self.index), len(self.columns)\n\n @property\n def _is_homogeneous_type(self) -> bool:\n \"\"\"\n Whether all the columns in a DataFrame have the same type.\n\n Returns\n -------\n bool\n\n See Also\n --------\n Index._is_homogeneous_type : Whether the object has a single\n dtype.\n MultiIndex._is_homogeneous_type : Whether all the levels of a\n MultiIndex have the same dtype.\n\n Examples\n --------\n >>> DataFrame({\"A\": [1, 2], \"B\": [3, 4]})._is_homogeneous_type\n True\n >>> DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.0]})._is_homogeneous_type\n False\n\n Items with the same type but different sizes are considered\n different types.\n\n >>> DataFrame({\n ... \"A\": np.array([1, 2], dtype=np.int32),\n ... 
\"B\": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type\n False\n \"\"\"\n if self._mgr.any_extension_types:\n return len({block.dtype for block in self._mgr.blocks}) == 1\n else:\n return not self._is_mixed_type\n\n @property\n def _can_fast_transpose(self) -> bool:\n \"\"\"\n Can we transpose this DataFrame without creating any new array objects.\n \"\"\"\n if self._mgr.any_extension_types:\n # TODO(EA2D) special case would be unnecessary with 2D EAs\n return False\n return len(self._mgr.blocks) == 1\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def _repr_fits_vertical_(self) -> bool:\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns.\n\n In case of non-interactive session, no boundaries apply.\n\n `ignore_width` is here so ipynb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n width, height = console.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if (max_columns and nb_columns > max_columns) or (\n (not ignore_width) and width and nb_columns > (width // 2)\n ):\n return False\n\n # used by repr_html under IPython notebook or scripts ignore terminal\n # dims\n if ignore_width or not console.in_interactive_session():\n return True\n\n if get_option(\"display.width\") is not None or console.in_ipython_frontend():\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actually checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[: min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max(len(line) for line in value.split(\"\\n\"))\n\n return repr_width < width\n\n def _info_repr(self) -> bool:\n \"\"\"\n True if the repr should show the info view.\n \"\"\"\n info_repr_option = get_option(\"display.large_repr\") == \"info\"\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular DataFrame.\n \"\"\"\n buf = StringIO(\"\")\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n max_colwidth = get_option(\"display.max_colwidth\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = console.get_console_size()\n else:\n width = None\n self.to_string(\n buf=buf,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n line_width=width,\n max_colwidth=max_colwidth,\n show_dimensions=show_dimensions,\n )\n\n return buf.getvalue()\n\n def _repr_html_(self) -> Optional[str]:\n \"\"\"\n Return a html 
representation for a particular DataFrame.\n\n Mainly for IPython notebook.\n \"\"\"\n if self._info_repr():\n buf = StringIO(\"\")\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace(\"<\", r\"<\", 1)\n val = val.replace(\">\", r\">\", 1)\n return \"<pre>\" + val + \"</pre>\"\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=None,\n col_space=None,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n justify=None,\n index_names=True,\n header=True,\n index=True,\n bold_rows=True,\n escape=True,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=\".\",\n )\n return fmt.DataFrameRenderer(formatter).to_html(notebook=True)\n else:\n return None\n\n @Substitution(\n header_type=\"bool or sequence\",\n header=\"Write out the column names. If a list of strings \"\n \"is given, it is assumed to be aliases for the \"\n \"column names\",\n col_space_type=\"int, list or dict of int\",\n col_space=\"The minimum width of each column\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_string(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[int] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[fmt.FormattersType] = None,\n float_format: Optional[fmt.FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n min_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n max_colwidth: Optional[int] = None,\n encoding: Optional[str] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n %(shared_params)s\n line_width : int, optional\n Width to wrap a line in characters.\n max_colwidth : int, optional\n Max width to truncate each column in characters. By default, no limit.\n\n .. versionadded:: 1.0.0\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. 
versionadded:: 1.0\n %(returns)s\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}\n >>> df = pd.DataFrame(d)\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n from pandas import option_context\n\n with option_context(\"display.max_colwidth\", max_colwidth):\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header,\n index=index,\n min_rows=min_rows,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=decimal,\n )\n return fmt.DataFrameRenderer(formatter).to_string(\n buf=buf,\n encoding=encoding,\n line_width=line_width,\n )\n\n # ----------------------------------------------------------------------\n\n @property\n def style(self) -> Styler:\n \"\"\"\n Returns a Styler object.\n\n Contains methods for building a styled HTML representation of the DataFrame.\n\n See Also\n --------\n io.formats.style.Styler : Helps style a DataFrame or Series according to the\n data with HTML and CSS.\n \"\"\"\n from pandas.io.formats.style import Styler\n\n return Styler(self)\n\n _shared_docs[\n \"items\"\n ] = r\"\"\"\n Iterate over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Yields\n ------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as\n (index, Series) pairs.\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples\n of the values.\n\n Examples\n --------\n >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n >>> for label, content in df.items():\n ... print(f'label: {label}')\n ... print(f'content: {content}', sep='\\n')\n ...\n label: species\n content:\n panda bear\n polar bear\n koala marsupial\n Name: species, dtype: object\n label: population\n content:\n panda 1864\n polar 22000\n koala 80000\n Name: population, dtype: int64\n \"\"\"\n\n @Appender(_shared_docs[\"items\"])\n def items(self) -> Iterable[Tuple[Label, Series]]:\n if self.columns.is_unique and hasattr(self, \"_item_cache\"):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self._ixs(i, axis=1)\n\n @Appender(_shared_docs[\"items\"])\n def iteritems(self) -> Iterable[Tuple[Label, Series]]:\n yield from self.items()\n\n def iterrows(self) -> Iterable[Tuple[Label, Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : Series\n The data of the row as a Series.\n\n See Also\n --------\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). 
For example,\n\n >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n columns = self.columns\n klass = self._constructor_sliced\n for k, v in zip(self.index, self.values):\n s = klass(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index: bool = True, name: Optional[str] = \"Pandas\"):\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"Pandas\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> for row in df.itertuples():\n ... print(row)\n ...\n Pandas(Index='dog', num_legs=4, num_wings=0)\n Pandas(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n Pandas(num_legs=4, num_wings=0)\n Pandas(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... 
print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n arrays = []\n fields = list(self.columns)\n if index:\n arrays.append(self.index)\n fields.insert(0, \"Index\")\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n\n if name is not None:\n # https://github.com/python/mypy/issues/9046\n # error: namedtuple() expects a string literal as the first argument\n itertuple = collections.namedtuple( # type: ignore[misc]\n name, fields, rename=True\n )\n return map(itertuple._make, zip(*arrays))\n\n # fallback to regular tuples\n return zip(*arrays)\n\n def __len__(self) -> int:\n \"\"\"\n Returns length of info axis, but here we use the index.\n \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series, DataFrame or a numpy array.\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series or DataFrame\n If other is a Series, return the matrix product between self and\n other as a Series. If other is a DataFrame or a numpy.array, return\n the matrix product of self and other in a DataFrame of a np.array.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n Here we multiply a DataFrame with a Series.\n\n >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> s = pd.Series([1, 1, 2, 1])\n >>> df.dot(s)\n 0 -4\n 1 5\n dtype: int64\n\n Here we multiply a DataFrame with another DataFrame.\n\n >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(other)\n 0 1\n 0 1 4\n 1 2 2\n\n Note that the dot method give the same result as @\n\n >>> df @ other\n 0 1\n 0 1 4\n 1 2 2\n\n The dot method works also if other is an np.array.\n\n >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(arr)\n 0 1\n 0 1 4\n 1 2 2\n\n Note how shuffling of the objects does not change the result.\n\n >>> s2 = s.reindex([1, 0, 2, 3])\n >>> df.dot(s2)\n 0 -4\n 1 5\n dtype: int64\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if len(common) > len(self.columns) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right._values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, DataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=left.index, columns=other.columns\n )\n elif isinstance(other, Series):\n return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, 
(np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return self._constructor_sliced(result, index=left.index)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n try:\n return self.T.dot(np.transpose(other)).T\n except ValueError as err:\n if \"shape mismatch\" not in str(err):\n raise\n # GH#21581 give exception message for original shapes\n msg = f\"shapes {np.shape(other)} and {self.shape} not aligned\"\n raise ValueError(msg) from err\n\n # ----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient=\"columns\", dtype=None, columns=None) -> DataFrame:\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data, orient='index')\n 0 1 2 3\n row_1 3 2 1 0\n row_2 a b c d\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> pd.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D'])\n A B C D\n row_1 3 2 1 0\n row_2 a b c d\n \"\"\"\n index = None\n orient = orient.lower()\n if orient == \"index\":\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient == \"columns\":\n if columns is not None:\n raise ValueError(\"cannot use columns parameter with orient='columns'\")\n else: # pragma: no cover\n raise ValueError(\"only recognize index or columns for orient\")\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n def to_numpy(\n self, dtype=None, copy: bool = False, na_value=lib.no_default\n ) -> np.ndarray:\n \"\"\"\n Convert the DataFrame to a NumPy array.\n\n .. versionadded:: 0.24.0\n\n By default, the dtype of the returned array will be the common NumPy\n dtype of all types in the DataFrame. 
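# --- Editor's aside (illustrative sketch; not part of the original pandas source) ---
# The common-dtype rule for to_numpy() stated above, plus its `dtype` and `na_value`
# arguments; assumes pandas >= 1.1 and numpy, with a hypothetical frame.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, np.nan]})
print(df.to_numpy().dtype)           # float64 -- the common dtype of the int64 and float64 columns
print(df.to_numpy(dtype="float32"))  # request a specific dtype instead
print(df.to_numpy(na_value=0.0))     # substitute missing values on export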
For example, if the dtypes are\n ``float16`` and ``float32``, the results dtype will be ``float32``.\n This may require copying data and coercing values, which may be\n expensive.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the dtypes of the DataFrame columns.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.to_numpy : Similar method for Series.\n\n Examples\n --------\n >>> pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to\n be used.\n\n >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5]})\n >>> df.to_numpy()\n array([[1. , 3. ],\n [2. , 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will\n have object dtype.\n\n >>> df['C'] = pd.date_range('2000', periods=2)\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],\n [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n result = self._mgr.as_array(\n transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value\n )\n if result.dtype is not dtype:\n result = np.array(result, dtype=dtype, copy=False)\n\n return result\n\n def to_dict(self, orient: str = \"dict\", into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n See Also\n --------\n DataFrame.from_dict: Create a DataFrame from a dictionary.\n DataFrame.to_json: Convert a DataFrame to JSON format.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... 
index=['row1', 'row2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df.to_dict()\n {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}\n\n You can specify the return orientation.\n\n >>> df.to_dict('series')\n {'col1': row1 1\n row2 2\n Name: col1, dtype: int64,\n 'col2': row1 0.50\n row2 0.75\n Name: col2, dtype: float64}\n\n >>> df.to_dict('split')\n {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],\n 'data': [[1, 0.5], [2, 0.75]]}\n\n >>> df.to_dict('records')\n [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]\n\n >>> df.to_dict('index')\n {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),\n ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd)\n [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),\n defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\n \"DataFrame columns are not unique, some columns will be omitted.\",\n UserWarning,\n stacklevel=2,\n )\n # GH16122\n into_c = com.standardize_mapping(into)\n\n orient = orient.lower()\n # GH32515\n if orient.startswith((\"d\", \"l\", \"s\", \"r\", \"i\")) and orient not in {\n \"dict\",\n \"list\",\n \"series\",\n \"split\",\n \"records\",\n \"index\",\n }:\n warnings.warn(\n \"Using short name for 'orient' is deprecated. Only the \"\n \"options: ('dict', list, 'series', 'split', 'records', 'index') \"\n \"will be used in a future version. Use one of the above \"\n \"to silence this warning.\",\n FutureWarning,\n )\n\n if orient.startswith(\"d\"):\n orient = \"dict\"\n elif orient.startswith(\"l\"):\n orient = \"list\"\n elif orient.startswith(\"sp\"):\n orient = \"split\"\n elif orient.startswith(\"s\"):\n orient = \"series\"\n elif orient.startswith(\"r\"):\n orient = \"records\"\n elif orient.startswith(\"i\"):\n orient = \"index\"\n\n if orient == \"dict\":\n return into_c((k, v.to_dict(into)) for k, v in self.items())\n\n elif orient == \"list\":\n return into_c((k, v.tolist()) for k, v in self.items())\n\n elif orient == \"split\":\n return into_c(\n (\n (\"index\", self.index.tolist()),\n (\"columns\", self.columns.tolist()),\n (\n \"data\",\n [\n list(map(maybe_box_datetimelike, t))\n for t in self.itertuples(index=False, name=None)\n ],\n ),\n )\n )\n\n elif orient == \"series\":\n return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())\n\n elif orient == \"records\":\n columns = self.columns.tolist()\n rows = (\n dict(zip(columns, row))\n for row in self.itertuples(index=False, name=None)\n )\n return [\n into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())\n for row in rows\n ]\n\n elif orient == \"index\":\n if not self.index.is_unique:\n raise ValueError(\"DataFrame index must be unique for orient='index'.\")\n return into_c(\n (t[0], dict(zip(self.columns, t[1:])))\n for t in self.itertuples(name=None)\n )\n\n else:\n raise ValueError(f\"orient '{orient}' not understood\")\n\n def to_gbq(\n self,\n destination_table: str,\n project_id: Optional[str] = None,\n chunksize: Optional[int] = None,\n reauth: bool = False,\n if_exists: str = \"fail\",\n auth_local_webserver: bool = False,\n table_schema: Optional[List[Dict[str, str]]] = None,\n location: 
Optional[str] = None,\n progress_bar: bool = True,\n credentials=None,\n ) -> None:\n \"\"\"\n Write a DataFrame to a Google BigQuery table.\n\n This function requires the `pandas-gbq package\n <https://pandas-gbq.readthedocs.io>`__.\n\n See the `How to authenticate with Google BigQuery\n <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__\n guide for authentication instructions.\n\n Parameters\n ----------\n destination_table : str\n Name of table to be written, in the form ``dataset.tablename``.\n project_id : str, optional\n Google BigQuery Account project ID. Optional when available from\n the environment.\n chunksize : int, optional\n Number of rows to be inserted in each chunk from the dataframe.\n Set to ``None`` to load the whole dataframe at once.\n reauth : bool, default False\n Force Google BigQuery to re-authenticate the user. This is useful\n if multiple accounts are used.\n if_exists : str, default 'fail'\n Behavior when the destination table exists. Value can be one of:\n\n ``'fail'``\n If table exists raise pandas_gbq.gbq.TableCreationError.\n ``'replace'``\n If table exists, drop it, recreate it, and insert data.\n ``'append'``\n If table exists, insert data. Create if does not exist.\n auth_local_webserver : bool, default False\n Use the `local webserver flow`_ instead of the `console flow`_\n when getting user credentials.\n\n .. _local webserver flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server\n .. _console flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console\n\n *New in version 0.2.0 of pandas-gbq*.\n table_schema : list of dicts, optional\n List of BigQuery table fields to which according DataFrame\n columns conform to, e.g. ``[{'name': 'col1', 'type':\n 'STRING'},...]``. If schema is not provided, it will be\n generated according to dtypes of DataFrame columns. See\n BigQuery API documentation on available names of a field.\n\n *New in version 0.3.1 of pandas-gbq*.\n location : str, optional\n Location where the load job should run. See the `BigQuery locations\n documentation\n <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a\n list of available locations. The location must match that of the\n target dataset.\n\n *New in version 0.5.0 of pandas-gbq*.\n progress_bar : bool, default True\n Use the library `tqdm` to show the progress bar for the upload,\n chunk by chunk.\n\n *New in version 0.5.0 of pandas-gbq*.\n credentials : google.auth.credentials.Credentials, optional\n Credentials for accessing Google APIs. Use this parameter to\n override default credentials, such as to use Compute Engine\n :class:`google.auth.compute_engine.Credentials` or Service\n Account :class:`google.oauth2.service_account.Credentials`\n directly.\n\n *New in version 0.8.0 of pandas-gbq*.\n\n .. 
versionadded:: 0.24.0\n\n See Also\n --------\n pandas_gbq.to_gbq : This function in the pandas-gbq library.\n read_gbq : Read a DataFrame from Google BigQuery.\n \"\"\"\n from pandas.io import gbq\n\n gbq.to_gbq(\n self,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n\n @classmethod\n def from_records(\n cls,\n data,\n index=None,\n exclude=None,\n columns=None,\n coerce_float: bool = False,\n nrows=None,\n ) -> DataFrame:\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Creates a DataFrame object from a structured ndarray, sequence of\n tuples or dicts, or DataFrame.\n\n Parameters\n ----------\n data : structured ndarray, sequence of tuples or dicts, or DataFrame\n Structured input data.\n index : str, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use.\n exclude : sequence, default None\n Columns or fields to exclude.\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns).\n coerce_float : bool, default False\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n nrows : int, default None\n Number of rows to read if data is an iterator.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_dict : DataFrame from dict of array-like or dicts.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n Data can be provided as a structured ndarray:\n\n >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],\n ... dtype=[('col_1', 'i4'), ('col_2', 'U1')])\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of dicts:\n\n >>> data = [{'col_1': 3, 'col_2': 'a'},\n ... {'col_1': 2, 'col_2': 'b'},\n ... {'col_1': 1, 'col_2': 'c'},\n ... 
{'col_1': 0, 'col_2': 'd'}]\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of tuples with corresponding columns:\n\n >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]\n >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = ensure_index(columns)\n\n if is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, \"dtype\") and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns_list = []\n for k, v in data.items():\n if k in columns:\n arr_columns_list.append(k)\n arrays.append(v)\n\n arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = to_arrays(data, columns)\n if columns is not None:\n columns = ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = to_arrays(data, columns)\n if coerce_float:\n for i, arr in enumerate(arrays):\n if arr.dtype == object:\n arrays[i] = lib.maybe_convert_objects(arr, try_float=True)\n\n arr_columns = ensure_index(arr_columns)\n if columns is not None:\n columns = ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if isinstance(index, str) or not hasattr(index, \"__iter__\"):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n index_data = [arrays[arr_columns.get_loc(field)] for field in index]\n except (KeyError, TypeError):\n # raised by get_loc, see GH#29258\n result_index = index\n else:\n result_index = ensure_index_from_sequences(index_data, names=index)\n exclude.update(index)\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)\n\n return cls(mgr)\n\n def to_records(\n self, index=True, column_dtypes=None, index_dtypes=None\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n .. versionadded:: 0.24.0\n\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n .. 
versionadded:: 0.24.0\n\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n If the DataFrame index has no label then the recarray field name\n is set to 'index'. If the index has a label then this is used as the\n field name:\n\n >>> df.index = df.index.rename(\"I\")\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False)\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"})\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n As well as for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\")\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])\n\n >>> index_dtypes = f\"<S{df.index.str.len().max()}\"\n >>> df.to_records(index_dtypes=index_dtypes)\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n if index:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. 
copy copy copy\n ix_vals = list(map(np.array, zip(*self.index._values)))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [\n np.asarray(self.iloc[:, i]) for i in range(len(self.columns))\n ]\n\n count = 0\n index_names = list(self.index.names)\n\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = f\"level_{count}\"\n count += 1\n elif index_names[0] is None:\n index_names = [\"index\"]\n\n names = [str(name) for name in itertools.chain(index_names, self.columns)]\n else:\n arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]\n names = [str(c) for c in self.columns]\n index_names = []\n\n index_len = len(index_names)\n formats = []\n\n for i, v in enumerate(arrays):\n index = i\n\n # When the names and arrays are collected, we\n # first collect those in the DataFrame's index,\n # followed by those in its columns.\n #\n # Thus, the total length of the array is:\n # len(index_names) + len(DataFrame.columns).\n #\n # This check allows us to see whether we are\n # handling a name / array in the index or column.\n if index < index_len:\n dtype_mapping = index_dtypes\n name = index_names[index]\n else:\n index -= index_len\n dtype_mapping = column_dtypes\n name = self.columns[index]\n\n # We have a dictionary, so we get the data type\n # associated with the index or column (which can\n # be denoted by its name in the DataFrame or its\n # position in DataFrame's array of indices or\n # columns, whichever is applicable.\n if is_dict_like(dtype_mapping):\n if name in dtype_mapping:\n dtype_mapping = dtype_mapping[name]\n elif index in dtype_mapping:\n dtype_mapping = dtype_mapping[index]\n else:\n dtype_mapping = None\n\n # If no mapping can be found, use the array's\n # dtype attribute for formatting.\n #\n # A valid dtype must either be a type or\n # string naming a type.\n if dtype_mapping is None:\n formats.append(v.dtype)\n elif isinstance(dtype_mapping, (type, np.dtype, str)):\n formats.append(dtype_mapping)\n else:\n element = \"row\" if i < index_len else \"column\"\n msg = f\"Invalid dtype {dtype_mapping} specified for {element} {name}\"\n raise ValueError(msg)\n\n return np.rec.fromarrays(arrays, dtype={\"names\": names, \"formats\": formats})\n\n @classmethod\n def _from_arrays(\n cls,\n arrays,\n columns,\n index,\n dtype: Optional[Dtype] = None,\n verify_integrity: bool = True,\n ) -> DataFrame:\n \"\"\"\n Create DataFrame from a list of arrays corresponding to the columns.\n\n Parameters\n ----------\n arrays : list-like of arrays\n Each array in the list corresponds to one column, in order.\n columns : list-like, Index\n The column names for the resulting DataFrame.\n index : list-like, Index\n The rows labels for the resulting DataFrame.\n dtype : dtype, optional\n Optional dtype to enforce for all arrays.\n verify_integrity : bool, default True\n Validate and homogenize all input. 
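# --- Editor's aside (illustrative sketch; not part of the original pandas source) ---
# Round trip through to_records() / from_records(), both defined above; assumes a
# recent pandas and uses a hypothetical frame.
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
rec = df.to_records()                                 # numpy recarray; the index is stored in field 'index'
back = pd.DataFrame.from_records(rec, index="index")  # rebuild the frame, reusing that field as the index
print(back)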
If set to False, it is assumed\n that all elements of `arrays` are actual arrays how they will be\n stored in a block (numpy ndarray or ExtensionArray), have the same\n length as and are aligned with the index, and that `columns` and\n `index` are ensured to be an Index object.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n mgr = arrays_to_mgr(\n arrays,\n columns,\n index,\n columns,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n return cls(mgr)\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_stata(\n self,\n path: FilePathOrBuffer,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n version: Optional[int] = 114,\n convert_strl: Optional[Sequence[Label]] = None,\n compression: CompressionOptions = \"infer\",\n storage_options: StorageOptions = None,\n ) -> None:\n \"\"\"\n Export DataFrame object to Stata dta format.\n\n Writes the DataFrame to a Stata dataset file.\n \"dta\" files contain a Stata dataset.\n\n Parameters\n ----------\n path : str, buffer or path object\n String, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() function. If using a buffer\n then the buffer will not be automatically closed after the file\n data has been written.\n\n .. versionchanged:: 1.0.0\n\n Previously this was \"fname\"\n\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata\n internal format to use when writing the dates. Options are 'tc',\n 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer\n or a name. Datetime columns that do not have a conversion type\n specified will be converted to 'tc'. Raises NotImplementedError if\n a datetime column has timezone information.\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`.\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current\n time.\n data_label : str, optional\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as\n values. Each label must be 80 characters or smaller.\n version : {{114, 117, 118, 119, None}}, default 114\n Version to use in the output dta file. Set to None to let pandas\n decide between 118 or 119 formats depending on the number of\n columns in the frame. Version 114 can be read by Stata 10 and\n later. Version 117 can be read by Stata 13 or later. Version 118\n is supported in Stata 14 and later. Version 119 is supported in\n Stata 15 and later. Version 114 limits string variables to 244\n characters or fewer while versions 117 and later allow strings\n with lengths up to 2,000,000 characters. Versions 118 and 119\n support Unicode characters, and version 119 supports more than\n 32,767 variables.\n\n Version 119 should usually only be used when the number of\n variables exceeds the capacity of dta format 118. Exporting\n smaller datasets in format 119 may have unintended consequences,\n and, as of November 2020, Stata SE cannot read version 119 files.\n\n .. 
versionchanged:: 1.0.0\n\n Added support for formats 118 and 119.\n\n convert_strl : list, optional\n List of column names to convert to string columns to Stata StrL\n format. Only available if version is 117. Storing strings in the\n StrL format can produce smaller dta files if strings have more than\n 8 characters and values are repeated.\n compression : str or dict, default 'infer'\n For on-the-fly compression of the output dta. If string, specifies\n compression mode. If dict, value at key 'method' specifies\n compression mode. Compression mode must be one of {{'infer', 'gzip',\n 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and\n `fname` is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n compression). If dict and compression mode is one of {{'zip',\n 'gzip', 'bz2'}}, or inferred as one of the above, other entries\n passed as additional compression options.\n\n .. versionadded:: 1.1.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n * Column dtype is not representable in Stata\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n See Also\n --------\n read_stata : Import Stata data files.\n io.stata.StataWriter : Low-level writer for Stata data files.\n io.stata.StataWriter117 : Low-level writer for version 117 files.\n\n Examples\n --------\n >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}})\n >>> df.to_stata('animals.dta') # doctest: +SKIP\n \"\"\"\n if version not in (114, 117, 118, 119, None):\n raise ValueError(\"Only formats 114, 117, 118 and 119 are supported.\")\n if version == 114:\n if convert_strl is not None:\n raise ValueError(\"strl is not supported in format 114\")\n from pandas.io.stata import StataWriter as statawriter\n elif version == 117:\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriter117 as statawriter,\n )\n else: # versions 118 and 119\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriterUTF8 as statawriter,\n )\n\n kwargs: Dict[str, Any] = {}\n if version is None or version >= 117:\n # strl conversion is only supported >= 117\n kwargs[\"convert_strl\"] = convert_strl\n if version is None or version >= 118:\n # Specifying the version is only supported for UTF8 (118 or 119)\n kwargs[\"version\"] = version\n\n # mypy: Too many arguments for \"StataWriter\"\n writer = statawriter( # type: ignore[call-arg]\n path,\n self,\n convert_dates=convert_dates,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n write_index=write_index,\n variable_labels=variable_labels,\n compression=compression,\n storage_options=storage_options,\n **kwargs,\n )\n writer.write_file()\n\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:\n \"\"\"\n Write a DataFrame to the binary Feather format.\n\n Parameters\n ----------\n path : str or file-like object\n If a string, it will be used as Root Directory path.\n **kwargs :\n Additional keywords passed to :func:`pyarrow.feather.write_feather`.\n Starting with 
pyarrow 0.17, this includes the `compression`,\n `compression_level`, `chunksize` and `version` keywords.\n\n .. versionadded:: 1.1.0\n \"\"\"\n from pandas.io.feather_format import to_feather\n\n to_feather(self, path, **kwargs)\n\n @doc(\n Series.to_markdown,\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=_shared_docs[\"storage_options\"],\n examples=\"\"\"Examples\n --------\n >>> df = pd.DataFrame(\n ... data={\"animal_1\": [\"elk\", \"pig\"], \"animal_2\": [\"dog\", \"quetzal\"]}\n ... )\n >>> print(df.to_markdown())\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(df.to_markdown(tablefmt=\"grid\"))\n +----+------------+------------+\n | | animal_1 | animal_2 |\n +====+============+============+\n | 0 | elk | dog |\n +----+------------+------------+\n | 1 | pig | quetzal |\n +----+------------+------------+\n \"\"\",\n )\n def to_markdown(\n self,\n buf: Optional[Union[IO[str], str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n if \"showindex\" in kwargs:\n warnings.warn(\n \"'showindex' is deprecated. Only 'index' will be used \"\n \"in a future version. Use 'index' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n\n kwargs.setdefault(\"headers\", \"keys\")\n kwargs.setdefault(\"tablefmt\", \"pipe\")\n kwargs.setdefault(\"showindex\", index)\n tabulate = import_optional_dependency(\"tabulate\")\n result = tabulate.tabulate(self, **kwargs)\n if buf is None:\n return result\n\n with get_handle(buf, mode, storage_options=storage_options) as handles:\n assert not isinstance(handles.handle, (str, mmap.mmap))\n handles.handle.writelines(result)\n return None\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_parquet(\n self,\n path: Optional[FilePathOrBuffer] = None,\n engine: str = \"auto\",\n compression: Optional[str] = \"snappy\",\n index: Optional[bool] = None,\n partition_cols: Optional[List[str]] = None,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[bytes]:\n \"\"\"\n Write a DataFrame to the binary parquet format.\n\n This function writes the dataframe as a `parquet file\n <https://parquet.apache.org/>`_. You can choose different parquet\n backends, and have the option of compression. See\n :ref:`the user guide <io.parquet>` for more details.\n\n Parameters\n ----------\n path : str or file-like object, default None\n If a string, it will be used as Root Directory path\n when writing a partitioned dataset. By file-like object,\n we refer to objects with a write() method, such as a file handle\n (e.g. via builtin open function) or io.BytesIO. The engine\n fastparquet does not accept file-like objects. If path is None,\n a bytes object is returned.\n\n .. versionchanged:: 1.2.0\n\n Previously this was \"fname\"\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'\n Name of the compression to use. 
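# --- Editor's aside (illustrative sketch; not part of the original pandas source) ---
# The two return modes of to_markdown() shown above: a str when `buf` is None,
# otherwise the table is written out and None is returned. Needs the optional
# `tabulate` package; the frame and the file name "table.md" are hypothetical.
import pandas as pd

df = pd.DataFrame({"x": [1, 2]})
text = df.to_markdown()      # returns the markdown table as a string
df.to_markdown("table.md")   # writes the table to the given path and returns None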
Use ``None`` for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output.\n If ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n\n .. versionadded:: 0.24.0\n\n partition_cols : list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n\n .. versionadded:: 0.24.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Additional arguments passed to the parquet library. See\n :ref:`pandas io <io.parquet>` for more details.\n\n Returns\n -------\n bytes if no path argument is provided else None\n\n See Also\n --------\n read_parquet : Read a parquet file.\n DataFrame.to_csv : Write a csv file.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_hdf : Write to hdf.\n\n Notes\n -----\n This function requires either the `fastparquet\n <https://pypi.org/project/fastparquet>`_ or `pyarrow\n <https://arrow.apache.org/docs/python/>`_ library.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})\n >>> df.to_parquet('df.parquet.gzip',\n ... compression='gzip') # doctest: +SKIP\n >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP\n col1 col2\n 0 1 3\n 1 2 4\n\n If you want to get a buffer to the parquet content you can use a io.BytesIO\n object, as long as you don't use partition_cols, which creates multiple files.\n\n >>> import io\n >>> f = io.BytesIO()\n >>> df.to_parquet(f)\n >>> f.seek(0)\n 0\n >>> content = f.read()\n \"\"\"\n from pandas.io.parquet import to_parquet\n\n return to_parquet(\n self,\n path,\n engine,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n @Substitution(\n header_type=\"bool\",\n header=\"Whether to print column labels, default True\",\n col_space_type=\"str or int, list or dict of int or str\",\n col_space=\"The minimum width of each column in CSS length \"\n \"units. An int is assumed to be px units.\\n\\n\"\n \" .. 
versionadded:: 0.25.0\\n\"\n \" Ability to use str\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_html(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[ColspaceArgType] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[FormattersType] = None,\n float_format: Optional[FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: Union[bool, str] = False,\n decimal: str = \".\",\n bold_rows: bool = True,\n classes: Optional[Union[str, List, Tuple]] = None,\n escape: bool = True,\n notebook: bool = False,\n border: Optional[int] = None,\n table_id: Optional[str] = None,\n render_links: bool = False,\n encoding: Optional[str] = None,\n ):\n \"\"\"\n Render a DataFrame as an HTML table.\n %(shared_params)s\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. Default ``pd.options.display.html.border``.\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. versionadded:: 1.0\n\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links.\n\n .. versionadded:: 0.24.0\n %(returns)s\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:\n raise ValueError(\"Invalid value for justify parameter\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n )\n # TODO: a generic formatter wld b in DataFrameFormatter\n return fmt.DataFrameRenderer(formatter).to_html(\n buf=buf,\n classes=classes,\n notebook=notebook,\n border=border,\n encoding=encoding,\n table_id=table_id,\n render_links=render_links,\n )\n\n # ----------------------------------------------------------------------\n @Substitution(\n klass=\"DataFrame\",\n type_sub=\" and columns\",\n max_cols_sub=dedent(\n \"\"\"\\\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used.\"\"\"\n ),\n show_counts_sub=dedent(\n \"\"\"\\\n show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts.\n null_counts : bool, optional\n .. 
deprecated:: 1.2.0\n Use show_counts instead.\"\"\"\n ),\n examples_sub=dedent(\n \"\"\"\\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({\"int_col\": int_values, \"text_col\": text_values,\n ... \"float_col\": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\"df_info.txt\", \"w\",\n ... encoding=\"utf-8\") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)\n ... })\n >>> df.info()\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage='deep')\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB\"\"\"\n ),\n see_also_sub=dedent(\n \"\"\"\\\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns.\"\"\"\n ),\n version_added_sub=\"\",\n )\n @doc(BaseInfo.render)\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n memory_usage: Optional[Union[bool, str]] = None,\n show_counts: Optional[bool] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n if null_counts is not None:\n if show_counts is not None:\n raise ValueError(\"null_counts used with show_counts. Use show_counts.\")\n warnings.warn(\n \"null_counts is deprecated. 
Use show_counts instead\",\n FutureWarning,\n stacklevel=2,\n )\n show_counts = null_counts\n info = DataFrameInfo(\n data=self,\n memory_usage=memory_usage,\n )\n info.render(\n buf=buf,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n\n def memory_usage(self, index=True, deep=False) -> Series:\n \"\"\"\n Return the memory usage of each column in bytes.\n\n The memory usage can optionally include the contribution of\n the index and elements of `object` dtype.\n\n This value is displayed in `DataFrame.info` by default. This can be\n suppressed by setting ``pandas.options.display.memory_usage`` to False.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the DataFrame's\n index in returned Series. If ``index=True``, the memory usage of\n the index is the first item in the output.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the original column names and whose values\n is the memory usage of each column in bytes.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of an\n ndarray.\n Series.memory_usage : Bytes consumed by a Series.\n Categorical : Memory-efficient array for string values with\n many repeated values.\n DataFrame.info : Concise summary of a DataFrame.\n\n Examples\n --------\n >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']\n >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))\n ... for t in dtypes])\n >>> df = pd.DataFrame(data)\n >>> df.head()\n int64 float64 complex128 object bool\n 0 1 1.0 1.0+0.0j 1 True\n 1 1 1.0 1.0+0.0j 1 True\n 2 1 1.0 1.0+0.0j 1 True\n 3 1 1.0 1.0+0.0j 1 True\n 4 1 1.0 1.0+0.0j 1 True\n\n >>> df.memory_usage()\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n >>> df.memory_usage(index=False)\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n The memory footprint of `object` dtype columns is ignored by default:\n\n >>> df.memory_usage(deep=True)\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 180000\n bool 5000\n dtype: int64\n\n Use a Categorical for efficient storage of an object-dtype column with\n many repeated values.\n\n >>> df['object'].astype('category').memory_usage(deep=True)\n 5244\n \"\"\"\n result = self._constructor_sliced(\n [c.memory_usage(index=False, deep=deep) for col, c in self.items()],\n index=self.columns,\n )\n if index:\n result = self._constructor_sliced(\n self.index.memory_usage(deep=deep), index=[\"Index\"]\n ).append(result)\n return result\n\n def transpose(self, *args, copy: bool = False) -> DataFrame:\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. 
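# --- Editor's aside (illustrative sketch; not part of the original pandas source) ---
# Quick use of info() and memory_usage() as defined above; assumes pandas >= 1.2
# (for show_counts) and a hypothetical frame.
import pandas as pd

df = pd.DataFrame({"s": ["a", "bb", None], "n": [1, 2, 3]})
df.info(show_counts=True)                # always include the Non-Null Count column
print(df.memory_usage(deep=True).sum())  # total footprint in bytes, object data included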
The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n Parameters\n ----------\n *args : tuple, optional\n Accepted for compatibility with NumPy.\n copy : bool, default False\n Whether to copy the data after transposing, even for DataFrames\n with a single dtype.\n\n Note that a copy is always required for mixed dtype DataFrames,\n or for DataFrames with any extension types.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n See Also\n --------\n numpy.transpose : Permute the dimensions of a given array.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the `object` dtype. In such a case, a copy of the data\n is always made.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = pd.DataFrame(data=d1)\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T # or df1.transpose()\n >>> df1_transposed\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'name': ['Alice', 'Bob'],\n ... 'score': [9.5, 8],\n ... 'employed': [False, True],\n ... 'kids': [0, 0]}\n >>> df2 = pd.DataFrame(data=d2)\n >>> df2\n name score employed kids\n 0 Alice 9.5 False 0\n 1 Bob 8.0 True 0\n\n >>> df2_transposed = df2.T # or df2.transpose()\n >>> df2_transposed\n 0 1\n name Alice Bob\n score 9.5 8.0\n employed False True\n kids 0 0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the `object` dtype:\n\n >>> df2.dtypes\n name object\n score float64\n employed bool\n kids int64\n dtype: object\n >>> df2_transposed.dtypes\n 0 object\n 1 object\n dtype: object\n \"\"\"\n nv.validate_transpose(args, {})\n # construct the args\n\n dtypes = list(self.dtypes)\n if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):\n # We have EAs with the same dtype. 
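# --- Editor's aside (illustrative sketch; not part of the original pandas source) ---
# The surrounding branch preserves a homogeneous extension dtype through transpose();
# a hypothetical example with the nullable Int64 dtype, assuming a recent pandas.
import pandas as pd

df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64"),
                   "b": pd.array([3, 4], dtype="Int64")})
print(df.T.dtypes)   # every transposed column keeps the Int64 extension dtype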
We can preserve that dtype in transpose.\n dtype = dtypes[0]\n arr_type = dtype.construct_array_type()\n values = self.values\n\n new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]\n result = self._constructor(\n dict(zip(self.index, new_values)), index=self.columns\n )\n\n else:\n new_values = self.values.T\n if copy:\n new_values = new_values.copy()\n result = self._constructor(\n new_values, index=self.columns, columns=self.index\n )\n\n return result.__finalize__(self, method=\"transpose\")\n\n @property\n def T(self) -> DataFrame:\n return self.transpose()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Parameters\n ----------\n i : int\n axis : int\n\n Notes\n -----\n If slice passed, the resulting data will be a view.\n \"\"\"\n # irow\n if axis == 0:\n new_values = self._mgr.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values, np.ndarray) and new_values.base is None\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[i],\n dtype=new_values.dtype,\n )\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n label = self.columns[i]\n\n values = self._mgr.iget(i)\n result = self._box_col_values(values, i)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def _get_column_array(self, i: int) -> ArrayLike:\n \"\"\"\n Get the values of the i'th column (ndarray or ExtensionArray, as stored\n in the Block)\n \"\"\"\n return self._mgr.iget_values(i)\n\n def _iter_column_arrays(self) -> Iterator[ArrayLike]:\n \"\"\"\n Iterate over the arrays of all columns in order.\n This returns the values as stored in the Block (ndarray or ExtensionArray).\n \"\"\"\n for i in range(len(self.columns)):\n yield self._get_column_array(i)\n\n def __getitem__(self, key):\n key = lib.item_from_zerodim(key)\n key = com.apply_if_callable(key, self)\n\n if is_hashable(key):\n # shortcut if the key is in columns\n if self.columns.is_unique and key in self.columns:\n if isinstance(self.columns, MultiIndex):\n return self._getitem_multilevel(key)\n return self._get_item_cache(key)\n\n # Do we have a slicer (on rows)?\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n if isinstance(indexer, np.ndarray):\n indexer = lib.maybe_indices_to_slice(\n indexer.astype(np.intp, copy=False), len(self)\n )\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._slice(indexer, axis=0)\n\n # Do we have a (boolean) DataFrame?\n if isinstance(key, DataFrame):\n return self.where(key)\n\n # Do we have a (boolean) 1d indexer?\n if com.is_bool_indexer(key):\n return self._getitem_bool_array(key)\n\n # We are left with two options: a single key, and a collection of keys,\n # We interpret tuples as collections only for non-MultiIndex\n is_single_key = isinstance(key, tuple) or not is_list_like(key)\n\n if is_single_key:\n if self.columns.nlevels > 1:\n return self._getitem_multilevel(key)\n indexer = self.columns.get_loc(key)\n if is_integer(indexer):\n indexer = [indexer]\n else:\n if is_iterator(key):\n key = list(key)\n indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]\n\n # take() does not accept boolean indexers\n if getattr(indexer, \"dtype\", None) == bool:\n indexer = np.where(indexer)[0]\n\n data = self._take_with_is_copy(indexer, axis=1)\n\n if 
is_single_key:\n # What does looking for a single key in a non-unique index return?\n # The behavior is inconsistent. It returns a Series, except when\n # - the key itself is repeated (test on data.shape, #9519), or\n # - we have a MultiIndex on columns (test on self.columns, #21309)\n if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):\n # GH#26490 using data[key] can cause RecursionError\n data = data._get_item_cache(key)\n\n return data\n\n def _getitem_bool_array(self, key):\n # also raises Exception if object array with NA values\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\n \"Boolean Series key will be reindexed to match DataFrame index.\",\n UserWarning,\n stacklevel=3,\n )\n elif len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}.\"\n )\n\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self._take_with_is_copy(indexer, axis=0)\n\n def _getitem_multilevel(self, key):\n # self.columns is a MultiIndex\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = self._constructor(\n new_values, index=self.index, columns=result_columns\n )\n result = result.__finalize__(self)\n\n # If there is only one column being returned, and its name is\n # either an empty string, or a tuple with an empty string as its\n # first element, then treat the empty string as a placeholder\n # and return the column as if the user had provided that empty\n # string in the key. 
If the result is a Series, exclude the\n # implied empty string from its name.\n if len(result.columns) == 1:\n top = result.columns[0]\n if isinstance(top, tuple):\n top = top[0]\n if top == \"\":\n result = result[\"\"]\n if isinstance(result, Series):\n result = self._constructor_sliced(\n result, index=self.index, name=key\n )\n\n result._set_is_copy(self)\n return result\n else:\n # loc is neither a slice nor ndarray, so must be an int\n return self._ixs(loc, axis=1)\n\n def _get_value(self, index, col, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n scalar\n \"\"\"\n if takeable:\n series = self._ixs(col, axis=1)\n return series._values[index]\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n\n try:\n loc = engine.get_loc(index)\n return series._values[loc]\n except KeyError:\n # GH 20629\n if self.index.nlevels > 1:\n # partial indexing forbidden\n raise\n\n # we cannot handle direct indexing\n # use positional\n col = self.columns.get_loc(col)\n index = self.index.get_loc(index)\n return self._get_value(index, col, takeable=True)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, DataFrame) or getattr(key, \"ndim\", None) == 2:\n self._setitem_frame(key, value)\n elif isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(value, DataFrame):\n self._set_item_frame_value(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key: slice, value):\n # NB: we can't just use self.loc[key] = value because that\n # operates on labels and we need to operate positional for\n # backwards-compat, xref GH#31469\n self._check_setitem_copy()\n self.iloc[key] = value\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}!\"\n )\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.iloc[indexer] = value\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError(\"Columns must be same length as key\")\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n self.loc._ensure_listlike_indexer(key, axis=1, value=value)\n indexer = self.loc._get_listlike_indexer(\n key, axis=1, raise_missing=False\n )[1]\n self._check_setitem_copy()\n self.iloc[:, indexer] = value\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if isinstance(key, np.ndarray):\n if key.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n key = self._constructor(key, **self._construct_axes_dict())\n\n if key.size and not is_bool_dtype(key.values):\n raise TypeError(\n \"Must pass DataFrame or 2-d ndarray with boolean values only\"\n )\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n 
self._where(-key, value, inplace=True)\n\n def _set_item_frame_value(self, key, value: \"DataFrame\") -> None:\n self._ensure_valid_index(value)\n\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex(cols, axis=1)\n\n # now align rows\n value = _reindex_for_setitem(value, self.index)\n value = value.T\n self._set_item_mgr(key, value)\n\n def _iset_item_mgr(self, loc: int, value) -> None:\n self._mgr.iset(loc, value)\n self._clear_item_cache()\n\n def _set_item_mgr(self, key, value):\n value = _maybe_atleast_2d(value)\n\n try:\n loc = self._info_axis.get_loc(key)\n except KeyError:\n # This item wasn't present, just insert at end\n self._mgr.insert(len(self._info_axis), key, value)\n else:\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _iset_item(self, loc: int, value):\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._iset_item_mgr(loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n value = self._sanitize_column(value)\n\n if (\n key in self.columns\n and value.ndim == 1\n and not is_extension_array_dtype(value)\n ):\n # broadcast across multiple columns if necessary\n if not self.columns.is_unique or isinstance(self.columns, MultiIndex):\n existing_piece = self[key]\n if isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n self._set_item_mgr(key, value)\n\n def _set_value(self, index, col, value, takeable: bool = False):\n \"\"\"\n Put single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar\n takeable : interpret the index/col as indexers, default False\n \"\"\"\n try:\n if takeable is True:\n series = self._ixs(col, axis=1)\n series._set_value(index, value, takeable=True)\n return\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n loc = engine.get_loc(index)\n validate_numeric_casting(series.dtype, value)\n\n series._values[loc] = value\n # Note: trying to use series._set_value breaks tests in\n # tests.frame.indexing.test_indexing and tests.indexing.test_partial\n except (KeyError, TypeError):\n # set using a non-recursive method & reset the cache\n if takeable:\n self.iloc[index, col] = value\n else:\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n Ensure that if we don't have an index, that we can create one from the\n passed value.\n \"\"\"\n # GH5632, make sure that we are a Series convertible\n if not len(self.index) and is_list_like(value) and len(value):\n try:\n value = Series(value)\n except (ValueError, 
NotImplementedError, TypeError) as err:\n raise ValueError(\n \"Cannot set a frame with no defined index \"\n \"and a value that cannot be converted to a Series\"\n ) from err\n\n # GH31368 preserve name of index\n index_copy = value.index.copy()\n if self.index.name is not None:\n index_copy.name = self.index.name\n\n self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)\n\n def _box_col_values(self, values, loc: int) -> Series:\n \"\"\"\n Provide boxed values for a column.\n \"\"\"\n # Lookup in columns so that if e.g. a str datetime was passed\n # we attach the Timestamp object as the name.\n name = self.columns[loc]\n klass = self._constructor_sliced\n return klass(values, index=self.index, name=name, fastpath=True)\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def query(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n\n You can refer to column names that are not valid Python variable names\n by surrounding them in backticks. Thus, column names containing spaces\n or punctuations (besides underscores) or starting with digits must be\n surrounded by backticks. (For example, a column named \"Area (cm^2) would\n be referenced as `Area (cm^2)`). Column names which are Python keywords\n (like \"list\", \"for\", \"import\", etc) cannot be used.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n .. versionadded:: 0.25.0\n Backtick quoting introduced.\n\n .. versionadded:: 1.0.0\n Expanding functionality of backtick quoting for more than only spaces.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n DataFrame or None\n DataFrame resulting from the provided query expression or\n None if ``inplace=True``.\n\n See Also\n --------\n eval : Evaluate a string describing operations on\n DataFrame columns.\n DataFrame.eval : Evaluate a string describing operations on\n DataFrame columns.\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. 
This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query. Please note that\n Python keywords may not be used as identifiers.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n *Backtick quoted variables*\n\n Backtick quoted variables are parsed as literal Python code and\n are converted internally to a Python valid identifier.\n This can lead to the following problems.\n\n During parsing a number of disallowed characters inside the backtick\n quoted string are replaced by strings that are allowed as a Python identifier.\n These characters include all operators in Python, the space character, the\n question mark, the exclamation mark, the dollar sign, and the euro sign.\n For other characters that fall outside the ASCII range (U+0001..U+007F)\n and those that are not further specified in PEP 3131,\n the query parser will raise an error.\n This excludes whitespace different than the space character,\n but also the hashtag (as it is used for comments) and the backtick\n itself (backtick can also not be escaped).\n\n In a special case, quotes that make a pair around a backtick can\n confuse the parser.\n For example, ```it's` > `that's``` will raise an error,\n as it forms a quoted string (``'s > `that'``) with a backtick inside.\n\n See also the Python documentation about lexical analysis\n (https://docs.python.org/3/reference/lexical_analysis.html)\n in combination with the source code in :mod:`pandas.core.computation.parsing`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not isinstance(expr, str):\n msg = f\"expr must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n kwargs[\"target\"] = None\n res = self.eval(expr, **kwargs)\n\n try:\n result = self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n result = self[res]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def eval(self, expr: str, inplace: bool = False, **kwargs):\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. 
This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ndarray, scalar, pandas object, or None\n The result of the evaluation or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Notes\n -----\n For more details see the API documentation for :func:`~eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n\n Multiple columns can be assigned to using multi-line expressions:\n\n >>> df.eval(\n ... '''\n ... C = A + B\n ... D = A - B\n ... '''\n ... )\n A B C D\n 0 1 10 11 -9\n 1 2 8 10 -6\n 2 3 6 9 -3\n 3 4 4 8 0\n 4 5 2 7 3\n \"\"\"\n from pandas.core.computation.eval import eval as _eval\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n resolvers = kwargs.pop(\"resolvers\", None)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n column_resolvers = self._get_cleaned_column_resolvers()\n resolvers = column_resolvers, index_resolvers\n if \"target\" not in kwargs:\n kwargs[\"target\"] = self\n kwargs[\"resolvers\"] = kwargs.get(\"resolvers\", ()) + tuple(resolvers)\n\n return _eval(expr, inplace=inplace, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None) -> DataFrame:\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. 
At least\n one of these parameters must be supplied.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n\n See Also\n --------\n DataFrame.dtypes: Return Series with the data type of each column.\n\n Notes\n -----\n * To select all *numeric* types, use ``np.number`` or ``'number'``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or\n ``'timedelta64'``\n * To select Pandas categorical dtypes, use ``'category'``\n * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in\n 0.20.0) or ``'datetime64[ns, tz]'``\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int64'])\n b c\n 0 True 1.0\n 1 False 2.0\n 2 True 1.0\n 3 False 2.0\n 4 True 1.0\n 5 False 2.0\n \"\"\"\n if not is_list_like(include):\n include = (include,) if include is not None else ()\n if not is_list_like(exclude):\n exclude = (exclude,) if exclude is not None else ()\n\n selection = (frozenset(include), frozenset(exclude))\n\n if not any(selection):\n raise ValueError(\"at least one of include or exclude must be nonempty\")\n\n # convert the myriad valid dtypes object to a single representation\n include = frozenset(infer_dtype_from_object(x) for x in include)\n exclude = frozenset(infer_dtype_from_object(x) for x in exclude)\n for dtypes in (include, exclude):\n invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError(f\"include and exclude overlap on {(include & exclude)}\")\n\n # We raise when both include and exclude are empty\n # Hence, we can just shrink the columns we want to keep\n keep_these = np.full(self.shape[1], True)\n\n def extract_unique_dtypes_from_dtypes_set(\n dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray\n ) -> List[Dtype]:\n extracted_dtypes = [\n unique_dtype\n for unique_dtype in unique_dtypes\n if (\n issubclass(\n unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]\n )\n or (\n np.number in dtypes_set\n and getattr(unique_dtype, \"_is_numeric\", False)\n )\n )\n ]\n return extracted_dtypes\n\n unique_dtypes = self.dtypes.unique()\n\n if include:\n included_dtypes = extract_unique_dtypes_from_dtypes_set(\n include, unique_dtypes\n )\n keep_these &= self.dtypes.isin(included_dtypes)\n\n if exclude:\n excluded_dtypes = extract_unique_dtypes_from_dtypes_set(\n exclude, unique_dtypes\n )\n keep_these &= ~self.dtypes.isin(excluded_dtypes)\n\n return self.iloc[:, keep_these.values]\n\n def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:\n \"\"\"\n Insert column into DataFrame at 
specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n\n See Also\n --------\n Index.insert : Insert new item by index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n >>> df.insert(1, \"newcol\", [99, 99])\n >>> df\n col1 newcol col2\n 0 1 99 3\n 1 2 99 4\n >>> df.insert(0, \"col1\", [100, 100], allow_duplicates=True)\n >>> df\n col1 col1 newcol col2\n 0 100 1 99 3\n 1 100 2 99 4\n \"\"\"\n if allow_duplicates and not self.flags.allows_duplicate_labels:\n raise ValueError(\n \"Cannot specify 'allow_duplicates=True' when \"\n \"'self.flags.allows_duplicate_labels' is False.\"\n )\n value = self._sanitize_column(value)\n value = _maybe_atleast_2d(value)\n self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs) -> DataFrame:\n r\"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n \"\"\"\n data = self.copy()\n\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data\n\n def _sanitize_column(self, value):\n \"\"\"\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n value : scalar, Series, or array-like\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n self._ensure_valid_index(value)\n\n # We should never get here with DataFrame value\n if isinstance(value, Series):\n value = _reindex_for_setitem(value, self.index)\n\n elif isinstance(value, ExtensionArray):\n # Explicitly copy here, instead of in sanitize_index,\n # as sanitize_index won't copy an EA, even with copy=True\n value = value.copy()\n value = sanitize_index(value, self.index)\n\n elif isinstance(value, Index) or is_sequence(value):\n\n # turn me into an ndarray\n value = sanitize_index(value, self.index)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = maybe_convert_platform(value)\n else:\n value = com.asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n elif isinstance(value, Index):\n value = value.copy(deep=True)\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = maybe_infer_to_datetimelike(value)\n\n else:\n value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)\n\n return value\n\n @property\n def _series(self):\n return {\n item: Series(\n self._mgr.iget(idx), index=self.index, name=item, fastpath=True\n )\n for idx, item in enumerate(self.columns)\n }\n\n def lookup(self, row_labels, col_labels) -> np.ndarray:\n \"\"\"\n Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n .. 
deprecated:: 1.2.0\n DataFrame.lookup is deprecated,\n use DataFrame.melt and DataFrame.loc instead.\n For an example see :meth:`~pandas.DataFrame.lookup`\n in the user guide.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup.\n col_labels : sequence\n The column labels to use for lookup.\n\n Returns\n -------\n numpy.ndarray\n The found values.\n \"\"\"\n msg = (\n \"The 'lookup' method is deprecated and will be\"\n \"removed in a future version.\"\n \"You can use DataFrame.melt and DataFrame.loc\"\n \"as a substitute.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError(\"Row labels must have same size as column labels\")\n if not (self.index.is_unique and self.columns.is_unique):\n # GH#33041\n raise ValueError(\"DataFrame.lookup requires unique index and columns\")\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError(\"One or more row labels was not found\")\n if (cidx == -1).any():\n raise KeyError(\"One or more column labels was not found\")\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype=\"O\")\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self._get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n # ----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):\n frame = self\n\n columns = axes[\"columns\"]\n if columns is not None:\n frame = frame._reindex_columns(\n columns, method, copy, level, fill_value, limit, tolerance\n )\n\n index = axes[\"index\"]\n if index is not None:\n frame = frame._reindex_index(\n index, method, copy, level, fill_value, limit, tolerance\n )\n\n return frame\n\n def _reindex_index(\n self,\n new_index,\n method,\n copy: bool,\n level: Level,\n fill_value=np.nan,\n limit=None,\n tolerance=None,\n ):\n new_index, indexer = self.index.reindex(\n new_index, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {0: [new_index, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_columns(\n self,\n new_columns,\n method,\n copy: bool,\n level: Level,\n fill_value=None,\n limit=None,\n tolerance=None,\n ):\n new_columns, indexer = self.columns.reindex(\n new_columns, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {1: [new_columns, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:\n \"\"\"\n We are guaranteed non-Nones in the axes.\n \"\"\"\n new_index, row_indexer = self.index.reindex(axes[\"index\"])\n new_columns, col_indexer = self.columns.reindex(axes[\"columns\"])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = algorithms.take_2d_multi(\n self.values, indexer, fill_value=fill_value\n )\n return self._constructor(new_values, index=new_index, columns=new_columns)\n else:\n return self._reindex_with_indexers(\n {0: [new_index, row_indexer], 1: [new_columns, col_indexer]},\n copy=copy,\n 
fill_value=fill_value,\n )\n\n @doc(NDFrame.align, **_shared_doc_kwargs)\n def align(\n self,\n other,\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n level: Optional[Level] = None,\n copy: bool = True,\n fill_value=None,\n method: Optional[str] = None,\n limit=None,\n fill_axis: Axis = 0,\n broadcast_axis: Optional[Axis] = None,\n ) -> DataFrame:\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\" column or\",\n axis_description_sub=\", and 1 identifies the columns\",\n see_also_sub=\" or columns\",\n )\n @Appender(NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.reindex.__doc__)\n @rewrite_axis_style_signature(\n \"labels\",\n [\n (\"method\", None),\n (\"copy\", True),\n (\"level\", None),\n (\"fill_value\", np.nan),\n (\"limit\", None),\n (\"tolerance\", None),\n ],\n )\n def reindex(self, *args, **kwargs) -> DataFrame:\n axes = validate_axis_style_args(self, args, kwargs, \"labels\", \"reindex\")\n kwargs.update(axes)\n # Pop these, since the values are in `kwargs` under different names\n kwargs.pop(\"axis\", None)\n kwargs.pop(\"labels\", None)\n return super().reindex(**kwargs)\n\n def drop(\n self,\n labels=None,\n axis: Axis = 0,\n index=None,\n columns=None,\n level: Optional[Level] = None,\n inplace: bool = False,\n errors: str = \"raise\",\n ):\n \"\"\"\n Drop specified labels from rows or columns.\n\n Remove rows or columns by specifying label names and corresponding\n axis, or by specifying directly index or column names. When using a\n multi-index, labels on different levels can be removed by specifying\n the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index or column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Whether to drop labels from the index (0 or 'index') or\n columns (1 or 'columns').\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is equivalent to ``index=labels``).\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If False, return a copy. 
Otherwise, do operation\n inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n DataFrame or None\n DataFrame without the removed index or column labels or\n None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis.\n\n See Also\n --------\n DataFrame.loc : Label-location based indexer for selection by label.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n DataFrame.drop_duplicates : Return DataFrame with duplicate rows\n removed, optionally only considering certain columns.\n Series.drop : Return Series with specified index labels removed.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n Drop columns and/or rows of MultiIndex DataFrame\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],\n ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],\n ... [250, 150], [1.5, 0.8], [320, 250],\n ... [1, 0.8], [0.3, 0.2]])\n >>> df\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n length 1.5 1.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n length 1.5 0.8\n falcon speed 320.0 250.0\n weight 1.0 0.8\n length 0.3 0.2\n\n >>> df.drop(index='cow', columns='small')\n big\n lama speed 45.0\n weight 200.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n\n >>> df.drop(index='length', level=1)\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n falcon speed 320.0 250.0\n weight 1.0 0.8\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @rewrite_axis_style_signature(\n \"mapper\",\n [(\"copy\", True), (\"inplace\", False), (\"level\", None), (\"errors\", \"ignore\")],\n )\n def rename(\n self,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Alter axes labels.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or function transformations to apply to\n that axis' values. Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index`` and\n ``columns``.\n index : dict-like or function\n Alternative to specifying axis (``mapper, axis=0``\n is equivalent to ``index=mapper``).\n columns : dict-like or function\n Alternative to specifying axis (``mapper, axis=1``\n is equivalent to ``columns=mapper``).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to target with ``mapper``. 
Can be either the axis name\n ('index', 'columns') or number (0, 1). The default is 'index'.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new DataFrame. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the renamed axis labels or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n DataFrame.rename_axis : Set the name of the axis.\n\n Examples\n --------\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Rename columns using a mapping:\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n Rename index using a mapping:\n\n >>> df.rename(index={0: \"x\", 1: \"y\", 2: \"z\"})\n A B\n x 1 4\n y 2 5\n z 3 6\n\n Cast index labels to a different type:\n\n >>> df.index\n RangeIndex(start=0, stop=3, step=1)\n >>> df.rename(index=str).index\n Index(['0', '1', '2'], dtype='object')\n\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"b\", \"C\": \"c\"}, errors=\"raise\")\n Traceback (most recent call last):\n KeyError: ['C'] not found in axis\n\n Using axis-style parameters:\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n \"\"\"\n return super().rename(\n mapper=mapper,\n index=index,\n columns=columns,\n axis=axis,\n copy=copy,\n inplace=inplace,\n level=level,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit=None,\n downcast=None,\n ) -> Optional[DataFrame]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Label) -> Series:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... 
columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n return super().pop(item=item)\n\n @doc(NDFrame.replace, **_shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace: bool = False,\n limit=None,\n regex: bool = False,\n method: str = \"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_columnwise(\n self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex\n ):\n \"\"\"\n Dispatch to Series.replace column-wise.\n\n\n Parameters\n ----------\n mapping : dict\n of the form {col: (target, value)}\n inplace : bool\n regex : bool or same types as `to_replace` in DataFrame.replace\n\n Returns\n -------\n DataFrame or None\n \"\"\"\n # Operate column-wise\n res = self if inplace else self.copy()\n ax = self.columns\n\n for i in range(len(ax)):\n if ax[i] in mapping:\n ser = self.iloc[:, i]\n\n target, value = mapping[ax[i]]\n newobj = ser.replace(target, value, regex=regex)\n\n res.iloc[:, i] = newobj\n\n if inplace:\n return\n return res.__finalize__(self)\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(\n self, periods=1, freq=None, axis: Axis = 0, fill_value=lib.no_default\n ) -> DataFrame:\n axis = self._get_axis_number(axis)\n\n ncols = len(self.columns)\n if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:\n # We will infer fill_value to match the closest column\n\n if periods > 0:\n result = self.iloc[:, :-periods]\n for col in range(min(ncols, abs(periods))):\n # TODO(EA2D): doing this in a loop unnecessary with 2D EAs\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, 0].shift(len(self))\n result.insert(0, col, filler, allow_duplicates=True)\n else:\n result = self.iloc[:, -periods:]\n for col in range(min(ncols, abs(periods))):\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, -1].shift(len(self))\n result.insert(\n len(result.columns), col, filler, allow_duplicates=True\n )\n\n result.columns = self.columns.copy()\n return result\n\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def set_index(\n self,\n keys,\n drop: bool = True,\n append: bool = False,\n inplace: bool = False,\n verify_integrity: bool = False,\n ):\n \"\"\"\n Set the DataFrame index using existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. 
Here, \"array\"\n encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and\n instances of :class:`~collections.abc.Iterator`.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n If True, modifies the DataFrame in place (do not create a new object).\n verify_integrity : bool, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. Setting to False will improve the performance of this\n method.\n\n Returns\n -------\n DataFrame or None\n Changed row labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]})\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month')\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month'])\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n\n Create a MultiIndex using an Index and a column:\n\n >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\n month sale\n year\n 1 2012 1 55\n 2 2014 4 40\n 3 2013 7 84\n 4 2014 10 31\n\n Create a MultiIndex using two Series:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> df.set_index([s, s**2])\n month year sale\n 1 1 1 2012 55\n 2 4 4 2014 40\n 3 9 7 2013 84\n 4 16 10 2014 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if not isinstance(keys, list):\n keys = [keys]\n\n err_msg = (\n 'The parameter \"keys\" may be a column key, one-dimensional '\n \"array, or a list containing only valid column keys and \"\n \"one-dimensional arrays.\"\n )\n\n missing: List[Label] = []\n for col in keys:\n if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):\n # arrays are fine as long as they are one-dimensional\n # iterators get converted to list below\n if getattr(col, \"ndim\", 1) != 1:\n raise ValueError(err_msg)\n else:\n # everything else gets tried as a key; see GH 24969\n try:\n found = col in self.columns\n except TypeError as err:\n raise TypeError(\n f\"{err_msg}. 
Received column of type {type(col)}\"\n ) from err\n else:\n if not found:\n missing.append(col)\n\n if missing:\n raise KeyError(f\"None of {missing} are in the columns\")\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names: List[Label] = []\n if append:\n names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index._get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove: List[Label] = []\n for col in keys:\n if isinstance(col, MultiIndex):\n for n in range(col.nlevels):\n arrays.append(col._get_level_values(n))\n names.extend(col.names)\n elif isinstance(col, (Index, Series)):\n # if Index then not MultiIndex (treated above)\n arrays.append(col)\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray)):\n arrays.append(col)\n names.append(None)\n elif isinstance(col, abc.Iterator):\n arrays.append(list(col))\n names.append(None)\n # from here, col can only be a column label\n else:\n arrays.append(frame[col]._values)\n names.append(col)\n if drop:\n to_remove.append(col)\n\n if len(arrays[-1]) != len(self):\n # check newest element against length of calling frame, since\n # ensure_index_from_sequences would not raise for append=False.\n raise ValueError(\n f\"Length mismatch: Expected {len(self)} rows, \"\n f\"received array of length {len(arrays[-1])}\"\n )\n\n index = ensure_index_from_sequences(arrays, names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index[index.duplicated()].unique()\n raise ValueError(f\"Index has duplicate keys: {duplicates}\")\n\n # use set to handle duplicate column names gracefully in case of drop\n for c in set(to_remove):\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n @overload\n # https://github.com/python/mypy/issues/6580\n # Overloaded function signatures 1 and 2 overlap with incompatible return types\n def reset_index( # type: ignore[misc]\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[False] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> DataFrame:\n ...\n\n @overload\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[True] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> None:\n ...\n\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: Hashable = 0,\n col_fill: Label = \"\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Reset the index, or a level of it.\n\n Reset the index of the DataFrame, and use the default one instead.\n If the DataFrame has a MultiIndex, this method can remove one or more\n levels.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. 
By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the new index or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column, and a\n new sequential index is used:\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = pd.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class')\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1)\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='species')\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='genus')\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n new_index = ibase.default_index(len(new_obj))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if not drop:\n to_insert: Iterable[Tuple[Any, Optional[Any]]]\n if isinstance(self.index, MultiIndex):\n names = [\n (n if n is not None else f\"level_{i}\")\n for i, n in enumerate(self.index.names)\n ]\n to_insert = zip(self.index.levels, self.index.codes)\n else:\n default = \"index\" if \"index\" not in self else \"level_0\"\n names = [default] if self.index.name is None else [self.index.name]\n to_insert = ((self.index, None),)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(to_insert))):\n if not (level is None or i in level):\n continue\n name = names[i]\n if multi_col:\n col_name = list(name) if isinstance(name, tuple) else [name]\n if col_fill is None:\n if len(col_name) not in (1, self.columns.nlevels):\n raise ValueError(\n \"col_fill=None is incompatible \"\n f\"with incomplete column name {name}\"\n )\n col_fill = col_name[0]\n\n lev_num = self.columns._get_level_number(col_level)\n name_lst = [col_fill] * lev_num + col_name\n missing = self.columns.nlevels - len(name_lst)\n name_lst += [col_fill] * missing\n name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = lev._values\n if level_values.dtype == np.object_:\n level_values = lib.maybe_convert_objects(level_values)\n\n if lab is not None:\n # if we have the codes, extract the values with a mask\n level_values = algorithms.take(\n level_values, lab, allow_fill=True, fill_value=lev._na_value\n )\n\n new_obj.insert(0, name, level_values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n return None\n\n # ----------------------------------------------------------------------\n # Reindex-based selection methods\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) -> DataFrame:\n result = self._constructor(self._mgr.isna(func=isna))\n return result.__finalize__(self, method=\"isna\")\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> DataFrame:\n return self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> DataFrame:\n return ~self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> DataFrame:\n return ~self.isna()\n\n def 
dropna(\n self,\n axis: Axis = 0,\n how: str = \"any\",\n thresh=None,\n subset=None,\n inplace: bool = False,\n ):\n \"\"\"\n Remove missing values.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n * 1, or 'columns' : Drop columns which contain missing value.\n\n .. versionchanged:: 1.0.0\n\n Pass tuple or list to drop on multiple axes.\n Only a single axis is allowed.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.isna: Indicate missing values.\n DataFrame.notna : Indicate existing (non-missing) values.\n DataFrame.fillna : Replace missing values.\n Series.dropna : Drop missing values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.nan, 'Batmobile', 'Bullwhip'],\n ... \"born\": [pd.NaT, pd.Timestamp(\"1940-04-25\"),\n ... 
pd.NaT]})\n >>> df\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(axis, (tuple, list)):\n # GH20987\n raise TypeError(\"supplying multiple axes to axis is no longer supported.\")\n\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n agg_obj = self.take(indices, axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == \"all\":\n mask = count > 0\n else:\n if how is not None:\n raise ValueError(f\"invalid how option: {how}\")\n else:\n raise TypeError(\"must specify how or thresh\")\n\n result = self.loc(axis=axis)[mask]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def drop_duplicates(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n inplace: bool = False,\n ignore_index: bool = False,\n ) -> Optional[DataFrame]:\n \"\"\"\n Return DataFrame with duplicate rows removed.\n\n Considering certain columns is optional. Indexes, including time indexes\n are ignored.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : bool, default False\n Whether to drop duplicates in place or to return a copy.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n DataFrame or None\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.value_counts: Count unique combinations of columns.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, it removes duplicate rows based on all columns.\n\n >>> df.drop_duplicates()\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n To remove duplicates on specific column(s), use ``subset``.\n\n >>> df.drop_duplicates(subset=['brand'])\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n\n To remove duplicates and keep last occurrences, use ``keep``.\n\n >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')\n brand style rating\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 4 Indomie pack 5.0\n \"\"\"\n if self.empty:\n return self.copy()\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ignore_index = validate_bool_kwarg(ignore_index, \"ignore_index\")\n duplicated = self.duplicated(subset, keep=keep)\n\n result = self[-duplicated]\n if ignore_index:\n result.index = ibase.default_index(len(result))\n\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n ) -> Series:\n \"\"\"\n Return boolean Series denoting duplicate rows.\n\n Considering certain columns is optional.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to mark.\n\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Boolean series for each duplicated rows.\n\n See Also\n --------\n Index.duplicated : Equivalent method on index.\n Series.duplicated : Equivalent method on Series.\n Series.drop_duplicates : Remove duplicate values from Series.\n DataFrame.drop_duplicates : Remove duplicate values from DataFrame.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... 
})\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, for each set of duplicated values, the first occurrence\n is set on False and all others on True.\n\n >>> df.duplicated()\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True.\n\n >>> df.duplicated(keep='last')\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By setting ``keep`` on False, all duplicates are True.\n\n >>> df.duplicated(keep=False)\n 0 True\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n To find duplicates on specific column(s), use ``subset``.\n\n >>> df.duplicated(subset=['brand'])\n 0 False\n 1 True\n 2 False\n 3 True\n 4 True\n dtype: bool\n \"\"\"\n from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64\n\n if self.empty:\n return self._constructor_sliced(dtype=bool)\n\n def f(vals):\n labels, shape = algorithms.factorize(\n vals, size_hint=min(len(self), SIZE_HINT_LIMIT)\n )\n return labels.astype(\"i8\", copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self.columns\n ):\n subset = (subset,)\n\n # needed for mypy since can't narrow types using np.iterable\n subset = cast(Iterable, subset)\n\n # Verify all columns in subset exist in the queried dataframe\n # Otherwise, raise a KeyError, same as if you try to __getitem__ with a\n # key that doesn't exist.\n diff = Index(subset).difference(self.columns)\n if not diff.empty:\n raise KeyError(diff)\n\n vals = (col.values for name, col in self.items() if name in subset)\n labels, shape = map(list, zip(*map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n # ----------------------------------------------------------------------\n # Sorting\n # TODO: Just move the sort_values doc here.\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.sort_values.__doc__)\n # error: Signature of \"sort_values\" incompatible with supertype \"NDFrame\"\n def sort_values( # type: ignore[override]\n self,\n by,\n axis: Axis = 0,\n ascending=True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = self._get_axis_number(axis)\n\n if not isinstance(by, list):\n by = [by]\n if is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) != length of by ({len(by)})\"\n )\n if len(by) > 1:\n\n keys = [self._get_label_or_level_values(x, axis=axis) for x in by]\n\n # need to rewrap columns in Series to apply key function\n if key is not None:\n keys = [Series(k, name=name) for (k, name) in zip(keys, by)]\n\n indexer = lexsort_indexer(\n keys, orders=ascending, na_position=na_position, key=key\n )\n indexer = ensure_platform_int(indexer)\n else:\n\n by = by[0]\n k = self._get_label_or_level_values(by, axis=axis)\n\n # need to rewrap column in Series to apply key function\n if key is not None:\n k = Series(k, name=by)\n\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = nargsort(\n k, 
kind=kind, ascending=ascending, na_position=na_position, key=key\n )\n\n new_data = self._mgr.take(\n indexer, axis=self._get_block_manager_axis(axis), verify=False\n )\n\n if ignore_index:\n new_data.axes[1] = ibase.default_index(len(indexer))\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Level] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort object by labels (along an axis).\n\n Returns a new DataFrame sorted by label if `inplace` argument is\n ``False``, otherwise updates the original DataFrame and returns None.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape. For MultiIndex\n inputs, the key is applied *per level*.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame or None\n The original DataFrame sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort Series by the index.\n DataFrame.sort_values : Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],\n ... columns=['A'])\n >>> df.sort_index()\n A\n 1 4\n 29 2\n 100 1\n 150 5\n 234 3\n\n By default, it sorts in ascending order, to sort in descending order,\n use ``ascending=False``\n\n >>> df.sort_index(ascending=False)\n A\n 234 3\n 150 5\n 100 1\n 29 2\n 1 4\n\n A key function can be specified which is applied to the index before\n sorting. 
For a ``MultiIndex`` this is applied to each level separately.\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])\n >>> df.sort_index(key=lambda x: x.str.lower())\n a\n A 1\n b 2\n C 3\n d 4\n \"\"\"\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def value_counts(\n self,\n subset: Optional[Sequence[Label]] = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n ):\n \"\"\"\n Return a Series containing counts of unique rows in the DataFrame.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n\n Notes\n -----\n The returned Series will have a MultiIndex with one level per input\n column. By default, rows that contain any NA values are omitted from\n the result. By default, the resulting Series will be in descending\n order so that the first element is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],\n ... 'num_wings': [2, 0, 0, 0]},\n ... index=['falcon', 'dog', 'cat', 'ant'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 4 0\n ant 6 0\n\n >>> df.value_counts()\n num_legs num_wings\n 4 0 2\n 2 2 1\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(sort=False)\n num_legs num_wings\n 2 2 1\n 4 0 2\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(ascending=True)\n num_legs num_wings\n 2 2 1\n 6 0 1\n 4 0 2\n dtype: int64\n\n >>> df.value_counts(normalize=True)\n num_legs num_wings\n 4 0 0.50\n 2 2 0.25\n 6 0 0.25\n dtype: float64\n \"\"\"\n if subset is None:\n subset = self.columns.tolist()\n\n counts = self.groupby(subset).grouper.size()\n\n if sort:\n counts = counts.sort_values(ascending=ascending)\n if normalize:\n counts /= counts.sum()\n\n # Force MultiIndex for single column\n if len(subset) == 1:\n counts.index = MultiIndex.from_arrays(\n [counts.index], names=[counts.index.name]\n )\n\n return counts\n\n def nlargest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - `first` : prioritize the first occurrence(s)\n - `last` : prioritize the last occurrence(s)\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 11300,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 11300 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(3, 'population')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nlargest(3, 'population', keep='last')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nlargest(3, 'population', keep='all')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n\n To order by the largest values in column \"population\" and then \"GDP\",\n we can specify multiple columns like in the next example.\n\n >>> df.nlargest(3, ['population', 'GDP'])\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n \"\"\"\n return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()\n\n def nsmallest(self, n, columns, keep: str = \"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 337000,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"population\".\n\n >>> df.nsmallest(3, 'population')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nsmallest(3, 'population', keep='last')\n population GDP alpha-2\n Anguilla 11300 311 AI\n Tuvalu 11300 38 TV\n Nauru 337000 182 NR\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nsmallest(3, 'population', keep='all')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n\n To order by the smallest values in column \"population\" and then \"GDP\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(3, ['population', 'GDP'])\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Nauru 337000 182 NR\n \"\"\"\n return algorithms.SelectNFrame(\n self, n=n, keep=keep, columns=columns\n ).nsmallest()\n\n def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n\n if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only swap levels on a hierarchical axis.\")\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.swaplevel(i, j)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Where to reorder levels.\n\n Returns\n -------\n DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.reorder_levels(order)\n return result\n\n # ----------------------------------------------------------------------\n # Arithmetic Methods\n\n def _cmp_method(self, other, op):\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)\n\n # See GH#4537 for discussion of scalar op behavior\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n def _arith_method(self, other, op):\n if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):\n return ops.frame_arith_method_with_reindex(self, other, op)\n\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)\n\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n _logical_method = _arith_method\n\n def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):\n \"\"\"\n Evaluate the frame operation func(left, right) by evaluating\n column-by-column, dispatching to the Series implementation.\n\n Parameters\n ----------\n right : scalar, Series, or DataFrame\n func : arithmetic or comparison operator\n axis : {None, 0, 1}\n\n Returns\n -------\n DataFrame\n \"\"\"\n # Get the appropriate array-op to apply to each column/block's values.\n array_op = ops.get_array_op(func)\n\n right = lib.item_from_zerodim(right)\n if not is_list_like(right):\n # i.e. 
scalar, faster than checking np.ndim(right) == 0\n bm = self._mgr.apply(array_op, right=right)\n return type(self)(bm)\n\n elif isinstance(right, DataFrame):\n assert self.index.equals(right.index)\n assert self.columns.equals(right.columns)\n # TODO: The previous assertion `assert right._indexed_same(self)`\n # fails in cases with empty columns reached via\n # _frame_arith_method_with_reindex\n\n bm = self._mgr.operate_blockwise(right._mgr, array_op)\n return type(self)(bm)\n\n elif isinstance(right, Series) and axis == 1:\n # axis=1 means we want to operate row-by-row\n assert right.index.equals(self.columns)\n\n right = right._values\n # maybe_align_as_frame ensures we do not have an ndarray here\n assert not isinstance(right, np.ndarray)\n\n arrays = [\n array_op(_left, _right)\n for _left, _right in zip(self._iter_column_arrays(), right)\n ]\n\n elif isinstance(right, Series):\n assert right.index.equals(self.index) # Handle other cases later\n right = right._values\n\n arrays = [array_op(left, right) for left in self._iter_column_arrays()]\n\n else:\n # Remaining cases have less-obvious dispatch rules\n raise NotImplementedError(right)\n\n return type(self)._from_arrays(\n arrays, self.columns, self.index, verify_integrity=False\n )\n\n def _combine_frame(self, other: DataFrame, func, fill_value=None):\n # at this point we have `self._indexed_same(other)`\n\n if fill_value is None:\n # since _arith_op may be called in a loop, avoid function call\n # overhead if possible by doing this check once\n _arith_op = func\n\n else:\n\n def _arith_op(left, right):\n # for the mixed_type case where we iterate over columns,\n # _arith_op(left, right) is equivalent to\n # left._binop(right, func, fill_value=fill_value)\n left, right = ops.fill_binop(left, right, fill_value)\n return func(left, right)\n\n new_data = self._dispatch_frame_op(other, _arith_op)\n return new_data\n\n def _construct_result(self, result) -> DataFrame:\n \"\"\"\n Wrap the result of an arithmetic, comparison, or logical operation.\n\n Parameters\n ----------\n result : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n out = self._constructor(result, copy=False)\n # Pin columns instead of passing to constructor for compat with\n # non-unique columns case\n out.columns = self.columns\n out.index = self.index\n return out\n\n def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = self // other\n mod = self - div * other\n return div, mod\n\n def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = other // self\n mod = other - div * self\n return div, mod\n\n # ----------------------------------------------------------------------\n # Combination-Related\n\n @doc(\n _shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nDataFrame\n DataFrame that shows the differences stacked side by side.\n\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\nRaises\n------\nValueError\n When the two DataFrames don't have identical labels or shape.\n\nSee Also\n--------\nSeries.compare : Compare with another Series and show differences.\nDataFrame.equals : Test whether two objects contain the same elements.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nCan only compare identically-labeled\n(i.e. same shape, identical row and column labels) DataFrames\n\nExamples\n--------\n>>> df = pd.DataFrame(\n... {{\n... 
\"col1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n... \"col2\": [1.0, 2.0, 3.0, np.nan, 5.0],\n... \"col3\": [1.0, 2.0, 3.0, 4.0, 5.0]\n... }},\n... columns=[\"col1\", \"col2\", \"col3\"],\n... )\n>>> df\n col1 col2 col3\n0 a 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 3.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\n>>> df2 = df.copy()\n>>> df2.loc[0, 'col1'] = 'c'\n>>> df2.loc[2, 'col3'] = 4.0\n>>> df2\n col1 col2 col3\n0 c 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 4.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\nAlign the differences on columns\n\n>>> df.compare(df2)\n col1 col3\n self other self other\n0 a c NaN NaN\n2 NaN NaN 3.0 4.0\n\nStack the differences on rows\n\n>>> df.compare(df2, align_axis=0)\n col1 col3\n0 self a NaN\n other c NaN\n2 self NaN 3.0\n other NaN 4.0\n\nKeep the equal values\n\n>>> df.compare(df2, keep_equal=True)\n col1 col3\n self other self other\n0 a c 1.0 1.0\n2 b b 3.0 4.0\n\nKeep all original rows and columns\n\n>>> df.compare(df2, keep_shape=True)\n col1 col2 col3\n self other self other self other\n0 a c NaN NaN NaN NaN\n1 NaN NaN NaN NaN NaN NaN\n2 NaN NaN NaN NaN 3.0 4.0\n3 NaN NaN NaN NaN NaN NaN\n4 NaN NaN NaN NaN NaN NaN\n\nKeep all original rows and columns and also all original values\n\n>>> df.compare(df2, keep_shape=True, keep_equal=True)\n col1 col2 col3\n self other self other self other\n0 a c 1.0 1.0 1.0 1.0\n1 a a 2.0 2.0 2.0 2.0\n2 b b 3.0 3.0 3.0 4.0\n3 b b NaN NaN 4.0 4.0\n4 a a 5.0 5.0 5.0 5.0\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: DataFrame,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> DataFrame:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(\n self, other: DataFrame, func, fill_value=None, overwrite: bool = True\n ) -> DataFrame:\n \"\"\"\n Perform column-wise combine with another DataFrame.\n\n Combines a DataFrame with `other` DataFrame using `func`\n to element-wise combine columns. The row and column indexes of the\n resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n The DataFrame to merge column-wise.\n func : function\n Function that takes two series as inputs and return a Series or a\n scalar. 
Used to merge the two dataframes column by columns.\n fill_value : scalar value, default None\n The value to fill NaNs with prior to passing any column to the\n merge func.\n overwrite : bool, default True\n If True, columns in `self` that do not exist in `other` will be\n overwritten with NaNs.\n\n Returns\n -------\n DataFrame\n Combination of the provided DataFrames.\n\n See Also\n --------\n DataFrame.combine_first : Combine two DataFrame objects and default to\n non-null values in frame calling the method.\n\n Examples\n --------\n Combine using a simple function that chooses the smaller column.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2\n >>> df1.combine(df2, take_smaller)\n A B\n 0 0 3\n 1 0 3\n\n Example using a true element-wise combine function.\n\n >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, np.minimum)\n A B\n 0 1 2\n 1 0 3\n\n Using `fill_value` fills Nones prior to passing the column to the\n merge function.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 4.0\n\n However, if the same element in both dataframes is None, that None\n is preserved\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 3.0\n\n Example that demonstrates the use of `overwrite` and behavior when\n the axis differ between the dataframes.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])\n >>> df1.combine(df2, take_smaller)\n A B C\n 0 NaN NaN NaN\n 1 NaN 3.0 -10.0\n 2 NaN 3.0 1.0\n\n >>> df1.combine(df2, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 -10.0\n 2 NaN 3.0 1.0\n\n Demonstrating the preference of the passed in dataframe.\n\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])\n >>> df2.combine(df1, take_smaller)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 NaN\n 2 NaN 3.0 NaN\n\n >>> df2.combine(df1, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isna(series)\n other_mask = isna(otherSeries)\n\n # don't overwrite columns unnecessarily\n # DO propagate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n if col not in self.columns:\n # If self DataFrame does not have col in other DataFrame,\n # try to promote series, which is all NaN, as other_dtype.\n new_dtype = other_dtype\n try:\n series = 
series.astype(new_dtype, copy=False)\n except ValueError:\n # e.g. new_dtype is integer types\n pass\n else:\n # if we have different dtypes, possibly promote\n new_dtype = find_common_type([this_dtype, other_dtype])\n if not is_dtype_equal(this_dtype, new_dtype):\n series = series.astype(new_dtype)\n if not is_dtype_equal(other_dtype, new_dtype):\n otherSeries = otherSeries.astype(new_dtype)\n\n arr = func(series, otherSeries)\n arr = maybe_downcast_to_dtype(arr, new_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result, index=new_index, columns=new_columns)\n\n def combine_first(self, other: DataFrame) -> DataFrame:\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.combine : Perform series-wise operation on two DataFrames\n using a given function.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine_first(df2)\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value\n does not exist in `other`\n\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n >>> df1.combine_first(df2)\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n def combiner(x, y):\n mask = extract_array(isna(x))\n\n x_values = extract_array(x, extract_numpy=True)\n y_values = extract_array(y, extract_numpy=True)\n\n # If the column y in other DataFrame is not in first DataFrame,\n # just return y_values.\n if y.name not in self.columns:\n return y_values\n\n return expressions.where(mask, y_values, x_values)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(\n self,\n other,\n join: str = \"left\",\n overwrite: bool = True,\n filter_func=None,\n errors: str = \"ignore\",\n ) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n Should have at least one matching index/column label\n with the original DataFrame. If a Series is passed,\n its name attribute must be set, and that will be\n used as the column name to align with the original DataFrame.\n join : {'left'}, default 'left'\n Only left join is implemented, keeping the index and columns of the\n original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values\n with values from `other`.\n * False: only update values that are NA in\n the original DataFrame.\n\n filter_func : callable(1d-array) -> bool 1d-array, optional\n Can choose to replace values other than NA. Return True for values\n that should be updated.\n errors : {'raise', 'ignore'}, default 'ignore'\n If 'raise', will raise a ValueError if the DataFrame and `other`\n both contain non-NA data in the same place.\n\n .. 
versionchanged:: 0.24.0\n Changed from `raise_conflict=False|True`\n to `errors='ignore'|'raise'`.\n\n Returns\n -------\n None : method directly changes calling object\n\n Raises\n ------\n ValueError\n * When `errors='raise'` and there's overlapping non-NA data.\n * When `errors` is not either `'ignore'` or `'raise'`\n NotImplementedError\n * If `join != 'left'`\n\n See Also\n --------\n dict.update : Similar method for dictionaries.\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, 5, 6],\n ... 'C': [7, 8, 9]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})\n >>> df.update(new_df)\n >>> df\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, its name attribute must be set.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df\n A B\n 0 a d\n 1 b y\n 2 c e\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])\n >>> df.update(new_df)\n >>> df\n A B\n 0 a x\n 1 b d\n 2 c e\n\n If `other` contains NaNs the corresponding values are not updated\n in the original dataframe.\n\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n # TODO: Support other joins\n if join != \"left\": # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n if errors not in [\"ignore\", \"raise\"]:\n raise ValueError(\"The parameter errors must be either 'ignore' or 'raise'\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col]._values\n that = other[col]._values\n if filter_func is not None:\n with np.errstate(all=\"ignore\"):\n mask = ~filter_func(this) | isna(that)\n else:\n if errors == \"raise\":\n mask_this = notna(that)\n mask_that = notna(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isna(that)\n else:\n mask = notna(this)\n\n # don't overwrite columns unnecessarily\n if mask.all():\n continue\n\n self[col] = expressions.where(mask, this, that)\n\n # ----------------------------------------------------------------------\n # Data reshaping\n @Appender(\n \"\"\"\nExamples\n--------\n>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n... 'Parrot', 'Parrot'],\n... 'Max Speed': [380., 370., 24., 26.]})\n>>> df\n Animal Max Speed\n0 Falcon 380.0\n1 Falcon 370.0\n2 Parrot 24.0\n3 Parrot 26.0\n>>> df.groupby(['Animal']).mean()\n Max Speed\nAnimal\nFalcon 375.0\nParrot 25.0\n\n**Hierarchical Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... 
['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n... index=index)\n>>> df\n Max Speed\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\n>>> df.groupby(level=0).mean()\n Max Speed\nAnimal\nFalcon 370.0\nParrot 25.0\n>>> df.groupby(level=\"Type\").mean()\n Max Speed\nType\nCaptive 210.0\nWild 185.0\n\nWe can also choose to include NA in group keys or not by setting\n`dropna` parameter, the default setting is `True`:\n\n>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=[\"b\"]).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\n\n>>> df.groupby(by=[\"b\"], dropna=False).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\nNaN 1 4\n\n>>> l = [[\"a\", 12, 12], [None, 12.3, 33.], [\"b\", 12.3, 123], [\"a\", 1, 1]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=\"a\").sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\n\n>>> df.groupby(by=\"a\", dropna=False).sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\nNaN 12.3 33.0\n\"\"\"\n )\n @Appender(_shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis: Axis = 0,\n level: Optional[Level] = None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> DataFrameGroupBy:\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return DataFrameGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n _shared_docs[\n \"pivot\"\n ] = \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation, multiple values will result in a MultiIndex in the\n columns. See the :ref:`User Guide <reshaping>` for more on reshaping.\n\n Parameters\n ----------%s\n index : str or object or a list of str, optional\n Column to use to make new frame's index. If None, uses\n existing index.\n\n .. versionchanged:: 1.1.0\n Also accept list of index names.\n\n columns : str or object or a list of str\n Column to use to make new frame's columns.\n\n .. versionchanged:: 1.1.0\n Also accept list of columns names.\n\n values : str, object or a list of the previous, optional\n Column(s) to use for populating new frame's values. If not\n specified, all remaining columns will be used and the result will\n have hierarchically indexed columns.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n Raises\n ------\n ValueError:\n When there are any `index`, `columns` combinations with multiple\n values. 
`DataFrame.pivot_table` when you need to aggregate.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods.\n\n Examples\n --------\n >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar')['baz']\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n baz zoo\n bar A B C A B C\n foo\n one 1 2 3 x y z\n two 4 5 6 q w t\n\n You could also assign a list of column names or a list of index names.\n\n >>> df = pd.DataFrame({\n ... \"lev1\": [1, 1, 1, 2, 2, 2],\n ... \"lev2\": [1, 1, 2, 1, 1, 2],\n ... \"lev3\": [1, 2, 1, 2, 1, 2],\n ... \"lev4\": [1, 2, 3, 4, 5, 6],\n ... \"values\": [0, 1, 2, 3, 4, 5]})\n >>> df\n lev1 lev2 lev3 lev4 values\n 0 1 1 1 1 0\n 1 1 1 2 2 1\n 2 1 2 1 3 2\n 3 2 1 2 4 3\n 4 2 1 1 5 4\n 5 2 2 2 6 5\n\n >>> df.pivot(index=\"lev1\", columns=[\"lev2\", \"lev3\"],values=\"values\")\n lev2 1 2\n lev3 1 2 1 2\n lev1\n 1 0.0 1.0 2.0 NaN\n 2 4.0 3.0 NaN 5.0\n\n >>> df.pivot(index=[\"lev1\", \"lev2\"], columns=[\"lev3\"],values=\"values\")\n lev3 1 2\n lev1 lev2\n 1 1 0.0 1.0\n 2 2.0 NaN\n 2 1 4.0 3.0\n 2 NaN 5.0\n\n A ValueError is raised if there are any duplicates.\n\n >>> df = pd.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]})\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n Notice that the first two rows are the same for our `index`\n and `columns` arguments.\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n Traceback (most recent call last):\n ...\n ValueError: Index contains duplicate entries, cannot reshape\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot\"])\n def pivot(self, index=None, columns=None, values=None) -> DataFrame:\n from pandas.core.reshape.pivot import pivot\n\n return pivot(self, index=index, columns=columns, values=values)\n\n _shared_docs[\n \"pivot_table\"\n ] = \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame.\n\n The levels in the pivot table will be stored in MultiIndex objects\n (hierarchical indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------%s\n values : column to aggregate, optional\n index : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table index. If an array is passed,\n it is being used as the same manner as column values.\n columns : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table column. 
If an array is passed,\n it is being used as the same manner as column values.\n aggfunc : function, list of functions, dict, default numpy.mean\n If list of functions passed, the resulting pivot table will have\n hierarchical columns whose top level are the function names\n (inferred from the function objects themselves)\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with (in the resulting pivot table,\n after aggregation).\n margins : bool, default False\n Add all row / columns (e.g. for subtotal / grand totals).\n dropna : bool, default True\n Do not include columns whose entries are all NaN.\n margins_name : str, default 'All'\n Name of the row / column that will contain the totals\n when margins is True.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 0.25.0\n\n Returns\n -------\n DataFrame\n An Excel style pivot table.\n\n See Also\n --------\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.melt: Unpivot a DataFrame from wide to long format,\n optionally leaving identifiers set.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum)\n >>> table\n C large small\n A B\n bar one 4.0 5.0\n two 7.0 6.0\n foo one 4.0 1.0\n two NaN 6.0\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum, fill_value=0)\n >>> table\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n The next example aggregates by taking the mean across multiple columns.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 'E': np.mean})\n >>> table\n D E\n A C\n bar large 5.500000 7.500000\n small 5.500000 8.500000\n foo large 2.000000 4.500000\n small 2.333333 4.333333\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 
'E': [min, max, np.mean]})\n >>> table\n D E\n mean max mean min\n A C\n bar large 5.500000 9.0 7.500000 6.0\n small 5.500000 9.0 8.500000 8.0\n foo large 2.000000 5.0 4.500000 4.0\n small 2.333333 6.0 4.333333 2.0\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot_table\"])\n def pivot_table(\n self,\n values=None,\n index=None,\n columns=None,\n aggfunc=\"mean\",\n fill_value=None,\n margins=False,\n dropna=True,\n margins_name=\"All\",\n observed=False,\n ) -> DataFrame:\n from pandas.core.reshape.pivot import pivot_table\n\n return pivot_table(\n self,\n values=values,\n index=index,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n )\n\n def stack(self, level: Level = -1, dropna: bool = True):\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n Parameters\n ----------\n level : int, str, list, default -1\n Level(s) to stack from the column axis onto the index\n axis, defined as one index or label, or a list of indices\n or labels.\n dropna : bool, default True\n Whether to drop rows in the resulting Frame/Series with\n missing values. Stacking a column level onto the index\n axis can create combinations of index and column values\n that are missing from the original dataframe. See Examples\n section.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack()\n cat weight 0\n height 1\n dog weight 2\n height 3\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... 
columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack()\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n\n **Prescribing the level(s) to be stacked**\n\n The first parameter controls which level or levels are stacked:\n\n >>> df_multi_level_cols2.stack(0)\n kg m\n cat height NaN 2.0\n weight 1.0 NaN\n dog height NaN 4.0\n weight 3.0 NaN\n >>> df_multi_level_cols2.stack([0, 1])\n cat height m 2.0\n weight kg 1.0\n dog height m 4.0\n weight kg 3.0\n dtype: float64\n\n **Dropping missing values**\n\n >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n Note that rows where all values are missing are dropped by\n default but this behaviour can be controlled via the dropna\n keyword parameter:\n\n >>> df_multi_level_cols3\n weight height\n kg m\n cat NaN 1.0\n dog 2.0 3.0\n >>> df_multi_level_cols3.stack(dropna=False)\n height weight\n cat kg NaN NaN\n m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n >>> df_multi_level_cols3.stack(dropna=True)\n height weight\n cat m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n \"\"\"\n from pandas.core.reshape.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n result = stack_multiple(self, level, dropna=dropna)\n else:\n result = stack(self, level, dropna=dropna)\n\n return result.__finalize__(self, method=\"stack\")\n\n def explode(\n self, column: Union[str, Tuple], ignore_index: bool = False\n ) -> DataFrame:\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n Raises\n ------\n ValueError :\n if columns of the frame are not unique.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n Series.explode : Explode a DataFrame from list-like columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
In addition, the ordering of rows in the\n output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 foo 1\n 2 [] 1\n 3 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1 1\n 0 2 1\n 0 3 1\n 1 foo 1\n 2 NaN 1\n 3 3 1\n 3 4 1\n \"\"\"\n if not (is_scalar(column) or isinstance(column, tuple)):\n raise ValueError(\"column must be a scalar\")\n if not self.columns.is_unique:\n raise ValueError(\"columns must be unique\")\n\n df = self.reset_index(drop=True)\n result = df[column].explode()\n result = df.drop([column], axis=1).join(result)\n if ignore_index:\n result.index = ibase.default_index(len(result))\n else:\n result.index = self.index.take(result.index)\n result = result.reindex(columns=self.columns, copy=False)\n\n return result\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series\n (the analogue of stack when the columns are not a MultiIndex).\n\n Parameters\n ----------\n level : int, str, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name.\n fill_value : int, str or dict\n Replace NaN with this value if the unstack produces missing values.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1.0 2.0\n two 3.0 4.0\n\n >>> s.unstack(level=0)\n one two\n a 1.0 3.0\n b 2.0 4.0\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n result = unstack(self, level, fill_value)\n\n return result.__finalize__(self, method=\"unstack\")\n\n @Appender(_shared_docs[\"melt\"] % {\"caller\": \"df.melt(\", \"other\": \"melt\"})\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level: Optional[Level] = None,\n ignore_index=True,\n ) -> DataFrame:\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n ignore_index=ignore_index,\n )\n\n # ----------------------------------------------------------------------\n # Time series-related\n\n @doc(\n Series.diff,\n klass=\"Dataframe\",\n extra_params=\"axis : {0 or 'index', 1 or 'columns'}, default 0\\n \"\n \"Take difference over rows (0) or columns (1).\\n\",\n other_klass=\"Series\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 
'c': [1, 4, 9, 16, 25, 36]})\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(axis=1)\n a b c\n 0 NaN 0 0\n 1 NaN -1 3\n 2 NaN -1 7\n 3 NaN -1 13\n 4 NaN 0 20\n 5 NaN 2 28\n\n Difference with 3rd previous row\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n\n Overflow in input dtype\n\n >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)\n >>> df.diff()\n a\n 0 NaN\n 1 255.0\"\"\"\n ),\n )\n def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:\n if not isinstance(periods, int):\n if not (is_float(periods) and periods.is_integer()):\n raise ValueError(\"periods must be an integer\")\n periods = int(periods)\n\n bm_axis = self._get_block_manager_axis(axis)\n\n if bm_axis == 0 and periods != 0:\n return self - self.shift(periods, axis=axis)\n\n new_data = self._mgr.diff(n=periods, axis=bm_axis)\n return self._constructor(new_data).__finalize__(self, \"diff\")\n\n # ----------------------------------------------------------------------\n # Function application\n\n def _gotitem(\n self,\n key: Union[Label, List[Label]],\n ndim: int,\n subset: Optional[FrameOrSeriesUnion] = None,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n if subset is None:\n subset = self\n elif subset.ndim == 1: # is Series\n return subset\n\n # TODO: _shallow_copy(subset)?\n return subset[key]\n\n _agg_summary_and_see_also_doc = dedent(\n \"\"\"\n The aggregation operations are always performed over an axis, either the\n index (default) or the column axis. This behavior is different from\n `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,\n `var`), where the default is to compute the aggregation of the flattened\n array, e.g., ``numpy.mean(arr_2d)`` as opposed to\n ``numpy.mean(arr_2d, axis=0)``.\n\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Perform any type of operations.\n DataFrame.transform : Perform transformation type operations.\n core.groupby.GroupBy : Perform operations over groups.\n core.resample.Resampler : Perform operations over resampled bins.\n core.window.Rolling : Perform operations over rolling window.\n core.window.Expanding : Perform operations over expanding window.\n core.window.ExponentialMovingWindow : Perform operation over exponential weighted\n window.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])\n A B C\n sum 12.0 15.0 18.0\n min 1.0 2.0 3.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})\n A B\n sum 12.0 NaN\n min 1.0 2.0\n max NaN 8.0\n\n Aggregate different functions over the columns and rename the index of the resulting\n DataFrame.\n\n >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\n A B C\n x 7.0 NaN NaN\n y NaN 2.0 NaN\n z NaN NaN 6.0\n\n Aggregate over the columns.\n\n >>> df.agg(\"mean\", axis=\"columns\")\n 0 2.0\n 1 5.0\n 2 8.0\n 3 NaN\n dtype: float64\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_summary_and_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):\n axis = self._get_axis_number(axis)\n\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n\n result = None\n try:\n result, how = self._aggregate(func, axis, *args, **kwargs)\n except TypeError as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n if result is None:\n return self.apply(func, axis=axis, args=args, **kwargs)\n\n if relabeling:\n # This is to keep the order to columns occurrence unchanged, and also\n # keep the order of new columns occurrence unchanged\n\n # For the return values of reconstruct_func, if relabeling is\n # False, columns and order will be None.\n assert columns is not None\n assert order is not None\n\n result_in_dict = relabel_result(result, func, columns, order)\n result = DataFrame(result_in_dict, index=columns)\n\n return result\n\n def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):\n if axis == 1:\n # NDFrame.aggregate returns a tuple, and we need to transpose\n # only result\n result, how = aggregate(self.T, arg, *args, **kwargs)\n result = result.T if result is not None else result\n return result, how\n return aggregate(self, arg, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> DataFrame:\n result = transform(self, func, axis, *args, **kwargs)\n assert isinstance(result, DataFrame)\n return result\n\n def apply(\n self, func, axis: Axis = 0, raw: bool = False, result_type=None, args=(), **kwds\n ):\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``). By default (``result_type=None``), the final return type\n is inferred from the return type of the applied function. 
Otherwise,\n it depends on the `result_type` argument.\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n\n raw : bool, default False\n Determines if row or column is passed as a Series or ndarray object:\n\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray objects\n instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n result_type : {'expand', 'reduce', 'broadcast', None}, default None\n These only act when ``axis=1`` (columns):\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : returns a Series if possible rather than expanding\n list-like results. This is the opposite of 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the DataFrame, the original index and columns will be\n retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap: For elementwise operations.\n DataFrame.aggregate: Only perform aggregating type operations.\n DataFrame.transform: Only perform transforming type operations.\n\n Examples\n --------\n >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> df.apply(np.sqrt)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n Using a reducing function on either axis\n\n >>> df.apply(np.sum, axis=0)\n A 12\n B 27\n dtype: int64\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n Passing ``result_type='expand'`` will expand list-like results\n to columns of a Dataframe\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')\n 0 1\n 0 1 2\n 1 1 2\n 2 1 2\n\n Returning a Series inside the function is similar to passing\n ``result_type='expand'``. The resulting column names\n will be the Series index.\n\n >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)\n foo bar\n 0 1 2\n 1 1 2\n 2 1 2\n\n Passing ``result_type='broadcast'`` will ensure the same shape\n result, whether list-like or scalar is returned by the function,\n and broadcast it along the axis. 
The resulting column names will\n be the originals.\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')\n A B\n 0 1 2\n 1 1 2\n 2 1 2\n \"\"\"\n from pandas.core.apply import frame_apply\n\n op = frame_apply(\n self,\n func=func,\n axis=axis,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n return op.get_result()\n\n def applymap(self, func, na_action: Optional[str] = None) -> DataFrame:\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n na_action : {None, 'ignore'}, default None\n If ‘ignore’, propagate NaN values, without passing them to func.\n\n .. versionadded:: 1.2\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> df.applymap(lambda x: len(str(x)))\n 0 1\n 0 3 4\n 1 5 5\n\n Like Series.map, NA values can be ignored:\n\n >>> df_copy = df.copy()\n >>> df_copy.iloc[0, 0] = pd.NA\n >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\n 0 1\n 0 <NA> 4\n 1 5 5\n\n Note that a vectorized version of `func` often exists, which will\n be much faster. You could square each number elementwise.\n\n >>> df.applymap(lambda x: x**2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n But it's better to avoid applymap in that case.\n\n >>> df ** 2\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n if na_action not in {\"ignore\", None}:\n raise ValueError(\n f\"na_action must be 'ignore' or None. Got {repr(na_action)}\"\n )\n ignore_na = na_action == \"ignore\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if x.empty:\n return lib.map_infer(x, func, ignore_na=ignore_na)\n return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)\n\n return self.apply(infer).__finalize__(self, \"applymap\")\n\n # ----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(\n self,\n other,\n ignore_index: bool = False,\n verify_integrity: bool = False,\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Append rows of `other` to the end of caller, returning a new object.\n\n Columns in `other` that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise ValueError on creating index with duplicates.\n sort : bool, default False\n Sort columns if the columns of `self` and `other` are not aligned.\n\n .. versionchanged:: 1.0.0\n\n Changed to not sort by default.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in\n the DataFrame's index, the order of the columns in the resulting\n DataFrame will be unchanged.\n\n Iteratively appending rows to a DataFrame can be more computationally\n intensive than a single concatenate. 
A better solution is to append\n those rows to a list and then concatenate the list with the original\n DataFrame all at once.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n The following, while not recommended methods for generating DataFrames,\n show two ways to generate a DataFrame from multiple data sources.\n\n Less efficient:\n\n >>> df = pd.DataFrame(columns=['A'])\n >>> for i in range(5):\n ... df = df.append({'A': i}, ignore_index=True)\n >>> df\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n More efficient:\n\n >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n ... ignore_index=True)\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n if not ignore_index:\n raise TypeError(\"Can only append a dict if ignore_index=True\")\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError(\n \"Can only append a Series if ignore_index=True \"\n \"or if the Series has a name\"\n )\n\n index = Index([other.name], name=self.index.name)\n idx_diff = other.index.difference(self.columns)\n try:\n combined_columns = self.columns.append(idx_diff)\n except TypeError:\n combined_columns = self.columns.astype(object).append(idx_diff)\n other = (\n other.reindex(combined_columns, copy=False)\n .to_frame()\n .T.infer_objects()\n .rename_axis(index.names, copy=False)\n )\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list):\n if not other:\n pass\n elif not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.reindex(columns=self.columns)\n\n from pandas.core.reshape.concat import concat\n\n if isinstance(other, (list, tuple)):\n to_concat = [self, *other]\n else:\n to_concat = [self, other]\n return (\n concat(\n to_concat,\n ignore_index=ignore_index,\n verify_integrity=verify_integrity,\n sort=sort,\n )\n ).__finalize__(self, method=\"append\")\n\n def join(\n self,\n other,\n on=None,\n how: str = \"left\",\n lsuffix: str = \"\",\n rsuffix: str = \"\",\n sort: bool = False,\n ) -> DataFrame:\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `other` DataFrame either on index or on a key\n column. Efficiently join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame.\n on : str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index\n in `other`, otherwise joins index-on-index. If multiple\n values given, the `other` DataFrame must have a MultiIndex. Can\n pass an array as the join key if it is not already contained in\n the calling DataFrame. 
Like an Excel VLOOKUP operation.\n how : {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use calling frame's index (or column if on is specified)\n * right: use `other`'s index.\n * outer: form union of calling frame's index (or column if on is\n specified) with `other`'s index, and sort it.\n lexicographically.\n * inner: form intersection of calling frame's index (or column if\n on is specified) with `other`'s index, preserving the order\n of the calling's one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from right frame's overlapping columns.\n sort : bool, default False\n Order result DataFrame lexicographically by the join key. If False,\n the order of the join key depends on the join type (how keyword).\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the caller and `other`.\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Notes\n -----\n Parameters `on`, `lsuffix`, and `rsuffix` are not supported when\n passing a list of `DataFrame` objects.\n\n Support for specifying index levels as the `on` parameter was added\n in version 0.23.0.\n\n Examples\n --------\n >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n\n >>> df\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n 4 K4 A4\n 5 K5 A5\n\n >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']})\n\n >>> other\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> df.join(other, lsuffix='_caller', rsuffix='_other')\n key_caller A key_other B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 NaN NaN\n 4 K4 A4 NaN NaN\n 5 K5 A5 NaN NaN\n\n If we want to join using the key columns, we need to set key to be\n the index in both `df` and `other`. The joined DataFrame will have\n key as its index.\n\n >>> df.set_index('key').join(other.set_index('key'))\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 NaN\n K4 A4 NaN\n K5 A5 NaN\n\n Another option to join using the key columns is to use the `on`\n parameter. DataFrame.join always uses `other`'s index but we can use\n any column in `df`. 
This method preserves the original DataFrame's\n index in the result.\n\n >>> df.join(other.set_index('key'), on='key')\n key A B\n 0 K0 A0 B0\n 1 K1 A1 B1\n 2 K2 A2 B2\n 3 K3 A3 NaN\n 4 K4 A4 NaN\n 5 K5 A5 NaN\n \"\"\"\n return self._join_compat(\n other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort\n )\n\n def _join_compat(\n self, other, on=None, how=\"left\", lsuffix=\"\", rsuffix=\"\", sort=False\n ):\n from pandas.core.reshape.concat import concat\n from pandas.core.reshape.merge import merge\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError(\"Other Series must have a name\")\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n if how == \"cross\":\n return merge(\n self,\n other,\n how=how,\n on=on,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n return merge(\n self,\n other,\n left_on=on,\n how=how,\n left_index=on is None,\n right_index=True,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n else:\n if on is not None:\n raise ValueError(\n \"Joining multiple DataFrames only supported for joining on index\"\n )\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n # join indexes only using concat\n if can_concat:\n if how == \"left\":\n res = concat(\n frames, axis=1, join=\"outer\", verify_integrity=True, sort=sort\n )\n return res.reindex(self.index, copy=False)\n else:\n return concat(\n frames, axis=1, join=how, verify_integrity=True, sort=sort\n )\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(\n joined, frame, how=how, left_index=True, right_index=True\n )\n\n return joined\n\n @Substitution(\"\")\n @Appender(_merge_doc, indents=2)\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n sort=False,\n suffixes=(\"_x\", \"_y\"),\n copy=True,\n indicator=False,\n validate=None,\n ) -> DataFrame:\n from pandas.core.reshape.merge import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n sort=sort,\n suffixes=suffixes,\n copy=copy,\n indicator=indicator,\n validate=validate,\n )\n\n def round(self, decimals=0, *args, **kwargs) -> DataFrame:\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n *args\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n\n Returns\n -------\n DataFrame\n A DataFrame with the affected columns rounded to the specified\n number of decimal places.\n\n See Also\n --------\n numpy.around : Round a numpy array to the given number of decimals.\n Series.round : Round a Series to the given number of decimals.\n\n Examples\n --------\n >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],\n ... 
columns=['dogs', 'cats'])\n >>> df\n dogs cats\n 0 0.21 0.32\n 1 0.01 0.67\n 2 0.66 0.03\n 3 0.21 0.18\n\n By providing an integer each column is rounded to the same number\n of decimal places\n\n >>> df.round(1)\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.7\n 2 0.7 0.0\n 3 0.2 0.2\n\n With a dict, the number of places for specific columns can be\n specified with the column names as key and the number of decimal\n places as value\n\n >>> df.round({'dogs': 1, 'cats': 0})\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n\n Using a Series, the number of places for specific columns can be\n specified with the column names as index and the number of\n decimal places as value\n\n >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])\n >>> df.round(decimals)\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n def _dict_round(df, decimals):\n for col, vals in df.items():\n try:\n yield _series_round(vals, decimals[col])\n except KeyError:\n yield vals\n\n def _series_round(s, decimals):\n if is_integer_dtype(s) or is_float_dtype(s):\n return s.round(decimals)\n return s\n\n nv.validate_round(args, kwargs)\n\n if isinstance(decimals, (dict, Series)):\n if isinstance(decimals, Series):\n if not decimals.index.is_unique:\n raise ValueError(\"Index of decimals must be unique\")\n new_cols = list(_dict_round(self, decimals))\n elif is_integer(decimals):\n # Dispatch to Series.round\n new_cols = [_series_round(v, decimals) for _, v in self.items()]\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n if len(new_cols) > 0:\n return self._constructor(\n concat(new_cols, axis=1), index=self.index, columns=self.columns\n )\n else:\n return self\n\n # ----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method=\"pearson\", min_periods=1) -> DataFrame:\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float. Note that the returned matrix from corr\n will have 1 along the diagonals and will be symmetric\n regardless of the callable's behavior.\n\n .. versionadded:: 0.24.0\n\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. Currently only available for Pearson\n and Spearman correlation.\n\n Returns\n -------\n DataFrame\n Correlation matrix.\n\n See Also\n --------\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n Series.corr : Compute the correlation between two Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... 
columns=['dogs', 'cats'])\n >>> df.corr(method=histogram_intersection)\n dogs cats\n dogs 1.0 0.3\n cats 0.3 1.0\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if method == \"pearson\":\n correl = libalgos.nancorr(mat, minp=min_periods)\n elif method == \"spearman\":\n correl = libalgos.nancorr_spearman(mat, minp=min_periods)\n elif method == \"kendall\" or callable(method):\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n if i > j:\n continue\n\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = np.nan\n elif i == j:\n c = 1.0\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n else:\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n return self._constructor(correl, index=idx, columns=cols)\n\n def cov(\n self, min_periods: Optional[int] = None, ddof: Optional[int] = 1\n ) -> DataFrame:\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.\n core.window.Expanding.cov : Expanding sample covariance.\n core.window.Rolling.cov : Rolling sample covariance.\n\n Notes\n -----\n Returns the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-ddof.\n\n For DataFrames that have Series that are missing data (assuming that\n data is `missing at random\n <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)\n the returned covariance matrix will be an unbiased estimate\n of the variance and covariance between the member Series.\n\n However, for many applications this estimate may not be acceptable\n because the estimate covariance matrix is not guaranteed to be positive\n semi-definite. This could lead to estimate correlations having\n absolute values which are greater than one, and/or a non-invertible\n covariance matrix. 
See `Estimation of covariance matrices\n <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_\n matrices>`__ for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> df.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if notna(mat).all():\n if min_periods is not None and min_periods > len(mat):\n base_cov = np.empty((mat.shape[1], mat.shape[1]))\n base_cov.fill(np.nan)\n else:\n base_cov = np.cov(mat.T, ddof=ddof)\n base_cov = base_cov.reshape((len(cols), len(cols)))\n else:\n base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)\n\n return self._constructor(base_cov, index=idx, columns=cols)\n\n def corrwith(self, other, axis: Axis = 0, drop=False, method=\"pearson\") -> Series:\n \"\"\"\n Compute pairwise correlation.\n\n Pairwise correlation is computed between rows or columns of\n DataFrame with rows or columns of Series or DataFrame. DataFrames\n are first aligned along both axes before computing the\n correlations.\n\n Parameters\n ----------\n other : DataFrame, Series\n Object with which to compute correlations.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for\n row-wise.\n drop : bool, default False\n Drop missing indices from result.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Pairwise correlations.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation of columns.\n \"\"\"\n axis = self._get_axis_number(axis)\n this = self._get_numeric_data()\n\n if isinstance(other, Series):\n return this.apply(lambda x: other.corr(x, method=method), axis=axis)\n\n other = other._get_numeric_data()\n left, right = this.align(other, join=\"inner\", copy=False)\n\n if axis == 1:\n left = left.T\n right = right.T\n\n if method == \"pearson\":\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n elif method in [\"kendall\", \"spearman\"] or callable(method):\n\n def c(x):\n return nanops.nancorr(x[0], x[1], method=method)\n\n correl = self._constructor_sliced(\n map(c, zip(left.values.T, right.values.T)), index=left.columns\n )\n\n else:\n raise ValueError(\n f\"Invalid method {method} was passed, \"\n \"valid methods are: 'pearson', 'kendall', \"\n \"'spearman', or callable\"\n )\n\n if not drop:\n # Find non-matching labels along the given axis\n # and append missing correlations (GH 22375)\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n idx_diff = result_index.difference(correl.index)\n\n if len(idx_diff) > 0:\n correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))\n\n return correl\n\n # ----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(\n self, axis: Axis = 0, level: Optional[Level] = None, numeric_only: bool = False\n ):\n \"\"\"\n Count non-NA cells for each column or row.\n\n The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending\n on `pandas.options.mode.use_inf_as_na`) are considered NA.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index' counts are generated for each column.\n If 1 or 'columns' counts are generated for each row.\n level : int or str, optional\n If the axis is a `MultiIndex` (hierarchical), count along a\n particular `level`, collapsing into a `DataFrame`.\n A `str` specifies the level name.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n Returns\n -------\n Series or DataFrame\n For each column/row the number of non-NA/null entries.\n If `level` is specified returns a `DataFrame`.\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.value_counts: Count unique combinations of columns.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = pd.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... 
\"Single\": [False, True, True, True, False]})\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n Counts for each **row**:\n\n >>> df.count(axis='columns')\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n Counts for one level of a `MultiIndex`:\n\n >>> df.set_index([\"Person\", \"Single\"]).count(level=\"Person\")\n Age\n Person\n John 2\n Lewis 1\n Myla 1\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis, numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type or frame._mgr.any_extension_types:\n # the or any_extension_types is really only hit for single-\n # column frames with an extension array\n result = notna(frame).sum(axis=axis)\n else:\n # GH13407\n series_counts = notna(frame).sum(axis=axis)\n counts = series_counts.values\n result = self._constructor_sliced(\n counts, index=frame._get_agg_axis(axis)\n )\n\n return result.astype(\"int64\")\n\n def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\n f\"Can only count levels on hierarchical {self._get_axis_name(axis)}.\"\n )\n\n # Mask NaNs: Mask rows or columns where the index level is NaN, and all\n # values in the DataFrame that are NaN\n if frame._is_mixed_type:\n # Since we have mixed types, calling notna(frame.values) might\n # upcast everything to object\n values_mask = notna(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n values_mask = notna(frame.values)\n\n index_mask = notna(count_axis.get_level_values(level=level))\n if axis == 1:\n mask = index_mask & values_mask\n else:\n mask = index_mask.reshape(-1, 1) & values_mask\n\n if isinstance(level, str):\n level = count_axis._get_level_number(level)\n\n level_name = count_axis._names[level]\n level_index = count_axis.levels[level]._shallow_copy(name=level_name)\n level_codes = ensure_int64(count_axis.codes[level])\n counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)\n\n if axis == 1:\n result = self._constructor(counts, index=agg_axis, columns=level_index)\n else:\n result = self._constructor(counts, index=level_index, columns=agg_axis)\n\n return result\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis: Axis = 0,\n skipna: bool = True,\n numeric_only: Optional[bool] = None,\n filter_type=None,\n **kwds,\n ):\n\n assert filter_type is None or filter_type == \"bool\", filter_type\n out_dtype = \"bool\" if filter_type == \"bool\" else None\n\n own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]\n\n dtype_is_dt = np.array(\n [is_datetime64_any_dtype(dtype) for dtype in own_dtypes],\n dtype=bool,\n )\n if numeric_only is None and name in [\"mean\", \"median\"] and dtype_is_dt.any():\n warnings.warn(\n \"DataFrame.mean and DataFrame.median with numeric_only=None \"\n \"will include datetime64 and datetime64tz columns in a \"\n \"future version.\",\n FutureWarning,\n stacklevel=5,\n )\n cols = self.columns[~dtype_is_dt]\n self = 
self[cols]\n\n # TODO: Make other agg func handle axis=None properly GH#21597\n axis = self._get_axis_number(axis)\n labels = self._get_agg_axis(axis)\n assert axis in [0, 1]\n\n def func(values: np.ndarray):\n # We only use this in the case that operates on self.values\n return op(values, axis=axis, skipna=skipna, **kwds)\n\n def blk_func(values):\n if isinstance(values, ExtensionArray):\n return values._reduce(name, skipna=skipna, **kwds)\n else:\n return op(values, axis=1, skipna=skipna, **kwds)\n\n def _get_data() -> DataFrame:\n if filter_type is None:\n data = self._get_numeric_data()\n else:\n # GH#25101, GH#24434\n assert filter_type == \"bool\"\n data = self._get_bool_data()\n return data\n\n if numeric_only is not None or axis == 0:\n # For numeric_only non-None and axis non-None, we know\n # which blocks to use and no try/except is needed.\n # For numeric_only=None only the case with axis==0 and no object\n # dtypes are unambiguous can be handled with BlockManager.reduce\n # Case with EAs see GH#35881\n df = self\n if numeric_only is True:\n df = _get_data()\n if axis == 1:\n df = df.T\n axis = 0\n\n ignore_failures = numeric_only is None\n\n # After possibly _get_data and transposing, we are now in the\n # simple case where we can use BlockManager.reduce\n res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)\n out = df._constructor(res).iloc[0]\n if out_dtype is not None:\n out = out.astype(out_dtype)\n if axis == 0 and len(self) == 0 and name in [\"sum\", \"prod\"]:\n # Even if we are object dtype, follow numpy and return\n # float64, see test_apply_funcs_over_empty\n out = out.astype(np.float64)\n return out\n\n assert numeric_only is None\n\n data = self\n values = data.values\n\n try:\n result = func(values)\n\n except TypeError:\n # e.g. in nanops trying to convert strs to float\n\n data = _get_data()\n labels = data._get_agg_axis(axis)\n\n values = data.values\n with np.errstate(all=\"ignore\"):\n result = func(values)\n\n if filter_type == \"bool\" and notna(result).all():\n result = result.astype(np.bool_)\n elif filter_type is None and is_object_dtype(result.dtype):\n try:\n result = result.astype(np.float64)\n except (ValueError, TypeError):\n # try to coerce to the original dtypes item by item if we can\n pass\n\n result = self._constructor_sliced(result, index=labels)\n return result\n\n def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:\n \"\"\"\n Count distinct observations over requested axis.\n\n Return Series with number of distinct observations. Can ignore NaN\n values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for\n column-wise.\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.nunique: Method nunique for Series.\n DataFrame.count: Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(axis=1)\n 0 1\n 1 2\n 2 2\n dtype: int64\n \"\"\"\n return self.apply(Series.nunique, axis=axis, dropna=dropna)\n\n def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of minima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmin : Return index of the minimum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the minimum value in each column.\n\n >>> df.idxmin()\n consumption Pork\n co2_emissions Wheat Products\n dtype: object\n\n To return the index for the minimum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmin(axis=\"columns\")\n Pork consumption\n Wheat Products co2_emissions\n Beef consumption\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmin, \"argmin\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of maxima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmax : Return index of the maximum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... 
index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the maximum value in each column.\n\n >>> df.idxmax()\n consumption Wheat Products\n co2_emissions Beef\n dtype: object\n\n To return the index for the maximum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmax(axis=\"columns\")\n Pork co2_emissions\n Wheat Products consumption\n Beef co2_emissions\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmax, \"argmax\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num: int) -> Index:\n \"\"\"\n Let's be explicit about this.\n \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError(f\"Axis must be 0 or 1 (got {repr(axis_num)})\")\n\n def mode(\n self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True\n ) -> DataFrame:\n \"\"\"\n Get the mode(s) of each element along the selected axis.\n\n The mode of a set of values is the value that appears most often.\n It can be multiple values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to iterate over while searching for the mode:\n\n * 0 or 'index' : get mode of each column\n * 1 or 'columns' : get mode of each row.\n\n numeric_only : bool, default False\n If True, only apply to numeric columns.\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The modes of each column or row.\n\n See Also\n --------\n Series.mode : Return the highest frequency value in a Series.\n Series.value_counts : Return the counts of values in a Series.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 2, 2),\n ... ('mammal', 4, np.nan),\n ... ('arthropod', 8, 0),\n ... ('bird', 2, np.nan)],\n ... index=('falcon', 'horse', 'spider', 'ostrich'),\n ... columns=('species', 'legs', 'wings'))\n >>> df\n species legs wings\n falcon bird 2 2.0\n horse mammal 4 NaN\n spider arthropod 8 0.0\n ostrich bird 2 NaN\n\n By default, missing values are not considered, and the mode of wings\n are both 0 and 2. 
Because the resulting DataFrame has two rows,\n the second row of ``species`` and ``legs`` contains ``NaN``.\n\n >>> df.mode()\n species legs wings\n 0 bird 2.0 0.0\n 1 NaN NaN 2.0\n\n Setting ``dropna=False`` ``NaN`` values are considered and they can be\n the mode (like for wings).\n\n >>> df.mode(dropna=False)\n species legs wings\n 0 bird 2 NaN\n\n Setting ``numeric_only=True``, only the mode of numeric columns is\n computed, and columns of other types are ignored.\n\n >>> df.mode(numeric_only=True)\n legs wings\n 0 2.0 0.0\n 1 NaN 2.0\n\n To compute the mode over columns and not rows, use the axis parameter:\n\n >>> df.mode(axis='columns', numeric_only=True)\n 0 1\n falcon 2.0 NaN\n horse 4.0 NaN\n spider 0.0 8.0\n ostrich 2.0 NaN\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n\n def f(s):\n return s.mode(dropna=dropna)\n\n return data.apply(f, axis=axis)\n\n def quantile(\n self,\n q=0.5,\n axis: Axis = 0,\n numeric_only: bool = True,\n interpolation: str = \"linear\",\n ):\n \"\"\"\n Return values at the given quantile over requested axis.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n Value between 0 <= q <= 1, the quantile(s) to compute.\n axis : {0, 1, 'index', 'columns'}, default 0\n Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be\n computed as well.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n Series or DataFrame\n\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n See Also\n --------\n core.window.Rolling.quantile: Rolling quantile.\n numpy.percentile: Numpy function to compute the percentile.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n ... columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n Name: 0.1, dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n\n Specifying `numeric_only=False` will also compute the quantile of\n datetime and timedelta data.\n\n >>> df = pd.DataFrame({'A': [1, 2],\n ... 'B': [pd.Timestamp('2010'),\n ... pd.Timestamp('2011')],\n ... 'C': [pd.Timedelta('1 days'),\n ... 
pd.Timedelta('2 days')]})\n >>> df.quantile(0.5, numeric_only=False)\n A 1.5\n B 2010-07-02 12:00:00\n C 1 days 12:00:00\n Name: 0.5, dtype: object\n \"\"\"\n validate_percentile(q)\n\n data = self._get_numeric_data() if numeric_only else self\n axis = self._get_axis_number(axis)\n is_transposed = axis == 1\n\n if is_transposed:\n data = data.T\n\n if len(data.columns) == 0:\n # GH#23925 _get_numeric_data may have dropped all columns\n cols = Index([], name=self.columns.name)\n if is_list_like(q):\n return self._constructor([], index=q, columns=cols)\n return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)\n\n result = data._mgr.quantile(\n qs=q, axis=1, interpolation=interpolation, transposed=is_transposed\n )\n\n if result.ndim == 2:\n result = self._constructor(result)\n else:\n result = self._constructor_sliced(result, name=q)\n\n if is_transposed:\n result = result.T\n\n return result\n\n def to_timestamp(\n self, freq=None, how: str = \"start\", axis: Axis = 0, copy: bool = True\n ) -> DataFrame:\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with DatetimeIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_timestamp(freq=freq, how=how)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex.\n\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed).\n\n Parameters\n ----------\n freq : str, default\n Frequency of the PeriodIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with PeriodIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_period(freq=freq)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def isin(self, values) -> DataFrame:\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dict, the keys must be the column names,\n which must match. 
If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n See Also\n --------\n DataFrame.eq: Equality test for DataFrame.\n Series.isin: Equivalent method on Series.\n Series.str.contains: Test if pattern or regex is contained within a\n string of a Series or Index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n\n When ``values`` is a Series or DataFrame the index and column must\n match. Note that 'falcon' does not match based on the number of legs\n in df2.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... index=['spider', 'falcon'])\n >>> df.isin(other)\n num_legs num_wings\n falcon True True\n dog False False\n \"\"\"\n if isinstance(values, dict):\n from pandas.core.reshape.concat import concat\n\n values = collections.defaultdict(list, values)\n return concat(\n (\n self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)\n ),\n axis=1,\n )\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis=\"index\")\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\n \"only list-like or dict-like objects are allowed \"\n \"to be passed to DataFrame.isin(), \"\n f\"you passed a '{type(values).__name__}'\"\n )\n return self._constructor(\n algorithms.isin(self.values.ravel(), values).reshape(self.shape),\n self.index,\n self.columns,\n )\n\n # ----------------------------------------------------------------------\n # Add index and columns\n _AXIS_ORDERS = [\"index\", \"columns\"]\n _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {\n **NDFrame._AXIS_TO_AXIS_NUMBER,\n 1: 1,\n \"columns\": 1,\n }\n _AXIS_REVERSED = True\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 1\n _info_axis_name = \"columns\"\n\n index: Index = properties.AxisProperty(\n axis=1, doc=\"The index (row labels) of the DataFrame.\"\n )\n columns: Index = properties.AxisProperty(\n axis=0, doc=\"The column labels of the DataFrame.\"\n )\n\n @property\n def _AXIS_NUMBERS(self) -> Dict[str, int]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NUMBERS\n return {\"index\": 0, \"columns\": 1}\n\n @property\n def _AXIS_NAMES(self) -> Dict[int, str]:\n \"\"\".. 
deprecated:: 1.1.0\"\"\"\n super()._AXIS_NAMES\n return {0: \"index\", 1: \"columns\"}\n\n # ----------------------------------------------------------------------\n # Add plotting methods to DataFrame\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n hist = pandas.plotting.hist_frame\n boxplot = pandas.plotting.boxplot_frame\n sparse = CachedAccessor(\"sparse\", SparseFrameAccessor)\n\n\nDataFrame._add_numeric_operations()\n\nops.add_flex_arithmetic_methods(DataFrame)\n\n\ndef _from_nested_dict(data) -> collections.defaultdict:\n new_data: collections.defaultdict = collections.defaultdict(dict)\n for index, s in data.items():\n for col, v in s.items():\n new_data[col][index] = v\n return new_data\n\n\ndef _reindex_for_setitem(value, index: Index):\n # reindex if necessary\n\n if value.index.equals(index) or not len(index):\n return value._values.copy()\n\n # GH#4107\n try:\n value = value.reindex(index)._values\n except ValueError as err:\n # raised in MultiIndex.from_tuples, see test_insert_error_msmgs\n if not value.index.is_unique:\n # duplicate axis\n raise err\n\n raise TypeError(\n \"incompatible index of inserted column with frame index\"\n ) from err\n return value\n\n\ndef _maybe_atleast_2d(value):\n # TODO(EA2D): not needed with 2D EAs\n\n if is_extension_array_dtype(value):\n return value\n\n return np.atleast_2d(np.asarray(value))\n"
] | [
[
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.cast.maybe_box_datetimelike",
"pandas.core.aggregation.transform",
"pandas.core.dtypes.common.infer_dtype_from_object",
"pandas.core.aggregation.reconstruct_func",
"numpy.where",
"pandas.core.dtypes.common.is_named_tuple",
"pandas._libs.algos.nancorr",
"pandas.core.common.standardize_mapping",
"pandas.core.dtypes.cast.maybe_convert_platform",
"numpy.full",
"pandas.core.internals.construction.init_dict",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.sorting.lexsort_indexer",
"pandas.core.dtypes.common.is_list_like",
"pandas.io.formats.format.DataFrameRenderer",
"pandas.core.dtypes.common.is_sequence",
"pandas._libs.hashtable.duplicated_int64",
"pandas.core.internals.construction.masked_rec_array_to_mgr",
"numpy.array",
"pandas.core.algorithms.take",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.reshape.reshape.stack",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.computation.eval.eval",
"pandas.core.ops.fill_binop",
"pandas._libs.algos.nancorr_spearman",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.groupby.generic.DataFrameGroupBy",
"pandas.core.internals.construction.arrays_to_mgr",
"numpy.shape",
"pandas.io.formats.style.Styler",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.cast.maybe_unbox_datetimelike",
"pandas.core.internals.construction.get_names_from_index",
"pandas.core.generic.NDFrame.__init__",
"numpy.asarray",
"pandas.compat.numpy.function.validate_round",
"pandas._config.get_option",
"pandas.io.gbq.to_gbq",
"pandas.core.internals.construction.sanitize_index",
"pandas.core.dtypes.cast.invalidate_string_dtypes",
"pandas.core.dtypes.common.is_dataclass",
"pandas.core.common.asarray_tuplesafe",
"pandas.compat._optional.import_optional_dependency",
"pandas.io.formats.console.in_interactive_session",
"pandas.core.nanops.nancorr",
"pandas.core.internals.construction.to_arrays",
"pandas.core.ops.frame_arith_method_with_reindex",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.compat.numpy.function.validate_transpose",
"pandas.core.indexes.api.Index",
"pandas.io.stata.StataWriterUTF8",
"numpy.errstate",
"pandas.core.dtypes.cast.maybe_infer_to_datetimelike",
"pandas.util._validators.validate_axis_style_args",
"numpy.rec.fromarrays",
"pandas.core.dtypes.common.is_integer",
"pandas.core.aggregation.aggregate",
"pandas.core.aggregation.relabel_result",
"pandas.core.nanops.get_corr_func",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.reshape.pivot.pivot_table",
"pandas.util._decorators.deprecate_kwarg",
"pandas.io.formats.console.get_console_size",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.reshape.pivot.pivot",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.missing.notna",
"pandas.core.construction.sanitize_masked_array",
"pandas.core.sorting.get_group_index",
"pandas.core.ops.should_reindex_frame_op",
"pandas.core.reshape.melt.melt",
"pandas.util._decorators.Substitution",
"pandas.core.series.Series",
"pandas.core.internals.construction.init_ndarray",
"pandas.core.ops.align_method_FRAME",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.cast.validate_numeric_casting",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_hashable",
"numpy.cov",
"numpy.transpose",
"numpy.iterable",
"pandas._libs.properties.AxisProperty",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.core.algorithms.SelectNFrame",
"pandas.core.sorting.nargsort",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.accessor.CachedAccessor",
"pandas.core.indexes.api.ensure_index",
"pandas.core.ops.get_array_op",
"pandas.util._validators.validate_percentile",
"numpy.dot",
"pandas.core.indexes.multi.maybe_droplevels",
"pandas.core.computation.expressions.where",
"pandas.io.formats.info.DataFrameInfo",
"pandas.core.indexing.check_bool_indexer",
"pandas.io.common.get_handle",
"pandas.io.parquet.to_parquet",
"pandas.core.reshape.reshape.stack_multiple",
"pandas.core.dtypes.common.is_float",
"pandas.core.apply.frame_apply",
"pandas.core.indexes.api.ensure_index_from_sequences",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexing.convert_to_index_sliceable",
"pandas.option_context",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.reshape.concat.concat",
"pandas.core.algorithms.take_2d_multi",
"pandas.core.reshape.reshape.unstack",
"pandas.core.internals.construction.dataclasses_to_dicts",
"numpy.isfinite",
"pandas.io.formats.console.in_ipython_frontend",
"numpy.compress",
"pandas.core.reshape.merge.merge",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.common.apply_if_callable",
"pandas._libs.lib.maybe_convert_objects",
"pandas.io.feather_format.to_feather",
"pandas.core.internals.construction.reorder_arrays",
"pandas.core.construction.extract_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.0",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pcmoritz/analytics-zoo | [
"4d9f1eb6ccbf58d49dd5dce41b491c0f76107c31"
] | [
"pyzoo/zoo/pipeline/api/net/tf_dataset.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport sys\n\nfrom bigdl.dataset.dataset import DataSet\nfrom bigdl.transform.vision.image import FeatureTransformer\nfrom bigdl.util.common import get_node_and_core_number, callBigDlFunc\nfrom zoo.common import Sample, JTensor\nfrom zoo.common.nncontext import getOrCreateSparkContext\nfrom zoo.feature.image import ImagePreprocessing\nfrom zoo.util import nest\n\nif sys.version >= '3':\n long = int\n unicode = str\n\n\ndef _to_tensor_structure(tensors):\n if isinstance(tensors, tuple):\n tensor_structure = TensorMeta(dtype=tensors[0], shape=tensors[1], name=\"input0\")\n elif isinstance(tensors, list):\n tensor_structure = [TensorMeta(dtype=value[0], shape=value[1],\n name=\"list_input_\" + str(idx))\n for (idx, value) in enumerate(tensors)]\n elif isinstance(tensors, dict):\n tensor_structure = {}\n for key, value in tensors.items():\n tensor_structure[key] = TensorMeta(dtype=value[0], shape=value[1], name=key)\n else:\n raise ValueError(\"In TFDataset.from_rdd, features and labels should be a tuple, \"\n \"a list of tuples or a dict of tuples\")\n return tensor_structure\n\n\ndef _tensors_to_rdd(tensors, sc, splits):\n import tensorflow as tf\n if isinstance(tensors, np.ndarray):\n tensors = (tensors,)\n\n if isinstance(tensors, list):\n for i in range(len(tensors)):\n if tensors[i].dtype == np.dtype(\"float64\"):\n tensors[i] = np.float32(tensors[i])\n\n data_list = _splits(tensors)\n rdd = sc.parallelize(data_list, splits)\n tensor_structure = [TensorMeta(tf.as_dtype(t.dtype),\n shape=t.shape[1:],\n name=\"input_%s\" % i)\n for i, t in enumerate(tensors)]\n else:\n flattened = nest.flatten(tensors)\n for i in range(len(flattened)):\n if flattened[i].dtype == np.dtype(\"float64\"):\n flattened[i] = np.float32(flattened[i])\n data_list = _splits(flattened)\n rdd = sc.parallelize(data_list, splits)\n rdd = rdd.map(lambda x: nest.pack_sequence_as(tensors, x))\n tensor_structure = nest.pack_sequence_as(tensors,\n [TensorMeta(tf.as_dtype(t.dtype),\n shape=t.shape[1:],\n name=\"input_%s\" % i)\n for i, t in enumerate(flattened)])\n return rdd, tensor_structure\n\n\ndef _splits(tensors):\n data_list = []\n data_size = tensors[0].shape[0]\n for i in range(data_size):\n sample = []\n for j in range(len(tensors)):\n sample.append(tensors[j][i])\n data_list.append(sample)\n return data_list\n\n\nclass MergeFeatureLabelImagePreprocessing(ImagePreprocessing):\n def __init__(self, bigdl_type=\"float\"):\n super(MergeFeatureLabelImagePreprocessing, self).__init__(bigdl_type)\n\n\nclass MergeFeatureLabelFeatureTransformer(FeatureTransformer):\n def __init__(self, bigdl_type=\"float\"):\n super(MergeFeatureLabelFeatureTransformer, self).__init__(bigdl_type)\n\n\nclass TensorMeta(object):\n def __init__(self, dtype, name=None, shape=None):\n self.dtype = dtype\n self.name = name\n self.shape = shape\n\n\nclass TFDataset(object):\n def __init__(self, tensor_structure, batch_size,\n 
batch_per_thread, hard_code_batch_size=False):\n \"\"\"\n\n TFDataset represents a distributed collection of elements (backed by a RDD)\n to be feed into Tensorflow graph.\n\n :param tensor_structure: a nested structure of TensorMeta objects specifying the\n name, shape and data type of each element in this TFDataset\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n \"\"\"\n\n if batch_size > 0 and batch_per_thread > 0:\n raise ValueError(\"bath_size and batch_per_thread should not be set simultaneously\")\n\n self.has_batch = True\n node_num, core_num = get_node_and_core_number()\n self.total_core_num = node_num * core_num\n if batch_size > 0:\n if batch_size % self.total_core_num != 0:\n raise ValueError(\"batch_size should be a multiple \" +\n \"of total core number, but got batch_size: \" +\n \"%s where total core number is %s\" % (batch_size,\n self.total_core_num))\n if batch_size <= 0 and batch_per_thread <= 0:\n batch_per_thread = 1\n batch_size = self.total_core_num\n self.has_batch = False\n\n self.batch_size = batch_size\n self.batch_per_thread = batch_per_thread\n self.hard_code_batch_size = hard_code_batch_size\n self.tensor_structure = tensor_structure\n\n if not self.hard_code_batch_size:\n self.output_shapes = nest.pack_sequence_as(\n self.tensor_structure, [[None] + list(t.shape)\n if t is not None else None\n for t in nest.flatten(self.tensor_structure)])\n else:\n if self.batch_per_thread > 0:\n self.output_shapes = nest.pack_sequence_as(\n self.tensor_structure, [[self.batch_per_thread] + t.shape\n if t is not None else None\n for t in nest.flatten(self.tensor_structure)])\n else:\n self.output_shapes = nest.pack_sequence_as(\n self.tensor_structure, [[self.batch_size // self.total_core_num] + t.shape\n if t is not None else None\n for t in nest.flatten(self.tensor_structure)])\n\n self.input_names = nest.pack_sequence_as(\n self.tensor_structure, [t.name\n if t is not None else None\n for t in nest.flatten(self.tensor_structure)])\n\n self._tensors = None\n\n def _create_placeholders(self):\n import tensorflow as tf\n if not self.hard_code_batch_size:\n tensors = nest.pack_sequence_as(\n self.tensor_structure, [tf.placeholder(name=t.name,\n dtype=t.dtype,\n shape=[None] + list(t.shape))\n for t in nest.flatten(self.tensor_structure)])\n else:\n if self.batch_per_thread > 0:\n tensors = nest.pack_sequence_as(\n self.tensor_structure,\n [tf.placeholder(name=t.name,\n dtype=t.dtype,\n shape=[self.batch_per_thread] + list(t.shape))\n for t in nest.flatten(self.tensor_structure)])\n else:\n tensors = nest.pack_sequence_as(\n self.tensor_structure,\n [tf.placeholder(name=t.name,\n dtype=t.dtype,\n shape=[self.batch_size // self.total_core_num] + list(t.shape))\n for t in nest.flatten(self.tensor_structure)])\n\n for tensor in nest.flatten(tensors):\n tf.get_default_graph().clear_collection(tensor.name)\n tf.add_to_collection(tensor.name, self)\n\n self._original_tensors = tensors\n self._tensors = tensors\n\n if not self.has_batch:\n self._tensors = nest.pack_sequence_as(self.tensor_structure,\n [t[0] for t in nest.flatten(tensors)])\n\n return tensors\n\n @property\n def 
tensors(self):\n \"\"\"\n a nested structure of TensorFlow tensor object in TensorFlow graph.\n The elements of this dataset will be fed into these tensors on each iteration.\n :return: the nested structure of TensorFlow tensor object\n \"\"\"\n\n if self._tensors is None:\n self._create_placeholders()\n\n return self._tensors\n\n @property\n def feature_tensors(self):\n\n if self._tensors is None:\n self._create_placeholders()\n\n if not isinstance(self._tensors, tuple):\n raise ValueError(\"To use feature_tensors, \" +\n \"the element in TFDataset must be a tuple of two components. \" +\n \"Please use TFDataset.from_rdd(rdd, features=..., labels=...). \")\n\n return self._tensors[0]\n\n @property\n def label_tensors(self):\n\n if self._tensors is None:\n self._create_placeholders()\n\n if not isinstance(self._tensors, tuple):\n raise ValueError(\"To use label_tensors, \" +\n \"the element in TFDataset must be a tuple of two components. \" +\n \"Please use TFDataset.from_rdd(rdd, features=..., labels=...). \")\n\n return self._tensors[1]\n\n @staticmethod\n def _to_tensor_structure(features, labels):\n feature_structure = _to_tensor_structure(features)\n if labels is not None:\n label_structure = _to_tensor_structure(labels)\n tensor_structure = (feature_structure, label_structure)\n\n else:\n tensor_structure = (feature_structure,)\n return tensor_structure\n\n def get_prediction_data(self):\n \"\"\"\n :return: an object that can be used for TFNet.predict\n e.g. an RDD of Sample or a ImageSet\n \"\"\"\n raise NotImplementedError\n\n def get_evaluation_data(self):\n \"\"\"\n :return: an object that can be used for TFNet.evaluate,\n e.g. an RDD of Sample or a ImageSet\n \"\"\"\n raise NotImplementedError\n\n def get_training_data(self):\n \"\"\"\n :return: an object that can be used to create a BigDL optimizer,\n e.g. an RDD of Sample or a DataSet\n \"\"\"\n raise NotImplementedError\n\n def get_validation_data(self):\n \"\"\"\n :return: an object that can be used to set validation in a BigDL optimizer,\n e.g. an RDD of Sample or a DataSet\n \"\"\"\n raise NotImplementedError\n\n def get_num_partitions(self):\n \"\"\"\n :return: the num of partitions of the underlying RDD\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def from_rdd(*args, **kwargs):\n \"\"\"\n Create a TFDataset from a rdd.\n\n For training and evaluation, both `features` and `labels` arguments should be specified.\n The element of the rdd should be a tuple of two, (features, labels), each has the\n same structure of numpy.ndarrays of the argument `features`, `labels`.\n\n E.g. 
if `features` is [(tf.float32, [10]), (tf.float32, [20])],\n and `labels` is {\"label1\":(tf.float32, [10]), \"label2\": (tf.float32, [20])}\n then a valid element of the rdd could be\n\n (\n [np.zeros(dtype=float, shape=(10,), np.zeros(dtype=float, shape=(10,)))],\n {\"label1\": np.zeros(dtype=float, shape=(10,)),\n \"label2\":np.zeros(dtype=float, shape=(10,))))}\n )\n\n If `labels` is not specified,\n then the above element should be changed to\n [np.zeros(dtype=float, shape=(10,), np.zeros(dtype=float, shape=(10,)))]\n\n For inference, `labels` can be not specified.\n The element of the rdd should be some ndarrays of the same structure of the `features`\n argument.\n\n A note on the legacy api: if you are using `names`, `shapes`, `types` arguments,\n each element of the rdd should be a list of numpy.ndarray.\n\n :param rdd: a rdd containing the numpy.ndarrays to be used\n for training/evaluation/inference\n :param features: the structure of input features, should one the following:\n - a tuple (dtype, shape), e.g. (tf.float32, [28, 28, 1])\n - a list of such tuple [(dtype1, shape1), (dtype2, shape2)],\n e.g. [(tf.float32, [10]), (tf.float32, [20])],\n - a dict of such tuple, mapping string names to tuple {\"name\": (dtype, shape},\n e.g. {\"input1\":(tf.float32, [10]), \"input2\": (tf.float32, [20])}\n\n :param labels: the structure of input labels, format is the same as features\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param val_rdd: validation data with the same structure of rdd\n :return: a TFDataset\n \"\"\"\n return TFNdarrayDataset.from_rdd(*args, **kwargs)\n\n @staticmethod\n def from_ndarrays(*args, **kwargs):\n \"\"\"\n Create a TFDataset from a nested structure of numpy ndarrays. Each element\n in the resulting TFDataset has the same structure of the argument tensors and\n is created by indexing on the first dimension of each ndarray in the tensors\n argument.\n\n This method is equivalent to sc.parallize the tensors and call TFDataset.from_rdd\n\n :param tensors: the numpy ndarrays\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param val_tensors: the numpy ndarrays used for validation during training\n :return:\n \"\"\"\n return TFNdarrayDataset.from_ndarrays(*args, **kwargs)\n\n @staticmethod\n def from_image_set(image_set, image, label=None,\n batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, validation_image_set=None):\n \"\"\"\n Create a TFDataset from a ImagetSet. Each ImageFeature in the ImageSet should\n already has the \"sample\" field, i.e. 
the result of ImageSetToSample transformer\n\n :param image_set: the ImageSet used to create this TFDataset\n :param image: a tuple of two, the first element is the type of image, the second element\n is the shape of this element, i.e. (tf.float32, [224, 224, 3]))\n :param label: a tuple of two, the first element is the type of label, the second element\n is the shape of this element, i.e. (tf.int32, [1]))\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param validation_image_set: the ImageSet used for validation during training\n :return:\n \"\"\"\n tensor_structure = TFDataset._to_tensor_structure(image, label)\n return TFImageDataset(image_set, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size,\n validation_image_set)\n\n @staticmethod\n def from_text_set(text_set, text, label=None,\n batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, validation_image_set=None):\n \"\"\"\n Create a TFDataset from a TextSet. The TextSet must be transformed to Sample, i.e.\n the result of TextFeatureToSample transformer.\n :param text_set: the TextSet used to create this TFDataset\n :param text: a tuple of two, the first element is the type of this input feature,\n the second element is the shape of this element, i.e. (tf.float32, [10, 100, 4])).\n text can also be nested structure of this tuple of two.\n :param label: a tuple of two, the first element is the type of label, the second element\n is the shape of this element, i.e. (tf.int32, [1])). label can also be nested structure of\n this tuple of two.\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param validation_image_set: The TextSet used for validation during training\n :return:\n \"\"\"\n tensor_structure = TFDataset._to_tensor_structure(text, label)\n return TFTextDataset(text_set, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size,\n validation_image_set)\n\n @staticmethod\n def from_tfrecord(file_path, parse_fn, batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, validation_file_path=None):\n \"\"\"\n Create a TFDataset from tfrecord files.\n :param file_path: comma seperated tfrecord file(s) path\n :param parse_fn: a TensorFlow function that takes a serialized example string to a nested\n structure of tensors. 
Follows the signature:\n * Args:\n * `example`: a string TensorFlow tensor representing a single record\n * Returns:\n a tuple or dictionary of output tensors and the output tensors must be\n of numeric type\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param validation_file_path: The tfrecord files used for validation\n :return:\n \"\"\"\n\n return TFRecordDataset(file_path, parse_fn, batch_size, batch_per_thread,\n hard_code_batch_size, validation_file_path)\n\n @staticmethod\n def from_feature_set(dataset, features, labels=None, batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, validation_dataset=None):\n \"\"\"\n Create a TFDataset from a FeatureSet. Currently, the element in this Feature set must be a\n ImageFeature that has a sample field, i.e. the result of ImageSetToSample transformer\n :param dataset: the feature set used to create this TFDataset\n :param features: a tuple of two, the first element is the type of this input feature,\n the second element is the shape of this element, i.e. (tf.float32, [224, 224, 3])).\n text can also be nested structure of this tuple of two.\n :param labels: a tuple of two, the first element is the type of label, the second element\n is the shape of this element, i.e. (tf.int32, [1])). label can also be nested structure of\n this tuple of two.\n :param batch_size: the batch size, used for training, should be a multiple of\n total core num\n :param batch_per_thread: the batch size for each thread, used for inference or evaluation\n :param hard_code_batch_size: whether to hard code the batch_size into tensorflow graph,\n if True, the static size of the first dimension of the resulting tensors is\n batch_size/total_core_num (training) or batch_per_thread for inference; if False,\n it is None.\n :param validation_dataset: The FeatureSet used for validation during training\n :return:\n \"\"\"\n tensor_structure = TFDataset._to_tensor_structure(features, labels)\n\n return TFFeatureDataset(dataset, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size, validation_dataset)\n\n\nclass TFFeatureDataset(TFDataset):\n\n def __init__(self, dataset, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size=False, validation_dataset=None):\n super(TFFeatureDataset, self).__init__(tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size)\n self.dataset = dataset\n self.validation_dataset = validation_dataset\n\n def get_prediction_data(self):\n raise Exception(\"TFFeatureDataset is only supported in training\")\n\n def get_evaluation_data(self):\n raise Exception(\"TFFeatureDataset is only supported in training\")\n\n def get_training_data(self):\n return self.dataset.transform(MergeFeatureLabelFeatureTransformer()).to_dataset()\n\n def get_validation_data(self):\n if self.validation_dataset is not None:\n return self.validation_dataset.transform(\n MergeFeatureLabelFeatureTransformer()).to_dataset()\n return None\n\n\nclass TFRecordDataset(TFDataset):\n\n def get_num_partitions(self):\n self.train_rdd.getNumPartitions()\n\n def __init__(self, file_path, parse_fn, batch_size,\n batch_per_thread, 
hard_code_batch_size=False, validation_file_path=None):\n import tensorflow as tf\n g = tf.Graph()\n with g.as_default():\n serialized_example = tf.placeholder(dtype=tf.string, shape=[])\n results = parse_fn(serialized_example)\n\n flattened = nest.flatten(results)\n output_names = [tf.cast(t, dtype=tf.float32).name for t in flattened]\n\n serialized_graph = bytearray(g.as_graph_def().SerializeToString())\n\n sc = getOrCreateSparkContext()\n train_rdd = callBigDlFunc(\"float\", \"createRDDFromTFRecords\",\n file_path, sc, serialized_graph,\n serialized_example.name, output_names)\n validation_rdd = None\n if validation_file_path is not None:\n validation_rdd = callBigDlFunc(\"float\", \"createRDDFromTFRecords\",\n validation_file_path, sc, serialized_graph,\n serialized_example.name, output_names)\n\n tensor_structure = nest.pack_sequence_as(results,\n [TensorMeta(tf.as_dtype(t.dtype),\n shape=t.shape,\n name=\"data_%s\" % i)\n for i, t in enumerate(nest.flatten(results))])\n\n super(TFRecordDataset, self).__init__(tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size)\n\n self.train_rdd = train_rdd\n self.validation_rdd = validation_rdd\n\n def get_prediction_data(self):\n return self.train_rdd\n\n def get_evaluation_data(self):\n return self.train_rdd\n\n def get_training_data(self):\n return self.train_rdd\n\n def get_validation_data(self):\n return self.validation_rdd\n\n\nclass TFTextDataset(TFDataset):\n\n def __init__(self, text_set, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size=False, validation_text_set=None):\n super(TFTextDataset, self).__init__(tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size)\n self.text_set = text_set\n self.validation_text_set = validation_text_set\n\n def get_prediction_data(self):\n return self.text_set.get_samples().map(\n lambda sample: Sample.from_jtensor(features=sample.features,\n labels=JTensor.from_ndarray(np.array([0.0]))))\n\n def get_evaluation_data(self):\n return self.text_set.get_samples()\n\n def get_training_data(self):\n return self.text_set.get_samples().map(\n lambda sample: Sample.from_jtensor(features=sample.features + sample.labels,\n labels=JTensor.from_ndarray(np.array([0.0]))))\n\n def get_validation_data(self):\n if self.validation_text_set is not None:\n return self.validation_text_set.get_samples().map(\n lambda sample: Sample.from_jtensor(features=sample.features + sample.labels,\n labels=JTensor.from_ndarray(np.array([0.0]))))\n return None\n\n def get_num_partitions(self):\n return self.text_set.get_samples().getNumPartitions()\n\n\nclass TFImageDataset(TFDataset):\n def __init__(self, image_set, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size=False, validation_image_set=None):\n super(TFImageDataset, self).__init__(tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size)\n self.image_set = image_set\n self.validation_image_set = validation_image_set\n\n def get_prediction_data(self):\n return self.image_set\n\n def get_evaluation_data(self):\n return self.image_set.to_image_frame()\n\n def get_training_data(self):\n return DataSet.image_frame(self.image_set\n .transform(MergeFeatureLabelImagePreprocessing())\n .to_image_frame())\n\n def get_validation_data(self):\n if self.validation_image_set is not None:\n return DataSet.image_frame(self.validation_image_set.\n transform(MergeFeatureLabelImagePreprocessing())\n .to_image_frame())\n return None\n\n def get_num_partitions(self):\n return 
self.image_set.get_image().getNumPartitions()\n\n\nclass TFNdarrayDataset(TFDataset):\n\n def __init__(self, rdd, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size=False, val_rdd=None):\n\n super(TFNdarrayDataset, self).__init__(tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size)\n\n self.val_rdd = val_rdd\n self.rdd = rdd\n\n def get_prediction_data(self):\n data = self.rdd.map(lambda t: Sample.from_ndarray(\n nest.flatten(t[0] if isinstance(t, tuple) else t), np.array([0.0])))\n return data\n\n def get_evaluation_data(self):\n if isinstance(self.tensor_structure, tuple):\n return self.rdd.map(\n lambda t: Sample.from_ndarray(nest.flatten(t[0]), nest.flatten(t[1])))\n return self.rdd.map(lambda t: Sample.from_ndarray(nest.flatten(t), np.array([0.0])))\n\n def get_training_data(self):\n return self.rdd.map(lambda t: Sample.from_ndarray(nest.flatten(t), np.array([0.0])))\n\n def get_validation_data(self):\n if self.val_rdd is not None:\n return self.val_rdd.map(lambda t: Sample.from_ndarray(nest.flatten(t),\n np.array([0.0])))\n return None\n\n def get_num_partitions(self):\n return self.rdd.getNumPartitions()\n\n @staticmethod\n def from_rdd(rdd, names=None, shapes=None, types=None,\n batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, val_rdd=None,\n features=None, labels=None):\n\n import tensorflow as tf\n\n if features is not None:\n feature_structure = _to_tensor_structure(features)\n if labels is not None:\n label_structure = _to_tensor_structure(labels)\n tensor_structure = (feature_structure, label_structure)\n\n else:\n tensor_structure = (feature_structure,)\n\n return TFNdarrayDataset(rdd, tensor_structure,\n batch_size, batch_per_thread,\n hard_code_batch_size, val_rdd)\n\n if names is not None or shapes is not None or types is not None:\n if not names:\n names = [\"features\", \"labels\"]\n if not shapes:\n shapes = [None] * len(names)\n\n if not types:\n types = [tf.float32] * len(names)\n tensor_structure = []\n for i in range(len(names)):\n tensor_structure.append(TensorMeta(types[i], name=names[i], shape=shapes[i]))\n else:\n tensor_structure = [TensorMeta(dtype=tf.float32), TensorMeta(dtype=tf.float32)]\n\n return TFNdarrayDataset(rdd, tensor_structure,\n batch_size, batch_per_thread,\n hard_code_batch_size, val_rdd)\n\n @staticmethod\n def from_ndarrays(tensors, batch_size=-1, batch_per_thread=-1,\n hard_code_batch_size=False, val_tensors=None):\n sc = getOrCreateSparkContext()\n node_num, core_num = get_node_and_core_number()\n total_core_num = node_num * core_num\n\n rdd, tensor_structure = _tensors_to_rdd(tensors, sc, total_core_num)\n\n val_rdd = None\n if val_tensors is not None:\n val_rdd, _ = _tensors_to_rdd(val_tensors, sc, total_core_num)\n\n return TFNdarrayDataset(rdd, tensor_structure, batch_size,\n batch_per_thread, hard_code_batch_size, val_rdd)\n"
] | [
[
"tensorflow.Graph",
"tensorflow.as_dtype",
"tensorflow.cast",
"tensorflow.placeholder",
"numpy.dtype",
"numpy.float32",
"tensorflow.get_default_graph",
"numpy.array",
"tensorflow.add_to_collection"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
blutjens/deepxde | [
"f74b9a48165eee1984f4b43dec05f1129f77200f"
] | [
"deepxde/optimizers/tensorflow_compat_v1/external_optimizer.py"
] | [
"\"\"\"TensorFlow interface for third-party optimizers.\n\nCode below is taken from https://github.com/tensorflow/tensorflow/blob/v1.15.2/tensorflow/contrib/opt/python/training/external_optimizer.py,\nbecause the ``tf.contrib`` module is not included in TensorFlow 2.\n\nAnother solution is using TensorFlow Probability, see the following references.\nBut the following solution requires setting the weights before building the network and loss,\nwhich is not consistent with other optimizers in graph mode.\nA possible solution Could be adding a TFPOptimizerInterface similar to ScipyOptimizerInterface.\n\n- https://www.tensorflow.org/probability/api_docs/python/tfp/optimizer/lbfgs_minimize\n- https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/optimizer/lbfgs_test.py\n- https://stackoverflow.com/questions/58591562/how-can-we-use-lbfgs-minimize-in-tensorflow-2-0\n- https://stackoverflow.com/questions/59029854/use-scipy-optimizer-with-tensorflow-2-0-for-neural-network-training\n- https://pychao.com/2019/11/02/optimize-tensorflow-keras-models-with-l-bfgs-from-tensorflow-probability/\n- https://gist.github.com/piyueh/712ec7d4540489aad2dcfb80f9a54993\n- https://github.com/pierremtb/PINNs-TF2.0/blob/master/utils/neuralnetwork.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom ...backend import tf\n\n__all__ = [\"ExternalOptimizerInterface\", \"ScipyOptimizerInterface\"]\n\n\nclass ExternalOptimizerInterface(object):\n \"\"\"Base class for interfaces with external optimization algorithms.\n Subclass this and implement `_minimize` in order to wrap a new optimization\n algorithm.\n `ExternalOptimizerInterface` should not be instantiated directly; instead use\n e.g. `ScipyOptimizerInterface`.\n @@__init__\n @@minimize\n \"\"\"\n\n def __init__(\n self,\n loss,\n var_list=None,\n equalities=None,\n inequalities=None,\n var_to_bounds=None,\n **optimizer_kwargs\n ):\n \"\"\"Initialize a new interface instance.\n Args:\n loss: A scalar `Tensor` to be minimized.\n var_list: Optional `list` of `Variable` objects to update to minimize\n `loss`. Defaults to the list of variables collected in the graph\n under the key `GraphKeys.TRAINABLE_VARIABLES`.\n equalities: Optional `list` of equality constraint scalar `Tensor`s to be\n held equal to zero.\n inequalities: Optional `list` of inequality constraint scalar `Tensor`s\n to be held nonnegative.\n var_to_bounds: Optional `dict` where each key is an optimization\n `Variable` and each corresponding value is a length-2 tuple of\n `(low, high)` bounds. Although enforcing this kind of simple constraint\n could be accomplished with the `inequalities` arg, not all optimization\n algorithms support general inequality constraints, e.g. L-BFGS-B. Both\n `low` and `high` can either be numbers or anything convertible to a\n NumPy array that can be broadcast to the shape of `var` (using\n `np.broadcast_to`). To indicate that there is no bound, use `None` (or\n `+/- np.infty`). 
For example, if `var` is a 2x3 matrix, then any of\n the following corresponding `bounds` could be supplied:\n * `(0, np.infty)`: Each element of `var` held positive.\n * `(-np.infty, [1, 2])`: First column less than 1, second column less\n than 2.\n * `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less\n than 2, etc.\n * `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,\n `var[0, 1]` less than 2, etc.\n **optimizer_kwargs: Other subclass-specific keyword arguments.\n \"\"\"\n self._loss = loss\n self._equalities = equalities or []\n self._inequalities = inequalities or []\n\n if var_list is None:\n self._vars = tf.trainable_variables()\n else:\n self._vars = list(var_list)\n\n packed_bounds = None\n if var_to_bounds is not None:\n left_packed_bounds = []\n right_packed_bounds = []\n for var in self._vars:\n shape = var.get_shape().as_list()\n bounds = (-np.infty, np.infty)\n if var in var_to_bounds:\n bounds = var_to_bounds[var]\n left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))\n right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))\n packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))\n self._packed_bounds = packed_bounds\n\n self._update_placeholders = [tf.placeholder(var.dtype) for var in self._vars]\n self._var_updates = [\n var.assign(tf.reshape(placeholder, _get_shape_tuple(var)))\n for var, placeholder in zip(self._vars, self._update_placeholders)\n ]\n\n loss_grads = _compute_gradients(loss, self._vars)\n equalities_grads = [\n _compute_gradients(equality, self._vars) for equality in self._equalities\n ]\n inequalities_grads = [\n _compute_gradients(inequality, self._vars)\n for inequality in self._inequalities\n ]\n\n self.optimizer_kwargs = optimizer_kwargs\n\n self._packed_var = self._pack(self._vars)\n self._packed_loss_grad = self._pack(loss_grads)\n self._packed_equality_grads = [\n self._pack(equality_grads) for equality_grads in equalities_grads\n ]\n self._packed_inequality_grads = [\n self._pack(inequality_grads) for inequality_grads in inequalities_grads\n ]\n\n dims = [_prod(_get_shape_tuple(var)) for var in self._vars]\n accumulated_dims = list(_accumulate(dims))\n self._packing_slices = [\n slice(start, end)\n for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])\n ]\n\n def minimize(\n self,\n session=None,\n feed_dict=None,\n fetches=None,\n step_callback=None,\n loss_callback=None,\n **run_kwargs\n ):\n \"\"\"Minimize a scalar `Tensor`.\n Variables subject to optimization are updated in-place at the end of\n optimization.\n Note that this method does *not* just return a minimization `Op`, unlike\n `Optimizer.minimize()`; instead it actually performs minimization by\n executing commands to control a `Session`.\n Args:\n session: A `Session` instance.\n feed_dict: A feed dict to be passed to calls to `session.run`.\n fetches: A list of `Tensor`s to fetch and supply to `loss_callback`\n as positional arguments.\n step_callback: A function to be called at each optimization step;\n arguments are the current values of all optimization variables\n flattened into a single vector.\n loss_callback: A function to be called every time the loss and gradients\n are computed, with evaluated fetches supplied as positional arguments.\n **run_kwargs: kwargs to pass to `session.run`.\n \"\"\"\n session = session or tf.get_default_session()\n feed_dict = feed_dict or {}\n fetches = fetches or []\n\n loss_callback = loss_callback or (lambda *fetches: None)\n step_callback = 
step_callback or (lambda xk: None)\n\n # Construct loss function and associated gradient.\n loss_grad_func = self._make_eval_func(\n [self._loss, self._packed_loss_grad],\n session,\n feed_dict,\n fetches,\n loss_callback,\n )\n\n # Construct equality constraint functions and associated gradients.\n equality_funcs = self._make_eval_funcs(\n self._equalities, session, feed_dict, fetches\n )\n equality_grad_funcs = self._make_eval_funcs(\n self._packed_equality_grads, session, feed_dict, fetches\n )\n\n # Construct inequality constraint functions and associated gradients.\n inequality_funcs = self._make_eval_funcs(\n self._inequalities, session, feed_dict, fetches\n )\n inequality_grad_funcs = self._make_eval_funcs(\n self._packed_inequality_grads, session, feed_dict, fetches\n )\n\n # Get initial value from TF session.\n initial_packed_var_val = session.run(self._packed_var)\n\n # Perform minimization.\n packed_var_val = self._minimize(\n initial_val=initial_packed_var_val,\n loss_grad_func=loss_grad_func,\n equality_funcs=equality_funcs,\n equality_grad_funcs=equality_grad_funcs,\n inequality_funcs=inequality_funcs,\n inequality_grad_funcs=inequality_grad_funcs,\n packed_bounds=self._packed_bounds,\n step_callback=step_callback,\n optimizer_kwargs=self.optimizer_kwargs,\n )\n var_vals = [\n packed_var_val[packing_slice] for packing_slice in self._packing_slices\n ]\n\n # Set optimization variables to their new values.\n session.run(\n self._var_updates,\n feed_dict=dict(zip(self._update_placeholders, var_vals)),\n **run_kwargs\n )\n\n def _minimize(\n self,\n initial_val,\n loss_grad_func,\n equality_funcs,\n equality_grad_funcs,\n inequality_funcs,\n inequality_grad_funcs,\n packed_bounds,\n step_callback,\n optimizer_kwargs,\n ):\n \"\"\"Wrapper for a particular optimization algorithm implementation.\n It would be appropriate for a subclass implementation of this method to\n raise `NotImplementedError` if unsupported arguments are passed: e.g. 
if an\n algorithm does not support constraints but `len(equality_funcs) > 0`.\n Args:\n initial_val: A NumPy vector of initial values.\n loss_grad_func: A function accepting a NumPy packed variable vector and\n returning two outputs, a loss value and the gradient of that loss with\n respect to the packed variable vector.\n equality_funcs: A list of functions each of which specifies a scalar\n quantity that an optimizer should hold exactly zero.\n equality_grad_funcs: A list of gradients of equality_funcs.\n inequality_funcs: A list of functions each of which specifies a scalar\n quantity that an optimizer should hold >= 0.\n inequality_grad_funcs: A list of gradients of inequality_funcs.\n packed_bounds: A list of bounds for each index, or `None`.\n step_callback: A callback function to execute at each optimization step,\n supplied with the current value of the packed variable vector.\n optimizer_kwargs: Other key-value arguments available to the optimizer.\n Returns:\n The optimal variable vector as a NumPy vector.\n \"\"\"\n raise NotImplementedError(\n \"To use ExternalOptimizerInterface, subclass from it and implement \"\n \"the _minimize() method.\"\n )\n\n @classmethod\n def _pack(cls, tensors):\n \"\"\"Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`.\"\"\"\n if not tensors:\n return None\n elif len(tensors) == 1:\n return tf.reshape(tensors[0], [-1])\n else:\n flattened = [tf.reshape(tensor, [-1]) for tensor in tensors]\n return tf.concat(flattened, 0)\n\n def _make_eval_func(self, tensors, session, feed_dict, fetches, callback=None):\n \"\"\"Construct a function that evaluates a `Tensor` or list of `Tensor`s.\"\"\"\n if not isinstance(tensors, list):\n tensors = [tensors]\n num_tensors = len(tensors)\n\n def eval_func(x):\n \"\"\"Function to evaluate a `Tensor`.\"\"\"\n augmented_feed_dict = {\n var: x[packing_slice].reshape(_get_shape_tuple(var))\n for var, packing_slice in zip(self._vars, self._packing_slices)\n }\n augmented_feed_dict.update(feed_dict)\n augmented_fetches = tensors + fetches\n\n augmented_fetch_vals = session.run(\n augmented_fetches, feed_dict=augmented_feed_dict\n )\n\n if callable(callback):\n callback(*augmented_fetch_vals[num_tensors:])\n\n return augmented_fetch_vals[:num_tensors]\n\n return eval_func\n\n def _make_eval_funcs(self, tensors, session, feed_dict, fetches, callback=None):\n return [\n self._make_eval_func(tensor, session, feed_dict, fetches, callback)\n for tensor in tensors\n ]\n\n\nclass ScipyOptimizerInterface(ExternalOptimizerInterface):\n \"\"\"Wrapper allowing `scipy.optimize.minimize` to operate a `tf.compat.v1.Session`.\n Example:\n ```python\n vector = tf.Variable([7., 7.], 'vector')\n # Make vector norm as small as possible.\n loss = tf.reduce_sum(tf.square(vector))\n optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})\n with tf.compat.v1.Session() as session:\n optimizer.minimize(session)\n # The value of vector should now be [0., 0.].\n ```\n Example with simple bound constraints:\n ```python\n vector = tf.Variable([7., 7.], 'vector')\n # Make vector norm as small as possible.\n loss = tf.reduce_sum(tf.square(vector))\n optimizer = ScipyOptimizerInterface(\n loss, var_to_bounds={vector: ([1, 2], np.infty)})\n with tf.compat.v1.Session() as session:\n optimizer.minimize(session)\n # The value of vector should now be [1., 2.].\n ```\n Example with more complicated constraints:\n ```python\n vector = tf.Variable([7., 7.], 'vector')\n # Make vector norm as small as possible.\n loss = 
tf.reduce_sum(tf.square(vector))\n # Ensure the vector's y component is = 1.\n equalities = [vector[1] - 1.]\n # Ensure the vector's x component is >= 1.\n inequalities = [vector[0] - 1.]\n # Our default SciPy optimization algorithm, L-BFGS-B, does not support\n # general constraints. Thus we use SLSQP instead.\n optimizer = ScipyOptimizerInterface(\n loss, equalities=equalities, inequalities=inequalities, method='SLSQP')\n with tf.compat.v1.Session() as session:\n optimizer.minimize(session)\n # The value of vector should now be [1., 1.].\n ```\n \"\"\"\n\n _DEFAULT_METHOD = \"L-BFGS-B\"\n\n def _minimize(\n self,\n initial_val,\n loss_grad_func,\n equality_funcs,\n equality_grad_funcs,\n inequality_funcs,\n inequality_grad_funcs,\n packed_bounds,\n step_callback,\n optimizer_kwargs,\n ):\n def loss_grad_func_wrapper(x):\n # SciPy's L-BFGS-B Fortran implementation requires gradients as doubles.\n loss, gradient = loss_grad_func(x)\n return loss, gradient.astype(\"float64\")\n\n optimizer_kwargs = dict(optimizer_kwargs.items())\n method = optimizer_kwargs.pop(\"method\", self._DEFAULT_METHOD)\n\n constraints = []\n for func, grad_func in zip(equality_funcs, equality_grad_funcs):\n constraints.append({\"type\": \"eq\", \"fun\": func, \"jac\": grad_func})\n for func, grad_func in zip(inequality_funcs, inequality_grad_funcs):\n constraints.append({\"type\": \"ineq\", \"fun\": func, \"jac\": grad_func})\n\n minimize_args = [loss_grad_func_wrapper, initial_val]\n minimize_kwargs = {\n \"jac\": True,\n \"callback\": step_callback,\n \"method\": method,\n \"constraints\": constraints,\n \"bounds\": packed_bounds,\n }\n\n for kwarg in minimize_kwargs:\n if kwarg in optimizer_kwargs:\n if kwarg == \"bounds\":\n # Special handling for 'bounds' kwarg since ability to specify bounds\n # was added after this module was already publicly released.\n raise ValueError(\n \"Bounds must be set using the var_to_bounds argument\"\n )\n raise ValueError(\n \"Optimizer keyword arg '{}' is set \"\n \"automatically and cannot be injected manually\".format(kwarg)\n )\n\n minimize_kwargs.update(optimizer_kwargs)\n\n import scipy.optimize # pylint: disable=g-import-not-at-top\n\n result = scipy.optimize.minimize(*minimize_args, **minimize_kwargs)\n\n message_lines = [\n \"Optimization terminated with:\",\n \" Message: %s\",\n \" Objective function value: %f\",\n ]\n message_args = [result.message, result.fun]\n if hasattr(result, \"nit\"):\n # Some optimization methods might not provide information such as nit and\n # nfev in the return. Logs only available information.\n message_lines.append(\" Number of iterations: %d\")\n message_args.append(result.nit)\n if hasattr(result, \"nfev\"):\n message_lines.append(\" Number of functions evaluations: %d\")\n message_args.append(result.nfev)\n tf.logging.info(\"\\n\".join(message_lines), *message_args)\n\n return result[\"x\"]\n\n\ndef _accumulate(list_):\n total = 0\n yield total\n for x in list_:\n total += x\n yield total\n\n\ndef _get_shape_tuple(tensor):\n return tuple(tensor.shape)\n\n\ndef _prod(array):\n prod = 1\n for value in array:\n prod *= value\n return prod\n\n\ndef _compute_gradients(tensor, var_list):\n grads = tf.gradients(tensor, var_list)\n # tf.gradients sometimes returns `None` when it should return 0.\n return [\n grad if grad is not None else tf.zeros_like(var)\n for var, grad in zip(var_list, grads)\n ]\n"
] | [
[
"numpy.broadcast_to"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pmulcaire/rosita | [
"fffe45fb450d79cf36e0a3e2625300dc95249367",
"fffe45fb450d79cf36e0a3e2625300dc95249367"
] | [
"allennlp/data/fields/text_field.py",
"allennlp/tests/custom_extensions/alternating_highway_lstm_test.py"
] | [
"\"\"\"\nA ``TextField`` represents a string of text, the kind that you might want to represent with\nstandard word vectors, or pass through an LSTM.\n\"\"\"\nimport IPython as ipy\n\nfrom typing import Dict, List, Optional, Iterator\nimport textwrap\n\nfrom overrides import overrides\nfrom spacy.tokens import Token as SpacyToken\nimport torch\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data.fields.sequence_field import SequenceField\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.nn import util\n\nTokenList = List[TokenType] # pylint: disable=invalid-name\n\n\nclass TextField(SequenceField[Dict[str, torch.Tensor]]):\n \"\"\"\n This ``Field`` represents a list of string tokens. Before constructing this object, you need\n to tokenize raw strings using a :class:`~allennlp.data.tokenizers.tokenizer.Tokenizer`.\n\n Because string tokens can be represented as indexed arrays in a number of ways, we also take a\n dictionary of :class:`~allennlp.data.token_indexers.token_indexer.TokenIndexer`\n objects that will be used to convert the tokens into indices.\n Each ``TokenIndexer`` could represent each token as a single ID, or a list of character IDs, or\n something else.\n\n This field will get converted into a dictionary of arrays, one for each ``TokenIndexer``. A\n ``SingleIdTokenIndexer`` produces an array of shape (num_tokens,), while a\n ``TokenCharactersIndexer`` produces an array of shape (num_tokens, num_characters).\n \"\"\"\n def __init__(self, tokens: List[Token], token_indexers: Dict[str, TokenIndexer]) -> None:\n self.tokens = tokens\n self._token_indexers = token_indexers\n self._indexed_tokens: Optional[Dict[str, TokenList]] = None\n self._indexer_name_to_indexed_token: Optional[Dict[str, List[str]]] = None\n\n if not all([isinstance(x, (Token, SpacyToken)) for x in tokens]):\n raise ConfigurationError(\"TextFields must be passed Tokens. \"\n \"Found: {} with types {}.\".format(tokens, [type(x) for x in tokens]))\n\n # Sequence[Token] methods\n def __iter__(self) -> Iterator[Token]:\n return iter(self.tokens)\n\n def __getitem__(self, idx: int) -> Token:\n return self.tokens[idx]\n\n def __len__(self) -> int:\n return len(self.tokens)\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n for indexer in self._token_indexers.values():\n for token in self.tokens:\n indexer.count_vocab_items(token, counter)\n\n @overrides\n def index(self, vocab: Vocabulary):\n token_arrays: Dict[str, TokenList] = {}\n indexer_name_to_indexed_token: Dict[str, List[str]] = {}\n for indexer_name, indexer in self._token_indexers.items():\n token_indices = indexer.tokens_to_indices(self.tokens, vocab, indexer_name)\n token_arrays.update(token_indices)\n indexer_name_to_indexed_token[indexer_name] = list(token_indices.keys())\n self._indexed_tokens = token_arrays\n self._indexer_name_to_indexed_token = indexer_name_to_indexed_token\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n \"\"\"\n The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by\n (potentially) several ``TokenIndexers``. This method gets the max length (over tokens)\n associated with each of these arrays.\n \"\"\"\n # Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens\n # for each indexer separately. 
Then we will combine the results for each indexer into a single\n # dictionary, resolving any (unlikely) key conflicts by taking a max.\n lengths = []\n if self._indexed_tokens is None:\n raise ConfigurationError(\"You must call .index(vocabulary) on a \"\n \"field before determining padding lengths.\")\n\n # Each indexer can return a different sequence length, and for indexers that return\n # multiple arrays each can have a different length. We'll keep track of them here.\n for indexer_name, indexer in self._token_indexers.items():\n indexer_lengths = {}\n\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:\n # This is a list of dicts, one for each token in the field.\n token_lengths = [indexer.get_padding_lengths(token)\n for token in self._indexed_tokens[indexed_tokens_key]]\n if not token_lengths:\n # This is a padding edge case and occurs when we want to pad a ListField of\n # TextFields. In order to pad the list field, we need to be able to have an\n # _empty_ TextField, but if this is the case, token_lengths will be an empty\n # list, so we add the default empty padding dictionary to the list instead.\n token_lengths = [{}]\n # Iterate over the keys and find the maximum token length.\n # It's fine to iterate over the keys of the first token since all tokens have the same keys.\n for key in token_lengths[0]:\n indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)\n lengths.append(indexer_lengths)\n\n indexer_sequence_lengths = {key: len(val) for key, val in self._indexed_tokens.items()}\n # Get the padding lengths for sequence lengths.\n if len(set(indexer_sequence_lengths.values())) == 1:\n # This is the default case where all indexers return the same length.\n # Keep the existing 'num_tokens' key for backward compatibility with existing config files.\n padding_lengths = {'num_tokens': list(indexer_sequence_lengths.values())[0]}\n else:\n # The indexers return different lengths.\n padding_lengths = indexer_sequence_lengths\n\n # Get all keys which have been used for padding for each indexer and take the max if there are duplicates.\n padding_keys = {key for d in lengths for key in d.keys()}\n for padding_key in padding_keys:\n padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)\n return padding_lengths\n\n @overrides\n def sequence_length(self) -> int:\n return len(self.tokens)\n\n @overrides\n def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:\n tensors = {}\n num_tokens = padding_lengths.get('num_tokens')\n for indexer_name, indexer in self._token_indexers.items():\n if num_tokens is None:\n # The indexers return different lengths.\n # Get the desired_num_tokens for this indexer.\n desired_num_tokens = {\n indexed_tokens_key: padding_lengths[indexed_tokens_key]\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]\n }\n else:\n desired_num_tokens = {indexer_name: num_tokens}\n\n indices_to_pad = {indexed_tokens_key: self._indexed_tokens[indexed_tokens_key]\n for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]}\n padded_array = indexer.pad_token_sequence(indices_to_pad,\n desired_num_tokens, padding_lengths)\n # We use the key of the indexer to recognise what the tensor corresponds to within the\n # field (i.e. 
the result of word indexing, or the result of character indexing, for\n # example).\n # TODO(mattg): we might someday have a TokenIndexer that needs to use something other\n # than a LongTensor here, and it's not clear how to signal that. Maybe we'll need to\n # add a class method to TokenIndexer to tell us the type? But we can worry about that\n # when there's a compelling use case for it.\n try:\n indexer_tensors = {key: torch.LongTensor(array) for key, array in padded_array.items()}\n except Exception as exc:\n print(\"\\n\\n\",exc,\"\\n\")\n ipy.embed()\n raise(exc)\n tensors.update(indexer_tensors)\n return tensors\n\n @overrides\n def empty_field(self):\n # pylint: disable=protected-access\n text_field = TextField([], self._token_indexers)\n text_field._indexed_tokens = {}\n text_field._indexer_name_to_indexed_token = {}\n for indexer_name, indexer in self._token_indexers.items():\n array_keys = indexer.get_keys(indexer_name)\n for key in array_keys:\n text_field._indexed_tokens[key] = []\n text_field._indexer_name_to_indexed_token[indexer_name] = array_keys\n return text_field\n\n @overrides\n def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n # pylint: disable=no-self-use\n # This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used\n # to index this field.\n return util.batch_tensor_dicts(tensor_list)\n\n def __str__(self) -> str:\n indexers = {name: indexer.__class__.__name__ for name, indexer in self._token_indexers.items()}\n\n # Double tab to indent under the header.\n formatted_text = \"\".join([\"\\t\\t\" + text + \"\\n\"\n for text in textwrap.wrap(repr(self.tokens), 100)])\n return f\"TextField of length {self.sequence_length()} with \" \\\n f\"text: \\n {formatted_text} \\t\\tand TokenIndexers : {indexers}\"\n",
"import torch\n\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nimport numpy\nimport pytest\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm\n\nimport IPython as ipy\n\[email protected](not torch.cuda.is_available(), reason=\"No CUDA device registered.\")\nclass TestCustomHighwayLSTM(AllenNlpTestCase):\n\n def test_small_model(self):\n args = self.get_models_and_inputs(5, 3, 11, 2, 5, 0.0)\n self.forward_and_backward_outputs_match(*args)\n\n def test_large_model(self):\n ipy.embed()\n\n args = self.get_models_and_inputs(83, 103, 311, 8, 101, 0.0)\n self.forward_and_backward_outputs_match(*args)\n\n def test_validation_forward_pass_is_deterministic_in_model_with_dropout(self):\n _, model, _, model_input, lengths = self.get_models_and_inputs(5, 3, 11, 2, 5, dropout_prob=0.5)\n model.eval()\n model_input = pack_padded_sequence(model_input, lengths, batch_first=True)\n output, _ = model(model_input)\n output, _ = pad_packed_sequence(output, batch_first=True)\n\n for i in range(3):\n output_new, _ = model(model_input)\n output_new, _ = pad_packed_sequence(output_new, batch_first=True)\n numpy.testing.assert_array_almost_equal(output.detach().cpu().numpy(), output_new.detach().cpu().numpy())\n output = output_new\n\n @staticmethod\n def forward_and_backward_outputs_match(baseline_model, kernel_model,\n baseline_input, kernel_input, lengths):\n packed_baseline_input = pack_padded_sequence(baseline_input, lengths, batch_first=True)\n baseline_output, _ = baseline_model(packed_baseline_input)\n baseline_output, _ = pad_packed_sequence(baseline_output, batch_first=True)\n\n packed_kernel_input = pack_padded_sequence(kernel_input, lengths, batch_first=True)\n kernel_output, _ = kernel_model(packed_kernel_input)\n kernel_output, _ = pad_packed_sequence(kernel_output, batch_first=True)\n\n numpy.testing.assert_array_almost_equal(baseline_output.detach().cpu().numpy(),\n kernel_output.detach().cpu().numpy())\n\n # Backprop some random error.\n random_error = torch.randn(baseline_output.size()).cuda()\n baseline_model.zero_grad()\n baseline_output.backward(random_error)\n\n kernel_model.zero_grad()\n kernel_output.backward(random_error)\n\n numpy.testing.assert_array_almost_equal(baseline_input.grad.detach().cpu().numpy(),\n kernel_input.grad.detach().cpu().numpy())\n weight_index = 0\n bias_index = 0\n for layer in range(baseline_model.num_layers):\n input_grad = getattr(baseline_model, 'layer_%d' % layer).input_linearity.weight.grad\n state_grad = getattr(baseline_model, 'layer_%d' % layer).state_linearity.weight.grad\n bias_grad = getattr(baseline_model, 'layer_%d' % layer).state_linearity.bias.grad\n\n kernel_input_grad = kernel_model.weight.grad[weight_index: weight_index+input_grad.nelement()]\\\n .view(input_grad.size(1), input_grad.size(0)).t()\n weight_index += input_grad.nelement()\n\n kernel_state_grad = kernel_model.weight.grad[weight_index: weight_index + state_grad.nelement()]\\\n .view(state_grad.size(1), state_grad.size(0)).t()\n weight_index += state_grad.nelement()\n\n kernel_bias_grad = kernel_model.bias.grad[bias_index:bias_index+bias_grad.nelement()]\n bias_index += bias_grad.nelement()\n\n numpy.testing.assert_array_almost_equal(kernel_input_grad.detach().cpu().numpy(),\n input_grad.detach().cpu().numpy(), decimal=4)\n numpy.testing.assert_array_almost_equal(kernel_state_grad.detach().cpu().numpy(),\n state_grad.detach().cpu().numpy(), decimal=4)\n 
numpy.testing.assert_array_almost_equal(kernel_bias_grad.detach().cpu().numpy(),\n bias_grad.detach().cpu().numpy(), decimal=4)\n\n @staticmethod\n def get_models_and_inputs(batch_size, input_size, output_size, num_layers, timesteps, dropout_prob):\n\n # Import is here because the layer requires a GPU.\n from allennlp.modules.alternating_highway_lstm import AlternatingHighwayLSTM\n\n baseline = StackedAlternatingLstm(input_size, output_size, num_layers,\n dropout_prob, use_input_projection_bias=False).cuda()\n kernel_version = AlternatingHighwayLSTM(input_size, output_size, num_layers, dropout_prob).cuda()\n\n # Copy weights from non-cuda version into cuda version,\n # so we are starting from exactly the same place.\n weight_index = 0\n bias_index = 0\n for layer_index in range(num_layers):\n\n layer = getattr(baseline, 'layer_%d' % layer_index)\n input_weight = layer.input_linearity.weight\n state_weight = layer.state_linearity.weight\n bias = layer.state_linearity.bias\n\n kernel_version.weight.data[weight_index: weight_index + input_weight.nelement()]\\\n .view_as(input_weight.t()).copy_(input_weight.data.t())\n weight_index += input_weight.nelement()\n\n kernel_version.weight.data[weight_index: weight_index + state_weight.nelement()]\\\n .view_as(state_weight.t()).copy_(state_weight.data.t())\n weight_index += state_weight.nelement()\n\n kernel_version.bias.data[bias_index:bias_index + bias.nelement()].copy_(bias.data)\n bias_index += bias.nelement()\n\n baseline_input = torch.randn(batch_size, timesteps, input_size, requires_grad=True).cuda()\n # Clone variable so different models are\n # completely separate in the graph.\n kernel_version_input = baseline_input.clone()\n\n lengths = [timesteps - int((i / 2)) for i in range(batch_size)]\n lengths = lengths[:batch_size]\n\n return baseline, kernel_version, baseline_input, kernel_version_input, lengths\n"
] | [
[
"torch.LongTensor"
],
[
"torch.randn",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.cuda.is_available",
"torch.nn.utils.rnn.pack_padded_sequence"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stharrold/doc | [
"a96647f0bd090069112be9f097db0ec5cc5a052b"
] | [
"doc/utils.py"
] | [
"#!/usr/bin/env python\n\"\"\"Utilities for documentation.\n\nSee Also\n--------\nRELATED : {}\n\nNotes\n-----\nDocstring formats adapted from [1]_.\nTODO : To-do items are flagged with 'TODO:'.\nSee Also : Sets of related objects, categorized by `CALLS`, `CALLED_BY`, `RELATED`.\n\nReferences\n----------\n.. [1] https://github.com/numpy/numpy/blob/master/doc/example.py\n\n\"\"\"\n# TODO: use http://sphinx-doc.org/\n\n\n# Import standard packages.\nfrom __future__ import absolute_import, division, print_function\nimport ast\nimport warnings\n# Import installed packages.\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef parse_see_also(line):\n \"\"\"Parse field from 'See Also' section of docstring.\n \n Parameters\n ----------\n line : string\n Example 1: ' CALLS : {}'\n Example 2: ' CALLED_BY : {func1}'\n Example 3: ' RELATED : {func1, func2}'\n \n Returns\n -------\n field : set\n {''} returned as empty set instead of {} to allow iteration through the set\n Example 1: {''}\n Example 2: {'func1'}\n Example 3: {'func1', 'func2'}\n \n See Also\n --------\n CALLS : {}\n CALLED_BY : {parse_docstring}\n RELATED : {}\n \n Notes\n -----\n Docstring format adapted from [1]_.\n \n References\n ----------\n .. [1] https://github.com/numpy/numpy/blob/master/doc/example.py\n \n \"\"\"\n field = line.split(':')[1].strip()\n field = [elt.strip().replace('`', '').replace('\\'', '').replace('\"', '') for elt in field.split(',')]\n field = [elt.replace('{', '').replace('}', '') for elt in field]\n return set(field)\n\n\ndef parse_docstring(docstring):\n \"\"\"Parse relationships from docstring.\n \n Returns a generator that iterates through (field, value) tuples.\n Docstring must have 'See Also' section with the fields 'CALLS', 'CALLED_BY', 'RELATED'.\n\n Parameters\n ----------\n docstring : string\n Line separators are '\\n'.\n\n Returns\n -------\n field : string\n Examples: 'CALLS', 'CALLED_BY', 'RELATED'\n value : set\n Examples: {''}, {'func1', 'func2'} \n \n See Also\n --------\n CALLS : {parse_see_also}\n CALLED_BY : {}\n RELATED : {}\n \n Notes\n -----\n Docstring format adapted from [1]_.\n \n References\n ----------\n .. [1] https://github.com/numpy/numpy/blob/master/doc/example.py\n\n \"\"\"\n lines = docstring.split('\\n')\n catch_calls = None\n catch_called_by = None\n catch_related = None\n for line in lines:\n if 'See Also' in line:\n catch_calls = True\n catch_called_by = True\n catch_related = True\n continue\n elif catch_calls and 'CALLS' in line:\n catch_calls = False\n yield ('CALLS', parse_see_also(line))\n continue\n elif catch_called_by and 'CALLED_BY' in line:\n catch_called_by = False\n yield ('CALLED_BY', parse_see_also(line))\n continue\n elif catch_related and 'RELATED' in line:\n catch_related = False\n yield ('RELATED', parse_see_also(line))\n continue\n else:\n continue\n\n\ndef make_docs_dict(fpath):\n \"\"\"Parse file to make dict of docstrings.\n \n Parameters\n ----------\n fpath : string\n Path to file.\n Example: '/path/to/module.py'\n \n Returns\n -------\n docs_dict : dict\n ``dict`` of parsed docstring.\n Includes attribute names from ``ast`` package:\n lineno : line number\n col_offset : column offset number\n \n See Also\n --------\n CALLS : {parse_docstring}\n CALLED_BY : {}\n RELATED : {}\n \n Notes\n -----\n Nested structure is not preserved.\n \n References\n ----------\n .. 
[1] http://greentreesnakes.readthedocs.org/en/latest/manipulating.html\n \n \"\"\"\n # TODO: check if CALLS, CALLED_BY, RELATED are existing nodes.\n with open(fpath, 'rb') as fobj:\n tree = ast.parse(''.join(fobj))\n docs_dict = {'docstring': ast.get_docstring(tree)}\n for (field, value) in parse_docstring(docs_dict['docstring']):\n docs_dict[field] = value\n for node in ast.walk(tree):\n if isinstance(node, ast.FunctionDef):\n docs_dict[node.name] = {}\n docs_dict[node.name]['lineno'] = int(node.lineno)\n docs_dict[node.name]['col_offset'] = int(node.col_offset)\n docs_dict[node.name]['docstring'] = ast.get_docstring(node)\n for (field, value) in parse_docstring(docs_dict[node.name]['docstring']):\n docs_dict[node.name][field] = value\n return docs_dict\n\n\ndef pretty_print_dict(dobj, indent=0):\n \"\"\"Recursively print dict with formatting.\n \n Parameters\n ----------\n dobj : dict\n indent : {0}, int, optional\n Top-level indent.\n \n Returns\n -------\n None\n \n See Also\n --------\n CALLS : {}\n CALLED_BY : {}\n RELATED : {make_docs_dict}\n \n \"\"\"\n for key in sorted(dobj):\n if key == 'docstring':\n print(' '*indent+\"{key}: [omitted]\".format(key=key))\n else:\n if isinstance(dobj[key], dict):\n print(' '*indent+\"{key}:\".format(key=key))\n pretty_print_dict(dobj[key], indent=indent+2)\n else:\n print(' '*indent+\"{key}: {val}\".format(key=key, val=dobj[key]))\n return None\n\n\ndef generate_edges(dobj, parent=''):\n \"\"\"Recursively yield edges of multi-direction graph from ``dict``.\n \n Parameters\n ----------\n dobj : dict\n Output from `make_docs_dict`.\n parent : {''}, string, optional\n Label for initial parent node.\n Use '' as place holder for null node so that node is hashable.\n \n Returns\n -------\n node1, node2 : hashable\n Hashable Python objects. 
'' used in place of ``None`` for null nodes.\n attr_dict : dict\n Example: {'relationship': 'CONTAINS'}\n \n See Also\n --------\n CALLS : {generate_edges}\n CALLED_BY : {make_graph}\n RELATED : {parse_see_also, make_docs_dict, generate_positions}\n \n \"\"\"\n for key in sorted(dobj):\n if key == 'docstring':\n continue\n elif key in ['CALLS', 'CALLED_BY', 'RELATED']:\n node1 = parent\n node2s = dobj[key]\n for node2 in node2s:\n yield (node1, node2, {'relationship': key})\n continue\n elif isinstance(dobj[key], dict):\n node1 = parent\n node2 = key\n yield (node1, node2, {'relationship': 'CONTAINS'})\n for edge in generate_edges(dobj=dobj[key], parent=key):\n yield edge\n continue\n else:\n continue\n \n\ndef make_graph(dobj, parent=''):\n \"\"\"Make graph from ``dict``.\n \n Parameters\n ----------\n dobj : dict\n Output from `make_docs_dict`.\n parent : {''}, string, optional\n Label for initial parent node.\n Use '' in place of ``None`` for empty nodes so that nodes are hashable.\n \n Returns\n -------\n graph : networkx.MultiDiGraph\n \n See Also\n --------\n CALLS : {generate_edges}\n CALLED_BY : {}\n RELATED : {make_docs_dict, plot_graph}\n\n \"\"\"\n # TODO: make node attributes for lineno and col_offset\n graph = nx.MultiDiGraph()\n for (node1, node2, attr_dict) in generate_edges(dobj, parent=parent):\n graph.add_edge(node1, node2, attr_dict=attr_dict)\n # Remove empty node references.\n graph.remove_node('')\n return graph\n\n\ndef generate_positions(dobj, parent=''):\n \"\"\"Recursively yield line and column number of nodes from ``dict``.\n \n Parameters\n ----------\n dobj : dict\n Output from `make_docs_dict`.\n parent : {''}, string, optional\n Label for initial parent node.\n Use '' as place holder for null node so that node is hashable.\n \n Returns\n -------\n node : hashable\n Hashable Python objects. 
'' used in place of ``None`` for null nodes.\n attr : string\n Attribute names from ``ast`` package:\n lineno : line number\n col_offset : column offset number\n value : int\n Value of attribute: line number, column offset number.\n \n See Also\n --------\n CALLS : {generate_positions}\n CALLED_BY : {make_positions_dict}\n RELATED : {make_docs_dict, generate_edges}\n \n \"\"\"\n # TODO: make node attributes for lineno and col_offset\n for key in sorted(dobj):\n if key == 'lineno':\n node = parent\n yield (node, 'lineno', dobj[key])\n continue\n elif key == 'col_offset':\n node = parent\n yield (node, 'col_offset', dobj[key])\n continue\n elif isinstance(dobj[key], dict):\n for position in generate_positions(dobj=dobj[key], parent=key):\n yield position\n continue\n else:\n continue\n\n\ndef make_positions_dict(dobj, graph, parent=''):\n \"\"\"Make positions from ``dict``.\n \n Parameters\n ----------\n dobj : dict\n Output from `make_docs_dict`.\n graph : networkx.MultiDiGraph\n Output from `make_graph`\n parent : {''}, string, optional\n Label for initial parent node.\n Use '' in place of ``None`` for empty nodes so that nodes are hashable.\n \n Returns\n -------\n positions : dict\n ``dict`` of ``list`` for use by ``networkx.draw``.\n Format:\n {node: [lineno, col_offset], ...}\n \n See Also\n --------\n CALLS : {generate_positions}\n CALLED_BY : {}\n RELATED : {make_docs_dict, plot_graph, make_graph}\n\n \"\"\"\n # TODO: make node attributes for lineno and col_offset\n # TODO: space out nodes\n positions_df = pd.DataFrame(columns=['lineno', 'col_offset'])\n positions_df.index.names = ['node']\n for (node, attr, value) in generate_positions(dobj=dobj, parent=parent):\n if attr == 'lineno':\n positions_df.loc[node, 'lineno'] = int(value)\n continue\n elif attr == 'col_offset':\n positions_df.loc[node, 'col_offset'] = int(value)\n continue\n else:\n continue\n lineno_max = int(positions_df['lineno'].max(axis=0))\n nodes_only_in_graph = set(graph.nodes()) - set(positions_df.index.values)\n for node in nodes_only_in_graph:\n lineno_max += 1\n positions_df.loc[node, 'lineno'] = lineno_max\n positions_df.loc[node, 'col_offset'] = 0\n # Convert line numbers to relative positions. (0, 0) of plot is in lower left.\n # Convert dtype to use method='first' for rank.\n positions_df = positions_df.astype(float)\n positions_df.sort(columns=['lineno', 'col_offset'], axis=0, inplace=True)\n positions_df[['rankpct_lineno', 'rankpct_col_offset']] = \\\n positions_df[['lineno', 'col_offset']].rank(axis=0, method='first', ascending=False, pct=True)\n positions_dict = positions_df.stack().unstack(['node']).to_dict()\n # Convert from dict of dict to dict of list. numpy order: [y, x]\n positions_dict = {node:[positions_dict[node]['rankpct_col_offset'],\n positions_dict[node]['rankpct_lineno']] for node in sorted(positions_dict)}\n return positions_dict\n\n\ndef plot_graph(graph, fixed=None, positions=None, show_plot=True, fpath=None):\n \"\"\"Plot graph.\n \n Parameters\n ----------\n graph : networkx.MultiDiGraph\n Output from `make_graph`\n fixed : {None}, list, optional\n Node around which to fix graph. Overrides `positions`.\n Example: fixed=['mod1']\n positions : {None}, dict, optional\n ``dict`` of ``list`` output from `make_positions_dict`.\n Requires `fixed` is ``None``, otherwise overridden. 
\n show_plot : {True, False}, bool, optional\n Flag to display plot in window.\n fpath : {None}, string, optional\n Path for plotting graph.\n \n Returns\n -------\n None\n \n See Also\n --------\n CALLS : {}\n CALLED_BY : {}\n RELATED : {make_positions_dict, make_graph}\n\n \"\"\"\n # TODO: Space out points. Scale to larger image?\n # TODO: make relationships different colors\n # Check input and define positions.\n if fixed is None:\n if positions is None:\n pos = nx.spring_layout(graph, fixed=fixed)\n else:\n pos = positions\n else:\n if positions is not None:\n warnings.warn(\n (\"\\n\" +\n \"`fixed` overrides `positions`:\\n\" +\n \"fixed = {fixed}\").format(\n fixed=fixed))\n pos = nx.spring_layout(graph, fixed=fixed)\n # Draw graph and save.\n nx.draw(graph, pos=pos)\n nx.draw_networkx_labels(graph, pos=pos)\n if fpath is not None:\n plt.savefig(fpath, bbox_inches='tight')\n plt.show()\n return None\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
captain-pool/optuna | [
"2ae8c17afea54362460320870304c763e91c0596",
"2ae8c17afea54362460320870304c763e91c0596",
"2ae8c17afea54362460320870304c763e91c0596",
"2ae8c17afea54362460320870304c763e91c0596"
] | [
"tests/integration_tests/lightgbm_tuner_tests/test_optimize.py",
"optuna/samplers/nsgaii/_sampler.py",
"optuna/samplers/_qmc.py",
"tests/visualization_tests/matplotlib_tests/test_pareto_front.py"
] | [
"import contextlib\nfrom tempfile import TemporaryDirectory\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\nfrom typing import Union\nfrom unittest import mock\nimport warnings\n\nfrom lightgbm import log_evaluation\nimport numpy as np\nimport pytest\nimport sklearn.datasets\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\n\nimport optuna\nfrom optuna.integration._lightgbm_tuner.optimize import _BaseTuner\nfrom optuna.integration._lightgbm_tuner.optimize import _OptunaObjective\nfrom optuna.integration._lightgbm_tuner.optimize import _OptunaObjectiveCV\nfrom optuna.integration._lightgbm_tuner.optimize import LightGBMTuner\nfrom optuna.integration._lightgbm_tuner.optimize import LightGBMTunerCV\nimport optuna.integration.lightgbm as lgb\nfrom optuna.study import Study\n\n\[email protected]\ndef turnoff_train(metric: str = \"binary_logloss\") -> Generator[None, None, None]:\n\n unexpected_value = 0.5\n dummy_num_iterations = 1234\n\n class DummyBooster(object):\n def __init__(self) -> None:\n\n self.best_score = {\n \"valid_0\": {metric: unexpected_value},\n }\n\n def current_iteration(self) -> int:\n\n return dummy_num_iterations\n\n dummy_booster = DummyBooster()\n\n with mock.patch(\"lightgbm.train\", return_value=dummy_booster):\n yield\n\n\[email protected]\ndef turnoff_cv(metric: str = \"binary_logloss\") -> Generator[None, None, None]:\n\n unexpected_value = 0.5\n dummy_results = {\"{}-mean\".format(metric): [unexpected_value]}\n\n with mock.patch(\"lightgbm.cv\", return_value=dummy_results):\n yield\n\n\nclass TestOptunaObjective(object):\n def test_init_(self) -> None:\n\n target_param_names = [\"learning_rate\"] # Invalid parameter name.\n\n with pytest.raises(NotImplementedError) as execinfo:\n _OptunaObjective(target_param_names, {}, None, {}, 0, \"tune_learning_rate\", None)\n\n assert execinfo.type is NotImplementedError\n\n def test_call(self) -> None:\n\n target_param_names = [\"lambda_l1\"]\n lgbm_params: Dict[str, Any] = {}\n train_set = lgb.Dataset(None)\n val_set = lgb.Dataset(None)\n\n lgbm_kwargs = {\"valid_sets\": val_set}\n best_score = -np.inf\n\n with turnoff_train():\n objective = _OptunaObjective(\n target_param_names,\n lgbm_params,\n train_set,\n lgbm_kwargs,\n best_score,\n \"tune_lambda_l1\",\n None,\n )\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=10)\n\n assert study.best_value == 0.5\n\n\nclass TestOptunaObjectiveCV(object):\n def test_call(self) -> None:\n target_param_names = [\"lambda_l1\"]\n lgbm_params: Dict[str, Any] = {}\n train_set = lgb.Dataset(None)\n lgbm_kwargs: Dict[str, Any] = {}\n best_score = -np.inf\n\n with turnoff_cv():\n objective = _OptunaObjectiveCV(\n target_param_names,\n lgbm_params,\n train_set,\n lgbm_kwargs,\n best_score,\n \"tune_lambda_l1\",\n None,\n )\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=10)\n\n assert study.best_value == 0.5\n\n\nclass TestBaseTuner(object):\n def test_get_booster_best_score(self) -> None:\n\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self) -> None:\n\n self.best_score = {\"valid_0\": {\"binary_logloss\": expected_value}}\n\n booster = DummyBooster()\n dummy_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(lgbm_kwargs=dict(valid_sets=dummy_dataset))\n val_score = tuner._get_booster_best_score(booster)\n assert 
val_score == expected_value\n\n def test_higher_is_better(self) -> None:\n\n for metric in [\n \"auc\",\n \"auc_mu\",\n \"ndcg\",\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n \"map\",\n \"mean_average_precision\",\n \"average_precision\",\n ]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert tuner.higher_is_better()\n\n for metric in [\n \"mae\",\n \"rmse\",\n \"quantile\",\n \"mape\",\n \"binary_logloss\",\n \"multi_logloss\",\n \"cross_entropy\",\n ]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert not tuner.higher_is_better()\n\n def test_get_booster_best_score__using_valid_names_as_str(self) -> None:\n\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self) -> None:\n\n self.best_score = {\"dev\": {\"binary_logloss\": expected_value}}\n\n booster = DummyBooster()\n dummy_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(lgbm_kwargs={\"valid_names\": \"dev\", \"valid_sets\": dummy_dataset})\n val_score = tuner._get_booster_best_score(booster)\n assert val_score == expected_value\n\n def test_get_booster_best_score__using_valid_names_as_list(self) -> None:\n\n unexpected_value = 0.5\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self) -> None:\n\n self.best_score = {\n \"train\": {\"binary_logloss\": unexpected_value},\n \"val\": {\"binary_logloss\": expected_value},\n }\n\n booster = DummyBooster()\n dummy_train_dataset = lgb.Dataset(None)\n dummy_val_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(\n lgbm_kwargs={\n \"valid_names\": [\"train\", \"val\"],\n \"valid_sets\": [dummy_train_dataset, dummy_val_dataset],\n }\n )\n val_score = tuner._get_booster_best_score(booster)\n assert val_score == expected_value\n\n def test_compare_validation_metrics(self) -> None:\n\n for metric in [\n \"auc\",\n \"ndcg\",\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n \"map\",\n \"mean_average_precision\",\n ]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert tuner.compare_validation_metrics(0.5, 0.1)\n assert not tuner.compare_validation_metrics(0.5, 0.5)\n assert not tuner.compare_validation_metrics(0.1, 0.5)\n\n for metric in [\"rmsle\", \"rmse\", \"binary_logloss\"]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert not tuner.compare_validation_metrics(0.5, 0.1)\n assert not tuner.compare_validation_metrics(0.5, 0.5)\n assert tuner.compare_validation_metrics(0.1, 0.5)\n\n @pytest.mark.parametrize(\n \"metric, eval_at_param, expected\",\n [\n (\"auc\", {\"eval_at\": 5}, \"auc\"),\n (\"accuracy\", {\"eval_at\": 5}, \"accuracy\"),\n (\"rmsle\", {\"eval_at\": 5}, \"rmsle\"),\n (\"rmse\", {\"eval_at\": 5}, \"rmse\"),\n (\"binary_logloss\", {\"eval_at\": 5}, \"binary_logloss\"),\n (\"ndcg\", {\"eval_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"ndcg_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"ndcg_eval_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"eval_at\": [20]}, \"ndcg@20\"),\n (\"ndcg\", {\"eval_at\": [10, 20]}, \"ndcg@10\"),\n (\"ndcg\", {}, \"ndcg@1\"),\n (\"map\", {\"eval_at\": 5}, \"map@5\"),\n (\"map\", {\"eval_at\": [20]}, \"map@20\"),\n (\"map\", {\"eval_at\": [10, 20]}, \"map@10\"),\n (\"map\", {}, \"map@1\"),\n ],\n )\n def test_metric_with_eval_at(\n self, metric: str, eval_at_param: Dict[str, Union[int, List[int]]], expected: str\n ) -> None:\n\n params: Dict[str, Union[str, int, List[int]]] = {\"metric\": metric}\n params.update(eval_at_param)\n tuner = 
_BaseTuner(lgbm_params=params)\n assert tuner._metric_with_eval_at(metric) == expected\n\n def test_metric_with_eval_at_error(self) -> None:\n\n tuner = _BaseTuner(lgbm_params={\"metric\": \"ndcg\", \"eval_at\": \"1\"})\n with pytest.raises(ValueError):\n tuner._metric_with_eval_at(\"ndcg\")\n\n\nclass TestLightGBMTuner(object):\n def _get_tuner_object(\n self,\n params: Dict[str, Any] = {},\n train_set: Optional[lgb.Dataset] = None,\n kwargs_options: Dict[str, Any] = {},\n study: Optional[Study] = None,\n ) -> lgb.LightGBMTuner:\n\n # Required keyword arguments.\n dummy_dataset = lgb.Dataset(None)\n\n kwargs = dict(\n num_boost_round=5, early_stopping_rounds=2, valid_sets=dummy_dataset, study=study\n )\n kwargs.update(kwargs_options)\n\n runner = lgb.LightGBMTuner(params, train_set, **kwargs)\n return runner\n\n def test_deprecated_args(self) -> None:\n dummy_dataset = lgb.Dataset(None)\n\n with pytest.warns(FutureWarning):\n LightGBMTuner({}, dummy_dataset, valid_sets=[dummy_dataset], verbosity=1)\n\n def test_no_eval_set_args(self) -> None:\n\n params: Dict[str, Any] = {}\n train_set = lgb.Dataset(None)\n with pytest.raises(ValueError) as excinfo:\n lgb.LightGBMTuner(params, train_set, num_boost_round=5, early_stopping_rounds=2)\n\n assert excinfo.type == ValueError\n assert str(excinfo.value) == \"`valid_sets` is required.\"\n\n @pytest.mark.parametrize(\n \"metric, study_direction\",\n [\n (\"auc\", \"minimize\"),\n (\"mse\", \"maximize\"),\n (None, \"maximize\"), # The default metric is binary_logloss.\n ],\n )\n def test_inconsistent_study_direction(self, metric: str, study_direction: str) -> None:\n\n params: Dict[str, Any] = {}\n if metric is not None:\n params[\"metric\"] = metric\n train_set = lgb.Dataset(None)\n valid_set = lgb.Dataset(None)\n study = optuna.create_study(direction=study_direction)\n with pytest.raises(ValueError) as excinfo:\n lgb.LightGBMTuner(\n params,\n train_set,\n valid_sets=[train_set, valid_set],\n num_boost_round=5,\n early_stopping_rounds=2,\n study=study,\n )\n\n assert excinfo.type == ValueError\n assert str(excinfo.value).startswith(\"Study direction is inconsistent with the metric\")\n\n def test_with_minimum_required_args(self) -> None:\n\n runner = self._get_tuner_object()\n assert \"num_boost_round\" in runner.lgbm_kwargs\n assert \"num_boost_round\" not in runner.auto_options\n assert runner.lgbm_kwargs[\"num_boost_round\"] == 5\n\n def test__parse_args_wrapper_args(self) -> None:\n\n params: Dict[str, Any] = {}\n train_set = lgb.Dataset(None)\n val_set = lgb.Dataset(None)\n kwargs = dict(\n num_boost_round=12,\n early_stopping_rounds=10,\n valid_sets=val_set,\n time_budget=600,\n sample_size=1000,\n )\n runner = lgb.LightGBMTuner(params, train_set, **kwargs)\n new_args = [\"time_budget\", \"time_budget\", \"sample_size\"]\n for new_arg in new_args:\n assert new_arg not in runner.lgbm_kwargs\n assert new_arg in runner.auto_options\n\n @pytest.mark.parametrize(\n \"metric, study_direction, expected\",\n [(\"auc\", \"maximize\", -np.inf), (\"l2\", \"minimize\", np.inf)],\n )\n def test_best_score(self, metric: str, study_direction: str, expected: float) -> None:\n with turnoff_train(metric=metric):\n study = optuna.create_study(direction=study_direction)\n runner = self._get_tuner_object(\n params=dict(lambda_l1=0.0, metric=metric), kwargs_options={}, study=study\n )\n assert runner.best_score == expected\n runner.tune_regularization_factors()\n assert runner.best_score == 0.5\n\n def test_best_params(self) -> None:\n unexpected_value = 20 # out 
of scope.\n\n with turnoff_train():\n study = optuna.create_study()\n runner = self._get_tuner_object(\n params=dict(lambda_l1=unexpected_value), kwargs_options={}, study=study\n )\n assert runner.best_params[\"lambda_l1\"] == unexpected_value\n runner.tune_regularization_factors()\n assert runner.best_params[\"lambda_l1\"] != unexpected_value\n\n def test_sample_train_set(self) -> None:\n\n sample_size = 3\n\n X_trn = np.random.uniform(10, size=50).reshape((10, 5))\n y_trn = np.random.randint(2, size=10)\n train_dataset = lgb.Dataset(X_trn, label=y_trn)\n runner = self._get_tuner_object(\n train_set=train_dataset, kwargs_options=dict(sample_size=sample_size)\n )\n runner.sample_train_set()\n\n # Workaround for mypy.\n if not TYPE_CHECKING:\n runner.train_subset.construct() # Cannot get label before construct `lgb.Dataset`.\n assert runner.train_subset.get_label().shape[0] == sample_size\n\n def test_time_budget(self) -> None:\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n kwargs_options=dict(time_budget=0),\n )\n assert len(runner.study.trials) == 0\n # No trials run because `time_budget` is set to zero.\n runner.tune_feature_fraction()\n assert runner.lgbm_params[\"feature_fraction\"] == unexpected_value\n assert len(runner.study.trials) == 0\n\n def test_tune_feature_fraction(self) -> None:\n\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 7\n\n def test_tune_num_leaves(self) -> None:\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(num_leaves=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_num_leaves()\n\n assert runner.lgbm_params[\"num_leaves\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_num_leaves_negative_max_depth(self) -> None:\n\n params: Dict[str, Any] = {\"metric\": \"binary_logloss\", \"max_depth\": -1, \"verbose\": -1}\n X_trn = np.random.uniform(10, size=(10, 5))\n y_trn = np.random.randint(2, size=10)\n train_dataset = lgb.Dataset(X_trn, label=y_trn)\n valid_dataset = lgb.Dataset(X_trn, label=y_trn)\n\n runner = lgb.LightGBMTuner(\n params,\n train_dataset,\n num_boost_round=3,\n early_stopping_rounds=2,\n valid_sets=valid_dataset,\n callbacks=[log_evaluation(-1)],\n )\n runner.tune_num_leaves()\n assert len(runner.study.trials) == 20\n\n def test_tune_bagging(self) -> None:\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(bagging_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_bagging()\n\n assert runner.lgbm_params[\"bagging_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 10\n\n def test_tune_feature_fraction_stage2(self) -> None:\n\n unexpected_value = 0.5\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(feature_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction_stage2()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 6\n\n def 
test_tune_regularization_factors(self) -> None:\n\n unexpected_value = 20 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(lambda_l1=unexpected_value) # set default as unexpected value.\n )\n assert len(runner.study.trials) == 0\n runner.tune_regularization_factors()\n\n assert runner.lgbm_params[\"lambda_l1\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_min_data_in_leaf(self) -> None:\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n min_child_samples=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_min_data_in_leaf()\n\n assert runner.lgbm_params[\"min_child_samples\"] != unexpected_value\n assert len(runner.study.trials) == 5\n\n def test_when_a_step_does_not_improve_best_score(self) -> None:\n\n params: Dict = {}\n valid_data = np.zeros((10, 10))\n valid_sets = lgb.Dataset(valid_data)\n\n tuner = LightGBMTuner(params, None, valid_sets=valid_sets)\n assert not tuner.higher_is_better()\n\n with mock.patch(\"lightgbm.train\"), mock.patch.object(\n _BaseTuner, \"_get_booster_best_score\", return_value=0.9\n ):\n tuner.tune_feature_fraction()\n\n assert \"feature_fraction\" in tuner.best_params\n assert tuner.best_score == 0.9\n\n # Assume that tuning `num_leaves` doesn't improve the `best_score`.\n with mock.patch(\"lightgbm.train\"), mock.patch.object(\n _BaseTuner, \"_get_booster_best_score\", return_value=1.1\n ):\n tuner.tune_num_leaves()\n\n def test_resume_run(self) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, callbacks=[log_evaluation(-1)]\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner.tune_regularization_factors()\n\n n_trials = len(study.trials)\n assert n_trials == len(study.trials)\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner2.tune_regularization_factors()\n assert n_trials == len(study.trials)\n\n @pytest.mark.parametrize(\n \"verbosity, level\",\n [\n (None, optuna.logging.INFO),\n (-2, optuna.logging.CRITICAL),\n (-1, optuna.logging.CRITICAL),\n (0, optuna.logging.WARNING),\n (1, optuna.logging.INFO),\n (2, optuna.logging.DEBUG),\n ],\n )\n def test_run_verbosity(self, verbosity: int, level: int) -> None:\n # We need to reconstruct our default handler to properly capture stderr.\n optuna.logging._reset_library_root_logger()\n optuna.logging.set_verbosity(optuna.logging.INFO)\n\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=FutureWarning)\n tuner = LightGBMTuner(\n params,\n dataset,\n valid_sets=dataset,\n study=study,\n verbosity=verbosity,\n callbacks=[log_evaluation(-1)],\n time_budget=1,\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner.run()\n\n assert optuna.logging.get_verbosity() == level\n assert tuner.lgbm_params[\"verbose\"] == -1\n\n @pytest.mark.parametrize(\"show_progress_bar, expected\", [(True, 6), (False, 0)])\n def test_run_show_progress_bar(self, show_progress_bar: bool, expected: int) -> None:\n params: Dict = {\"verbose\": -1}\n 
dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTuner(\n params,\n dataset,\n valid_sets=dataset,\n study=study,\n callbacks=[log_evaluation(-1)],\n time_budget=1,\n show_progress_bar=show_progress_bar,\n )\n\n with mock.patch.object(\n _BaseTuner, \"_get_booster_best_score\", return_value=1.0\n ), mock.patch(\"tqdm.tqdm\") as mock_tqdm:\n tuner.run()\n\n assert mock_tqdm.call_count == expected\n\n def test_get_best_booster(self) -> None:\n unexpected_value = 20 # out of scope.\n\n params: Dict = {\"verbose\": -1, \"lambda_l1\": unexpected_value}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, callbacks=[log_evaluation(-1)]\n )\n\n with pytest.raises(ValueError):\n tuner.get_best_booster()\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=0.0):\n tuner.tune_regularization_factors()\n\n best_booster = tuner.get_best_booster()\n assert best_booster.params[\"lambda_l1\"] != unexpected_value\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n\n # Resumed study does not have the best booster.\n with pytest.raises(ValueError):\n tuner2.get_best_booster()\n\n @pytest.mark.parametrize(\"dir_exists, expected\", [(False, True), (True, False)])\n def test_model_dir(self, dir_exists: bool, expected: bool) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n with mock.patch(\"optuna.integration._lightgbm_tuner.optimize.os.mkdir\") as m:\n with mock.patch(\"os.path.exists\", return_value=dir_exists):\n LightGBMTuner(params, dataset, valid_sets=dataset, model_dir=\"./booster\")\n assert m.called == expected\n\n def test_best_booster_with_model_dir(self) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n with TemporaryDirectory() as tmpdir:\n tuner = LightGBMTuner(\n params,\n dataset,\n valid_sets=dataset,\n study=study,\n model_dir=tmpdir,\n callbacks=[log_evaluation(-1)],\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=0.0):\n tuner.tune_regularization_factors()\n\n best_booster = tuner.get_best_booster()\n\n tuner2 = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, model_dir=tmpdir\n )\n best_booster2 = tuner2.get_best_booster()\n\n assert best_booster.params == best_booster2.params\n\n @pytest.mark.parametrize(\"direction, overall_best\", [(\"minimize\", 1), (\"maximize\", 2)])\n def test_create_stepwise_study(self, direction: str, overall_best: int) -> None:\n\n tuner = LightGBMTuner({}, None, valid_sets=lgb.Dataset(np.zeros((10, 10))))\n\n def objective(trial: optuna.trial.Trial, value: float) -> float:\n\n trial.set_system_attr(\n optuna.integration._lightgbm_tuner.optimize._STEP_NAME_KEY,\n \"step{:.0f}\".format(value),\n )\n return trial.suggest_float(\"x\", value, value)\n\n study = optuna.create_study(direction=direction)\n study_step1 = tuner._create_stepwise_study(study, \"step1\")\n\n with pytest.raises(ValueError):\n study_step1.best_trial\n\n study_step1.optimize(lambda t: objective(t, 1), n_trials=1)\n\n study_step2 = tuner._create_stepwise_study(study, \"step2\")\n\n # `study` has a trial, but `study_step2` has no trials.\n with pytest.raises(ValueError):\n study_step2.best_trial\n\n study_step2.optimize(lambda t: objective(t, 2), n_trials=2)\n\n assert len(study_step1.trials) == 1\n assert len(study_step2.trials) 
== 2\n assert len(study.trials) == 3\n\n assert study_step1.best_trial.value == 1\n assert study_step2.best_trial.value == 2\n assert study.best_trial.value == overall_best\n\n def test_optuna_callback(self) -> None:\n params: Dict[str, Any] = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n callback_mock = mock.MagicMock()\n\n study = optuna.create_study()\n tuner = LightGBMTuner(\n params,\n dataset,\n valid_sets=dataset,\n study=study,\n callbacks=[log_evaluation(-1)],\n optuna_callbacks=[callback_mock],\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner._tune_params([\"num_leaves\"], 10, optuna.samplers.TPESampler(), \"num_leaves\")\n\n assert callback_mock.call_count == 10\n\n def test_tune_best_score_reproducibility(self) -> None:\n california = sklearn.datasets.fetch_california_housing()\n X_trainval, X_test, y_trainval, y_test = train_test_split(\n california.data, california.target, random_state=0\n )\n\n train = lgb.Dataset(X_trainval, y_trainval)\n valid = lgb.Dataset(X_test, y_test)\n params = {\n \"objective\": \"regression\",\n \"metric\": \"rmse\",\n \"random_seed\": 0,\n \"deterministic\": True,\n \"force_col_wise\": True,\n \"verbosity\": -1,\n }\n\n tuner_first_try = lgb.LightGBMTuner(\n params,\n train,\n valid_sets=valid,\n early_stopping_rounds=3,\n optuna_seed=10,\n callbacks=[log_evaluation(-1)],\n )\n tuner_first_try.run()\n best_score_first_try = tuner_first_try.best_score\n\n tuner_second_try = lgb.LightGBMTuner(\n params,\n train,\n valid_sets=valid,\n early_stopping_rounds=3,\n optuna_seed=10,\n callbacks=[log_evaluation(-1)],\n )\n tuner_second_try.run()\n best_score_second_try = tuner_second_try.best_score\n\n assert best_score_second_try == best_score_first_try\n\n\nclass TestLightGBMTunerCV(object):\n def _get_tunercv_object(\n self,\n params: Dict[str, Any] = {},\n train_set: Optional[lgb.Dataset] = None,\n kwargs_options: Dict[str, Any] = {},\n study: Optional[optuna.study.Study] = None,\n ) -> LightGBMTunerCV:\n\n # Required keyword arguments.\n kwargs: Dict[str, Any] = dict(num_boost_round=5, early_stopping_rounds=2, study=study)\n kwargs.update(kwargs_options)\n\n runner = LightGBMTunerCV(params, train_set, **kwargs)\n return runner\n\n def test_deprecated_args(self) -> None:\n dummy_dataset = lgb.Dataset(None)\n\n with pytest.warns(FutureWarning):\n LightGBMTunerCV({}, dummy_dataset, verbosity=1)\n\n @pytest.mark.parametrize(\n \"metric, study_direction\",\n [\n (\"auc\", \"minimize\"),\n (\"mse\", \"maximize\"),\n (None, \"maximize\"), # The default metric is binary_logloss.\n ],\n )\n def test_inconsistent_study_direction(self, metric: str, study_direction: str) -> None:\n\n params: Dict[str, Any] = {}\n if metric is not None:\n params[\"metric\"] = metric\n train_set = lgb.Dataset(None)\n study = optuna.create_study(direction=study_direction)\n with pytest.raises(ValueError) as excinfo:\n LightGBMTunerCV(\n params, train_set, num_boost_round=5, early_stopping_rounds=2, study=study\n )\n\n assert excinfo.type == ValueError\n assert str(excinfo.value).startswith(\"Study direction is inconsistent with the metric\")\n\n def test_with_minimum_required_args(self) -> None:\n\n runner = self._get_tunercv_object()\n assert \"num_boost_round\" in runner.lgbm_kwargs\n assert \"num_boost_round\" not in runner.auto_options\n assert runner.lgbm_kwargs[\"num_boost_round\"] == 5\n\n def test_tune_feature_fraction(self) -> None:\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_cv():\n runner = 
self._get_tunercv_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 7\n\n def test_tune_num_leaves(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(num_leaves=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_num_leaves()\n\n assert runner.lgbm_params[\"num_leaves\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_bagging(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(bagging_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_bagging()\n\n assert runner.lgbm_params[\"bagging_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 10\n\n def test_tune_feature_fraction_stage2(self) -> None:\n unexpected_value = 0.5\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(feature_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction_stage2()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 6\n\n def test_tune_regularization_factors(self) -> None:\n unexpected_value = 20 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(\n params=dict(lambda_l1=unexpected_value) # set default as unexpected value.\n )\n assert len(runner.study.trials) == 0\n runner.tune_regularization_factors()\n\n assert runner.lgbm_params[\"lambda_l1\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_min_data_in_leaf(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(\n params=dict(\n min_child_samples=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_min_data_in_leaf()\n\n assert runner.lgbm_params[\"min_child_samples\"] != unexpected_value\n assert len(runner.study.trials) == 5\n\n def test_resume_run(self) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTunerCV(params, dataset, study=study)\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner.tune_regularization_factors()\n\n n_trials = len(study.trials)\n assert n_trials == len(study.trials)\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner2.tune_regularization_factors()\n assert n_trials == len(study.trials)\n\n @pytest.mark.parametrize(\n \"verbosity, level\",\n [\n (None, optuna.logging.INFO),\n (-2, optuna.logging.CRITICAL),\n (-1, optuna.logging.CRITICAL),\n (0, optuna.logging.WARNING),\n (1, optuna.logging.INFO),\n (2, optuna.logging.DEBUG),\n ],\n )\n def test_run_verbosity(self, verbosity: int, level: int) -> None:\n # We need to reconstruct our default handler to properly capture stderr.\n optuna.logging._reset_library_root_logger()\n optuna.logging.set_verbosity(optuna.logging.INFO)\n\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n with warnings.catch_warnings():\n 
warnings.simplefilter(\"ignore\", category=FutureWarning)\n tuner = LightGBMTunerCV(\n params, dataset, study=study, verbosity=verbosity, time_budget=1\n )\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner.run()\n\n assert optuna.logging.get_verbosity() == level\n assert tuner.lgbm_params[\"verbose\"] == -1\n\n @pytest.mark.parametrize(\"show_progress_bar, expected\", [(True, 6), (False, 0)])\n def test_run_show_progress_bar(self, show_progress_bar: bool, expected: int) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTunerCV(\n params, dataset, study=study, time_budget=1, show_progress_bar=show_progress_bar\n )\n\n with mock.patch.object(\n _OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]\n ), mock.patch(\"tqdm.tqdm\") as mock_tqdm:\n tuner.run()\n\n assert mock_tqdm.call_count == expected\n\n def test_optuna_callback(self) -> None:\n params: Dict[str, Any] = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n callback_mock = mock.MagicMock()\n\n study = optuna.create_study()\n tuner = LightGBMTunerCV(params, dataset, study=study, optuna_callbacks=[callback_mock])\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner._tune_params([\"num_leaves\"], 10, optuna.samplers.TPESampler(), \"num_leaves\")\n\n assert callback_mock.call_count == 10\n\n @pytest.mark.parametrize(\"dir_exists, expected\", [(False, True), (True, False)])\n def test_model_dir(self, dir_exists: bool, expected: bool) -> None:\n unexpected_value = 20 # out of scope.\n\n params: Dict = {\"verbose\": -1, \"lambda_l1\": unexpected_value}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n with mock.patch(\"os.mkdir\") as m:\n with mock.patch(\"os.path.exists\", return_value=dir_exists):\n LightGBMTunerCV(params, dataset, model_dir=\"./booster\")\n assert m.called == expected\n\n def test_get_best_booster(self) -> None:\n unexpected_value = 20 # out of scope.\n\n params: Dict = {\"verbose\": -1, \"lambda_l1\": unexpected_value}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n study = optuna.create_study()\n\n with TemporaryDirectory() as tmpdir:\n tuner = LightGBMTunerCV(\n params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True\n )\n\n with pytest.raises(ValueError):\n tuner.get_best_booster()\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner.tune_regularization_factors()\n\n best_boosters = tuner.get_best_booster().boosters\n for booster in best_boosters:\n assert booster.params[\"lambda_l1\"] != unexpected_value\n\n tuner2 = LightGBMTunerCV(\n params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True\n )\n best_boosters2 = tuner2.get_best_booster().boosters\n for booster, booster2 in zip(best_boosters, best_boosters2):\n assert booster.params == booster2.params\n\n def test_get_best_booster_with_error(self) -> None:\n params: Dict = {\"verbose\": -1}\n dataset = lgb.Dataset(np.zeros((10, 10)))\n study = optuna.create_study()\n\n tuner = LightGBMTunerCV(\n params, dataset, study=study, model_dir=None, return_cvbooster=True\n )\n # No trial is completed yet.\n with pytest.raises(ValueError):\n tuner.get_best_booster()\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner.tune_regularization_factors()\n\n tuner2 = LightGBMTunerCV(\n params, dataset, study=study, model_dir=None, return_cvbooster=True\n )\n # Resumed the 
study does not have the best booster.\n with pytest.raises(ValueError):\n tuner2.get_best_booster()\n\n with TemporaryDirectory() as tmpdir:\n tuner3 = LightGBMTunerCV(\n params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True\n )\n # The booster was not saved hence not found in the `model_dir`.\n with pytest.raises(ValueError):\n tuner3.get_best_booster()\n\n def test_tune_best_score_reproducibility(self) -> None:\n california = sklearn.datasets.fetch_california_housing()\n X_trainval, X_test, y_trainval, y_test = train_test_split(\n california.data, california.target, random_state=0\n )\n\n train = lgb.Dataset(X_trainval, y_trainval)\n params = {\n \"objective\": \"regression\",\n \"metric\": \"rmse\",\n \"random_seed\": 0,\n \"deterministic\": True,\n \"force_col_wise\": True,\n \"verbosity\": -1,\n }\n\n tuner_first_try = lgb.LightGBMTunerCV(\n params,\n train,\n early_stopping_rounds=3,\n folds=KFold(n_splits=3),\n optuna_seed=10,\n )\n tuner_first_try.run()\n best_score_first_try = tuner_first_try.best_score\n\n tuner_second_try = lgb.LightGBMTunerCV(\n params,\n train,\n early_stopping_rounds=3,\n folds=KFold(n_splits=3),\n optuna_seed=10,\n )\n tuner_second_try.run()\n best_score_second_try = tuner_second_try.best_score\n\n assert best_score_second_try == best_score_first_try\n",
"from collections import defaultdict\nimport hashlib\nimport itertools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nimport warnings\n\nimport numpy as np\n\nimport optuna\nfrom optuna._experimental import ExperimentalWarning\nfrom optuna.distributions import BaseDistribution\nfrom optuna.samplers._base import BaseSampler\nfrom optuna.samplers._random import RandomSampler\nfrom optuna.samplers._search_space import IntersectionSearchSpace\nfrom optuna.samplers.nsgaii._crossover import perform_crossover\nfrom optuna.samplers.nsgaii._crossovers._base import BaseCrossover\nfrom optuna.samplers.nsgaii._crossovers._uniform import UniformCrossover\nfrom optuna.study import Study\nfrom optuna.study import StudyDirection\nfrom optuna.study._multi_objective import _dominates\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n# Define key names of `Trial.system_attrs`.\n_CONSTRAINTS_KEY = \"nsga2:constraints\"\n_GENERATION_KEY = \"nsga2:generation\"\n_POPULATION_CACHE_KEY_PREFIX = \"nsga2:population\"\n\n\nclass NSGAIISampler(BaseSampler):\n \"\"\"Multi-objective sampler using the NSGA-II algorithm.\n\n NSGA-II stands for \"Nondominated Sorting Genetic Algorithm II\",\n which is a well known, fast and elitist multi-objective genetic algorithm.\n\n For further information about NSGA-II, please refer to the following paper:\n\n - `A fast and elitist multiobjective genetic algorithm: NSGA-II\n <https://ieeexplore.ieee.org/document/996017>`_\n\n Args:\n population_size:\n Number of individuals (trials) in a generation.\n\n mutation_prob:\n Probability of mutating each parameter when creating a new individual.\n If :obj:`None` is specified, the value ``1.0 / len(parent_trial.params)`` is used\n where ``parent_trial`` is the parent trial of the target individual.\n\n crossover:\n Crossover to be applied when creating child individuals.\n The available crossovers are listed here:\n https://optuna.readthedocs.io/en/stable/reference/samplers/nsgaii.html.\n\n :class:`~optuna.samplers.nsgaii.UniformCrossover` is always applied to parameters\n sampled from :class:`~optuna.distributions.CategoricalDistribution`, and by\n default for parameters sampled from other distributions unless this argument\n is specified.\n\n For more information on each of the crossover method, please refer to\n specific crossover documentation.\n\n crossover_prob:\n Probability that a crossover (parameters swapping between parents) will occur\n when creating a new individual.\n\n swapping_prob:\n Probability of swapping each parameter of the parents during crossover.\n\n seed:\n Seed for random number generator.\n\n constraints_func:\n An optional function that computes the objective constraints. It must take a\n :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must\n be a sequence of :obj:`float` s. A value strictly larger than 0 means that a\n constraints is violated. 
A value equal to or smaller than 0 is considered feasible.\n If ``constraints_func`` returns more than one value for a trial, that trial is\n considered feasible if and only if all values are equal to 0 or smaller.\n\n The ``constraints_func`` will be evaluated after each successful trial.\n The function won't be called when trials fail or they are pruned, but this behavior is\n subject to change in the future releases.\n\n The constraints are handled by the constrained domination. A trial x is said to\n constrained-dominate a trial y, if any of the following conditions is true:\n\n 1. Trial x is feasible and trial y is not.\n 2. Trial x and y are both infeasible, but trial x has a smaller overall violation.\n 3. Trial x and y are feasible and trial x dominates trial y.\n\n .. note::\n Added in v2.5.0 as an experimental feature. The interface may change in newer\n versions without prior notice. See\n https://github.com/optuna/optuna/releases/tag/v2.5.0.\n\n Raises:\n ValueError:\n If ``crossover`` is not instance of :class:`~optuna.samplers.nsgaii.BaseCrossover`.\n Or, if ``population_size <= n_parents``.\n The `n_parents` is determined by each crossover.\n For :class:`~optuna.samplers.nsgaii.UNDXCrossover` and\n :class:`~optuna.samplers.nsgaii.SPXCrossover`, ``n_parents=3``, and for the other\n algorithms, ``n_parents=2``.\n \"\"\"\n\n def __init__(\n self,\n *,\n population_size: int = 50,\n mutation_prob: Optional[float] = None,\n crossover: Optional[BaseCrossover] = None,\n crossover_prob: float = 0.9,\n swapping_prob: float = 0.5,\n seed: Optional[int] = None,\n constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,\n ) -> None:\n # TODO(ohta): Reconsider the default value of each parameter.\n\n if not isinstance(population_size, int):\n raise TypeError(\"`population_size` must be an integer value.\")\n\n if population_size < 2:\n raise ValueError(\"`population_size` must be greater than or equal to 2.\")\n\n if not (mutation_prob is None or 0.0 <= mutation_prob <= 1.0):\n raise ValueError(\n \"`mutation_prob` must be None or a float value within the range [0.0, 1.0].\"\n )\n\n if not (0.0 <= crossover_prob <= 1.0):\n raise ValueError(\"`crossover_prob` must be a float value within the range [0.0, 1.0].\")\n\n if not (0.0 <= swapping_prob <= 1.0):\n raise ValueError(\"`swapping_prob` must be a float value within the range [0.0, 1.0].\")\n\n if constraints_func is not None:\n warnings.warn(\n \"The constraints_func option is an experimental feature.\"\n \" The interface can change in the future.\",\n ExperimentalWarning,\n )\n\n if crossover is None:\n crossover = UniformCrossover(swapping_prob)\n\n if not isinstance(crossover, BaseCrossover):\n raise ValueError(\n f\"'{crossover}' is not a valid crossover.\"\n \" For valid crossovers see\"\n \" https://optuna.readthedocs.io/en/stable/reference/samplers.html.\"\n )\n if population_size < crossover.n_parents:\n raise ValueError(\n f\"Using {crossover},\"\n f\" the population size should be greater than or equal to {crossover.n_parents}.\"\n f\" The specified `population_size` is {population_size}.\"\n )\n\n self._population_size = population_size\n self._mutation_prob = mutation_prob\n self._crossover = crossover\n self._crossover_prob = crossover_prob\n self._swapping_prob = swapping_prob\n self._random_sampler = RandomSampler(seed=seed)\n self._rng = np.random.RandomState(seed)\n self._constraints_func = constraints_func\n self._search_space = IntersectionSearchSpace()\n\n def reseed_rng(self) -> None:\n 
self._random_sampler.reseed_rng()\n self._rng = np.random.RandomState()\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n search_space: Dict[str, BaseDistribution] = {}\n for name, distribution in self._search_space.calculate(study).items():\n if distribution.single():\n # The `untransform` method of `optuna._transform._SearchSpaceTransform`\n # does not assume a single value,\n # so single value objects are not sampled with the `sample_relative` method,\n # but with the `sample_independent` method.\n continue\n search_space[name] = distribution\n return search_space\n\n def sample_relative(\n self,\n study: Study,\n trial: FrozenTrial,\n search_space: Dict[str, BaseDistribution],\n ) -> Dict[str, Any]:\n parent_generation, parent_population = self._collect_parent_population(study)\n trial_id = trial._trial_id\n\n generation = parent_generation + 1\n study._storage.set_trial_system_attr(trial_id, _GENERATION_KEY, generation)\n\n dominates_func = _dominates if self._constraints_func is None else _constrained_dominates\n\n if parent_generation >= 0:\n # We choose a child based on the specified crossover method.\n if self._rng.rand() < self._crossover_prob:\n child_params = perform_crossover(\n self._crossover,\n study,\n parent_population,\n search_space,\n self._rng,\n self._swapping_prob,\n dominates_func,\n )\n else:\n parent_population_size = len(parent_population)\n parent_params = parent_population[self._rng.choice(parent_population_size)].params\n child_params = {name: parent_params[name] for name in search_space.keys()}\n\n n_params = len(child_params)\n if self._mutation_prob is None:\n mutation_prob = 1.0 / max(1.0, n_params)\n else:\n mutation_prob = self._mutation_prob\n\n params = {}\n for param_name in child_params.keys():\n if self._rng.rand() >= mutation_prob:\n params[param_name] = child_params[param_name]\n return params\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n # Following parameters are randomly sampled here.\n # 1. A parameter in the initial population/first generation.\n # 2. A parameter to mutate.\n # 3. 
A parameter excluded from the intersection search space.\n\n return self._random_sampler.sample_independent(\n study, trial, param_name, param_distribution\n )\n\n def _collect_parent_population(self, study: Study) -> Tuple[int, List[FrozenTrial]]:\n trials = study.get_trials(deepcopy=False)\n\n generation_to_runnings = defaultdict(list)\n generation_to_population = defaultdict(list)\n for trial in trials:\n if _GENERATION_KEY not in trial.system_attrs:\n continue\n\n generation = trial.system_attrs[_GENERATION_KEY]\n if trial.state != optuna.trial.TrialState.COMPLETE:\n if trial.state == optuna.trial.TrialState.RUNNING:\n generation_to_runnings[generation].append(trial)\n continue\n\n # Do not use trials whose states are not COMPLETE, or `constraint` will be unavailable.\n generation_to_population[generation].append(trial)\n\n hasher = hashlib.sha256()\n parent_population: List[FrozenTrial] = []\n parent_generation = -1\n while True:\n generation = parent_generation + 1\n population = generation_to_population[generation]\n\n # Under multi-worker settings, the population size might become larger than\n # `self._population_size`.\n if len(population) < self._population_size:\n break\n\n # [NOTE]\n # It's generally safe to assume that once the above condition is satisfied,\n # there are no additional individuals added to the generation (i.e., the members of\n # the generation have been fixed).\n # If the number of parallel workers is huge, this assumption can be broken, but\n # this is a very rare case and doesn't significantly impact optimization performance.\n # So we can ignore the case.\n\n # The cache key is calculated based on the key of the previous generation and\n # the remaining running trials in the current population.\n # If there are no running trials, the new cache key becomes exactly the same as\n # the previous one, and the cached content will be overwritten. 
This allows us to\n # skip redundant cache key calculations when this method is called for the subsequent\n # trials.\n for trial in generation_to_runnings[generation]:\n hasher.update(bytes(str(trial.number), \"utf-8\"))\n\n cache_key = \"{}:{}\".format(_POPULATION_CACHE_KEY_PREFIX, hasher.hexdigest())\n cached_generation, cached_population_numbers = study.system_attrs.get(\n cache_key, (-1, [])\n )\n if cached_generation >= generation:\n generation = cached_generation\n population = [trials[n] for n in cached_population_numbers]\n else:\n population.extend(parent_population)\n population = self._select_elite_population(study, population)\n\n # To reduce the number of system attribute entries,\n # we cache the population information only if there are no running trials\n # (i.e., the information of the population has been fixed).\n # Usually, if there are no too delayed running trials, the single entry\n # will be used.\n if len(generation_to_runnings[generation]) == 0:\n population_numbers = [t.number for t in population]\n study.set_system_attr(cache_key, (generation, population_numbers))\n\n parent_generation = generation\n parent_population = population\n\n return parent_generation, parent_population\n\n def _select_elite_population(\n self, study: Study, population: List[FrozenTrial]\n ) -> List[FrozenTrial]:\n elite_population: List[FrozenTrial] = []\n population_per_rank = self._fast_non_dominated_sort(population, study.directions)\n for population in population_per_rank:\n if len(elite_population) + len(population) < self._population_size:\n elite_population.extend(population)\n else:\n n = self._population_size - len(elite_population)\n _crowding_distance_sort(population)\n elite_population.extend(population[:n])\n break\n\n return elite_population\n\n def _fast_non_dominated_sort(\n self,\n population: List[FrozenTrial],\n directions: List[optuna.study.StudyDirection],\n ) -> List[List[FrozenTrial]]:\n dominated_count: DefaultDict[int, int] = defaultdict(int)\n dominates_list = defaultdict(list)\n\n dominates = _dominates if self._constraints_func is None else _constrained_dominates\n\n for p, q in itertools.combinations(population, 2):\n if dominates(p, q, directions):\n dominates_list[p.number].append(q.number)\n dominated_count[q.number] += 1\n elif dominates(q, p, directions):\n dominates_list[q.number].append(p.number)\n dominated_count[p.number] += 1\n\n population_per_rank = []\n while population:\n non_dominated_population = []\n i = 0\n while i < len(population):\n if dominated_count[population[i].number] == 0:\n individual = population[i]\n if i == len(population) - 1:\n population.pop()\n else:\n population[i] = population.pop()\n non_dominated_population.append(individual)\n else:\n i += 1\n\n for x in non_dominated_population:\n for y in dominates_list[x.number]:\n dominated_count[y] -= 1\n\n assert non_dominated_population\n population_per_rank.append(non_dominated_population)\n\n return population_per_rank\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n assert state in [TrialState.COMPLETE, TrialState.FAIL, TrialState.PRUNED]\n if state == TrialState.COMPLETE and self._constraints_func is not None:\n constraints = None\n try:\n con = self._constraints_func(trial)\n if not isinstance(con, (tuple, list)):\n warnings.warn(\n f\"Constraints should be a sequence of floats but got {type(con).__name__}.\"\n )\n constraints = tuple(con)\n except Exception:\n raise\n finally:\n 
assert constraints is None or isinstance(constraints, tuple)\n\n study._storage.set_trial_system_attr(\n trial._trial_id,\n _CONSTRAINTS_KEY,\n constraints,\n )\n self._random_sampler.after_trial(study, trial, state, values)\n\n\ndef _crowding_distance_sort(population: List[FrozenTrial]) -> None:\n manhattan_distances = defaultdict(float)\n for i in range(len(population[0].values)):\n population.sort(key=lambda x: cast(float, x.values[i]))\n\n v_min = population[0].values[i]\n v_max = population[-1].values[i]\n assert v_min is not None\n assert v_max is not None\n\n width = v_max - v_min\n if width == 0:\n continue\n\n manhattan_distances[population[0].number] = float(\"inf\")\n manhattan_distances[population[-1].number] = float(\"inf\")\n\n for j in range(1, len(population) - 1):\n v_high = population[j + 1].values[i]\n v_low = population[j - 1].values[i]\n assert v_high is not None\n assert v_low is not None\n\n manhattan_distances[population[j].number] += (v_high - v_low) / width\n\n population.sort(key=lambda x: manhattan_distances[x.number])\n population.reverse()\n\n\ndef _constrained_dominates(\n trial0: FrozenTrial, trial1: FrozenTrial, directions: Sequence[StudyDirection]\n) -> bool:\n \"\"\"Checks constrained-domination.\n\n A trial x is said to constrained-dominate a trial y, if any of the following conditions is\n true:\n 1) Trial x is feasible and trial y is not.\n 2) Trial x and y are both infeasible, but solution x has a smaller overall constraint\n violation.\n 3) Trial x and y are feasible and trial x dominates trial y.\n \"\"\"\n\n constraints0 = trial0.system_attrs.get(_CONSTRAINTS_KEY)\n constraints1 = trial1.system_attrs.get(_CONSTRAINTS_KEY)\n\n if constraints0 is None:\n warnings.warn(\n f\"Trial {trial0.number} does not have constraint values.\"\n \" It will be dominated by the other trials.\"\n )\n\n if constraints1 is None:\n warnings.warn(\n f\"Trial {trial1.number} does not have constraint values.\"\n \" It will be dominated by the other trials.\"\n )\n\n if constraints0 is None and constraints1 is None:\n # Neither Trial x nor y has constraints values\n return _dominates(trial0, trial1, directions)\n\n if constraints0 is not None and constraints1 is None:\n # Trial x has constraint values, but y doesn't.\n return True\n\n if constraints0 is None and constraints1 is not None:\n # If Trial y has constraint values, but x doesn't.\n return False\n\n assert isinstance(constraints0, (list, tuple))\n assert isinstance(constraints1, (list, tuple))\n\n if len(constraints0) != len(constraints1):\n raise ValueError(\"Trials with different numbers of constraints cannot be compared.\")\n\n if trial0.state != TrialState.COMPLETE:\n return False\n\n if trial1.state != TrialState.COMPLETE:\n return True\n\n satisfy_constraints0 = all(v <= 0 for v in constraints0)\n satisfy_constraints1 = all(v <= 0 for v in constraints1)\n\n if satisfy_constraints0 and satisfy_constraints1:\n # Both trials satisfy the constraints.\n return _dominates(trial0, trial1, directions)\n\n if satisfy_constraints0:\n # trial0 satisfies the constraints, but trial1 violates them.\n return True\n\n if satisfy_constraints1:\n # trial1 satisfies the constraints, but trial0 violates them.\n return False\n\n # Both trials violate the constraints.\n violation0 = sum(v for v in constraints0 if v > 0)\n violation1 = sum(v for v in constraints1 if v > 0)\n return violation0 < violation1\n",
"import sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Sequence\n\nimport numpy as np\n\nimport optuna\nfrom optuna import logging\nfrom optuna._experimental import experimental\nfrom optuna._imports import _LazyImport\nfrom optuna._transform import _SearchSpaceTransform\nfrom optuna.distributions import BaseDistribution\nfrom optuna.distributions import CategoricalDistribution\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n_logger = logging.get_logger(__name__)\n\n_SUGGESTED_STATES = (TrialState.COMPLETE, TrialState.PRUNED)\n\n\n@experimental(\"3.0.0\")\nclass QMCSampler(BaseSampler):\n \"\"\"A Quasi Monte Carlo Sampler that generates low-discrepancy sequences.\n\n Quasi Monte Carlo (QMC) sequences are designed to have lower discrepancies than\n standard random seqeunces. They are known to perform better than the standard\n randam sequences in hyperparameter optimization.\n\n For further information about the use of QMC sequences for hyperparameter optimization,\n please refer to the following paper:\n\n - `Bergstra, James, and Yoshua Bengio. Random search for hyper-parameter optimization.\n Journal of machine learning research 13.2, 2012.\n <https://jmlr.org/papers/v13/bergstra12a.html>`_\n\n We use the QMC implementations in Scipy. For the details of the QMC algorithm,\n see the Scipy API references on `scipy.stats.qmc\n <https://scipy.github.io/devdocs/reference/stats.qmc.html>`_.\n\n .. note:\n If your search space contains categorical parameters, it samples the catagorical\n parameters by its `independent_sampler` without using QMC algorithm.\n\n .. note::\n The search space of the sampler is determined by either previous trials in the study or\n the first trial that this sampler samples.\n\n If there are previous trials in the study, :class:`~optuna.samplers.QMCSamper` infers its\n search space using the trial which was created first in the study.\n\n Otherwise (if the study has no previous trials), :class:`~optuna.samplers.QMCSampler`\n samples the first trial using its `independent_sampler` and then infers the search space\n in the second trial.\n\n As mentioned above, the search space of the :class:`~optuna.sampler.QMCSampler` is\n determined by the first trial of the study. Once the search space is determined, it cannot\n be changed afterwards.\n\n .. note:\n `QMCSampler` is not supported for Python 3.6 as it depends on `scipy.stat.qmc` module which\n only supports Python 3.7 or the later versions.\n\n Args:\n qmc_type:\n The type of QMC sequence to be sampled. This must be one of\n `\"halton\"` and `\"sobol\"`. Default is `\"sobol\"`.\n\n .. note::\n Sobol' sequence is designed to have low-discrepancy property when the number of\n samples is :math:`n=2^m` for each positive integer :math:`m`. When it is possible\n to pre-specify the number of trials suggested by `QMCSampler`, it is recommended\n that the number of trials should be set as power of two.\n\n scramble:\n If this option is :obj:`True`, scrambling (randomization) is applied to the QMC\n sequences.\n\n seed:\n A seed for `QMCSampler`. This argument is used only when `scramble` is :obj:`True`.\n If this is :obj:`None`, the seed is initialized randomly. Default is :obj:`None`.\n\n .. 
note::\n When using multiple :class:`~optuna.samplers.QMCSampler`'s in parallel and/or\n distributed optimization, all the samplers must share the same seed when the\n `scrambling` is enabled. Otherwise, the low-discrepancy property of the samples\n will be degraded.\n\n independent_sampler:\n A :class:`~optuna.samplers.BaseSampler` instance that is used for independent\n sampling. The first trial of the study and the parameters not contained in the\n relative search space are sampled by this sampler.\n\n If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used\n as the default.\n\n .. seealso::\n :class:`~optuna.samplers` module provides built-in independent samplers\n such as :class:`~optuna.samplers.RandomSampler` and\n :class:`~optuna.samplers.TPESampler`.\n\n warn_independent_sampling:\n If this is :obj:`True`, a warning message is emitted when\n the value of a parameter is sampled by using an independent sampler.\n\n Note that the parameters of the first trial in a study are sampled via an\n independent sampler in most cases, so no warning messages are emitted in such cases.\n\n warn_asyncronous_seeding:\n If this is :obj:`True`, a warning message is emitted when the scrambling\n (randomization) is applied to the QMC sequence and the random seed of the sampler is\n not set manually.\n\n .. note::\n When using parallel and/or distributed optimization without manually\n setting the seed, the seed is set randomly for each instances of\n :class:`~optuna.samplers.QMCSampler` for different workers, which ends up\n asyncronous seeding for multiple samplers used in the optimization.\n\n .. seealso::\n See parameter ``seed`` in :class:`~optuna.samplers.QMCSampler`.\n\n\n Raises:\n ValueError:\n If ``qmc_type`` is not one of 'halton' and 'sobol`.\n\n\n Example:\n\n Optimize a simple quadratic function by using :class:`~optuna.samplers.QMCSampler`.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -1, 1)\n y = trial.suggest_int(\"y\", -1, 1)\n return x**2 + y\n\n\n sampler = optuna.samplers.QMCSampler()\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=8)\n\n \"\"\"\n\n def __init__(\n self,\n *,\n qmc_type: str = \"sobol\",\n scramble: bool = False, # default is False for simplicity in distributed environment.\n seed: Optional[int] = None,\n independent_sampler: Optional[BaseSampler] = None,\n warn_asyncronous_seeding: bool = True,\n warn_independent_sampling: bool = True,\n ) -> None:\n\n version = sys.version_info\n if version < (3, 7, 0):\n version_txt = str(version[0]) + \".\" + str(version[1]) + \".\" + str(version[2])\n message = (\n f\"`QMCSampler` is not supported for Python {version_txt}. \"\n \"Consider using Python 3.7 or later.\"\n )\n raise ValueError(message)\n\n self._scramble = scramble\n self._seed = seed or np.random.PCG64().random_raw()\n self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed)\n self._initial_search_space: Optional[Dict[str, BaseDistribution]] = None\n self._warn_independent_sampling = warn_independent_sampling\n\n if qmc_type in (\"halton\", \"sobol\"):\n self._qmc_type = qmc_type\n else:\n message = (\n f'The `qmc_type`, \"{qmc_type}\", is not a valid. 
'\n 'It must be one of \"halton\" and \"sobol\".'\n )\n raise ValueError(message)\n\n if seed is None and scramble and warn_asyncronous_seeding:\n # Sobol/Halton sequences without scrambling do not use seed.\n self._log_asyncronous_seeding()\n\n def reseed_rng(self) -> None:\n\n # We must not reseed the `self._seed` like below. Otherwise, workers will have different\n # seed under parallel execution because `self.reseed_rng()` is called when starting each\n # parallel executor.\n # >>> self._seed = np.random.MT19937().random_raw()\n\n self._independent_sampler.reseed_rng()\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n if self._initial_search_space is not None:\n return self._initial_search_space\n\n past_trials = study.get_trials(deepcopy=False, states=_SUGGESTED_STATES)\n # The initial trial is sampled by the independent sampler.\n if len(past_trials) == 0:\n return {}\n # If an initial trial was already made,\n # construct search_space of this sampler from the initial trial.\n first_trial = min(past_trials, key=lambda t: t.number)\n self._initial_search_space = self._infer_initial_search_space(first_trial)\n return self._initial_search_space\n\n def _infer_initial_search_space(self, trial: FrozenTrial) -> Dict[str, BaseDistribution]:\n\n search_space: Dict[str, BaseDistribution] = {}\n for param_name, distribution in trial.distributions.items():\n if isinstance(distribution, CategoricalDistribution):\n continue\n search_space[param_name] = distribution\n\n return search_space\n\n @staticmethod\n def _log_asyncronous_seeding() -> None:\n _logger.warning(\n \"No seed is provided for `QMCSampler` and the seed is set randomly. \"\n \"If you are running multiple `QMCSampler`s in parallel and/or distributed \"\n \" environment, the same seed must be used in all samplers to ensure that resulting \"\n \"samples are taken from the same QMC sequence. \"\n )\n\n def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None:\n _logger.warning(\n f\"The parameter '{param_name}' in trial#{trial.number} is sampled independently \"\n f\"by using `{self._independent_sampler.__class__.__name__}` instead of `QMCSampler` \"\n \"(optimization performance may be degraded). \"\n \"`QMCSampler` does not support dynamic search space or `CategoricalDistribution`. 
\"\n \"You can suppress this warning by setting `warn_independent_sampling` \"\n \"to `False` in the constructor of `QMCSampler`, \"\n \"if this independent sampling is intended behavior.\"\n )\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if self._initial_search_space is not None:\n if self._warn_independent_sampling:\n self._log_independent_sampling(trial, param_name)\n\n return self._independent_sampler.sample_independent(\n study, trial, param_name, param_distribution\n )\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n\n if search_space == {}:\n return {}\n\n sample = self._sample_qmc(study, search_space)\n trans = _SearchSpaceTransform(search_space)\n sample = trans.bounds[:, 0] + sample * (trans.bounds[:, 1] - trans.bounds[:, 0])\n return trans.untransform(sample[0, :])\n\n def after_trial(\n self,\n study: \"optuna.Study\",\n trial: \"optuna.trial.FrozenTrial\",\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n self._independent_sampler.after_trial(study, trial, state, values)\n\n def _sample_qmc(self, study: Study, search_space: Dict[str, BaseDistribution]) -> np.ndarray:\n\n # Lazy import because the `scipy.stats.qmc` is slow to import.\n qmc_module = _LazyImport(\"scipy.stats.qmc\")\n\n sample_id = self._find_sample_id(study, search_space)\n d = len(search_space)\n\n if self._qmc_type == \"halton\":\n qmc_engine = qmc_module.Halton(d, seed=self._seed, scramble=self._scramble)\n elif self._qmc_type == \"sobol\":\n qmc_engine = qmc_module.Sobol(d, seed=self._seed, scramble=self._scramble)\n else:\n raise ValueError(\"Invalid `qmc_type`\")\n\n forward_size = sample_id # `sample_id` starts from 0.\n qmc_engine.fast_forward(forward_size)\n sample = qmc_engine.random(1)\n\n return sample\n\n def _find_sample_id(self, study: Study, search_space: Dict[str, BaseDistribution]) -> int:\n\n qmc_id = \"\"\n qmc_id += self._qmc_type\n # Sobol/Halton sequences without scrambling do not use seed.\n if self._scramble:\n qmc_id += f\" (scramble=True, seed={self._seed})\"\n else:\n qmc_id += \" (scramble=False)\"\n key_qmc_id = qmc_id + \"'s last sample id\"\n\n # TODO(kstoneriv3): Here, we ideally assume that the following block is\n # an atomic transaction. Without such an assumption, the current implementation\n # only ensures that each `sample_id` is sampled at least once.\n system_attrs = study._storage.get_study_system_attrs(study._study_id)\n if key_qmc_id in system_attrs.keys():\n sample_id = system_attrs[key_qmc_id]\n sample_id += 1\n else:\n sample_id = 0\n study._storage.set_study_system_attr(study._study_id, key_qmc_id, sample_id)\n\n return sample_id\n",
"import itertools\nfrom typing import Callable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\n\nfrom matplotlib.collections import PathCollection\nimport numpy as np\nimport pytest\n\nimport optuna\nfrom optuna.trial import FrozenTrial\nfrom optuna.visualization.matplotlib import plot_pareto_front\n\n\ndef allclose_as_set(\n points1: Union[List[List[float]], np.ndarray], points2: Union[List[List[float]], np.ndarray]\n) -> bool:\n p1 = points1 if isinstance(points1, list) else points1.tolist()\n p2 = points2 if isinstance(points2, list) else points2.tolist()\n return np.allclose(sorted(p1), sorted(p2))\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"include_dominated_trials\", [False, True])\[email protected](\"axis_order\", [None, [0, 1], [1, 0]])\[email protected](\"targets\", [None, lambda t: (t.values[0], t.values[1])])\ndef test_plot_pareto_front_2d(\n include_dominated_trials: bool,\n axis_order: Optional[List[int]],\n targets: Optional[Callable[[FrozenTrial], Sequence[float]]],\n) -> None:\n if axis_order is not None and targets is not None:\n pytest.skip(\"skip using both axis_order and targets\")\n # Test with no trial.\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n figure = plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n )\n\n assert len(figure.get_lines()) == 0\n\n # Test with three trials.\n study.enqueue_trial({\"x\": 1, \"y\": 1})\n study.enqueue_trial({\"x\": 1, \"y\": 0})\n study.enqueue_trial({\"x\": 0, \"y\": 1})\n study.optimize(lambda t: [t.suggest_int(\"x\", 0, 1), t.suggest_int(\"y\", 0, 1)], n_trials=3)\n\n figure = plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n assert len(figure.get_lines()) == 0\n\n if axis_order is not None:\n pareto_front_points = np.array([[1.0, 0.0], [0.0, 1.0]])[:, axis_order]\n else:\n pareto_front_points = np.array([[1.0, 0.0], [0.0, 1.0]])\n assert pareto_front_points.shape == (2, 2)\n\n path_offsets = list(map(lambda pc: pc.get_offsets(), figure.findobj(PathCollection)))\n exists_pareto_front = any(\n map(lambda po: allclose_as_set(po, pareto_front_points), path_offsets)\n )\n exists_dominated_trials = any(\n map(lambda po: allclose_as_set(po, np.array([[1.0, 1.0]])), path_offsets)\n )\n assert exists_pareto_front\n if include_dominated_trials:\n assert exists_dominated_trials\n\n # Test with `target_names` argument.\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[],\n include_dominated_trials=include_dominated_trials,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[\"Foo\"],\n include_dominated_trials=include_dominated_trials,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[\"Foo\", \"Bar\", \"Baz\"],\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n\n target_names = [\"Foo\", \"Bar\"]\n figure = plot_pareto_front(\n study=study,\n target_names=target_names,\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n assert len(figure.get_lines()) == 0\n if axis_order is None:\n assert figure.get_xlabel() == target_names[0]\n assert figure.get_ylabel() == target_names[1]\n else:\n assert 
figure.get_xlabel() == target_names[axis_order[0]]\n assert figure.get_ylabel() == target_names[axis_order[1]]\n\n if axis_order is not None:\n pareto_front_points = np.array([[1.0, 0.0], [0.0, 1.0]])[:, axis_order]\n else:\n pareto_front_points = np.array([[1.0, 0.0], [0.0, 1.0]])\n assert pareto_front_points.shape == (2, 2)\n\n path_offsets = list(map(lambda pc: pc.get_offsets(), figure.findobj(PathCollection)))\n exists_pareto_front = any(\n map(lambda po: allclose_as_set(po, pareto_front_points), path_offsets)\n )\n exists_dominated_trials = any(\n map(lambda po: allclose_as_set(po, np.array([[1.0, 1.0]])), path_offsets)\n )\n assert exists_pareto_front\n if include_dominated_trials:\n assert exists_dominated_trials\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"include_dominated_trials\", [False, True])\[email protected](\n \"axis_order\", [None] + list(itertools.permutations(range(3), 3)) # type: ignore\n)\[email protected](\"targets\", [None, lambda t: (t.values[0], t.values[1], t.values[2])])\ndef test_plot_pareto_front_3d(\n include_dominated_trials: bool,\n axis_order: Optional[List[int]],\n targets: Optional[Callable[[FrozenTrial], Sequence[float]]],\n) -> None:\n if axis_order is not None and targets is not None:\n pytest.skip(\"skip using both axis_order and targets\")\n # Test with no trial.\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\"])\n figure = plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n )\n assert len(figure.get_lines()) == 0\n\n # Test with three trials.\n study.enqueue_trial({\"x\": 1, \"y\": 1, \"z\": 1})\n study.enqueue_trial({\"x\": 1, \"y\": 0, \"z\": 1})\n study.enqueue_trial({\"x\": 1, \"y\": 1, \"z\": 0})\n study.optimize(\n lambda t: [t.suggest_int(\"x\", 0, 1), t.suggest_int(\"y\", 0, 1), t.suggest_int(\"z\", 0, 1)],\n n_trials=3,\n )\n\n figure = plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n assert len(figure.get_lines()) == 0\n\n if axis_order is not None:\n pareto_front_points = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])[:, axis_order][:, 0:2]\n else:\n pareto_front_points = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])[:, 0:2]\n assert pareto_front_points.shape == (2, 2)\n\n path_offsets = list(map(lambda pc: pc.get_offsets(), figure.findobj(PathCollection)))\n exists_pareto_front = any(\n map(lambda po: allclose_as_set(po, pareto_front_points), path_offsets)\n )\n exists_dominated_trials = any(map(lambda po: allclose_as_set(po, [[1.0, 1.0]]), path_offsets))\n assert exists_pareto_front\n if include_dominated_trials:\n assert exists_dominated_trials\n\n # Test with `target_names` argument.\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[],\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[\"Foo\"],\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[\"Foo\", \"Bar\"],\n include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n plot_pareto_front(\n study=study,\n target_names=[\"Foo\", \"Bar\", \"Baz\", \"Qux\"],\n 
include_dominated_trials=include_dominated_trials,\n axis_order=axis_order,\n targets=targets,\n )\n\n target_names = [\"Foo\", \"Bar\", \"Baz\"]\n figure = plot_pareto_front(\n study=study, target_names=target_names, axis_order=axis_order, targets=targets\n )\n\n assert len(figure.get_lines()) == 0\n\n if axis_order is None:\n assert figure.get_xlabel() == target_names[0]\n assert figure.get_ylabel() == target_names[1]\n assert figure.get_zlabel() == target_names[2]\n else:\n assert figure.get_xlabel() == target_names[axis_order[0]]\n assert figure.get_ylabel() == target_names[axis_order[1]]\n assert figure.get_zlabel() == target_names[axis_order[2]]\n\n if axis_order is not None:\n pareto_front_points = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])[:, axis_order][:, 0:2]\n else:\n pareto_front_points = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])[:, 0:2]\n assert pareto_front_points.shape == (2, 2)\n\n path_offsets = list(map(lambda pc: pc.get_offsets(), figure.findobj(PathCollection)))\n exists_pareto_front = any(\n map(lambda po: allclose_as_set(po, pareto_front_points), path_offsets)\n )\n exists_dominated_trials = any(map(lambda po: allclose_as_set(po, [[1.0, 1.0]]), path_offsets))\n assert exists_pareto_front\n if include_dominated_trials:\n assert exists_dominated_trials\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"include_dominated_trials\", [False, True])\ndef test_plot_pareto_front_unsupported_dimensions(include_dominated_trials: bool) -> None:\n # Unsupported: n_objectives == 1.\n with pytest.raises(ValueError):\n study = optuna.create_study(directions=[\"minimize\"])\n study.optimize(lambda t: [0], n_trials=1)\n plot_pareto_front(study=study, include_dominated_trials=include_dominated_trials)\n\n with pytest.raises(ValueError):\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(lambda t: [0], n_trials=1)\n plot_pareto_front(study=study, include_dominated_trials=include_dominated_trials)\n\n # Unsupported: n_objectives == 4.\n with pytest.raises(ValueError):\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\", \"minimize\"])\n study.optimize(lambda t: [0, 0, 0, 0], n_trials=1)\n plot_pareto_front(study=study, include_dominated_trials=include_dominated_trials)\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\"dimension\", [2, 3])\[email protected](\"include_dominated_trials\", [False, True])\ndef test_plot_pareto_front_invalid_axis_order(\n dimension: int, include_dominated_trials: bool\n) -> None:\n study = optuna.create_study(directions=[\"minimize\"] * dimension)\n study.optimize(lambda t: [0] * dimension, n_trials=1)\n\n # Invalid: len(axis_order) != dimension\n with pytest.raises(ValueError):\n invalid_axis_order = list(range(dimension + 1))\n assert len(invalid_axis_order) != dimension\n plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=invalid_axis_order,\n )\n\n # Invalid: np.unique(axis_order).size != dimension\n with pytest.raises(ValueError):\n invalid_axis_order = list(range(dimension))\n invalid_axis_order[1] = invalid_axis_order[0]\n assert np.unique(invalid_axis_order).size != dimension\n plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=invalid_axis_order,\n )\n\n # Invalid: max(axis_order) > (dimension - 1)\n with pytest.raises(ValueError):\n invalid_axis_order = list(range(dimension))\n invalid_axis_order[-1] += 1\n assert 
max(invalid_axis_order) > (dimension - 1)\n plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=invalid_axis_order,\n )\n\n # Invalid: min(axis_order) < 0\n with pytest.raises(ValueError):\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(lambda t: [0] * 2, n_trials=1)\n invalid_axis_order = list(range(dimension))\n invalid_axis_order[0] -= 1\n assert min(invalid_axis_order) < 0\n plot_pareto_front(\n study=study,\n include_dominated_trials=include_dominated_trials,\n axis_order=invalid_axis_order,\n )\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\ndef test_plot_pareto_front_targets_without_target_names() -> None:\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\"])\n with pytest.raises(\n ValueError,\n match=\"If `targets` is specified for empty studies, `target_names` must be specified.\",\n ):\n plot_pareto_front(\n study=study,\n target_names=None,\n targets=lambda t: (t.values[0], t.values[1], t.values[2]),\n )\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\n \"targets\",\n [\n lambda t: (t.values[0]),\n ],\n)\ndef test_plot_pareto_front_invalid_target_values(\n targets: Optional[Callable[[FrozenTrial], Sequence[float]]]\n) -> None:\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\", \"minimize\"])\n study.optimize(lambda t: [0, 0, 0, 0], n_trials=3)\n with pytest.raises(\n ValueError,\n match=\"targets` should return a sequence of target values. your `targets`\"\n \" returns <class 'float'>\",\n ):\n plot_pareto_front(\n study=study,\n targets=targets,\n )\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\[email protected](\n \"targets\",\n [\n lambda t: (t.values[0],),\n lambda t: (t.values[0], t.values[1], t.values[2], t.values[3]),\n ],\n)\ndef test_plot_pareto_front_n_targets_unsupported(\n targets: Callable[[FrozenTrial], Sequence[float]]\n) -> None:\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\", \"minimize\"])\n study.optimize(lambda t: [0, 0, 0, 0], n_trials=3)\n n_targets = len(targets(study.best_trials[0]))\n with pytest.raises(\n ValueError,\n match=\"`plot_pareto_front` function only supports 2 or 3 targets.\"\n \" you used {} targets now.\".format(n_targets),\n ):\n plot_pareto_front(\n study=study,\n targets=targets,\n )\n\n\[email protected](\"ignore::optuna.exceptions.ExperimentalWarning\")\ndef test_plot_pareto_front_using_axis_order_and_targets() -> None:\n study = optuna.create_study(directions=[\"minimize\", \"minimize\", \"minimize\"])\n with pytest.raises(\n ValueError,\n match=\"Using both `targets` and `axis_order` is not supported.\"\n \" Use either `targets` or `axis_order`.\",\n ):\n plot_pareto_front(\n study=study,\n axis_order=[0, 1, 2],\n targets=lambda t: (t.values[0], t.values[1], t.values[2]),\n )\n"
] | [
[
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.random.RandomState"
],
[
"numpy.random.PCG64"
],
[
"numpy.array",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
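The NSGA-II sampler stored in the record above breaks ties between trials with a constrained-domination rule: a feasible trial dominates an infeasible one, two infeasible trials are compared by their total constraint violation, and two feasible trials fall back to ordinary Pareto domination. Below is a minimal standalone sketch of that rule, assuming plain (values, constraints) sequences with every objective minimized and constraint values <= 0 meaning "satisfied"; the helper names here are illustrative and not part of the stored source.

# Hypothetical standalone sketch of the constrained-domination rule described above;
# operates on plain (values, constraints) pairs, all objectives minimized.
from typing import Sequence


def dominates(values0: Sequence[float], values1: Sequence[float]) -> bool:
    # Ordinary Pareto domination for minimization: no worse everywhere,
    # strictly better somewhere.
    return all(a <= b for a, b in zip(values0, values1)) and any(
        a < b for a, b in zip(values0, values1)
    )


def constrained_dominates(
    values0: Sequence[float],
    constraints0: Sequence[float],
    values1: Sequence[float],
    constraints1: Sequence[float],
) -> bool:
    # A constraint value <= 0 is satisfied; a value > 0 is violated by that amount.
    feasible0 = all(c <= 0 for c in constraints0)
    feasible1 = all(c <= 0 for c in constraints1)

    if feasible0 and feasible1:
        # 3) Both feasible: fall back to ordinary Pareto domination.
        return dominates(values0, values1)
    if feasible0 != feasible1:
        # 1) A feasible solution always dominates an infeasible one.
        return feasible0
    # 2) Both infeasible: the smaller overall violation wins.
    violation0 = sum(c for c in constraints0 if c > 0)
    violation1 = sum(c for c in constraints1 if c > 0)
    return violation0 < violation1


if __name__ == "__main__":
    # Feasible beats infeasible regardless of objective values.
    assert constrained_dominates([10.0], [0.0], [1.0], [2.5])
    # Both infeasible: smaller total violation wins.
    assert constrained_dominates([1.0], [1.0], [1.0], [3.0])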
edchengg/MyNLP | [
"350c14efebc1440d6e132c6c516f0a02625df320"
] | [
"examples/conll2003ner/conll2003_ner.py"
] | [
"'''\nNER example for CoNLL 2003\n\n'''\nimport argparse\nimport random\nimport numpy as np\nimport torch\nfrom edcnlp.dataloader.feature import Example\nfrom edcnlp.dataloader.loader import examples_to_dataloader\nfrom edcnlp.model.taskModel import TokenClassification\nfrom edcnlp.utils.utils import display, build_pretrained_model_from_huggingface\nfrom edcnlp.utils.trainer import Trainer\nfrom seqeval.metrics import f1_score, classification_report\nimport torch.nn.functional as F\n# take args\nparser = argparse.ArgumentParser()\n\n## Required parameters\nparser.add_argument(\"--source_language\", default='en', type=str,\n help=\"The target language\")\nparser.add_argument(\"--target_language\", default='en', type=str,\n help=\"The target language\")\nparser.add_argument(\"--train_dir\", default='/home/cheny/MyNLP/examples/conll2003ner/en/train.txt', type=str,\n help=\"The target language\")\nparser.add_argument(\"--dev_dir\", default='/home/cheny/MyNLP/examples/conll2003ner/en/dev.txt', type=str,\n help=\"The target language\")\nparser.add_argument(\"--test_dir\", default='/home/cheny/MyNLP/examples/conll2003ner/en/test.txt', type=str,\n help=\"The target language\")\nparser.add_argument(\"--pretrained_model\", default='Bert_base_cased', type=str,\n help=\"list: 'MBert_base, Bert_large, Bert_base, Roberta_base, Roberta_large, XLMRoberta_base, XLMRoberta_large\")\nparser.add_argument(\"--output_dir\", default='save', type=str,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\nparser.add_argument(\"--model_name\", default='model', type=str,\n help=\"Checkpoint and config save prefix\")\nparser.add_argument(\"--train_batchsize\", default=32, type=int)\nparser.add_argument(\"--eval_batchsize\", default=32, type=int)\nparser.add_argument(\"--learning_rate\", default=5e-5, type=float)\nparser.add_argument(\"--max_epoch\", default=5, type=int)\nparser.add_argument(\"--seed\", default=0, type=int)\nparser.add_argument(\"--dropout_ratio\", default=0.4, type=float)\nparser.add_argument(\"--gpuid\", default='1', type=str)\nparser.add_argument(\"--pos_dim\", default=0, type=int)\nparser.add_argument(\"--deprel_dim\", default=0, type=int)\nparser.add_argument(\"--ner_dim\", default=0, type=int)\nparser.add_argument(\"--freeze\", default='0', type=str,\n help='embedding: freeze embedding, 0: no freeze, n: freeze layers under n')\nparser.add_argument(\"--train_max_seq_length\", default=128, type=int)\nparser.add_argument(\"--eval_max_seq_length\", default=128, type=int)\nparser.add_argument(\"--train_num_duplicate\", default=20, type=int)\nparser.add_argument(\"--eval_num_duplicate\", default=20, type=int)\nparser.add_argument(\"--warmup_proportion\", default=0.4, type=float)\nparser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\nparser.add_argument(\"--crf\", default=0, type=int,\n help=\"Use CRF = 1, else = 0\")\n\n\n\n# dataprocessor\nclass CoNLL2003Processor(object):\n '''Processor for CoNLL-2003 data set.'''\n\n def __init__(self):\n self.label = [\"O\",\n \"B-MISC\",\n \"I-MISC\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\"]\n self.create_label_map()\n\n def get_examples(self,\n data_dir):\n tsv = self.read_tsv(data_dir)\n examples = self.create_examples(tsv)\n return examples\n\n def get_labels(self):\n return self.label\n\n def create_label_map(self):\n self.label_map = {k: idx for idx, k in 
enumerate(self.label)}\n\n def create_examples(self,\n lines):\n examples = []\n for i, (sentence, label) in enumerate(lines):\n text = sentence\n label = [self.label_map[l] for l in label]\n examples.append(Example(token=text, label=label))\n return examples\n\n def read_tsv(self,\n filename):\n '''\n read file\n '''\n print('Reading file: ', filename)\n f = open(filename, encoding='utf-8')\n data = []\n sentence = []\n label = []\n for line in f:\n if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == \"\\n\":\n\n if len(sentence) > 0:\n # Add Sentence and label to data\n data.append((sentence, label))\n sentence = []\n label = []\n continue\n splits = line.split(' ')\n # Word\n sentence.append(splits[0])\n # NER Label\n label.append(splits[-1][:-1])\n\n if len(sentence) > 0:\n data.append((sentence, label))\n sentence = []\n label = []\n\n print('Data size: ', len(data))\n return data\n\ndef evaluator(model,\n dataloader):\n model.eval()\n device = model.get_device()\n label_map = model.get_label_map()\n data_size = dataloader.size\n y_true = [[] for _ in range(data_size)]\n y_pred = [[] for _ in range(data_size)]\n dev_loss = 0\n\n for step, batch in enumerate(dataloader.dataloader):\n batch = tuple(t.to(device) for t in batch)\n input_ids, token_type_ids, attention_mask, valid_ids, pos_ids, ner_ids, deprel_ids, eval_idx, label_ids, label_mask, sen_id = batch\n\n with torch.no_grad():\n loss, logits = model(input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n valid_ids=valid_ids,\n pos_ids=pos_ids,\n ner_ids=ner_ids,\n deprel_ids=deprel_ids,\n labels=label_ids,\n label_mask=label_mask)\n dev_loss += loss.item()\n\n logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)\n logits = logits.detach().cpu().numpy()\n label_ids = label_ids.to('cpu').numpy()\n label_mask = label_mask.to('cpu').numpy()\n eval_idx = eval_idx.to('cpu').numpy()\n sen_id = sen_id.to('cpu').numpy()\n for bz_idx, label_i in enumerate(label_ids):\n temp_gold = []\n temp_pred = []\n for tok_idx, tok_i in enumerate(label_i):\n if label_mask[bz_idx][tok_idx] == 0:\n # stop when label mask = 0\n s_id = sen_id[bz_idx]\n y_true[s_id].extend(temp_gold)\n y_pred[s_id].extend(temp_pred)\n break\n else:\n if eval_idx[bz_idx][tok_idx] == 1:\n # get all prediction when label mask == 1\n temp_gold.append(label_map[label_ids[bz_idx][tok_idx]])\n temp_pred.append(label_map[logits[bz_idx][tok_idx]])\n\n res = f1_score(y_true, y_pred)\n print(classification_report(y_true, y_pred))\n avg_loss = dev_loss / len(dataloader)\n return res, avg_loss\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n option = vars(args)\n\n print('=' * 30)\n print('Configuration...')\n display(option)\n\n print('=' * 30)\n print('Building Pretrained Model...')\n Pretrained_model, tokenizer = build_pretrained_model_from_huggingface(option)\n # process data\n print('='* 30)\n print('Processing Data...')\n processor = CoNLL2003Processor()\n label_list = processor.get_labels()\n train_examples = processor.get_examples(option['train_dir'])\n dev_examples = processor.get_examples(option['dev_dir'])\n test_examples = processor.get_examples(option['test_dir'])\n option['num_labels'] = len(label_list)\n # create dataloader\n # input_ids, token_type_ids, attention_mask, valid_ids, pos_ids, ner_ids, deprel_ids, sen_id\n keys = ['input_ids', 'input_mask', 'token_type_ids', 'valid_idx', 'pos_ids', 'ner_ids', 'deprel_ids', 'eval_idx', 
'label_ids', 'label_mask', 'sen_id']\n\n print('=' * 30)\n print('Building Dataloader...')\n train_dataloader = examples_to_dataloader(train_examples,\n option,\n tokenizer,\n set_type='train',\n keys=keys)\n dev_dataloader = examples_to_dataloader(dev_examples,\n option,\n tokenizer,\n set_type='dev',\n keys=keys)\n test_dataloader = examples_to_dataloader(test_examples,\n option,\n tokenizer,\n set_type='test',\n keys=keys)\n\n # model\n print('=' * 30)\n print('Building Model...')\n model = TokenClassification(Pretrained_model, option)\n model.set_label_map(label_list)\n model.to(torch.device('cuda:' + option['gpuid']))\n model.set_device('cuda:' + option['gpuid'])\n\n # trainer\n print('=' * 30)\n print('Training...')\n trainer = Trainer(option=option,\n model=model,\n train_dataloader=train_dataloader,\n dev_dataloader=dev_dataloader,\n evaluator=evaluator)\n trainer.train()\n # test\n print('=' * 30)\n print('Testing...')\n res, _ = evaluator(model, test_dataloader)\n print('Test RES: ', res)"
] | [
[
"torch.nn.functional.log_softmax",
"numpy.random.seed",
"torch.manual_seed",
"torch.no_grad",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
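The CoNLL-2003 example in the record above builds (sentence, label) pairs by splitting the input on blank lines and -DOCSTART markers, taking the word from the first whitespace-separated column and the NER tag from the last. A compact sketch of that reading loop under the same assumptions follows; the read_conll name and return type are illustrative, not part of the stored source.

# Minimal sketch of a CoNLL-2003-style reading loop, assuming whitespace-separated
# columns with the token first and the NER tag last.
from typing import List, Tuple


def read_conll(path: str) -> List[Tuple[List[str], List[str]]]:
    sentences: List[Tuple[List[str], List[str]]] = []
    tokens: List[str] = []
    tags: List[str] = []
    with open(path, encoding="utf-8") as f:
        for raw in f:
            line = raw.strip()
            # Blank lines and -DOCSTART- markers end the current sentence.
            if not line or line.startswith("-DOCSTART"):
                if tokens:
                    sentences.append((tokens, tags))
                    tokens, tags = [], []
                continue
            columns = line.split()
            tokens.append(columns[0])   # word form (first column)
            tags.append(columns[-1])    # NER tag (last column)
    if tokens:  # flush a final sentence not followed by a blank line
        sentences.append((tokens, tags))
    return sentences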
stjordanis/mmocr | [
"e267d0628141cc74a10b6daf2946a88958e5c24b"
] | [
"mmocr/core/visualize.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport os\nimport shutil\nimport urllib\nimport warnings\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport mmocr.utils as utils\n\n\ndef overlay_mask_img(img, mask):\n \"\"\"Draw mask boundaries on image for visualization.\n\n Args:\n img (ndarray): The input image.\n mask (ndarray): The instance mask.\n\n Returns:\n img (ndarray): The output image with instance boundaries on it.\n \"\"\"\n assert isinstance(img, np.ndarray)\n assert isinstance(mask, np.ndarray)\n\n contours, _ = cv2.findContours(\n mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n cv2.drawContours(img, contours, -1, (0, 255, 0), 1)\n\n return img\n\n\ndef show_feature(features, names, to_uint8, out_file=None):\n \"\"\"Visualize a list of feature maps.\n\n Args:\n features (list(ndarray)): The feature map list.\n names (list(str)): The visualized title list.\n to_uint8 (list(1|0)): The list indicating whether to convent\n feature maps to uint8.\n out_file (str): The output file name. If set to None,\n the output image will be shown without saving.\n \"\"\"\n assert utils.is_ndarray_list(features)\n assert utils.is_type_list(names, str)\n assert utils.is_type_list(to_uint8, int)\n assert utils.is_none_or_type(out_file, str)\n assert utils.equal_len(features, names, to_uint8)\n\n num = len(features)\n row = col = math.ceil(math.sqrt(num))\n\n for i, (f, n) in enumerate(zip(features, names)):\n plt.subplot(row, col, i + 1)\n plt.title(n)\n if to_uint8[i]:\n f = f.astype(np.uint8)\n plt.imshow(f)\n if out_file is None:\n plt.show()\n else:\n plt.savefig(out_file)\n\n\ndef show_img_boundary(img, boundary):\n \"\"\"Show image and instance boundaires.\n\n Args:\n img (ndarray): The input image.\n boundary (list[float or int]): The input boundary.\n \"\"\"\n assert isinstance(img, np.ndarray)\n assert utils.is_type_list(boundary, int) or utils.is_type_list(\n boundary, float)\n\n cv2.polylines(\n img, [np.array(boundary).astype(np.int32).reshape(-1, 1, 2)],\n True,\n color=(0, 255, 0),\n thickness=1)\n plt.imshow(img)\n plt.show()\n\n\ndef show_pred_gt(preds,\n gts,\n show=False,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Show detection and ground truth for one image.\n\n Args:\n preds (list[list[float]]): The detection boundary list.\n gts (list[list[float]]): The ground truth boundary list.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): The value of waitKey param.\n out_file (str): The filename of the output.\n \"\"\"\n assert utils.is_2dlist(preds)\n assert utils.is_2dlist(gts)\n assert isinstance(show, bool)\n assert isinstance(win_name, str)\n assert isinstance(wait_time, int)\n assert utils.is_none_or_type(out_file, str)\n\n p_xy = [p for boundary in preds for p in boundary]\n gt_xy = [g for gt in gts for g in gt]\n\n max_xy = np.max(np.array(p_xy + gt_xy).reshape(-1, 2), axis=0)\n\n width = int(max_xy[0]) + 100\n height = int(max_xy[1]) + 100\n\n img = np.ones((height, width, 3), np.int8) * 255\n pred_color = mmcv.color_val('red')\n gt_color = mmcv.color_val('blue')\n thickness = 1\n\n for boundary in preds:\n cv2.polylines(\n img, [np.array(boundary).astype(np.int32).reshape(-1, 1, 2)],\n True,\n color=pred_color,\n thickness=thickness)\n for gt in gts:\n cv2.polylines(\n img, [np.array(gt).astype(np.int32).reshape(-1, 1, 2)],\n True,\n color=gt_color,\n thickness=thickness)\n 
if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n return img\n\n\ndef imshow_pred_boundary(img,\n boundaries_with_scores,\n labels,\n score_thr=0,\n boundary_color='blue',\n text_color='blue',\n thickness=1,\n font_scale=0.5,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None,\n show_score=False):\n \"\"\"Draw boundaries and class labels (with scores) on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n boundaries_with_scores (list[list[float]]): Boundaries with scores.\n labels (list[int]): Labels of boundaries.\n score_thr (float): Minimum score of boundaries to be shown.\n boundary_color (str or tuple or :obj:`Color`): Color of boundaries.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename of the output.\n show_score (bool): Whether to show text instance score.\n \"\"\"\n assert isinstance(img, (str, np.ndarray))\n assert utils.is_2dlist(boundaries_with_scores)\n assert utils.is_type_list(labels, int)\n assert utils.equal_len(boundaries_with_scores, labels)\n if len(boundaries_with_scores) == 0:\n warnings.warn('0 text found in ' + out_file)\n return None\n\n utils.valid_boundary(boundaries_with_scores[0])\n img = mmcv.imread(img)\n\n scores = np.array([b[-1] for b in boundaries_with_scores])\n inds = scores > score_thr\n boundaries = [boundaries_with_scores[i][:-1] for i in np.where(inds)[0]]\n scores = [scores[i] for i in np.where(inds)[0]]\n labels = [labels[i] for i in np.where(inds)[0]]\n\n boundary_color = mmcv.color_val(boundary_color)\n text_color = mmcv.color_val(text_color)\n font_scale = 0.5\n\n for boundary, score in zip(boundaries, scores):\n boundary_int = np.array(boundary).astype(np.int32)\n\n cv2.polylines(\n img, [boundary_int.reshape(-1, 1, 2)],\n True,\n color=boundary_color,\n thickness=thickness)\n\n if show_score:\n label_text = f'{score:.02f}'\n cv2.putText(img, label_text,\n (boundary_int[0], boundary_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n return img\n\n\ndef imshow_text_char_boundary(img,\n text_quads,\n boundaries,\n char_quads,\n chars,\n show=False,\n thickness=1,\n font_scale=0.5,\n win_name='',\n wait_time=-1,\n out_file=None):\n \"\"\"Draw text boxes and char boxes on img.\n\n Args:\n img (str or ndarray): The img to be displayed.\n text_quads (list[list[int|float]]): The text boxes.\n boundaries (list[list[int|float]]): The boundary list.\n char_quads (list[list[list[int|float]]]): A 2d list of char boxes.\n char_quads[i] is for the ith text, and char_quads[i][j] is the jth\n char of the ith text.\n chars (list[list[char]]). 
The string for each text box.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename of the output.\n \"\"\"\n assert isinstance(img, (np.ndarray, str))\n assert utils.is_2dlist(text_quads)\n assert utils.is_2dlist(boundaries)\n assert utils.is_3dlist(char_quads)\n assert utils.is_2dlist(chars)\n assert utils.equal_len(text_quads, char_quads, boundaries)\n\n img = mmcv.imread(img)\n char_color = [mmcv.color_val('blue'), mmcv.color_val('green')]\n text_color = mmcv.color_val('red')\n text_inx = 0\n for text_box, boundary, char_box, txt in zip(text_quads, boundaries,\n char_quads, chars):\n text_box = np.array(text_box)\n boundary = np.array(boundary)\n\n text_box = text_box.reshape(-1, 2).astype(np.int32)\n cv2.polylines(\n img, [text_box.reshape(-1, 1, 2)],\n True,\n color=text_color,\n thickness=thickness)\n if boundary.shape[0] > 0:\n cv2.polylines(\n img, [boundary.reshape(-1, 1, 2)],\n True,\n color=text_color,\n thickness=thickness)\n\n for b in char_box:\n b = np.array(b)\n c = char_color[text_inx % 2]\n b = b.astype(np.int32)\n cv2.polylines(\n img, [b.reshape(-1, 1, 2)], True, color=c, thickness=thickness)\n\n label_text = ''.join(txt)\n cv2.putText(img, label_text, (text_box[0, 0], text_box[0, 1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n text_inx = text_inx + 1\n\n if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n return img\n\n\ndef tile_image(images):\n \"\"\"Combined multiple images to one vertically.\n\n Args:\n images (list[np.ndarray]): Images to be combined.\n \"\"\"\n assert isinstance(images, list)\n assert len(images) > 0\n\n for i, _ in enumerate(images):\n if len(images[i].shape) == 2:\n images[i] = cv2.cvtColor(images[i], cv2.COLOR_GRAY2BGR)\n\n widths = [img.shape[1] for img in images]\n heights = [img.shape[0] for img in images]\n h, w = sum(heights), max(widths)\n vis_img = np.zeros((h, w, 3), dtype=np.uint8)\n\n offset_y = 0\n for image in images:\n img_h, img_w = image.shape[:2]\n vis_img[offset_y:(offset_y + img_h), 0:img_w, :] = image\n offset_y += img_h\n\n return vis_img\n\n\ndef imshow_text_label(img,\n pred_label,\n gt_label,\n show=False,\n win_name='',\n wait_time=-1,\n out_file=None):\n \"\"\"Draw predicted texts and ground truth texts on images.\n\n Args:\n img (str or np.ndarray): Image filename or loaded image.\n pred_label (str): Predicted texts.\n gt_label (str): Ground truth texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str): The filename of the output.\n \"\"\"\n assert isinstance(img, (np.ndarray, str))\n assert isinstance(pred_label, str)\n assert isinstance(gt_label, str)\n assert isinstance(show, bool)\n assert isinstance(win_name, str)\n assert isinstance(wait_time, int)\n\n img = mmcv.imread(img)\n\n src_h, src_w = img.shape[:2]\n resize_height = 64\n resize_width = int(1.0 * src_w / src_h * resize_height)\n img = cv2.resize(img, (resize_width, resize_height))\n h, w = img.shape[:2]\n\n if is_contain_chinese(pred_label):\n pred_img = draw_texts_by_pil(img, [pred_label], None)\n else:\n pred_img = np.ones((h, w, 3), dtype=np.uint8) * 255\n cv2.putText(pred_img, pred_label, (5, 40), cv2.FONT_HERSHEY_SIMPLEX,\n 0.9, (0, 0, 255), 2)\n images = [pred_img, img]\n\n if gt_label != '':\n if 
is_contain_chinese(gt_label):\n gt_img = draw_texts_by_pil(img, [gt_label], None)\n else:\n gt_img = np.ones((h, w, 3), dtype=np.uint8) * 255\n cv2.putText(gt_img, gt_label, (5, 40), cv2.FONT_HERSHEY_SIMPLEX,\n 0.9, (255, 0, 0), 2)\n images.append(gt_img)\n\n img = tile_image(images)\n\n if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n return img\n\n\ndef imshow_node(img,\n result,\n boxes,\n idx_to_cls={},\n show=False,\n win_name='',\n wait_time=-1,\n out_file=None):\n\n img = mmcv.imread(img)\n h, w = img.shape[:2]\n\n max_value, max_idx = torch.max(result['nodes'].detach().cpu(), -1)\n node_pred_label = max_idx.numpy().tolist()\n node_pred_score = max_value.numpy().tolist()\n\n texts, text_boxes = [], []\n for i, box in enumerate(boxes):\n new_box = [[box[0], box[1]], [box[2], box[1]], [box[2], box[3]],\n [box[0], box[3]]]\n Pts = np.array([new_box], np.int32)\n cv2.polylines(\n img, [Pts.reshape((-1, 1, 2))],\n True,\n color=(255, 255, 0),\n thickness=1)\n x_min = int(min([point[0] for point in new_box]))\n y_min = int(min([point[1] for point in new_box]))\n\n # text\n pred_label = str(node_pred_label[i])\n if pred_label in idx_to_cls:\n pred_label = idx_to_cls[pred_label]\n pred_score = '{:.2f}'.format(node_pred_score[i])\n text = pred_label + '(' + pred_score + ')'\n texts.append(text)\n\n # text box\n font_size = int(\n min(\n abs(new_box[3][1] - new_box[0][1]),\n abs(new_box[1][0] - new_box[0][0])))\n char_num = len(text)\n text_box = [\n x_min * 2, y_min, x_min * 2 + font_size * char_num, y_min,\n x_min * 2 + font_size * char_num, y_min + font_size, x_min * 2,\n y_min + font_size\n ]\n text_boxes.append(text_box)\n\n pred_img = np.ones((h, w * 2, 3), dtype=np.uint8) * 255\n pred_img = draw_texts_by_pil(\n pred_img, texts, text_boxes, draw_box=False, on_ori_img=True)\n\n vis_img = np.ones((h, w * 3, 3), dtype=np.uint8) * 255\n vis_img[:, :w] = img\n vis_img[:, w:] = pred_img\n\n if show:\n mmcv.imshow(vis_img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(vis_img, out_file)\n\n return vis_img\n\n\ndef gen_color():\n \"\"\"Generate BGR color schemes.\"\"\"\n color_list = [(101, 67, 254), (154, 157, 252), (173, 205, 249),\n (123, 151, 138), (187, 200, 178), (148, 137, 69),\n (169, 200, 200), (155, 175, 131), (154, 194, 182),\n (178, 190, 137), (140, 211, 222), (83, 156, 222)]\n return color_list\n\n\ndef draw_polygons(img, polys):\n \"\"\"Draw polygons on image.\n\n Args:\n img (np.ndarray): The original image.\n polys (list[list[float]]): Detected polygons.\n Return:\n out_img (np.ndarray): Visualized image.\n \"\"\"\n dst_img = img.copy()\n color_list = gen_color()\n out_img = dst_img\n for idx, poly in enumerate(polys):\n poly = np.array(poly).reshape((-1, 1, 2)).astype(np.int32)\n cv2.drawContours(\n img,\n np.array([poly]),\n -1,\n color_list[idx % len(color_list)],\n thickness=cv2.FILLED)\n out_img = cv2.addWeighted(dst_img, 0.5, img, 0.5, 0)\n return out_img\n\n\ndef get_optimal_font_scale(text, width):\n \"\"\"Get optimal font scale for cv2.putText.\n\n Args:\n text (str): Text in one box.\n width (int): The box width.\n \"\"\"\n for scale in reversed(range(0, 60, 1)):\n textSize = cv2.getTextSize(\n text,\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=scale / 10,\n thickness=1)\n new_width = textSize[0][0]\n if new_width <= width:\n return scale / 10\n return 1\n\n\ndef draw_texts(img, texts, boxes=None, draw_box=True, on_ori_img=False):\n \"\"\"Draw boxes and texts on empty img.\n\n Args:\n img 
(np.ndarray): The original image.\n texts (list[str]): Recognized texts.\n boxes (list[list[float]]): Detected bounding boxes.\n draw_box (bool): Whether draw box or not. If False, draw text only.\n on_ori_img (bool): If True, draw box and text on input image,\n else, on a new empty image.\n Return:\n out_img (np.ndarray): Visualized image.\n \"\"\"\n color_list = gen_color()\n h, w = img.shape[:2]\n if boxes is None:\n boxes = [[0, 0, w, 0, w, h, 0, h]]\n assert len(texts) == len(boxes)\n\n if on_ori_img:\n out_img = img\n else:\n out_img = np.ones((h, w, 3), dtype=np.uint8) * 255\n for idx, (box, text) in enumerate(zip(boxes, texts)):\n if draw_box:\n new_box = [[x, y] for x, y in zip(box[0::2], box[1::2])]\n Pts = np.array([new_box], np.int32)\n cv2.polylines(\n out_img, [Pts.reshape((-1, 1, 2))],\n True,\n color=color_list[idx % len(color_list)],\n thickness=1)\n min_x = int(min(box[0::2]))\n max_y = int(\n np.mean(np.array(box[1::2])) + 0.2 *\n (max(box[1::2]) - min(box[1::2])))\n font_scale = get_optimal_font_scale(\n text, int(max(box[0::2]) - min(box[0::2])))\n cv2.putText(out_img, text, (min_x, max_y), cv2.FONT_HERSHEY_SIMPLEX,\n font_scale, (0, 0, 0), 1)\n\n return out_img\n\n\ndef draw_texts_by_pil(img,\n texts,\n boxes=None,\n draw_box=True,\n on_ori_img=False,\n font_size=None,\n fill_color=None,\n draw_pos=None,\n return_text_size=False):\n \"\"\"Draw boxes and texts on empty image, especially for Chinese.\n\n Args:\n img (np.ndarray): The original image.\n texts (list[str]): Recognized texts.\n boxes (list[list[float]]): Detected bounding boxes.\n draw_box (bool): Whether draw box or not. If False, draw text only.\n on_ori_img (bool): If True, draw box and text on input image,\n else on a new empty image.\n font_size (int, optional): Size to create a font object for a font.\n fill_color (tuple(int), optional): Fill color for text.\n draw_pos (list[tuple(int)], optional): Start point to draw each text.\n return_text_size (bool): If True, return the list of text size.\n\n Returns:\n (np.ndarray, list[tuple]) or np.ndarray: Return a tuple\n ``(out_img, text_sizes)``, where ``out_img`` is the output image\n with texts drawn on it and ``text_sizes`` are the size of drawing\n texts. 
If ``return_text_size`` is False, only the output image will be\n returned.\n \"\"\"\n\n color_list = gen_color()\n h, w = img.shape[:2]\n if boxes is None:\n boxes = [[0, 0, w, 0, w, h, 0, h]]\n if draw_pos is None:\n draw_pos = [None for _ in texts]\n assert len(boxes) == len(texts) == len(draw_pos)\n\n if fill_color is None:\n fill_color = (0, 0, 0)\n\n if on_ori_img:\n out_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n else:\n out_img = Image.new('RGB', (w, h), color=(255, 255, 255))\n out_draw = ImageDraw.Draw(out_img)\n\n text_sizes = []\n for idx, (box, text, ori_point) in enumerate(zip(boxes, texts, draw_pos)):\n if len(text) == 0:\n continue\n min_x, max_x = min(box[0::2]), max(box[0::2])\n min_y, max_y = min(box[1::2]), max(box[1::2])\n color = tuple(list(color_list[idx % len(color_list)])[::-1])\n if draw_box:\n out_draw.line(box, fill=color, width=1)\n dirname, _ = os.path.split(os.path.abspath(__file__))\n font_path = os.path.join(dirname, 'font.TTF')\n if not os.path.exists(font_path):\n url = ('http://download.openmmlab.com/mmocr/data/font.TTF')\n print(f'Downloading {url} ...')\n local_filename, _ = urllib.request.urlretrieve(url)\n shutil.move(local_filename, font_path)\n tmp_font_size = font_size\n if tmp_font_size is None:\n box_width = max(max_x - min_x, max_y - min_y)\n tmp_font_size = int(0.9 * box_width / len(text))\n fnt = ImageFont.truetype(font_path, tmp_font_size)\n if ori_point is None:\n ori_point = (min_x + 1, min_y + 1)\n out_draw.text(ori_point, text, font=fnt, fill=fill_color)\n text_sizes.append(fnt.getsize(text))\n\n del out_draw\n\n out_img = cv2.cvtColor(np.asarray(out_img), cv2.COLOR_RGB2BGR)\n\n if return_text_size:\n return out_img, text_sizes\n\n return out_img\n\n\ndef is_contain_chinese(check_str):\n \"\"\"Check whether string contains Chinese or not.\n\n Args:\n check_str (str): String to be checked.\n\n Return True if contains Chinese, else False.\n \"\"\"\n for ch in check_str:\n if u'\\u4e00' <= ch <= u'\\u9fff':\n return True\n return False\n\n\ndef det_recog_show_result(img, end2end_res, out_file=None):\n \"\"\"Draw `result`(boxes and texts) on `img`.\n\n Args:\n img (str or np.ndarray): The image to be displayed.\n end2end_res (dict): Text detect and recognize results.\n out_file (str): Image path where the visualized image should be saved.\n Return:\n out_img (np.ndarray): Visualized image.\n \"\"\"\n img = mmcv.imread(img)\n boxes, texts = [], []\n for res in end2end_res['result']:\n boxes.append(res['box'])\n texts.append(res['text'])\n box_vis_img = draw_polygons(img, boxes)\n\n if is_contain_chinese(''.join(texts)):\n text_vis_img = draw_texts_by_pil(img, texts, boxes)\n else:\n text_vis_img = draw_texts(img, texts, boxes)\n\n h, w = img.shape[:2]\n out_img = np.ones((h, w * 2, 3), dtype=np.uint8)\n out_img[:, :w, :] = box_vis_img\n out_img[:, w:, :] = text_vis_img\n\n if out_file:\n mmcv.imwrite(out_img, out_file)\n\n return out_img\n\n\ndef draw_edge_result(img, result, edge_thresh=0.5, keynode_thresh=0.5):\n \"\"\"Draw text and their relationship on empty images.\n\n Args:\n img (np.ndarray): The original image.\n result (dict): The result of model forward_test, including:\n - img_metas (list[dict]): List of meta information dictionary.\n - nodes (Tensor): Node prediction with size:\n number_node * node_classes.\n - edges (Tensor): Edge prediction with size: number_edge * 2.\n edge_thresh (float): Score threshold for edge classification.\n keynode_thresh (float): Score threshold for node\n (``key``) 
classification.\n\n Returns:\n np.ndarray: The image with key, value and relation drawn on it.\n \"\"\"\n\n h, w = img.shape[:2]\n\n vis_area_width = w // 3 * 2\n vis_area_height = h\n dist_key_to_value = vis_area_width // 2\n dist_pair_to_pair = 30\n\n bbox_x1 = dist_pair_to_pair\n bbox_y1 = 0\n\n new_w = vis_area_width\n new_h = vis_area_height\n pred_edge_img = np.ones((new_h, new_w, 3), dtype=np.uint8) * 255\n\n nodes = result['nodes'].detach().cpu()\n texts = result['img_metas'][0]['ori_texts']\n num_nodes = result['nodes'].size(0)\n edges = result['edges'].detach().cpu()[:, -1].view(num_nodes, num_nodes)\n\n # (i, j) will be a valid pair\n # either edge_score(node_i->node_j) > edge_thresh\n # or edge_score(node_j->node_i) > edge_thresh\n pairs = (torch.max(edges, edges.T) > edge_thresh).nonzero(as_tuple=True)\n\n # 1. \"for n1, n2 in zip(*pairs) if n1 < n2\":\n # Only (n1, n2) will be included if n1 < n2 but not (n2, n1), to\n # avoid duplication.\n # 2. \"(n1, n2) if nodes[n1, 1] > nodes[n1, 2]\":\n # nodes[n1, 1] is the score that this node is predicted as key,\n # nodes[n1, 2] is the score that this node is predicted as value.\n # If nodes[n1, 1] > nodes[n1, 2], n1 will be the index of key,\n # so that n2 will be the index of value.\n result_pairs = [(n1, n2) if nodes[n1, 1] > nodes[n1, 2] else (n2, n1)\n for n1, n2 in zip(*pairs) if n1 < n2]\n\n result_pairs.sort()\n key_current_idx = -1\n pos_current = (-1, -1)\n newline_flag = False\n\n key_font_size = 15\n value_font_size = 15\n key_font_color = (0, 0, 0)\n value_font_color = (0, 0, 255)\n arrow_color = (0, 0, 255)\n for pair in result_pairs:\n key_idx = int(pair[0].item())\n if nodes[key_idx, 1] < keynode_thresh:\n continue\n if key_idx != key_current_idx:\n # move y-coords down for a new key\n bbox_y1 += 10\n # enlarge blank area to show key-value info\n if newline_flag:\n bbox_x1 += vis_area_width\n tmp_img = np.ones(\n (new_h, new_w + vis_area_width, 3), dtype=np.uint8) * 255\n tmp_img[:new_h, :new_w] = pred_edge_img\n pred_edge_img = tmp_img\n new_w += vis_area_width\n newline_flag = False\n bbox_y1 = 10\n key_text = texts[key_idx]\n key_pos = (bbox_x1, bbox_y1)\n value_idx = pair[1].item()\n value_text = texts[value_idx]\n value_pos = (bbox_x1 + dist_key_to_value, bbox_y1)\n if key_idx != key_current_idx:\n # draw text for a new key\n key_current_idx = key_idx\n pred_edge_img, text_sizes = draw_texts_by_pil(\n pred_edge_img, [key_text],\n draw_box=False,\n on_ori_img=True,\n font_size=key_font_size,\n font_color=key_font_color,\n draw_pos=[key_pos],\n return_text_size=True)\n pos_right_bottom = (key_pos[0] + text_sizes[0][0],\n key_pos[1] + text_sizes[0][1])\n pos_current = (pos_right_bottom[0] + 5, bbox_y1 + 10)\n pred_edge_img = cv2.arrowedLine(\n pred_edge_img, (pos_right_bottom[0] + 5, bbox_y1 + 10),\n (bbox_x1 + dist_key_to_value - 5, bbox_y1 + 10), arrow_color,\n 1)\n else:\n # draw arrow from key to value\n if newline_flag:\n tmp_img = np.ones((new_h + dist_pair_to_pair, new_w, 3),\n dtype=np.uint8) * 255\n tmp_img[:new_h, :new_w] = pred_edge_img\n pred_edge_img = tmp_img\n new_h += dist_pair_to_pair\n pred_edge_img = cv2.arrowedLine(pred_edge_img, pos_current,\n (bbox_x1 + dist_key_to_value - 5,\n bbox_y1 + 10), arrow_color, 1)\n # draw text for value\n pred_edge_img = draw_texts_by_pil(\n pred_edge_img, [value_text],\n draw_box=False,\n on_ori_img=True,\n font_size=value_font_size,\n font_color=value_font_color,\n draw_pos=[value_pos],\n return_text_size=False)\n bbox_y1 += dist_pair_to_pair\n if bbox_y1 + 
dist_pair_to_pair >= new_h:\n newline_flag = True\n\n return pred_edge_img\n\n\ndef imshow_edge(img,\n result,\n boxes,\n show=False,\n win_name='',\n wait_time=-1,\n out_file=None):\n \"\"\"Display the prediction results of the nodes and edges of the KIE model.\n\n Args:\n img (np.ndarray): The original image.\n result (dict): The result of model forward_test, including:\n - img_metas (list[dict]): List of meta information dictionary.\n - nodes (Tensor): Node prediction with size: \\\n number_node * node_classes.\n - edges (Tensor): Edge prediction with size: number_edge * 2.\n boxes (list): The text boxes corresponding to the nodes.\n show (bool): Whether to show the image. Default: False.\n win_name (str): The window name. Default: ''\n wait_time (float): Value of waitKey param. Default: 0.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n np.ndarray: The image with key, value and relation drawn on it.\n \"\"\"\n img = mmcv.imread(img)\n h, w = img.shape[:2]\n color_list = gen_color()\n\n for i, box in enumerate(boxes):\n new_box = [[box[0], box[1]], [box[2], box[1]], [box[2], box[3]],\n [box[0], box[3]]]\n Pts = np.array([new_box], np.int32)\n cv2.polylines(\n img, [Pts.reshape((-1, 1, 2))],\n True,\n color=color_list[i % len(color_list)],\n thickness=1)\n\n pred_img_h = h\n pred_img_w = w\n\n pred_edge_img = draw_edge_result(img, result)\n pred_img_h = max(pred_img_h, pred_edge_img.shape[0])\n pred_img_w += pred_edge_img.shape[1]\n\n vis_img = np.zeros((pred_img_h, pred_img_w, 3), dtype=np.uint8)\n vis_img[:h, :w] = img\n vis_img[:, w:] = 255\n\n height_t, width_t = pred_edge_img.shape[:2]\n vis_img[:height_t, w:(w + width_t)] = pred_edge_img\n\n if show:\n mmcv.imshow(vis_img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(vis_img, out_file)\n res_dic = {\n 'boxes': boxes,\n 'nodes': result['nodes'].detach().cpu(),\n 'edges': result['edges'].detach().cpu(),\n 'metas': result['img_metas'][0]\n }\n mmcv.dump(res_dic, f'{out_file}_res.pkl')\n\n return vis_img\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.max",
"matplotlib.pyplot.title",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
davidelbaze/plotter | [
"79871d87b6d1d35ce72159e664dd4e09eef559b7"
] | [
"examples/gtk3_example.py"
] | [
"import sys\nimport gi\n\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import GLib, Gtk, GObject\n\nimport numpy as np\n\nfrom plotter.gtk3_plotter import PlotSelector\n\n\ndef create_to_plot():\n mat = np.random.random((100, 100))\n trace = np.random.random(100)\n to_plot = [\n {\"title\": \"Matrix\", \"type\": \"matrix\", \"data\": mat},\n {\"title\": \"Trace\", \"type\": \"trace\", \"data\": trace},\n ]\n return to_plot\n\n\ndef update_fig(fig):\n if fig == 0:\n return np.random.random((100, 100))\n elif fig == 1:\n return np.random.random(100)\n else:\n return None\n\n\nfigs = create_to_plot()\n\nps = PlotSelector(figs, update_fig)\n\nwin = Gtk.Window()\nwin.connect(\"delete-event\", Gtk.main_quit)\nwin.set_default_size(400, 400)\nwin.set_title(\"Embedding in GTK\")\nwin.add(ps)\nwin.show_all()\nGtk.main()\n"
] | [
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Herding/SeeST | [
"6a0a9fea2e0abe91bc30785f769eefb1ccba07b3"
] | [
"TraFlow/utils/evaluator.py"
] | [
"\"\"\"评价指标\"\"\"\nimport torch\nfrom torch import Tensor\n\n\ndef mape(hat_y, y, masked_value=torch.tensor(0.)):\n \"\"\"MAPE\n\n Args:\n hat_y: 预测值\n y: 真实值\n masked_value: 遮掩运算过程中会异常的值,默认为0\n\n Return:\n ('mape', mape): 评价指标名称,评价结果\n \"\"\"\n masked_val_mtx = torch.ones_like(y) * masked_value\n mask = torch.ne(y, masked_val_mtx)\n\n zeros = torch.zeros_like(y)\n mape = torch.where(mask, (y - hat_y) / y, zeros)\n mape = torch.mean(torch.abs(mape))\n return 'mape', mape * 100\n \ndef mae(hat_y, y):\n \"\"\"MAE\n \n Args:\n hat_y: 预测值\n y: 真实值\n\n Return:\n ('mae', mae): 评价指标名称,评价结果\n \"\"\"\n mae = torch.mean(torch.abs(y-hat_y))\n return 'mae', mae\n \ndef rmse(hat_y, y):\n \"\"\"RMSE\n\n Args:\n hat_y: 预测值\n y: 真实值\n \n Return:\n ('rmse', rmse): 评价指标名称,评价结果\n \"\"\"\n rmse = torch.sqrt(torch.mean(torch.pow(y - hat_y, 2)))\n return 'rmse', rmse\n"
] | [
[
"torch.abs",
"torch.zeros_like",
"torch.tensor",
"torch.pow",
"torch.where",
"torch.ones_like",
"torch.ne"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
philtrade/cudf | [
"a4d5c281c9ede5cd31aeaa6c0d131d932a951554"
] | [
"python/cudf/cudf/core/multiindex.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n\nimport numbers\nimport pickle\nimport warnings\nfrom collections.abc import Sequence\n\nimport cupy\nimport numpy as np\nimport pandas as pd\n\nimport cudf\nfrom cudf.core.column import column\nfrom cudf.core.index import Index, as_index\n\n\nclass MultiIndex(Index):\n \"\"\"A multi-level or hierarchical index.\n\n Provides N-Dimensional indexing into Series and DataFrame objects.\n\n Properties\n ---\n levels: Labels for each category in the index hierarchy.\n codes: Assignment of individual items into the categories of the hierarchy.\n names: Name for each level\n \"\"\"\n\n def __init__(\n self, levels=None, codes=None, labels=None, names=None, **kwargs\n ):\n from cudf.core.series import Series\n from cudf import DataFrame\n\n super().__init__()\n\n self._name = None\n\n column_names = []\n if labels:\n warnings.warn(\n \"the 'labels' keyword is deprecated, use 'codes' \" \"instead\",\n FutureWarning,\n )\n if labels and not codes:\n codes = labels\n\n # early termination enables lazy evaluation of codes\n if \"source_data\" in kwargs:\n source_data = kwargs[\"source_data\"].copy(deep=False)\n source_data.reset_index(drop=True, inplace=True)\n\n if isinstance(source_data, pd.DataFrame):\n nan_as_null = kwargs.get(\"nan_as_null\", None)\n source_data = DataFrame.from_pandas(\n source_data, nan_as_null=nan_as_null\n )\n names = names if names is not None else source_data._data.names\n # if names are unique\n # try using those as the source_data column names:\n if len(dict.fromkeys(names)) == len(names):\n source_data.columns = names\n self._data = source_data._data\n self.names = names\n self._codes = codes\n self._levels = levels\n return\n\n # name setup\n if isinstance(\n names,\n (\n Sequence,\n pd.core.indexes.frozen.FrozenNDArray,\n pd.core.indexes.frozen.FrozenList,\n ),\n ):\n if sum(x is None for x in names) > 1:\n column_names = list(range(len(codes)))\n else:\n column_names = names\n elif names is None:\n column_names = list(range(len(codes)))\n else:\n column_names = names\n\n if len(levels) == 0:\n raise ValueError(\"Must pass non-zero number of levels/codes\")\n\n if not isinstance(codes, DataFrame) and not isinstance(\n codes[0], (Sequence, pd.core.indexes.frozen.FrozenNDArray)\n ):\n raise TypeError(\"Codes is not a Sequence of sequences\")\n\n if isinstance(codes, DataFrame):\n self._codes = codes\n elif len(levels) == len(codes):\n self._codes = DataFrame()\n for i, codes in enumerate(codes):\n name = column_names[i] or i\n codes = column.as_column(codes)\n self._codes[name] = codes.astype(np.int64)\n else:\n raise ValueError(\n \"MultiIndex has unequal number of levels and \"\n \"codes and is inconsistent!\"\n )\n\n self._levels = [Series(level) for level in levels]\n self._validate_levels_and_codes(self._levels, self._codes)\n\n source_data = DataFrame()\n for i, name in enumerate(self._codes.columns):\n codes = as_index(self._codes[name]._column)\n if -1 in self._codes[name].values:\n # Must account for null(s) in _source_data column\n level = DataFrame(\n {name: [None] + list(self._levels[i])},\n index=range(-1, len(self._levels[i])),\n )\n else:\n level = DataFrame({name: self._levels[i]})\n\n import cudf._lib as libcudf\n\n source_data[name] = libcudf.copying.gather(\n level, codes._data.columns[0]\n )._data[name]\n\n self._data = source_data._data\n self.names = names\n\n @property\n def names(self):\n return self._names\n\n @names.setter\n def names(self, value):\n value = [None] * self.nlevels if value is None else 
value\n assert len(value) == self.nlevels\n self._names = pd.core.indexes.frozen.FrozenList(value)\n\n @classmethod\n def _from_table(cls, table, names=None):\n df = cudf.DataFrame(table._data)\n if names is None:\n names = df.columns\n return MultiIndex.from_frame(df, names=names)\n\n @property\n def _source_data(self):\n return cudf.DataFrame(self._data)\n\n @_source_data.setter\n def _source_data(self, value):\n self._data = value._data\n self._compute_levels_and_codes()\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n def _validate_levels_and_codes(self, levels, codes):\n if len(levels) != len(codes.columns):\n raise ValueError(\n \"MultiIndex has unequal number of levels and \"\n \"codes and is inconsistent!\"\n )\n code_length = len(codes[codes.columns[0]])\n for index, code in enumerate(codes):\n if code_length != len(codes[code]):\n raise ValueError(\n \"MultiIndex length of codes does not match \"\n \"and is inconsistent!\"\n )\n for index, code in enumerate(codes):\n if codes[code].max() > len(levels[index]) - 1:\n raise ValueError(\n \"MultiIndex code %d contains value %d larger \"\n \"than maximum level size at this position\"\n )\n\n def copy(self, deep=True):\n mi = MultiIndex(source_data=self._source_data.copy(deep))\n if self._levels is not None:\n mi._levels = [s.copy(deep) for s in self._levels]\n if self._codes is not None:\n mi._codes = self._codes.copy(deep)\n if self.names is not None:\n mi.names = self.names.copy()\n return mi\n\n def deepcopy(self):\n return self.copy(deep=True)\n\n def __copy__(self):\n return self.copy(deep=True)\n\n def _popn(self, n):\n \"\"\" Returns a copy of this index without the left-most n values.\n\n Removes n names, labels, and codes in order to build a new index\n for results.\n \"\"\"\n result = MultiIndex(source_data=self._source_data.iloc[:, n:])\n if self.names is not None:\n result.names = self.names[n:]\n return result\n\n def __repr__(self):\n return (\n \"MultiIndex(levels=\"\n + str(self.levels)\n + \",\\ncodes=\"\n + str(self.codes)\n + \")\"\n )\n\n @property\n def codes(self):\n if self._codes is None:\n self._compute_levels_and_codes()\n return self._codes\n\n @property\n def nlevels(self):\n return self._source_data.shape[1]\n\n @property\n def levels(self):\n if self._levels is None:\n self._compute_levels_and_codes()\n return self._levels\n\n @property\n def labels(self):\n warnings.warn(\n \"This feature is deprecated in pandas and will be\"\n \"dropped from cudf as well.\",\n FutureWarning,\n )\n return self.codes\n\n def isin(self, values, level=None):\n \"\"\"Return a boolean array where the index values are in values.\n\n Compute boolean array of whether each index value is found in\n the passed set of values. 
The length of the returned boolean\n array matches the length of the index.\n\n Parameters\n ----------\n values : set, list-like, Index or Multi-Index\n Sought values.\n level : str or int, optional\n Name or position of the index level to use (if the index\n is a MultiIndex).\n Returns\n -------\n is_contained : cupy array\n CuPy array of boolean values.\n Notes\n -------\n When `level` is None, `values` can only be MultiIndex, or a\n set/list-like tuples.\n When `level` is provided, `values` can be Index or MultiIndex,\n or a set/list-like tuples.\n \"\"\"\n from cudf.utils.dtypes import is_list_like\n\n if level is None:\n if isinstance(values, cudf.MultiIndex):\n values_idx = values\n elif (\n (\n isinstance(\n values,\n (\n cudf.Series,\n cudf.Index,\n cudf.DataFrame,\n column.ColumnBase,\n ),\n )\n )\n or (not is_list_like(values))\n or (\n is_list_like(values)\n and len(values) > 0\n and not isinstance(values[0], tuple)\n )\n ):\n raise TypeError(\n \"values need to be a Multi-Index or set/list-like tuple \\\n squences when `level=None`.\"\n )\n else:\n values_idx = cudf.MultiIndex.from_tuples(\n values, names=self.names\n )\n\n res = []\n for name in self.names:\n level_idx = self.get_level_values(name)\n value_idx = values_idx.get_level_values(name)\n\n existence = level_idx.isin(value_idx)\n res.append(existence)\n\n result = res[0]\n for i in res[1:]:\n result = result & i\n else:\n level_series = self.get_level_values(level)\n result = level_series.isin(values)\n\n return result\n\n def mask(self, cond, other=None, inplace=False):\n raise NotImplementedError(\n \".mask is not supported for MultiIndex operations\"\n )\n\n def where(self, cond, other=None, inplace=False):\n raise NotImplementedError(\n \".where is not supported for MultiIndex operations\"\n )\n\n def _compute_levels_and_codes(self):\n levels = []\n from cudf import DataFrame\n\n codes = DataFrame()\n for name in self._source_data.columns:\n code, cats = self._source_data[name].factorize()\n codes[name] = code.reset_index(drop=True).astype(np.int64)\n cats.name = None\n cats = cats.reset_index(drop=True)._copy_construct(name=None)\n levels.append(cats)\n\n self._levels = levels\n self._codes = codes\n\n def _compute_validity_mask(self, index, row_tuple, max_length):\n \"\"\" Computes the valid set of indices of values in the lookup\n \"\"\"\n from cudf import DataFrame\n from cudf import Series\n from cudf import concat\n\n lookup = DataFrame()\n for idx, row in enumerate(row_tuple):\n if isinstance(row, slice) and row == slice(None):\n continue\n lookup[index._source_data.columns[idx]] = Series(row)\n data_table = concat(\n [\n index._source_data,\n DataFrame(\n {\"idx\": Series(cupy.arange(len(index._source_data)))}\n ),\n ],\n axis=1,\n )\n result = lookup.merge(data_table)[\"idx\"]\n # Avoid computing levels unless the result of the merge is empty,\n # which suggests that a KeyError should be raised.\n if len(result) == 0:\n for idx, row in enumerate(row_tuple):\n if row == slice(None):\n continue\n if row not in index.levels[idx]._column:\n raise KeyError(row)\n return result\n\n def _get_valid_indices_by_tuple(self, index, row_tuple, max_length):\n from cudf import Series\n\n # Instructions for Slicing\n # if tuple, get first and last elements of tuple\n # if open beginning tuple, get 0 to highest valid_index\n # if open ending tuple, get highest valid_index to len()\n # if not open end or beginning, get range lowest beginning index\n # to highest ending index\n if isinstance(row_tuple, slice):\n if (\n 
isinstance(row_tuple.start, numbers.Number)\n or isinstance(row_tuple.stop, numbers.Number)\n or row_tuple == slice(None)\n ):\n stop = row_tuple.stop or max_length\n start, stop, step = row_tuple.indices(stop)\n return cupy.arange(start, stop, step)\n start_values = self._compute_validity_mask(\n index, row_tuple.start, max_length\n )\n stop_values = self._compute_validity_mask(\n index, row_tuple.stop, max_length\n )\n return Series(\n cupy.arange(start_values.min(), stop_values.max() + 1)\n )\n elif isinstance(row_tuple, numbers.Number):\n return row_tuple\n return self._compute_validity_mask(index, row_tuple, max_length)\n\n def _index_and_downcast(self, result, index, index_key):\n from cudf import DataFrame\n from cudf import Series\n\n if isinstance(index_key, (numbers.Number, slice)):\n index_key = [index_key]\n if (\n len(index_key) > 0 and not isinstance(index_key, tuple)\n ) or isinstance(index_key[0], slice):\n index_key = index_key[0]\n\n slice_access = False\n if isinstance(index_key, slice):\n slice_access = True\n out_index = DataFrame()\n # Select the last n-k columns where n is the number of _source_data\n # columns and k is the length of the indexing tuple\n size = 0\n if not isinstance(index_key, (numbers.Number, slice)):\n size = len(index_key)\n for k in range(size, len(index._source_data.columns)):\n if index.names is None:\n name = k\n else:\n name = index.names[k]\n out_index.insert(\n len(out_index.columns),\n name,\n index._source_data[index._source_data.columns[k]],\n )\n\n if len(result) == 1 and size == 0 and slice_access is False:\n # If the final result is one row and it was not mapped into\n # directly, return a Series with a tuple as name.\n result = result.T\n result = result[result._data.names[0]]\n elif len(result) == 0 and slice_access is False:\n # Pandas returns an empty Series with a tuple as name\n # the one expected result column\n series_name = []\n for idx, code in enumerate(index._source_data.columns):\n series_name.append(index._source_data[code][0])\n result = Series([])\n result.name = tuple(series_name)\n elif len(out_index.columns) == 1:\n # If there's only one column remaining in the output index, convert\n # it into an Index and name the final index values according\n # to the _source_data column names\n last_column = index._source_data.columns[-1]\n out_index = index._source_data[last_column]\n out_index = as_index(out_index)\n out_index.name = index.names[len(index.names) - 1]\n index = out_index\n elif len(out_index.columns) > 1:\n # Otherwise pop the leftmost levels, names, and codes from the\n # source index until it has the correct number of columns (n-k)\n result.reset_index(drop=True)\n index = index._popn(size)\n if isinstance(index_key, tuple):\n result = result.set_index(index)\n return result\n\n def _get_row_major(self, df, row_tuple):\n from cudf import Series\n\n if pd.api.types.is_bool_dtype(row_tuple):\n return df[row_tuple]\n\n valid_indices = self._get_valid_indices_by_tuple(\n df.index, row_tuple, len(df.index)\n )\n indices = Series(valid_indices)\n result = df.take(indices)\n final = self._index_and_downcast(result, result.index, row_tuple)\n return final\n\n def _split_tuples(self, tuples):\n if len(tuples) == 1:\n return tuples, slice(None)\n elif isinstance(tuples[0], tuple):\n row = tuples[0]\n if len(tuples) == 1:\n column = slice(None)\n else:\n column = tuples[1]\n return row, column\n elif isinstance(tuples[0], slice):\n return tuples\n else:\n return tuples, slice(None)\n\n def __len__(self):\n return 
len(next(iter(self._data.columns)))\n\n def equals(self, other):\n if self is other:\n return True\n if len(self) != len(other):\n return False\n return self == other\n\n def __eq__(self, other):\n if not hasattr(other, \"_levels\"):\n return False\n # Lazy comparison\n if isinstance(other, MultiIndex) or hasattr(other, \"_source_data\"):\n for self_col, other_col in zip(\n self._source_data._data.values(),\n other._source_data._data.values(),\n ):\n if not self_col.equals(other_col):\n return False\n return self.names == other.names\n else:\n # Lazy comparison isn't possible - MI was created manually.\n # Actually compare the MI, not its source data (it doesn't have\n # any).\n equal_levels = self.levels == other.levels\n if isinstance(equal_levels, np.ndarray):\n equal_levels = equal_levels.all()\n return (\n equal_levels\n and self.codes.equals(other.codes)\n and self.names == other.names\n )\n\n @property\n def is_contiguous(self):\n return True\n\n @property\n def size(self):\n return len(self._source_data)\n\n def take(self, indices):\n from collections.abc import Sequence\n from cudf import Series\n from numbers import Integral\n\n if isinstance(indices, (Integral, Sequence)):\n indices = np.array(indices)\n elif isinstance(indices, Series):\n if indices.has_nulls:\n raise ValueError(\"Column must have no nulls.\")\n indices = indices\n elif isinstance(indices, slice):\n start, stop, step = indices.indices(len(self))\n indices = cupy.arange(start, stop, step)\n result = MultiIndex(source_data=self._source_data.take(indices))\n if self._codes is not None:\n result._codes = self._codes.take(indices)\n if self._levels is not None:\n result._levels = self._levels\n result.names = self.names\n return result\n\n def serialize(self):\n \"\"\"Serialize into pickle format suitable for file storage or network\n transmission.\n \"\"\"\n header = {}\n header[\"type-serialized\"] = pickle.dumps(type(self))\n header[\"names\"] = pickle.dumps(self.names)\n\n header[\"source_data\"], frames = self._source_data.serialize()\n\n return header, frames\n\n @classmethod\n def deserialize(cls, header, frames):\n \"\"\"Convert from pickle format into Index\n \"\"\"\n names = pickle.loads(header[\"names\"])\n\n source_data_typ = pickle.loads(\n header[\"source_data\"][\"type-serialized\"]\n )\n source_data = source_data_typ.deserialize(\n header[\"source_data\"], frames\n )\n\n names = pickle.loads(header[\"names\"])\n return MultiIndex(names=names, source_data=source_data)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self.codes):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __getitem__(self, index):\n # TODO: This should be a take of the _source_data only\n match = self.take(index)\n if isinstance(index, slice):\n return match\n result = []\n for level, item in enumerate(match.codes):\n result.append(match.levels[level][match.codes[item].iloc[0]])\n return tuple(result)\n\n def to_frame(self, index=True, name=None):\n df = self._source_data\n if index:\n df = df.set_index(self)\n if name is not None:\n if len(name) != len(self.levels):\n raise ValueError(\n \"'name' should have th same length as \"\n \"number of levels on index.\"\n )\n df.columns = name\n return df\n\n def get_level_values(self, level):\n \"\"\"\n Return the values at the requested level\n\n Parameters\n ----------\n level : int or label\n\n Returns\n -------\n An Index containing the values at the requested level.\n \"\"\"\n colnames = 
list(self._source_data.columns)\n if level not in colnames:\n if isinstance(level, int):\n if level < 0:\n level = level + len(colnames)\n if level < 0 or level >= len(colnames):\n raise IndexError(f\"Invalid level number: '{level}'\")\n level_idx = level\n level = colnames[level_idx]\n elif level in self.names:\n level_idx = list(self.names).index(level)\n level = colnames[level_idx]\n else:\n raise KeyError(f\"Level not found: '{level}'\")\n else:\n level_idx = colnames.index(level)\n level_values = as_index(\n self._source_data._data[level], name=self.names[level_idx]\n )\n return level_values\n\n def _to_frame(self):\n from cudf import DataFrame, Series\n\n # for each column of codes\n # replace column with mapping from integers to levels\n df = self.codes.copy(deep=False)\n for idx, col in enumerate(df.columns):\n # use merge as a replace fn\n level = DataFrame(\n {\n \"idx\": Series(\n cupy.arange(len(self.levels[idx]), dtype=df[col].dtype)\n ),\n \"level\": self.levels[idx],\n }\n )\n code = DataFrame({\"idx\": df[col]})\n df[col] = code.merge(level).level\n return df\n\n @property\n def _values(self):\n return list([i for i in self])\n\n @classmethod\n def _concat(cls, objs):\n from cudf import DataFrame, MultiIndex\n\n source_data = [o._source_data for o in objs]\n source_data = DataFrame._concat(source_data)\n names = [None for x in source_data.columns]\n objs = list(filter(lambda o: o.names is not None, objs))\n for o in range(len(objs)):\n for i, name in enumerate(objs[o].names):\n names[i] = names[i] or name\n return MultiIndex(names=names, source_data=source_data)\n\n @classmethod\n def from_tuples(cls, tuples, names=None):\n # Use Pandas for handling Python host objects\n pdi = pd.MultiIndex.from_tuples(tuples, names=names)\n result = cls.from_pandas(pdi)\n return result\n\n @classmethod\n def from_frame(cls, dataframe, names=None):\n return cls(source_data=dataframe, names=names)\n\n @classmethod\n def from_product(cls, arrays, names=None):\n # Use Pandas for handling Python host objects\n pdi = pd.MultiIndex.from_product(arrays, names=names)\n result = cls.from_pandas(pdi)\n return result\n\n def to_pandas(self):\n if hasattr(self, \"_source_data\"):\n result = self._source_data.to_pandas()\n result.columns = self.names\n return pd.MultiIndex.from_frame(result)\n\n pandas_codes = []\n for code in self.codes.columns:\n pandas_codes.append(self.codes[code].to_array())\n\n # We do two things here to mimic Pandas behavior:\n # 1. as_index() on each level, so DatetimeColumn becomes DatetimeIndex\n # 2. convert levels to numpy array so empty levels become Float64Index\n levels = np.array(\n [as_index(level).to_pandas() for level in self.levels]\n )\n\n # Backwards compatibility:\n # Construct a dummy MultiIndex and check for the codes attr.\n # This indicates that it is pandas >= 0.24\n # If no codes attr is present it is pandas <= 0.23\n if hasattr(pd.MultiIndex([[]], [[]]), \"codes\"):\n pandas_mi = pd.MultiIndex(levels=levels, codes=pandas_codes)\n else:\n pandas_mi = pd.MultiIndex(levels=levels, labels=pandas_codes)\n if self.names is not None:\n pandas_mi.names = self.names\n return pandas_mi\n\n @classmethod\n def from_pandas(cls, multiindex, nan_as_null=None):\n \"\"\"\n Convert from a Pandas MultiIndex\n\n Raises\n ------\n TypeError for invalid input type.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> pmi = pd.MultiIndex(levels=[['a', 'b'], ['c', 'd']],\n codes=[[0, 1], [1, ]])\n >>> cudf.from_pandas(pmi)\n MultiIndex( ... 
)\n \"\"\"\n if not isinstance(multiindex, pd.MultiIndex):\n raise TypeError(\"not a pandas.MultiIndex\")\n\n mi = cls(\n names=multiindex.names,\n source_data=multiindex.to_frame(),\n nan_as_null=nan_as_null,\n )\n\n return mi\n\n @property\n def is_unique(self):\n if not hasattr(self, \"_is_unique\"):\n self._is_unique = len(self._source_data) == len(\n self._source_data.drop_duplicates(ignore_index=True)\n )\n return self._is_unique\n\n @property\n def is_monotonic_increasing(self):\n if not hasattr(self, \"_is_monotonic_increasing\"):\n self._is_monotonic_increasing = self._is_sorted(\n ascending=None, null_position=None\n )\n return self._is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self):\n if not hasattr(self, \"_is_monotonic_decreasing\"):\n self._is_monotonic_decreasing = self._is_sorted(\n ascending=[False] * len(self.levels), null_position=None\n )\n return self._is_monotonic_decreasing\n\n def argsort(self, ascending=True):\n return self._source_data.argsort(ascending=ascending)\n\n def unique(self):\n return MultiIndex.from_frame(self._source_data.drop_duplicates())\n\n def memory_usage(self, deep=False):\n n = 0\n for col in self._source_data._columns:\n n += col._memory_usage(deep=deep)\n if self._levels:\n for level in self._levels:\n n += level.memory_usage(deep=deep)\n if self._codes:\n for col in self._codes._columns:\n n += col._memory_usage(deep=deep)\n return n\n\n def difference(self, other, sort=None):\n temp_self = self\n temp_other = other\n if hasattr(self, \"to_pandas\"):\n temp_self = self.to_pandas()\n if hasattr(other, \"to_pandas\"):\n temp_other = self.to_pandas()\n return temp_self.difference(temp_other, sort)\n\n def nan_to_num(*args, **kwargs):\n return args[0]\n\n def array_equal(*args, **kwargs):\n return args[0] == args[1]\n\n def __array_function__(self, func, types, args, kwargs):\n cudf_df_module = MultiIndex\n\n for submodule in func.__module__.split(\".\")[1:]:\n # point cudf to the correct submodule\n if hasattr(cudf_df_module, submodule):\n cudf_df_module = getattr(cudf_df_module, submodule)\n else:\n return NotImplemented\n\n fname = func.__name__\n\n handled_types = [cudf_df_module, np.ndarray]\n\n for t in types:\n if t not in handled_types:\n return NotImplemented\n\n if hasattr(cudf_df_module, fname):\n cudf_func = getattr(cudf_df_module, fname)\n # Handle case if cudf_func is same as numpy function\n if cudf_func is func:\n return NotImplemented\n else:\n return cudf_func(*args, **kwargs)\n else:\n return NotImplemented\n\n def _mimic_inplace(self, other, inplace=False):\n if inplace is True:\n for in_col, oth_col in zip(\n self._source_data._columns, other._source_data._columns,\n ):\n in_col._mimic_inplace(oth_col, inplace=True)\n else:\n return other\n"
] | [
[
"pandas.MultiIndex.from_frame",
"pandas.MultiIndex",
"pandas.MultiIndex.from_tuples",
"pandas.core.indexes.frozen.FrozenList",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas.api.types.is_bool_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
SMU-HCI-Lab/tutorials | [
"621d5d65ac1556ecbc62ec885ca12d7e860af832"
] | [
"NLP/MLForNLP/lib/logisticreg_wdbc.py"
] | [
"import logisticreg\nimport csv\nimport numpy as np\n\n\nn_test = 100\nX = []\ny = []\n\n# Data: https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/\nwith open(\"wdbc.data\") as fp:\n for row in csv.reader(fp):\n if row[1] == \"B\":\n y.append(0)\n else:\n y.append(1)\n X.append(row[2:])\n\n\ny = np.array(y, dtype=np.float64)\nX = np.array(X, dtype=np.float64)\n\ny_train = y[:-n_test]\nX_train = X[:-n_test]\ny_test = y[-n_test:]\nX_test = X[-n_test:]\n\nmodel = logisticreg.LogisticRegression(tol=0.01)\nmodel.fit(X_train, y_train)\n\ny_predict = model.predict(X_test)\nn_hits = (y_test == y_predict).sum()\n\nprint(\"Accuracy: {}/{} = {}\".format(n_hits, n_test, n_hits / n_test))"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chrisquince/BayesPaths | [
"d632578079daa9edd65d5a60f9244c3f2017cc14"
] | [
"scripts/Add_color_CQE.py"
] | [
"import os\nimport sys\nimport argparse\nimport numpy as np\nfrom collections import Counter,defaultdict\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\n\n#Color_scheme=[\"#0277BD\",]\n\nColor_scheme=['#42A5F5','#66BB6A','#FFEB3B','#EF5350','#FF00FF']\n\n#Color_scheme=[\"#F0A3FF\", \"#0075DC\", \"#993F00\",\"#4C005C\",\"#2BCE48\",\"#FFCC99\",\"#808080\",\"#94FFB5\",\"#8F7C00\",\"#9DCC00\",\"#C20088\",\"#003380\",\"#FFA405\",\"#FFA8BB\",\"#426600\",\"#FF0010\",\"#5EF1F2\",\"#00998F\",\"#740AFF\",\"#990000\",\"#FFFF00\"]\nNColors = len(Color_scheme)\n\ndef merge_color(Listcolor,List_merged):\n total_color=np.zeros(3)\n for color in Listcolor:\n total_color=total_color+np.array([int(color[1:3],16),int(color[3:5],16),int(color[5:],16)])\n int_to_hex=lambda x:hex(int(x))[2:].upper()\n Correct_int_to_hex=lambda x:int_to_hex(x)*(int_to_hex(x)!=\"0\")*(len(int_to_hex(x))!=1)+\"00\"*(int_to_hex(x)==\"0\")+(\"0\"+int_to_hex(x))*(int_to_hex(x)!=\"0\")*(len(int_to_hex(x))==1)\n Merged_color=\"#\"+\"\".join([Correct_int_to_hex(value) for value in total_color/len(Listcolor)])\n if len(Listcolor)>1:\n List_merged.append((tuple([Color_scheme.index(color) for color in Listcolor]),Merged_color))\n return Merged_color\n \ndef main(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"gfa_file\", help=\"unitig fasta file in Bcalm2 format\")\n\n parser.add_argument(\"strain_file\", help=\"strain assignments tab delimited\")\n\n parser.add_argument(\"strain_idx\", help=\"strain index\")\n\n args = parser.parse_args()\n\n #import ipdb; ipdb.set_trace()\n idx = int(args.strain_idx)\n mapUnitigs = {}\n set_Bugs = set([idx])\n with open(args.strain_file) as f:\n for line in f:\n \n line = line.rstrip()\n \n tokens = line.split('\\t')\n \n unitig = tokens.pop(0)\n \n mapUnitigs[unitig] = [idx]\n \n list_merged=[]\n\n list_Bugs=sorted(list(set_Bugs))\n color_Bugs = {}\n bidx = 0\n for bug in list_Bugs:\n color_Bugs[bug] = Color_scheme[bidx % NColors + idx]\n bidx += 1\n mapUnitigColor = {}\n \n for unitig,strains in mapUnitigs.items():\n if len(strains) > 0:\n strain_colors = [color_Bugs[strain] for strain in strains]\n mapUnitigColor[unitig] = merge_color(strain_colors,list_merged)\n else:\n mapUnitigColor[unitig] = \"#d3d3d3\"\n \n with open(args.gfa_file) as f:\n for line in f:\n line=line.rstrip()\n toks = line.split('\\t')\n \n if toks[0]==\"S\":\n unitig=toks[1]\n if unitig not in mapUnitigColor:\n color=\"#d3d3d3\"\n else:\n color = mapUnitigColor[unitig]\n \n toks.append(\"CL:z:\"+color+\"\\tC2:z:\"+color+\"\\n\")\n \n tString = '\\t'.join(toks)\n \n print(tString)\n else:\n print(line)\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NoVarlok/sova-tts-engine | [
"1b7c0b3591bb7f823be648093de279881e194d05"
] | [
"utils/utils.py"
] | [
"\"\"\"\nBSD 3-Clause License\n\nCopyright (c) 2018, NVIDIA Corporation\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nimport os\nimport numpy as np\nfrom collections import namedtuple\n\nimport torch\n\nInputs = namedtuple(\"Inputs\", [\"text\", \"mels\", \"gate\", \"text_len\", \"mel_len\"])\n\nInputsCTC = namedtuple(\"InputsCTC\", [\"text\", \"length\"])\n\nOutputs = namedtuple(\"Outputs\", [\"mels\", \"mels_postnet\", \"gate\", \"alignments\"])\nOutputsGST = namedtuple(\"OutputsGST\", [\"style_emb\", \"gst_weights\"])\n\n\ndef calculate_global_mean(data_loader, path=None):\n \"\"\"\n Based on https://github.com/bfs18/tacotron2\n \"\"\"\n sums = []\n frames = []\n print(\"Calculating global mean...\")\n for i, batch in enumerate(data_loader):\n print(\"\\rProcessing batch #{} out of {}\".format(i + 1, len(data_loader)), end=\"\")\n inputs, *_ = batch\n # padded values are 0.\n sums.append(inputs.mels.double().sum(dim=(0, 2)))\n frames.append(inputs.mel_len.double().sum())\n\n global_mean = (sum(sums) / sum(frames)).float()\n\n if path is not None:\n np.save(path, global_mean.numpy())\n\n return to_gpu(global_mean)\n\n\ndef load_global_mean(path):\n assert os.path.exists(path)\n global_mean = np.load(path)\n\n return to_gpu(torch.tensor(global_mean))\n\n\ndef get_mask_from_lengths(lengths):\n max_len = lengths.max()\n ids = torch.arange(max_len, device=lengths.device)\n mask = ids < lengths.unsqueeze(1)\n return mask\n\n\ndef get_mask_3d(widths, heights):\n mask_width = get_mask_from_lengths(widths)\n mask_height = get_mask_from_lengths(heights)\n mask_3d = mask_width.unsqueeze(2) & mask_height.unsqueeze(1)\n return mask_3d\n\n\ndef get_drop_frame_mask_from_lengths(lengths, drop_frame_rate):\n \"\"\"\n Based on https://github.com/bfs18/tacotron2\n \"\"\"\n batch_size = lengths.size(0)\n max_len = torch.max(lengths).item()\n mask = get_mask_from_lengths(lengths).float()\n drop_mask = torch.empty([batch_size, max_len], device=lengths.device).uniform_(0., 1.) 
< drop_frame_rate\n drop_mask = drop_mask.float() * mask\n return drop_mask\n\n\ndef dropout_frame(mels, global_mean, mel_lengths, drop_frame_rate):\n \"\"\"\n Based on https://github.com/bfs18/tacotron2\n \"\"\"\n drop_mask = get_drop_frame_mask_from_lengths(mel_lengths, drop_frame_rate)\n dropped_mels = (mels * (1.0 - drop_mask).unsqueeze(1) +\n global_mean[None, :, None] * drop_mask.unsqueeze(1))\n return dropped_mels\n\n\ndef load_filepaths_and_text(filename, split=\"|\"):\n with open(filename, encoding='utf-8') as f:\n filepaths_and_text = [line.strip().split(split) for line in f]\n return filepaths_and_text\n\n\ndef to_gpu(x):\n x = x.contiguous()\n\n if torch.cuda.is_available():\n x = x.cuda(non_blocking=True)\n return torch.autograd.Variable(x)\n\n\ndef to_numpy(tensor):\n return tensor.data.cpu().numpy()"
] | [
[
"torch.max",
"torch.empty",
"torch.tensor",
"torch.cuda.is_available",
"torch.arange",
"numpy.load",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kenfranko/tensorboard | [
"0b56d33ef422cdf7916e3ba2cb1674a669c42db7",
"0b56d33ef422cdf7916e3ba2cb1674a669c42db7"
] | [
"tensorboard/uploader/uploader_test.py",
"tensorboard/compat/proto/proto_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorboard.uploader.uploader.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport os\nimport re\n\nimport grpc\nimport grpc_testing\n\ntry:\n # python version >= 3.3\n from unittest import mock\nexcept ImportError:\n import mock # pylint: disable=unused-import\n\nimport tensorflow as tf\n\nfrom google.protobuf import message\nfrom tensorboard import data_compat\nfrom tensorboard import dataclass_compat\nfrom tensorboard.compat.proto import tensor_shape_pb2\nfrom tensorboard.uploader.proto import experiment_pb2\nfrom tensorboard.uploader.proto import scalar_pb2\nfrom tensorboard.uploader.proto import server_info_pb2\nfrom tensorboard.uploader.proto import write_service_pb2\nfrom tensorboard.uploader.proto import write_service_pb2_grpc\nfrom tensorboard.uploader import test_util\nfrom tensorboard.uploader import upload_tracker\nfrom tensorboard.uploader import uploader as uploader_lib\nfrom tensorboard.uploader import logdir_loader\nfrom tensorboard.uploader import util\nfrom tensorboard.compat.proto import event_pb2\nfrom tensorboard.compat.proto import graph_pb2\nfrom tensorboard.compat.proto import summary_pb2\nfrom tensorboard.compat.proto import tensor_pb2\nfrom tensorboard.compat.proto import types_pb2\nfrom tensorboard.plugins.histogram import metadata as histograms_metadata\nfrom tensorboard.plugins.histogram import summary_v2 as histogram_v2\nfrom tensorboard.plugins.graph import metadata as graphs_metadata\nfrom tensorboard.plugins.scalar import metadata as scalars_metadata\nfrom tensorboard.plugins.scalar import summary_v2 as scalar_v2\nfrom tensorboard.summary import v1 as summary_v1\nfrom tensorboard.util import test_util as tb_test_util\nfrom tensorboard.util import tensor_util\n\n\ndef _create_example_graph_bytes(large_attr_size):\n graph_def = graph_pb2.GraphDef()\n graph_def.node.add(name=\"alice\", op=\"Person\")\n graph_def.node.add(name=\"bob\", op=\"Person\")\n\n graph_def.node[1].attr[\"small\"].s = b\"small_attr_value\"\n graph_def.node[1].attr[\"large\"].s = b\"l\" * large_attr_size\n graph_def.node.add(\n name=\"friendship\", op=\"Friendship\", input=[\"alice\", \"bob\"]\n )\n return graph_def.SerializeToString()\n\n\nclass AbortUploadError(Exception):\n \"\"\"Exception used in testing to abort the upload process.\"\"\"\n\n\ndef _create_mock_client():\n # Create a stub instance (using a test channel) in order to derive a mock\n # from it with autospec enabled. 
Mocking TensorBoardWriterServiceStub itself\n # doesn't work with autospec because grpc constructs stubs via metaclassing.\n test_channel = grpc_testing.channel(\n service_descriptors=[], time=grpc_testing.strict_real_time()\n )\n stub = write_service_pb2_grpc.TensorBoardWriterServiceStub(test_channel)\n mock_client = mock.create_autospec(stub)\n fake_exp_response = write_service_pb2.CreateExperimentResponse(\n experiment_id=\"123\", url=\"should not be used!\"\n )\n mock_client.CreateExperiment.return_value = fake_exp_response\n mock_client.GetOrCreateBlobSequence.side_effect = (\n write_service_pb2.GetOrCreateBlobSequenceResponse(\n blob_sequence_id=\"blob%d\" % i\n )\n for i in itertools.count()\n )\n return mock_client\n\n\n# By default allow at least one plugin for each upload type: Scalar, Tensor, and\n# Blobs.\n_SCALARS_HISTOGRAMS_AND_GRAPHS = frozenset(\n (\n scalars_metadata.PLUGIN_NAME,\n histograms_metadata.PLUGIN_NAME,\n graphs_metadata.PLUGIN_NAME,\n )\n)\n\n# Sentinel for `_create_*` helpers, for arguments for which we want to\n# supply a default other than the `None` used by the code under test.\n_USE_DEFAULT = object()\n\n\ndef _create_uploader(\n writer_client=_USE_DEFAULT,\n logdir=None,\n max_scalar_request_size=_USE_DEFAULT,\n max_blob_request_size=_USE_DEFAULT,\n max_blob_size=_USE_DEFAULT,\n logdir_poll_rate_limiter=_USE_DEFAULT,\n rpc_rate_limiter=_USE_DEFAULT,\n tensor_rpc_rate_limiter=_USE_DEFAULT,\n blob_rpc_rate_limiter=_USE_DEFAULT,\n name=None,\n description=None,\n verbosity=0, # Use 0 to minimize littering the test output.\n one_shot=None,\n):\n if writer_client is _USE_DEFAULT:\n writer_client = _create_mock_client()\n if max_scalar_request_size is _USE_DEFAULT:\n max_scalar_request_size = 128000\n if max_blob_request_size is _USE_DEFAULT:\n max_blob_request_size = 128000\n if max_blob_size is _USE_DEFAULT:\n max_blob_size = 12345\n if logdir_poll_rate_limiter is _USE_DEFAULT:\n logdir_poll_rate_limiter = util.RateLimiter(0)\n if rpc_rate_limiter is _USE_DEFAULT:\n rpc_rate_limiter = util.RateLimiter(0)\n if tensor_rpc_rate_limiter is _USE_DEFAULT:\n tensor_rpc_rate_limiter = util.RateLimiter(0)\n if blob_rpc_rate_limiter is _USE_DEFAULT:\n blob_rpc_rate_limiter = util.RateLimiter(0)\n\n upload_limits = server_info_pb2.UploadLimits(\n max_scalar_request_size=max_scalar_request_size,\n max_tensor_request_size=128000,\n max_tensor_point_size=11111,\n max_blob_request_size=max_blob_request_size,\n max_blob_size=max_blob_size,\n )\n\n return uploader_lib.TensorBoardUploader(\n writer_client,\n logdir,\n allowed_plugins=_SCALARS_HISTOGRAMS_AND_GRAPHS,\n upload_limits=upload_limits,\n logdir_poll_rate_limiter=logdir_poll_rate_limiter,\n rpc_rate_limiter=rpc_rate_limiter,\n tensor_rpc_rate_limiter=tensor_rpc_rate_limiter,\n blob_rpc_rate_limiter=blob_rpc_rate_limiter,\n name=name,\n description=description,\n verbosity=verbosity,\n one_shot=one_shot,\n )\n\n\ndef _create_request_sender(\n experiment_id=None, api=None, allowed_plugins=_USE_DEFAULT,\n):\n if api is _USE_DEFAULT:\n api = _create_mock_client()\n if allowed_plugins is _USE_DEFAULT:\n allowed_plugins = _SCALARS_HISTOGRAMS_AND_GRAPHS\n\n upload_limits = server_info_pb2.UploadLimits(\n max_scalar_request_size=128000,\n max_tensor_request_size=128000,\n max_tensor_point_size=11111,\n max_blob_size=12345,\n )\n\n rpc_rate_limiter = util.RateLimiter(0)\n tensor_rpc_rate_limiter = util.RateLimiter(0)\n blob_rpc_rate_limiter = util.RateLimiter(0)\n\n return uploader_lib._BatchedRequestSender(\n 
experiment_id=experiment_id,\n api=api,\n allowed_plugins=allowed_plugins,\n upload_limits=upload_limits,\n rpc_rate_limiter=rpc_rate_limiter,\n tensor_rpc_rate_limiter=tensor_rpc_rate_limiter,\n blob_rpc_rate_limiter=blob_rpc_rate_limiter,\n tracker=upload_tracker.UploadTracker(verbosity=0),\n )\n\n\ndef _create_scalar_request_sender(\n experiment_id=None, api=_USE_DEFAULT, max_request_size=_USE_DEFAULT\n):\n if api is _USE_DEFAULT:\n api = _create_mock_client()\n if max_request_size is _USE_DEFAULT:\n max_request_size = 128000\n return uploader_lib._ScalarBatchedRequestSender(\n experiment_id=experiment_id,\n api=api,\n rpc_rate_limiter=util.RateLimiter(0),\n max_request_size=max_request_size,\n tracker=upload_tracker.UploadTracker(verbosity=0),\n )\n\n\ndef _create_tensor_request_sender(\n experiment_id=None,\n api=_USE_DEFAULT,\n max_request_size=_USE_DEFAULT,\n max_tensor_point_size=_USE_DEFAULT,\n):\n if api is _USE_DEFAULT:\n api = _create_mock_client()\n if max_request_size is _USE_DEFAULT:\n max_request_size = 128000\n if max_tensor_point_size is _USE_DEFAULT:\n max_tensor_point_size = 11111\n return uploader_lib._TensorBatchedRequestSender(\n experiment_id=experiment_id,\n api=api,\n rpc_rate_limiter=util.RateLimiter(0),\n max_request_size=max_request_size,\n max_tensor_point_size=max_tensor_point_size,\n tracker=upload_tracker.UploadTracker(verbosity=0),\n )\n\n\nclass TensorboardUploaderTest(tf.test.TestCase):\n def test_create_experiment(self):\n logdir = \"/logs/foo\"\n uploader = _create_uploader(_create_mock_client(), logdir)\n eid = uploader.create_experiment()\n self.assertEqual(eid, \"123\")\n\n def test_create_experiment_with_name(self):\n logdir = \"/logs/foo\"\n mock_client = _create_mock_client()\n new_name = \"This is the new name\"\n uploader = _create_uploader(mock_client, logdir, name=new_name)\n eid = uploader.create_experiment()\n self.assertEqual(eid, \"123\")\n mock_client.CreateExperiment.assert_called_once()\n (args, _) = mock_client.CreateExperiment.call_args\n\n expected_request = write_service_pb2.CreateExperimentRequest(\n name=new_name,\n )\n self.assertEqual(args[0], expected_request)\n\n def test_create_experiment_with_description(self):\n logdir = \"/logs/foo\"\n mock_client = _create_mock_client()\n new_description = \"\"\"\n **description**\"\n may have \"strange\" unicode chars 🌴 \\\\/<>\n \"\"\"\n uploader = _create_uploader(\n mock_client, logdir, description=new_description\n )\n eid = uploader.create_experiment()\n self.assertEqual(eid, \"123\")\n mock_client.CreateExperiment.assert_called_once()\n (args, _) = mock_client.CreateExperiment.call_args\n\n expected_request = write_service_pb2.CreateExperimentRequest(\n description=new_description,\n )\n self.assertEqual(args[0], expected_request)\n\n def test_create_experiment_with_all_metadata(self):\n logdir = \"/logs/foo\"\n mock_client = _create_mock_client()\n new_description = \"\"\"\n **description**\"\n may have \"strange\" unicode chars 🌴 \\\\/<>\n \"\"\"\n new_name = \"This is a cool name.\"\n uploader = _create_uploader(\n mock_client, logdir, name=new_name, description=new_description\n )\n eid = uploader.create_experiment()\n self.assertEqual(eid, \"123\")\n mock_client.CreateExperiment.assert_called_once()\n (args, _) = mock_client.CreateExperiment.call_args\n\n expected_request = write_service_pb2.CreateExperimentRequest(\n name=new_name, description=new_description,\n )\n self.assertEqual(args[0], expected_request)\n\n def 
test_start_uploading_without_create_experiment_fails(self):\n mock_client = _create_mock_client()\n uploader = _create_uploader(mock_client, \"/logs/foo\")\n with self.assertRaisesRegex(RuntimeError, \"call create_experiment()\"):\n uploader.start_uploading()\n\n def test_start_uploading_scalars(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tensor_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tracker = mock.MagicMock()\n with mock.patch.object(\n upload_tracker, \"UploadTracker\", return_value=mock_tracker\n ):\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n # Send each Event below in a separate WriteScalarRequest\n max_scalar_request_size=100,\n rpc_rate_limiter=mock_rate_limiter,\n tensor_rpc_rate_limiter=mock_tensor_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n verbosity=1, # In order to test the upload tracker.\n )\n uploader.create_experiment()\n\n def scalar_event(tag, value):\n return event_pb2.Event(summary=scalar_v2.scalar_pb(tag, value))\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\n \"run 1\": _apply_compat(\n [scalar_event(\"1.1\", 5.0), scalar_event(\"1.2\", 5.0)]\n ),\n \"run 2\": _apply_compat(\n [scalar_event(\"2.1\", 5.0), scalar_event(\"2.2\", 5.0)]\n ),\n },\n {\n \"run 3\": _apply_compat(\n [scalar_event(\"3.1\", 5.0), scalar_event(\"3.2\", 5.0)]\n ),\n \"run 4\": _apply_compat(\n [scalar_event(\"4.1\", 5.0), scalar_event(\"4.2\", 5.0)]\n ),\n \"run 5\": _apply_compat(\n [scalar_event(\"5.1\", 5.0), scalar_event(\"5.2\", 5.0)]\n ),\n },\n AbortUploadError,\n ]\n\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n self.assertEqual(4 + 6, mock_client.WriteScalar.call_count)\n self.assertEqual(4 + 6, mock_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_tensor_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_blob_rate_limiter.tick.call_count)\n\n # Check upload tracker calls.\n self.assertEqual(mock_tracker.send_tracker.call_count, 2)\n self.assertEqual(mock_tracker.scalars_tracker.call_count, 10)\n self.assertLen(mock_tracker.scalars_tracker.call_args[0], 1)\n self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)\n self.assertEqual(mock_tracker.blob_tracker.call_count, 0)\n\n def test_start_uploading_scalars_one_shot(self):\n \"\"\"Check that one-shot uploading stops without AbortUploadError.\"\"\"\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tensor_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tracker = mock.MagicMock()\n with mock.patch.object(\n upload_tracker, \"UploadTracker\", return_value=mock_tracker\n ):\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n # Send each Event below in a separate WriteScalarRequest\n max_scalar_request_size=100,\n rpc_rate_limiter=mock_rate_limiter,\n tensor_rpc_rate_limiter=mock_tensor_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n verbosity=1, # In order to test the upload tracker.\n one_shot=True,\n )\n uploader.create_experiment()\n\n def scalar_event(tag, value):\n return event_pb2.Event(summary=scalar_v2.scalar_pb(tag, value))\n\n mock_logdir_loader = 
mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\n \"run 1\": _apply_compat(\n [scalar_event(\"1.1\", 5.0), scalar_event(\"1.2\", 5.0)]\n ),\n \"run 2\": _apply_compat(\n [scalar_event(\"2.1\", 5.0), scalar_event(\"2.2\", 5.0)]\n ),\n },\n # Note the lack of AbortUploadError here.\n ]\n\n with mock.patch.object(uploader, \"_logdir_loader\", mock_logdir_loader):\n uploader.start_uploading()\n\n self.assertEqual(4, mock_client.WriteScalar.call_count)\n self.assertEqual(4, mock_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_tensor_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_blob_rate_limiter.tick.call_count)\n\n # Check upload tracker calls.\n self.assertEqual(mock_tracker.send_tracker.call_count, 1)\n self.assertEqual(mock_tracker.scalars_tracker.call_count, 4)\n self.assertLen(mock_tracker.scalars_tracker.call_args[0], 1)\n self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)\n self.assertEqual(mock_tracker.blob_tracker.call_count, 0)\n\n def test_start_uploading_tensors(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tensor_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tracker = mock.MagicMock()\n with mock.patch.object(\n upload_tracker, \"UploadTracker\", return_value=mock_tracker\n ):\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n rpc_rate_limiter=mock_rate_limiter,\n tensor_rpc_rate_limiter=mock_tensor_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n verbosity=1, # In order to test the upload tracker.\n )\n uploader.create_experiment()\n\n def tensor_event(tag, value):\n return event_pb2.Event(\n summary=histogram_v2.histogram_pb(tag, value)\n )\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\n \"run 1\": _apply_compat(\n [tensor_event(\"1.1\", [5.0]), tensor_event(\"1.2\", [5.0])]\n ),\n },\n AbortUploadError,\n ]\n\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n self.assertEqual(1, mock_client.WriteTensor.call_count)\n self.assertEqual(0, mock_rate_limiter.tick.call_count)\n self.assertEqual(1, mock_tensor_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_blob_rate_limiter.tick.call_count)\n\n # Check upload tracker calls.\n self.assertEqual(mock_tracker.send_tracker.call_count, 1)\n self.assertEqual(mock_tracker.scalars_tracker.call_count, 0)\n tensors_tracker = mock_tracker.tensors_tracker\n self.assertEqual(tensors_tracker.call_count, 1)\n self.assertLen(tensors_tracker.call_args[0], 4)\n self.assertEqual(tensors_tracker.call_args[0][0], 2) # num_tensors\n self.assertEqual(\n tensors_tracker.call_args[0][1], 0\n ) # num_tensors_skipped\n # tensor_bytes: avoid asserting the exact value as it's hard to reason about.\n self.assertGreater(tensors_tracker.call_args[0][2], 0)\n self.assertEqual(\n tensors_tracker.call_args[0][3], 0\n ) # tensor_bytes_skipped\n self.assertEqual(mock_tracker.blob_tracker.call_count, 0)\n\n def test_start_uploading_graphs(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tensor_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_tracker = mock.MagicMock()\n with 
mock.patch.object(\n upload_tracker, \"UploadTracker\", return_value=mock_tracker\n ):\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n # Verify behavior with lots of small chunks\n max_blob_request_size=100,\n rpc_rate_limiter=mock_rate_limiter,\n tensor_rpc_rate_limiter=mock_tensor_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n verbosity=1, # In order to test tracker.\n )\n uploader.create_experiment()\n\n # Of course a real Event stream will never produce the same Event twice,\n # but is this test context it's fine to reuse this one.\n graph_event = event_pb2.Event(\n graph_def=_create_example_graph_bytes(950)\n )\n expected_graph_def = graph_pb2.GraphDef.FromString(\n graph_event.graph_def\n )\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\n \"run 1\": _apply_compat([graph_event, graph_event]),\n \"run 2\": _apply_compat([graph_event, graph_event]),\n },\n {\n \"run 3\": _apply_compat([graph_event, graph_event]),\n \"run 4\": _apply_compat([graph_event, graph_event]),\n \"run 5\": _apply_compat([graph_event, graph_event]),\n },\n AbortUploadError,\n ]\n\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n self.assertEqual(1, mock_client.CreateExperiment.call_count)\n self.assertEqual(10, mock_client.WriteBlob.call_count)\n for (i, call) in enumerate(mock_client.WriteBlob.call_args_list):\n requests = list(call[0][0])\n data = b\"\".join(r.data for r in requests)\n actual_graph_def = graph_pb2.GraphDef.FromString(data)\n self.assertProtoEquals(expected_graph_def, actual_graph_def)\n self.assertEqual(\n set(r.blob_sequence_id for r in requests), {\"blob%d\" % i},\n )\n self.assertEqual(0, mock_rate_limiter.tick.call_count)\n self.assertEqual(0, mock_tensor_rate_limiter.tick.call_count)\n self.assertEqual(10, mock_blob_rate_limiter.tick.call_count)\n\n # Check upload tracker calls.\n self.assertEqual(mock_tracker.send_tracker.call_count, 2)\n self.assertEqual(mock_tracker.scalars_tracker.call_count, 0)\n self.assertEqual(mock_tracker.tensors_tracker.call_count, 0)\n self.assertEqual(mock_tracker.blob_tracker.call_count, 10)\n self.assertLen(mock_tracker.blob_tracker.call_args[0], 1)\n self.assertGreater(mock_tracker.blob_tracker.call_args[0][0], 0)\n\n def test_upload_skip_large_blob(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n # Verify behavior with lots of small chunks\n max_blob_request_size=100,\n max_blob_size=100,\n rpc_rate_limiter=mock_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n )\n uploader.create_experiment()\n\n graph_event = event_pb2.Event(\n graph_def=_create_example_graph_bytes(950)\n )\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\"run 1\": _apply_compat([graph_event])},\n AbortUploadError,\n ]\n\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n self.assertEqual(1, mock_client.CreateExperiment.call_count)\n self.assertEqual(0, mock_client.WriteBlob.call_count)\n self.assertEqual(0, mock_rate_limiter.tick.call_count)\n self.assertEqual(1, 
mock_blob_rate_limiter.tick.call_count)\n\n def test_filter_graphs(self):\n # Three graphs: one short, one long, one corrupt.\n bytes_0 = _create_example_graph_bytes(123)\n bytes_1 = _create_example_graph_bytes(9999)\n # invalid (truncated) proto: length-delimited field 1 (0x0a) of\n # length 0x7f specified, but only len(\"bogus\") = 5 bytes given\n # <https://developers.google.com/protocol-buffers/docs/encoding>\n bytes_2 = b\"\\x0a\\x7fbogus\"\n\n logdir = self.get_temp_dir()\n for (i, b) in enumerate([bytes_0, bytes_1, bytes_2]):\n run_dir = os.path.join(logdir, \"run_%04d\" % i)\n event = event_pb2.Event(step=0, wall_time=123 * i, graph_def=b)\n with tb_test_util.FileWriter(run_dir) as writer:\n writer.add_event(event)\n\n limiter = mock.create_autospec(util.RateLimiter)\n limiter.tick.side_effect = [None, AbortUploadError]\n mock_client = _create_mock_client()\n uploader = _create_uploader(\n mock_client, logdir, logdir_poll_rate_limiter=limiter,\n )\n uploader.create_experiment()\n\n with self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n\n actual_blobs = []\n for call in mock_client.WriteBlob.call_args_list:\n requests = call[0][0]\n actual_blobs.append(b\"\".join(r.data for r in requests))\n\n actual_graph_defs = []\n for blob in actual_blobs:\n try:\n actual_graph_defs.append(graph_pb2.GraphDef.FromString(blob))\n except message.DecodeError:\n actual_graph_defs.append(None)\n\n with self.subTest(\"graphs with small attr values should be unchanged\"):\n expected_graph_def_0 = graph_pb2.GraphDef.FromString(bytes_0)\n self.assertEqual(actual_graph_defs[0], expected_graph_def_0)\n\n with self.subTest(\"large attr values should be filtered out\"):\n expected_graph_def_1 = graph_pb2.GraphDef.FromString(bytes_1)\n del expected_graph_def_1.node[1].attr[\"large\"]\n expected_graph_def_1.node[1].attr[\"_too_large_attrs\"].list.s.append(\n b\"large\"\n )\n requests = list(mock_client.WriteBlob.call_args[0][0])\n self.assertEqual(actual_graph_defs[1], expected_graph_def_1)\n\n with self.subTest(\"corrupt graphs should be skipped\"):\n self.assertLen(actual_blobs, 2)\n\n def test_upload_server_error(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n rpc_rate_limiter=mock_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n )\n uploader.create_experiment()\n\n # Of course a real Event stream will never produce the same Event twice,\n # but is this test context it's fine to reuse this one.\n graph_event = event_pb2.Event(\n graph_def=_create_example_graph_bytes(950)\n )\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\"run 1\": _apply_compat([graph_event])},\n {\"run 1\": _apply_compat([graph_event])},\n AbortUploadError,\n ]\n\n mock_client.WriteBlob.side_effect = [\n [write_service_pb2.WriteBlobResponse()],\n test_util.grpc_error(grpc.StatusCode.INTERNAL, \"nope\"),\n ]\n\n # This demonstrates that the INTERNAL error is NOT handled, so the\n # uploader will die if this happens.\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(grpc.RpcError):\n uploader.start_uploading()\n self.assertEqual(1, mock_client.CreateExperiment.call_count)\n self.assertEqual(2, mock_client.WriteBlob.call_count)\n self.assertEqual(0, mock_rate_limiter.tick.call_count)\n 
self.assertEqual(2, mock_blob_rate_limiter.tick.call_count)\n\n def test_upload_same_graph_twice(self):\n mock_client = _create_mock_client()\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n mock_blob_rate_limiter = mock.create_autospec(util.RateLimiter)\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n rpc_rate_limiter=mock_rate_limiter,\n blob_rpc_rate_limiter=mock_blob_rate_limiter,\n )\n uploader.create_experiment()\n\n graph_event = event_pb2.Event(\n graph_def=_create_example_graph_bytes(950)\n )\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\"run 1\": _apply_compat([graph_event])},\n {\"run 1\": _apply_compat([graph_event])},\n AbortUploadError,\n ]\n\n mock_client.WriteBlob.side_effect = [\n [write_service_pb2.WriteBlobResponse()],\n test_util.grpc_error(grpc.StatusCode.ALREADY_EXISTS, \"nope\"),\n ]\n\n # This demonstrates that the ALREADY_EXISTS error is handled gracefully.\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n self.assertEqual(1, mock_client.CreateExperiment.call_count)\n self.assertEqual(2, mock_client.WriteBlob.call_count)\n self.assertEqual(0, mock_rate_limiter.tick.call_count)\n self.assertEqual(2, mock_blob_rate_limiter.tick.call_count)\n\n def test_upload_empty_logdir(self):\n logdir = self.get_temp_dir()\n mock_client = _create_mock_client()\n uploader = _create_uploader(mock_client, logdir)\n uploader.create_experiment()\n uploader._upload_once()\n mock_client.WriteScalar.assert_not_called()\n\n def test_upload_polls_slowly_once_done(self):\n class Success(Exception):\n pass\n\n mock_rate_limiter = mock.create_autospec(util.RateLimiter)\n upload_call_count_box = [0]\n\n def mock_upload_once():\n upload_call_count_box[0] += 1\n tick_count = mock_rate_limiter.tick.call_count\n self.assertEqual(tick_count, upload_call_count_box[0])\n if tick_count >= 3:\n raise Success()\n\n uploader = _create_uploader(\n logdir=self.get_temp_dir(),\n logdir_poll_rate_limiter=mock_rate_limiter,\n )\n uploader._upload_once = mock_upload_once\n\n uploader.create_experiment()\n with self.assertRaises(Success):\n uploader.start_uploading()\n\n def test_upload_swallows_rpc_failure(self):\n logdir = self.get_temp_dir()\n with tb_test_util.FileWriter(logdir) as writer:\n writer.add_test_summary(\"foo\")\n mock_client = _create_mock_client()\n uploader = _create_uploader(mock_client, logdir)\n uploader.create_experiment()\n error = test_util.grpc_error(grpc.StatusCode.INTERNAL, \"Failure\")\n mock_client.WriteScalar.side_effect = error\n uploader._upload_once()\n mock_client.WriteScalar.assert_called_once()\n\n def test_upload_full_logdir(self):\n logdir = self.get_temp_dir()\n mock_client = _create_mock_client()\n uploader = _create_uploader(mock_client, logdir)\n uploader.create_experiment()\n\n # Convenience helpers for constructing expected requests.\n run = write_service_pb2.WriteScalarRequest.Run\n tag = write_service_pb2.WriteScalarRequest.Tag\n point = scalar_pb2.ScalarPoint\n\n # First round\n writer = tb_test_util.FileWriter(logdir)\n writer.add_test_summary(\"foo\", simple_value=5.0, step=1)\n writer.add_test_summary(\"foo\", simple_value=6.0, step=2)\n writer.add_test_summary(\"foo\", simple_value=7.0, step=3)\n writer.add_test_summary(\"bar\", simple_value=8.0, step=3)\n writer.flush()\n writer_a = tb_test_util.FileWriter(os.path.join(logdir, \"a\"))\n 
writer_a.add_test_summary(\"qux\", simple_value=9.0, step=2)\n writer_a.flush()\n uploader._upload_once()\n self.assertEqual(1, mock_client.WriteScalar.call_count)\n request1 = mock_client.WriteScalar.call_args[0][0]\n _clear_wall_times(request1)\n expected_request1 = write_service_pb2.WriteScalarRequest(\n experiment_id=\"123\",\n runs=[\n run(\n name=\".\",\n tags=[\n tag(\n name=\"foo\",\n metadata=test_util.scalar_metadata(\"foo\"),\n points=[\n point(step=1, value=5.0),\n point(step=2, value=6.0),\n point(step=3, value=7.0),\n ],\n ),\n tag(\n name=\"bar\",\n metadata=test_util.scalar_metadata(\"bar\"),\n points=[point(step=3, value=8.0)],\n ),\n ],\n ),\n run(\n name=\"a\",\n tags=[\n tag(\n name=\"qux\",\n metadata=test_util.scalar_metadata(\"qux\"),\n points=[point(step=2, value=9.0)],\n )\n ],\n ),\n ],\n )\n self.assertProtoEquals(expected_request1, request1)\n mock_client.WriteScalar.reset_mock()\n\n # Second round\n writer.add_test_summary(\"foo\", simple_value=10.0, step=5)\n writer.add_test_summary(\"baz\", simple_value=11.0, step=1)\n writer.flush()\n writer_b = tb_test_util.FileWriter(os.path.join(logdir, \"b\"))\n writer_b.add_test_summary(\"xyz\", simple_value=12.0, step=1)\n writer_b.flush()\n uploader._upload_once()\n self.assertEqual(1, mock_client.WriteScalar.call_count)\n request2 = mock_client.WriteScalar.call_args[0][0]\n _clear_wall_times(request2)\n expected_request2 = write_service_pb2.WriteScalarRequest(\n experiment_id=\"123\",\n runs=[\n run(\n name=\".\",\n tags=[\n tag(\n name=\"foo\",\n metadata=test_util.scalar_metadata(\"foo\"),\n points=[point(step=5, value=10.0)],\n ),\n tag(\n name=\"baz\",\n metadata=test_util.scalar_metadata(\"baz\"),\n points=[point(step=1, value=11.0)],\n ),\n ],\n ),\n run(\n name=\"b\",\n tags=[\n tag(\n name=\"xyz\",\n metadata=test_util.scalar_metadata(\"xyz\"),\n points=[point(step=1, value=12.0)],\n )\n ],\n ),\n ],\n )\n self.assertProtoEquals(expected_request2, request2)\n mock_client.WriteScalar.reset_mock()\n\n # Empty third round\n uploader._upload_once()\n mock_client.WriteScalar.assert_not_called()\n\n def test_verbosity_zero_creates_upload_tracker_with_verbosity_zero(self):\n mock_client = _create_mock_client()\n mock_tracker = mock.MagicMock()\n with mock.patch.object(\n upload_tracker, \"UploadTracker\", return_value=mock_tracker\n ) as mock_constructor:\n uploader = _create_uploader(\n mock_client,\n \"/logs/foo\",\n verbosity=0, # Explicitly set verbosity to 0.\n )\n uploader.create_experiment()\n\n def scalar_event(tag, value):\n return event_pb2.Event(summary=scalar_v2.scalar_pb(tag, value))\n\n mock_logdir_loader = mock.create_autospec(logdir_loader.LogdirLoader)\n mock_logdir_loader.get_run_events.side_effect = [\n {\n \"run 1\": _apply_compat(\n [scalar_event(\"1.1\", 5.0), scalar_event(\"1.2\", 5.0)]\n ),\n },\n AbortUploadError,\n ]\n\n with mock.patch.object(\n uploader, \"_logdir_loader\", mock_logdir_loader\n ), self.assertRaises(AbortUploadError):\n uploader.start_uploading()\n\n self.assertEqual(mock_constructor.call_count, 1)\n self.assertEqual(\n mock_constructor.call_args[1], {\"verbosity\": 0, \"one_shot\": False}\n )\n self.assertEqual(mock_tracker.scalars_tracker.call_count, 1)\n\n\nclass BatchedRequestSenderTest(tf.test.TestCase):\n def _populate_run_from_events(\n self, scalar_run, tensor_run, events, allowed_plugins=_USE_DEFAULT\n ):\n mock_client = _create_mock_client()\n builder = _create_request_sender(\n experiment_id=\"123\",\n api=mock_client,\n allowed_plugins=allowed_plugins,\n )\n 
builder.send_requests({\"\": _apply_compat(events)})\n scalar_requests = [\n c[0][0] for c in mock_client.WriteScalar.call_args_list\n ]\n if scalar_requests:\n self.assertLen(scalar_requests, 1)\n self.assertLen(scalar_requests[0].runs, 1)\n scalar_run.MergeFrom(scalar_requests[0].runs[0])\n tensor_requests = [\n c[0][0] for c in mock_client.WriteTensor.call_args_list\n ]\n if tensor_requests:\n self.assertLen(tensor_requests, 1)\n self.assertLen(tensor_requests[0].runs, 1)\n tensor_run.MergeFrom(tensor_requests[0].runs[0])\n\n def test_empty_events(self):\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, [])\n self.assertProtoEquals(\n scalar_run, write_service_pb2.WriteScalarRequest.Run()\n )\n self.assertProtoEquals(\n tensor_run, write_service_pb2.WriteTensorRequest.Run()\n )\n\n def test_scalar_and_tensor_events(self):\n events = [\n event_pb2.Event(summary=scalar_v2.scalar_pb(\"scalar1\", 5.0)),\n event_pb2.Event(summary=scalar_v2.scalar_pb(\"scalar2\", 5.0)),\n event_pb2.Event(\n summary=histogram_v2.histogram_pb(\"histogram\", [5.0])\n ),\n event_pb2.Event(\n summary=histogram_v2.histogram_pb(\"histogram\", [6.0])\n ),\n ]\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, events)\n scalar_tag_counts = _extract_tag_counts(scalar_run)\n self.assertEqual(scalar_tag_counts, {\"scalar1\": 1, \"scalar2\": 1})\n tensor_tag_counts = _extract_tag_counts(tensor_run)\n self.assertEqual(tensor_tag_counts, {\"histogram\": 2})\n\n def test_skips_non_scalar_and_non_tensor_events(self):\n events = [\n event_pb2.Event(summary=scalar_v2.scalar_pb(\"scalar1\", 5.0)),\n event_pb2.Event(file_version=\"brain.Event:2\"),\n event_pb2.Event(\n summary=histogram_v2.histogram_pb(\"histogram\", [5.0])\n ),\n ]\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, events)\n scalar_tag_counts = _extract_tag_counts(scalar_run)\n self.assertEqual(scalar_tag_counts, {\"scalar1\": 1})\n tensor_tag_counts = _extract_tag_counts(tensor_run)\n self.assertEqual(tensor_tag_counts, {\"histogram\": 1})\n\n def test_skips_non_scalar_events_in_scalar_time_series(self):\n events = [\n event_pb2.Event(file_version=\"brain.Event:2\"),\n event_pb2.Event(summary=scalar_v2.scalar_pb(\"scalar1\", 5.0)),\n event_pb2.Event(summary=scalar_v2.scalar_pb(\"scalar2\", 5.0)),\n event_pb2.Event(\n summary=histogram_v2.histogram_pb(\"scalar2\", [5.0])\n ),\n ]\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, events)\n scalar_tag_counts = _extract_tag_counts(scalar_run)\n self.assertEqual(scalar_tag_counts, {\"scalar1\": 1, \"scalar2\": 1})\n tensor_tag_counts = _extract_tag_counts(tensor_run)\n self.assertEqual(tensor_tag_counts, {})\n\n def test_skips_events_from_disallowed_plugins(self):\n event = event_pb2.Event(\n step=1, wall_time=123.456, summary=scalar_v2.scalar_pb(\"foo\", 5.0)\n )\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(\n scalar_run,\n tensor_run,\n [event],\n allowed_plugins=frozenset(\"not-scalars\"),\n )\n expected_scalar_run = 
write_service_pb2.WriteScalarRequest.Run()\n self.assertProtoEquals(scalar_run, expected_scalar_run)\n expected_tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self.assertProtoEquals(tensor_run, expected_tensor_run)\n\n def test_remembers_first_metadata_in_time_series(self):\n scalar_1 = event_pb2.Event(summary=scalar_v2.scalar_pb(\"loss\", 4.0))\n scalar_2 = event_pb2.Event(summary=scalar_v2.scalar_pb(\"loss\", 3.0))\n scalar_2.summary.value[0].ClearField(\"metadata\")\n events = [\n event_pb2.Event(file_version=\"brain.Event:2\"),\n scalar_1,\n scalar_2,\n ]\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, events)\n scalar_tag_counts = _extract_tag_counts(scalar_run)\n self.assertEqual(scalar_tag_counts, {\"loss\": 2})\n\n def test_expands_multiple_values_in_event(self):\n event = event_pb2.Event(step=1, wall_time=123.456)\n event.summary.value.add(tag=\"foo\", simple_value=1.0)\n event.summary.value.add(tag=\"foo\", simple_value=2.0)\n event.summary.value.add(tag=\"foo\", simple_value=3.0)\n scalar_run = write_service_pb2.WriteScalarRequest.Run()\n tensor_run = write_service_pb2.WriteTensorRequest.Run()\n self._populate_run_from_events(scalar_run, tensor_run, [event])\n expected_scalar_run = write_service_pb2.WriteScalarRequest.Run()\n foo_tag = expected_scalar_run.tags.add()\n foo_tag.name = \"foo\"\n foo_tag.metadata.display_name = \"foo\"\n foo_tag.metadata.plugin_data.plugin_name = \"scalars\"\n foo_tag.metadata.data_class = summary_pb2.DATA_CLASS_SCALAR\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=1.0\n )\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=2.0\n )\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=3.0\n )\n self.assertProtoEquals(scalar_run, expected_scalar_run)\n\n\nclass ScalarBatchedRequestSenderTest(tf.test.TestCase):\n def _add_events(self, sender, run_name, events):\n for event in events:\n for value in event.summary.value:\n sender.add_event(run_name, event, value, value.metadata)\n\n def _add_events_and_flush(self, events):\n mock_client = _create_mock_client()\n sender = _create_scalar_request_sender(\n experiment_id=\"123\", api=mock_client,\n )\n self._add_events(sender, \"\", events)\n sender.flush()\n\n requests = [c[0][0] for c in mock_client.WriteScalar.call_args_list]\n self.assertLen(requests, 1)\n self.assertLen(requests[0].runs, 1)\n return requests[0].runs[0]\n\n def test_aggregation_by_tag(self):\n def make_event(step, wall_time, tag, value):\n return event_pb2.Event(\n step=step,\n wall_time=wall_time,\n summary=scalar_v2.scalar_pb(tag, value),\n )\n\n events = [\n make_event(1, 1.0, \"one\", 11.0),\n make_event(1, 2.0, \"two\", 22.0),\n make_event(2, 3.0, \"one\", 33.0),\n make_event(2, 4.0, \"two\", 44.0),\n make_event(\n 1, 5.0, \"one\", 55.0\n ), # Should preserve duplicate step=1.\n make_event(1, 6.0, \"three\", 66.0),\n ]\n run_proto = self._add_events_and_flush(events)\n tag_data = {\n tag.name: [\n (p.step, p.wall_time.ToSeconds(), p.value) for p in tag.points\n ]\n for tag in run_proto.tags\n }\n self.assertEqual(\n tag_data,\n {\n \"one\": [(1, 1.0, 11.0), (2, 3.0, 33.0), (1, 5.0, 55.0)],\n \"two\": [(1, 2.0, 22.0), (2, 4.0, 44.0)],\n \"three\": [(1, 6.0, 66.0)],\n },\n )\n\n def test_v1_summary(self):\n event = event_pb2.Event(step=1, wall_time=123.456)\n event.summary.value.add(tag=\"foo\", 
simple_value=5.0)\n run_proto = self._add_events_and_flush(_apply_compat([event]))\n expected_run_proto = write_service_pb2.WriteScalarRequest.Run()\n foo_tag = expected_run_proto.tags.add()\n foo_tag.name = \"foo\"\n foo_tag.metadata.display_name = \"foo\"\n foo_tag.metadata.plugin_data.plugin_name = \"scalars\"\n foo_tag.metadata.data_class = summary_pb2.DATA_CLASS_SCALAR\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=5.0\n )\n self.assertProtoEquals(run_proto, expected_run_proto)\n\n def test_v1_summary_tb_summary(self):\n tf_summary = summary_v1.scalar_pb(\"foo\", 5.0)\n tb_summary = summary_pb2.Summary.FromString(\n tf_summary.SerializeToString()\n )\n event = event_pb2.Event(step=1, wall_time=123.456, summary=tb_summary)\n run_proto = self._add_events_and_flush(_apply_compat([event]))\n expected_run_proto = write_service_pb2.WriteScalarRequest.Run()\n foo_tag = expected_run_proto.tags.add()\n foo_tag.name = \"foo/scalar_summary\"\n foo_tag.metadata.display_name = \"foo\"\n foo_tag.metadata.plugin_data.plugin_name = \"scalars\"\n foo_tag.metadata.data_class = summary_pb2.DATA_CLASS_SCALAR\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=5.0\n )\n self.assertProtoEquals(run_proto, expected_run_proto)\n\n def test_v2_summary(self):\n event = event_pb2.Event(\n step=1, wall_time=123.456, summary=scalar_v2.scalar_pb(\"foo\", 5.0)\n )\n run_proto = self._add_events_and_flush(_apply_compat([event]))\n expected_run_proto = write_service_pb2.WriteScalarRequest.Run()\n foo_tag = expected_run_proto.tags.add()\n foo_tag.name = \"foo\"\n foo_tag.metadata.plugin_data.plugin_name = \"scalars\"\n foo_tag.metadata.data_class = summary_pb2.DATA_CLASS_SCALAR\n foo_tag.points.add(\n step=1, wall_time=test_util.timestamp_pb(123456000000), value=5.0\n )\n self.assertProtoEquals(run_proto, expected_run_proto)\n\n def test_propagates_experiment_deletion(self):\n event = event_pb2.Event(step=1)\n event.summary.value.add(tag=\"foo\", simple_value=1.0)\n\n mock_client = _create_mock_client()\n sender = _create_scalar_request_sender(\"123\", mock_client)\n self._add_events(sender, \"run\", _apply_compat([event]))\n\n error = test_util.grpc_error(grpc.StatusCode.NOT_FOUND, \"nope\")\n mock_client.WriteScalar.side_effect = error\n with self.assertRaises(uploader_lib.ExperimentNotFoundError):\n sender.flush()\n\n def test_no_budget_for_base_request(self):\n mock_client = _create_mock_client()\n long_experiment_id = \"A\" * 12\n with self.assertRaises(RuntimeError) as cm:\n _create_scalar_request_sender(\n experiment_id=long_experiment_id,\n api=mock_client,\n max_request_size=12,\n )\n self.assertEqual(\n str(cm.exception), \"Byte budget too small for base request\"\n )\n\n def test_no_room_for_single_point(self):\n mock_client = _create_mock_client()\n event = event_pb2.Event(step=1, wall_time=123.456)\n event.summary.value.add(tag=\"foo\", simple_value=1.0)\n long_run_name = \"A\" * 12\n sender = _create_scalar_request_sender(\n \"123\", mock_client, max_request_size=12\n )\n with self.assertRaises(RuntimeError) as cm:\n self._add_events(sender, long_run_name, [event])\n self.assertEqual(str(cm.exception), \"add_event failed despite flush\")\n\n def test_break_at_run_boundary(self):\n mock_client = _create_mock_client()\n # Choose run name sizes such that one run fits in a 1024 byte request,\n # but not two.\n long_run_1 = \"A\" * 768\n long_run_2 = \"B\" * 768\n event_1 = event_pb2.Event(step=1)\n event_1.summary.value.add(tag=\"foo\", 
simple_value=1.0)\n event_2 = event_pb2.Event(step=2)\n event_2.summary.value.add(tag=\"bar\", simple_value=-2.0)\n\n sender = _create_scalar_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, long_run_1, _apply_compat([event_1]))\n self._add_events(sender, long_run_2, _apply_compat([event_2]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteScalar.call_args_list]\n\n for request in requests:\n _clear_wall_times(request)\n\n # Expect two RPC calls despite a single explicit call to flush().\n expected = [\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n ]\n (\n expected[0]\n .runs.add(name=long_run_1)\n .tags.add(name=\"foo\", metadata=test_util.scalar_metadata(\"foo\"))\n .points.add(step=1, value=1.0)\n )\n (\n expected[1]\n .runs.add(name=long_run_2)\n .tags.add(name=\"bar\", metadata=test_util.scalar_metadata(\"bar\"))\n .points.add(step=2, value=-2.0)\n )\n self.assertEqual(requests, expected)\n\n def test_break_at_tag_boundary(self):\n mock_client = _create_mock_client()\n # Choose tag name sizes such that one tag fits in a 1024 byte requst,\n # but not two. Note that tag names appear in both `Tag.name` and the\n # summary metadata.\n long_tag_1 = \"a\" * 384\n long_tag_2 = \"b\" * 384\n event = event_pb2.Event(step=1)\n event.summary.value.add(tag=long_tag_1, simple_value=1.0)\n event.summary.value.add(tag=long_tag_2, simple_value=2.0)\n\n sender = _create_scalar_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, \"train\", _apply_compat([event]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteScalar.call_args_list]\n for request in requests:\n _clear_wall_times(request)\n\n # Expect two RPC calls despite a single explicit call to flush().\n expected = [\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n ]\n (\n expected[0]\n .runs.add(name=\"train\")\n .tags.add(\n name=long_tag_1, metadata=test_util.scalar_metadata(long_tag_1)\n )\n .points.add(step=1, value=1.0)\n )\n (\n expected[1]\n .runs.add(name=\"train\")\n .tags.add(\n name=long_tag_2, metadata=test_util.scalar_metadata(long_tag_2)\n )\n .points.add(step=1, value=2.0)\n )\n self.assertEqual(requests, expected)\n\n def test_break_at_scalar_point_boundary(self):\n mock_client = _create_mock_client()\n point_count = 2000 # comfortably saturates a single 1024-byte request\n events = []\n for step in range(point_count):\n summary = scalar_v2.scalar_pb(\"loss\", -2.0 * step)\n if step > 0:\n summary.value[0].ClearField(\"metadata\")\n events.append(event_pb2.Event(summary=summary, step=step))\n\n sender = _create_scalar_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, \"train\", _apply_compat(events))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteScalar.call_args_list]\n for request in requests:\n _clear_wall_times(request)\n\n self.assertGreater(len(requests), 1)\n self.assertLess(len(requests), point_count)\n # This is the observed number of requests when running the test. 
There\n # is no reasonable way to derive this value from just reading the code.\n # The number of requests does not have to be 33 to be correct but if it\n # changes it probably warrants some investigation or thought.\n self.assertEqual(33, len(requests))\n\n total_points_in_result = 0\n for request in requests:\n self.assertLen(request.runs, 1)\n run = request.runs[0]\n self.assertEqual(run.name, \"train\")\n self.assertLen(run.tags, 1)\n tag = run.tags[0]\n self.assertEqual(tag.name, \"loss\")\n for point in tag.points:\n self.assertEqual(point.step, total_points_in_result)\n self.assertEqual(point.value, -2.0 * point.step)\n total_points_in_result += 1\n self.assertLessEqual(request.ByteSize(), 1024)\n self.assertEqual(total_points_in_result, point_count)\n\n def test_prunes_tags_and_runs(self):\n mock_client = _create_mock_client()\n event_1 = event_pb2.Event(step=1)\n event_1.summary.value.add(tag=\"foo\", simple_value=1.0)\n event_2 = event_pb2.Event(step=2)\n event_2.summary.value.add(tag=\"bar\", simple_value=-2.0)\n\n add_point_call_count_box = [0]\n\n def mock_add_point(byte_budget_manager_self, point):\n # Simulate out-of-space error the first time that we try to store\n # the second point.\n add_point_call_count_box[0] += 1\n if add_point_call_count_box[0] == 2:\n raise uploader_lib._OutOfSpaceError()\n\n with mock.patch.object(\n uploader_lib._ByteBudgetManager, \"add_point\", mock_add_point,\n ):\n sender = _create_scalar_request_sender(\"123\", mock_client)\n self._add_events(sender, \"train\", _apply_compat([event_1]))\n self._add_events(sender, \"test\", _apply_compat([event_2]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteScalar.call_args_list]\n for request in requests:\n _clear_wall_times(request)\n\n expected = [\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n write_service_pb2.WriteScalarRequest(experiment_id=\"123\"),\n ]\n (\n expected[0]\n .runs.add(name=\"train\")\n .tags.add(name=\"foo\", metadata=test_util.scalar_metadata(\"foo\"))\n .points.add(step=1, value=1.0)\n )\n (\n expected[1]\n .runs.add(name=\"test\")\n .tags.add(name=\"bar\", metadata=test_util.scalar_metadata(\"bar\"))\n .points.add(step=2, value=-2.0)\n )\n self.assertEqual(expected, requests)\n\n def test_wall_time_precision(self):\n # Test a wall time that is exactly representable in float64 but has enough\n # digits to incur error if converted to nanoseconds the naive way (* 1e9).\n event1 = event_pb2.Event(step=1, wall_time=1567808404.765432119)\n event1.summary.value.add(tag=\"foo\", simple_value=1.0)\n # Test a wall time where as a float64, the fractional part on its own will\n # introduce error if truncated to 9 decimal places instead of rounded.\n event2 = event_pb2.Event(step=2, wall_time=1.000000002)\n event2.summary.value.add(tag=\"foo\", simple_value=2.0)\n run_proto = self._add_events_and_flush(_apply_compat([event1, event2]))\n self.assertEqual(\n test_util.timestamp_pb(1567808404765432119),\n run_proto.tags[0].points[0].wall_time,\n )\n self.assertEqual(\n test_util.timestamp_pb(1000000002),\n run_proto.tags[0].points[1].wall_time,\n )\n\n\nclass TensorBatchedRequestSenderTest(tf.test.TestCase):\n def _add_events(self, sender, run_name, events):\n for event in events:\n for value in event.summary.value:\n sender.add_event(run_name, event, value, value.metadata)\n\n def _add_events_and_flush(self, events, max_tensor_point_size=_USE_DEFAULT):\n mock_client = _create_mock_client()\n sender = _create_tensor_request_sender(\n experiment_id=\"123\",\n 
api=mock_client,\n max_tensor_point_size=max_tensor_point_size,\n )\n self._add_events(sender, \"\", events)\n sender.flush()\n\n requests = [c[0][0] for c in mock_client.WriteTensor.call_args_list]\n self.assertLen(requests, 1)\n self.assertLen(requests[0].runs, 1)\n return requests[0].runs[0]\n\n def test_histogram_event(self):\n event = event_pb2.Event(\n step=1,\n wall_time=123.456,\n summary=histogram_v2.histogram_pb(\"foo\", [1.0]),\n )\n\n run_proto = self._add_events_and_flush(_apply_compat([event]))\n expected_run_proto = write_service_pb2.WriteTensorRequest.Run()\n foo_tag = expected_run_proto.tags.add()\n foo_tag.name = \"foo\"\n foo_tag.metadata.plugin_data.plugin_name = \"histograms\"\n foo_tag.metadata.data_class = summary_pb2.DATA_CLASS_TENSOR\n foo_tag.points.add(\n step=1,\n wall_time=test_util.timestamp_pb(123456000000),\n value=tensor_pb2.TensorProto(dtype=types_pb2.DT_DOUBLE),\n )\n # Simplify the tensor value a bit before making assertions on it.\n # We care that it is copied to the request but we don't need it to be\n # an extensive test.\n run_proto.tags[0].points[0].value.ClearField(\"tensor_shape\")\n run_proto.tags[0].points[0].value.ClearField(\"tensor_content\")\n self.assertProtoEquals(run_proto, expected_run_proto)\n\n def test_histogram_event_with_empty_tensor_content_errors_out(self):\n event = event_pb2.Event(step=42)\n event.summary.value.add(\n tag=\"one\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE,\n # Use empty tensor content to elicit an error.\n tensor_content=b\"\",\n ),\n )\n\n mock_client = _create_mock_client()\n sender = _create_tensor_request_sender(\"123\", mock_client)\n with self.assertRaisesRegexp(\n ValueError,\n re.compile(\n r\"failed to upload a tensor.*malformation.*tag.*\\'one\\'.*step.*42\",\n re.DOTALL,\n ),\n ):\n self._add_events(sender, \"run\", _apply_compat([event]))\n\n def test_histogram_event_with_incorrect_tensor_shape_errors_out(self):\n event = event_pb2.Event(step=1337)\n tensor_proto = tensor_util.make_tensor_proto([1.0, 2.0])\n # Add an extraneous dimension to the tensor shape in order to\n # elicit an error.\n tensor_proto.tensor_shape.dim.append(\n tensor_shape_pb2.TensorShapeProto.Dim(size=2)\n )\n event.summary.value.add(tag=\"two\", tensor=tensor_proto)\n\n mock_client = _create_mock_client()\n sender = _create_tensor_request_sender(\"123\", mock_client)\n with self.assertRaisesRegexp(\n ValueError,\n re.compile(\n r\"failed to upload a tensor.*malformation.*tag.*\\'two\\'.*step.*1337.\"\n r\"*shape\",\n re.DOTALL,\n ),\n ):\n self._add_events(sender, \"run\", _apply_compat([event]))\n\n def test_aggregation_by_tag(self):\n def make_event(step, wall_time, tag):\n event = event_pb2.Event(step=step, wall_time=wall_time)\n event.summary.value.add(\n tag=tag,\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n return event\n\n events = [\n make_event(1, 1.0, \"one\"),\n make_event(1, 2.0, \"two\"),\n make_event(2, 3.0, \"one\"),\n make_event(2, 4.0, \"two\"),\n make_event(1, 5.0, \"one\"), # Should preserve duplicate step=1.\n make_event(1, 6.0, \"three\"),\n ]\n run_proto = self._add_events_and_flush(events)\n tag_data = {\n tag.name: [(p.step, p.wall_time.ToSeconds()) for p in tag.points]\n for tag in run_proto.tags\n }\n self.assertEqual(\n tag_data,\n {\n \"one\": [(1, 1.0), (2, 3.0), (1, 5.0)],\n \"two\": [(1, 2.0), (2, 4.0)],\n \"three\": [(1, 6.0)],\n },\n )\n\n def test_propagates_experiment_deletion(self):\n event = event_pb2.Event(step=1)\n 
event.summary.value.add(\n tag=\"one\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n\n mock_client = _create_mock_client()\n sender = _create_tensor_request_sender(\"123\", mock_client)\n self._add_events(sender, \"run\", _apply_compat([event]))\n\n error = test_util.grpc_error(grpc.StatusCode.NOT_FOUND, \"nope\")\n mock_client.WriteTensor.side_effect = error\n with self.assertRaises(uploader_lib.ExperimentNotFoundError):\n sender.flush()\n\n def test_no_budget_for_base_request(self):\n mock_client = _create_mock_client()\n long_experiment_id = \"A\" * 12\n with self.assertRaises(RuntimeError) as cm:\n _create_tensor_request_sender(\n experiment_id=long_experiment_id,\n api=mock_client,\n max_request_size=12,\n )\n self.assertEqual(\n str(cm.exception), \"Byte budget too small for base request\"\n )\n\n def test_no_room_for_single_point(self):\n mock_client = _create_mock_client()\n event = event_pb2.Event(step=1)\n event.summary.value.add(\n tag=\"one\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n long_run_name = \"A\" * 12\n sender = _create_tensor_request_sender(\n \"123\", mock_client, max_request_size=12\n )\n with self.assertRaises(RuntimeError) as cm:\n self._add_events(sender, long_run_name, [event])\n self.assertEqual(str(cm.exception), \"add_event failed despite flush\")\n\n def test_break_at_run_boundary(self):\n mock_client = _create_mock_client()\n # Choose run name sizes such that one run fits in a 1024 byte request,\n # but not two.\n long_run_1 = \"A\" * 768\n long_run_2 = \"B\" * 768\n event_1 = event_pb2.Event(step=1)\n event_1.summary.value.add(\n tag=\"one\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n event_2 = event_pb2.Event(step=2)\n event_2.summary.value.add(\n tag=\"two\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[2.0]\n ),\n )\n\n sender = _create_tensor_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, long_run_1, _apply_compat([event_1]))\n self._add_events(sender, long_run_2, _apply_compat([event_2]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteTensor.call_args_list]\n\n # Expect two RPC calls despite a single explicit call to flush().\n self.assertEqual(2, len(requests))\n self.assertEqual(1, len(requests[0].runs))\n self.assertEqual(long_run_1, requests[0].runs[0].name)\n self.assertEqual(1, len(requests[1].runs))\n self.assertEqual(long_run_2, requests[1].runs[0].name)\n\n def test_break_at_tag_boundary(self):\n mock_client = _create_mock_client()\n # Choose tag name sizes such that one tag fits in a 1024 byte request,\n # but not two.\n long_tag_1 = \"a\" * 600\n long_tag_2 = \"b\" * 600\n event = event_pb2.Event(step=1, wall_time=1)\n event.summary.value.add(\n tag=long_tag_1,\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n event.summary.value.add(\n tag=long_tag_2,\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[2.0]\n ),\n )\n\n sender = _create_tensor_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, \"train\", _apply_compat([event]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteTensor.call_args_list]\n\n # Expect two RPC calls despite a single explicit call to flush().\n self.assertEqual(2, len(requests))\n # First RPC 
contains one tag.\n self.assertEqual(1, len(requests[0].runs))\n self.assertEqual(\"train\", requests[0].runs[0].name)\n self.assertEqual(1, len(requests[0].runs[0].tags))\n self.assertEqual(long_tag_1, requests[0].runs[0].tags[0].name)\n # Second RPC contains the other tag.\n self.assertEqual(1, len(requests[1].runs))\n self.assertEqual(\"train\", requests[1].runs[0].name)\n self.assertEqual(1, len(requests[1].runs[0].tags))\n self.assertEqual(long_tag_2, requests[1].runs[0].tags[0].name)\n\n def test_break_at_tensor_point_boundary(self):\n mock_client = _create_mock_client()\n point_count = 2000 # comfortably saturates a single 1024-byte request\n events = []\n for step in range(point_count):\n event = event_pb2.Event(step=step)\n tensor_proto = tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0 * step, -1.0 * step]\n )\n tensor_proto.tensor_shape.dim.append(\n tensor_shape_pb2.TensorShapeProto.Dim(size=2)\n )\n event.summary.value.add(tag=\"histo\", tensor=tensor_proto)\n events.append(event)\n\n sender = _create_tensor_request_sender(\n \"123\",\n mock_client,\n # Set a limit to request size\n max_request_size=1024,\n )\n self._add_events(sender, \"train\", _apply_compat(events))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteTensor.call_args_list]\n\n self.assertGreater(len(requests), 1)\n self.assertLess(len(requests), point_count)\n self.assertEqual(72, len(requests))\n\n total_points_in_result = 0\n for request in requests:\n self.assertLen(request.runs, 1)\n run = request.runs[0]\n self.assertEqual(run.name, \"train\")\n self.assertLen(run.tags, 1)\n tag = run.tags[0]\n self.assertEqual(tag.name, \"histo\")\n for point in tag.points:\n self.assertEqual(point.step, total_points_in_result)\n self.assertEqual(\n point.value.double_val,\n [1.0 * point.step, -1.0 * point.step],\n )\n total_points_in_result += 1\n self.assertLessEqual(request.ByteSize(), 1024)\n self.assertEqual(total_points_in_result, point_count)\n\n def test_strip_large_tensors(self):\n # Generate test data with varying tensor point sizes. Use raw bytes.\n event_1 = event_pb2.Event(step=1)\n event_1.summary.value.add(\n tag=\"one\",\n # This TensorProto has a byte size of 18.\n tensor=tensor_util.make_tensor_proto([1.0, 2.0]),\n )\n event_1.summary.value.add(\n tag=\"two\",\n # This TensorProto has a byte size of 22.\n tensor=tensor_util.make_tensor_proto([1.0, 2.0, 3.0]),\n )\n # This TensorProto has a 12-byte tensor_content.\n event_2 = event_pb2.Event(step=2)\n event_2.summary.value.add(\n tag=\"one\",\n # This TensorProto has a byte size of 18.\n tensor=tensor_util.make_tensor_proto([2.0, 4.0]),\n )\n event_2.summary.value.add(\n tag=\"two\",\n # This TensorProto has a byte size of 26.\n tensor=tensor_util.make_tensor_proto([1.0, 2.0, 3.0, 4.0]),\n )\n\n run_proto = self._add_events_and_flush(\n _apply_compat([event_1, event_2]),\n # Set threshold that will filter out the tensor point with 26 bytes\n # of data and above. 
The additional byte is for proto overhead.\n max_tensor_point_size=24,\n )\n tag_data = {\n tag.name: [(p.step, p.value.tensor_content) for p in tag.points]\n for tag in run_proto.tags\n }\n # A single tensor point is filtered out.\n self.assertEqual(\n tag_data,\n {\n \"one\": [\n (1, b\"\\x00\\x00\\x80?\\x00\\x00\\x00@\"),\n (2, b\"\\x00\\x00\\x00@\\x00\\x00\\x80@\"),\n ],\n \"two\": [(1, b\"\\x00\\x00\\x80?\\x00\\x00\\x00@\\x00\\x00@@\")],\n },\n )\n\n run_proto_2 = self._add_events_and_flush(\n _apply_compat([event_1, event_2]),\n # Set threshold that will filter out the tensor points with 22 and 26\n # bytes of data and above. The additional byte is for proto overhead.\n max_tensor_point_size=20,\n )\n tag_data_2 = {\n tag.name: [(p.step, p.value.tensor_content) for p in tag.points]\n for tag in run_proto_2.tags\n }\n # All tensor points from the same tag are filtered out, and the tag is pruned.\n self.assertEqual(\n tag_data_2,\n {\n \"one\": [\n (1, b\"\\x00\\x00\\x80?\\x00\\x00\\x00@\"),\n (2, b\"\\x00\\x00\\x00@\\x00\\x00\\x80@\"),\n ],\n },\n )\n\n def test_prunes_tags_and_runs(self):\n mock_client = _create_mock_client()\n event_1 = event_pb2.Event(step=1)\n event_1.summary.value.add(\n tag=\"one\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n event_2 = event_pb2.Event(step=2)\n event_2.summary.value.add(\n tag=\"two\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[2.0]\n ),\n )\n\n add_point_call_count_box = [0]\n\n def mock_add_point(byte_budget_manager_self, point):\n # Simulate out-of-space error the first time that we try to store\n # the second point.\n add_point_call_count_box[0] += 1\n if add_point_call_count_box[0] == 2:\n raise uploader_lib._OutOfSpaceError()\n\n with mock.patch.object(\n uploader_lib._ByteBudgetManager, \"add_point\", mock_add_point,\n ):\n sender = _create_tensor_request_sender(\"123\", mock_client)\n self._add_events(sender, \"train\", _apply_compat([event_1]))\n self._add_events(sender, \"test\", _apply_compat([event_2]))\n sender.flush()\n requests = [c[0][0] for c in mock_client.WriteTensor.call_args_list]\n\n # Expect two RPC calls despite a single explicit call to flush().\n self.assertEqual(2, len(requests))\n # First RPC contains one tag.\n self.assertEqual(1, len(requests[0].runs))\n self.assertEqual(\"train\", requests[0].runs[0].name)\n self.assertEqual(1, len(requests[0].runs[0].tags))\n self.assertEqual(\"one\", requests[0].runs[0].tags[0].name)\n # Second RPC contains the other tag.\n self.assertEqual(1, len(requests[1].runs))\n self.assertEqual(\"test\", requests[1].runs[0].name)\n self.assertEqual(1, len(requests[1].runs[0].tags))\n self.assertEqual(\"two\", requests[1].runs[0].tags[0].name)\n\n def test_wall_time_precision(self):\n # Test a wall time that is exactly representable in float64 but has enough\n # digits to incur error if converted to nanoseconds the naive way (* 1e9).\n event_1 = event_pb2.Event(step=1, wall_time=1567808404.765432119)\n event_1.summary.value.add(\n tag=\"tag\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[1.0]\n ),\n )\n # Test a wall time where as a float64, the fractional part on its own will\n # introduce error if truncated to 9 decimal places instead of rounded.\n event_2 = event_pb2.Event(step=2, wall_time=1.000000002)\n event_2.summary.value.add(\n tag=\"tag\",\n tensor=tensor_pb2.TensorProto(\n dtype=types_pb2.DT_DOUBLE, double_val=[2.0]\n ),\n )\n run_proto = self._add_events_and_flush(\n 
_apply_compat([event_1, event_2])\n )\n self.assertEqual(\n test_util.timestamp_pb(1567808404765432119),\n run_proto.tags[0].points[0].wall_time,\n )\n self.assertEqual(\n test_util.timestamp_pb(1000000002),\n run_proto.tags[0].points[1].wall_time,\n )\n\n\nclass DeleteExperimentTest(tf.test.TestCase):\n def _create_mock_client(self):\n # Create a stub instance (using a test channel) in order to derive a mock\n # from it with autospec enabled. Mocking TensorBoardWriterServiceStub itself\n # doesn't work with autospec because grpc constructs stubs via metaclassing.\n test_channel = grpc_testing.channel(\n service_descriptors=[], time=grpc_testing.strict_real_time()\n )\n stub = write_service_pb2_grpc.TensorBoardWriterServiceStub(test_channel)\n mock_client = mock.create_autospec(stub)\n return mock_client\n\n def test_success(self):\n mock_client = _create_mock_client()\n response = write_service_pb2.DeleteExperimentResponse()\n mock_client.DeleteExperiment.return_value = response\n\n uploader_lib.delete_experiment(mock_client, \"123\")\n\n expected_request = write_service_pb2.DeleteExperimentRequest()\n expected_request.experiment_id = \"123\"\n mock_client.DeleteExperiment.assert_called_once()\n (args, _) = mock_client.DeleteExperiment.call_args\n self.assertEqual(args[0], expected_request)\n\n def test_not_found(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.NOT_FOUND, \"nope\")\n mock_client.DeleteExperiment.side_effect = error\n\n with self.assertRaises(uploader_lib.ExperimentNotFoundError):\n uploader_lib.delete_experiment(mock_client, \"123\")\n\n def test_unauthorized(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.PERMISSION_DENIED, \"nope\")\n mock_client.DeleteExperiment.side_effect = error\n\n with self.assertRaises(uploader_lib.PermissionDeniedError):\n uploader_lib.delete_experiment(mock_client, \"123\")\n\n def test_internal_error(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.INTERNAL, \"travesty\")\n mock_client.DeleteExperiment.side_effect = error\n\n with self.assertRaises(grpc.RpcError) as cm:\n uploader_lib.delete_experiment(mock_client, \"123\")\n msg = str(cm.exception)\n self.assertIn(\"travesty\", msg)\n\n\nclass UpdateExperimentMetadataTest(tf.test.TestCase):\n def _create_mock_client(self):\n # Create a stub instance (using a test channel) in order to derive a mock\n # from it with autospec enabled. 
Mocking TensorBoardWriterServiceStub itself\n # doesn't work with autospec because grpc constructs stubs via metaclassing.\n test_channel = grpc_testing.channel(\n service_descriptors=[], time=grpc_testing.strict_real_time()\n )\n stub = write_service_pb2_grpc.TensorBoardWriterServiceStub(test_channel)\n mock_client = mock.create_autospec(stub)\n return mock_client\n\n def test_success(self):\n mock_client = _create_mock_client()\n new_name = \"a new name\"\n response = write_service_pb2.UpdateExperimentResponse()\n mock_client.UpdateExperiment.return_value = response\n\n uploader_lib.update_experiment_metadata(\n mock_client, \"123\", name=new_name\n )\n\n expected_request = write_service_pb2.UpdateExperimentRequest(\n experiment=experiment_pb2.Experiment(\n experiment_id=\"123\", name=new_name\n ),\n experiment_mask=experiment_pb2.ExperimentMask(name=True),\n )\n mock_client.UpdateExperiment.assert_called_once()\n (args, _) = mock_client.UpdateExperiment.call_args\n self.assertEqual(args[0], expected_request)\n\n def test_not_found(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.NOT_FOUND, \"nope\")\n mock_client.UpdateExperiment.side_effect = error\n\n with self.assertRaises(uploader_lib.ExperimentNotFoundError):\n uploader_lib.update_experiment_metadata(mock_client, \"123\", name=\"\")\n\n def test_unauthorized(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.PERMISSION_DENIED, \"nope\")\n mock_client.UpdateExperiment.side_effect = error\n\n with self.assertRaises(uploader_lib.PermissionDeniedError):\n uploader_lib.update_experiment_metadata(mock_client, \"123\", name=\"\")\n\n def test_invalid_argument(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(\n grpc.StatusCode.INVALID_ARGUMENT, \"too many\"\n )\n mock_client.UpdateExperiment.side_effect = error\n\n with self.assertRaises(uploader_lib.InvalidArgumentError) as cm:\n uploader_lib.update_experiment_metadata(mock_client, \"123\", name=\"\")\n msg = str(cm.exception)\n self.assertIn(\"too many\", msg)\n\n def test_internal_error(self):\n mock_client = _create_mock_client()\n error = test_util.grpc_error(grpc.StatusCode.INTERNAL, \"travesty\")\n mock_client.UpdateExperiment.side_effect = error\n\n with self.assertRaises(grpc.RpcError) as cm:\n uploader_lib.update_experiment_metadata(mock_client, \"123\", name=\"\")\n msg = str(cm.exception)\n self.assertIn(\"travesty\", msg)\n\n\nclass VarintCostTest(tf.test.TestCase):\n def test_varint_cost(self):\n self.assertEqual(uploader_lib._varint_cost(0), 1)\n self.assertEqual(uploader_lib._varint_cost(7), 1)\n self.assertEqual(uploader_lib._varint_cost(127), 1)\n self.assertEqual(uploader_lib._varint_cost(128), 2)\n self.assertEqual(uploader_lib._varint_cost(128 * 128 - 1), 2)\n self.assertEqual(uploader_lib._varint_cost(128 * 128), 3)\n\n\ndef _clear_wall_times(request):\n \"\"\"Clears the wall_time fields in a WriteScalarRequest to be\n deterministic.\"\"\"\n for run in request.runs:\n for tag in run.tags:\n for point in tag.points:\n point.ClearField(\"wall_time\")\n\n\ndef _apply_compat(events):\n initial_metadata = {}\n for event in events:\n event = data_compat.migrate_event(event)\n events = dataclass_compat.migrate_event(\n event, initial_metadata=initial_metadata\n )\n for event in events:\n yield event\n\n\ndef _extract_tag_counts(run_proto):\n return {tag.name: len(tag.points) for tag in run_proto.tags}\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Proto match tests between `tensorboard.compat.proto` and TensorFlow.\n\nThese tests verify that the local copy of TensorFlow protos are the same\nas those available directly from TensorFlow. Local protos are used to\nbuild `tensorboard-notf` without a TensorFlow dependency.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport difflib\nimport importlib\n\nimport tensorflow as tf\nfrom google.protobuf import descriptor_pb2\n\n\n# Keep this list synced with BUILD in current directory\nPROTO_IMPORTS = [\n (\n \"tensorflow.core.framework.allocation_description_pb2\",\n \"tensorboard.compat.proto.allocation_description_pb2\",\n ),\n (\n \"tensorflow.core.framework.api_def_pb2\",\n \"tensorboard.compat.proto.api_def_pb2\",\n ),\n (\n \"tensorflow.core.framework.attr_value_pb2\",\n \"tensorboard.compat.proto.attr_value_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.cluster_pb2\",\n \"tensorboard.compat.proto.cluster_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.config_pb2\",\n \"tensorboard.compat.proto.config_pb2\",\n ),\n (\n \"tensorflow.core.framework.cost_graph_pb2\",\n \"tensorboard.compat.proto.cost_graph_pb2\",\n ),\n (\n \"tensorflow.python.framework.cpp_shape_inference_pb2\",\n \"tensorboard.compat.proto.cpp_shape_inference_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.debug_pb2\",\n \"tensorboard.compat.proto.debug_pb2\",\n ),\n (\"tensorflow.core.util.event_pb2\", \"tensorboard.compat.proto.event_pb2\"),\n (\n \"tensorflow.core.framework.function_pb2\",\n \"tensorboard.compat.proto.function_pb2\",\n ),\n (\n \"tensorflow.core.framework.graph_pb2\",\n \"tensorboard.compat.proto.graph_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.meta_graph_pb2\",\n \"tensorboard.compat.proto.meta_graph_pb2\",\n ),\n (\n \"tensorflow.core.framework.node_def_pb2\",\n \"tensorboard.compat.proto.node_def_pb2\",\n ),\n (\n \"tensorflow.core.framework.op_def_pb2\",\n \"tensorboard.compat.proto.op_def_pb2\",\n ),\n (\n \"tensorflow.core.framework.resource_handle_pb2\",\n \"tensorboard.compat.proto.resource_handle_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.rewriter_config_pb2\",\n \"tensorboard.compat.proto.rewriter_config_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.saved_object_graph_pb2\",\n \"tensorboard.compat.proto.saved_object_graph_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.saver_pb2\",\n \"tensorboard.compat.proto.saver_pb2\",\n ),\n (\n \"tensorflow.core.framework.step_stats_pb2\",\n \"tensorboard.compat.proto.step_stats_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.struct_pb2\",\n \"tensorboard.compat.proto.struct_pb2\",\n ),\n (\n \"tensorflow.core.framework.summary_pb2\",\n \"tensorboard.compat.proto.summary_pb2\",\n ),\n (\n \"tensorflow.core.framework.tensor_pb2\",\n \"tensorboard.compat.proto.tensor_pb2\",\n ),\n (\n 
\"tensorflow.core.framework.tensor_description_pb2\",\n \"tensorboard.compat.proto.tensor_description_pb2\",\n ),\n (\n \"tensorflow.core.framework.tensor_shape_pb2\",\n \"tensorboard.compat.proto.tensor_shape_pb2\",\n ),\n (\n \"tensorflow.core.profiler.tfprof_log_pb2\",\n \"tensorboard.compat.proto.tfprof_log_pb2\",\n ),\n (\n \"tensorflow.core.protobuf.trackable_object_graph_pb2\",\n \"tensorboard.compat.proto.trackable_object_graph_pb2\",\n ),\n (\n \"tensorflow.core.framework.types_pb2\",\n \"tensorboard.compat.proto.types_pb2\",\n ),\n (\n \"tensorflow.core.framework.variable_pb2\",\n \"tensorboard.compat.proto.variable_pb2\",\n ),\n (\n \"tensorflow.core.framework.versions_pb2\",\n \"tensorboard.compat.proto.versions_pb2\",\n ),\n]\n\nPROTO_REPLACEMENTS = [\n (\"tensorflow/core/framework/\", \"tensorboard/compat/proto/\"),\n (\"tensorflow/core/protobuf/\", \"tensorboard/compat/proto/\"),\n (\"tensorflow/core/profiler/\", \"tensorboard/compat/proto/\"),\n (\"tensorflow/python/framework/\", \"tensorboard/compat/proto/\"),\n (\"tensorflow/core/util/\", \"tensorboard/compat/proto/\"),\n ('package: \"tensorflow.tfprof\"', 'package: \"tensorboard\"'),\n ('package: \"tensorflow\"', 'package: \"tensorboard\"'),\n ('type_name: \".tensorflow.tfprof', 'type_name: \".tensorboard'),\n ('type_name: \".tensorflow', 'type_name: \".tensorboard'),\n]\n\n\nMATCH_FAIL_MESSAGE_TEMPLATE = \"\"\"\n{}\n\nNOTE!\n====\nThis is expected to happen when TensorFlow updates their proto definitions.\nWe pin copies of the protos, but TensorFlow can freely update them at any\ntime.\n\nThe proper fix is:\n\n1. In your TensorFlow clone, check out the version of TensorFlow whose\n protos you want to update (e.g., `git checkout v2.2.0-rc0`)\n2. In your tensorboard repo, run:\n\n ./tensorboard/compat/proto/update.sh PATH_TO_TENSORFLOW_REPO\n\n3. Review and commit any changes.\n\"\"\"\n\n\nclass ProtoMatchTest(tf.test.TestCase):\n def test_each_proto_matches_tensorflow(self):\n failed_diffs = []\n for tf_path, tb_path in PROTO_IMPORTS:\n tf_pb2 = importlib.import_module(tf_path)\n tb_pb2 = importlib.import_module(tb_path)\n tf_descriptor = descriptor_pb2.FileDescriptorProto()\n tb_descriptor = descriptor_pb2.FileDescriptorProto()\n tf_pb2.DESCRIPTOR.CopyToProto(tf_descriptor)\n tb_pb2.DESCRIPTOR.CopyToProto(tb_descriptor)\n\n # Convert expected to be actual since this matches the\n # replacements done in proto/update.sh\n tb_string = str(tb_descriptor)\n tf_string = str(tf_descriptor)\n for orig, repl in PROTO_REPLACEMENTS:\n tf_string = tf_string.replace(orig, repl)\n\n diff = difflib.unified_diff(\n tb_string.splitlines(1),\n tf_string.splitlines(1),\n fromfile=tb_path,\n tofile=tf_path,\n )\n diff = \"\".join(diff)\n\n if diff:\n failed_diffs.append(diff)\n if failed_diffs:\n self.fail(MATCH_FAIL_MESSAGE_TEMPLATE.format(\"\".join(failed_diffs)))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
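
The partially shown row that closes above ends with a VarintCostTest pinning down protobuf varint byte counts. As a hedged illustration only (not code from that repository), the rule those assertions encode can be reproduced in a few self-contained lines: a varint stores 7 payload bits per byte plus a continuation bit, so a non-negative integer n costs max(1, ceil(n.bit_length() / 7)) bytes.

def varint_cost(n: int) -> int:
    """Bytes needed to encode a non-negative integer as a protobuf varint (sketch)."""
    if n < 0:
        raise ValueError("this sketch assumes non-negative inputs")
    cost = 1
    while n >= 0x80:      # every byte carries 7 bits of payload and 1 continuation bit
        n >>= 7
        cost += 1
    return cost

# Spot checks mirroring the assertions in the test code above.
assert varint_cost(0) == 1
assert varint_cost(127) == 1
assert varint_cost(128) == 2
assert varint_cost(128 * 128 - 1) == 2
assert varint_cost(128 * 128) == 3
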
nikit91/ERNIE | [
"a40e498e5b0adbfdce39a478737418199f773d96"
] | [
"code/run_tacred_new.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport logging\nimport argparse\nimport random\nfrom tqdm import tqdm, trange\nimport simplejson as json\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom knowledge_bert.tokenization import BertTokenizer\nfrom knowledge_bert.modeling import BertForSequenceClassification\nfrom knowledge_bert.optimization import BertAdam\nfrom knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef lim_ent_map(index, filePath):\n valid_ents = {}\n with open(filePath, 'r') as fin:\n for line in fin:\n vec = line.strip().split('\\t')\n uniqid = int(vec[index])\n valid_ents[uniqid] = 1\n return valid_ents\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.input_ent = input_ent\n self.ent_mask = ent_mask\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n \n @classmethod\n def _read_json(cls, input_file):\n with open(input_file, \"r\", encoding='utf-8') as f:\n return json.loads(f.read())\n\nclass TacredProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n examples = self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")\n labels = set([x.label for x in examples])\n return examples, list(labels)\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")\n\n def get_labels(self):\n \"\"\"Useless\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n for x in line['ents']:\n if x[1] == 1:\n x[1] = 0\n #print(line['text'][x[1]:x[2]].encode(\"utf-8\"))\n text_a = (line['text'], line['ents'])\n label = line['label']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=line['ann'], label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n \n label_list = sorted(label_list)\n label_map = {label : i for i, label in enumerate(label_list)}\n\n entity2id = {}\n with open(\"kg_embed/entity2id.txt\") as fin:\n fin.readline()\n for line in fin:\n qid, eid = line.strip().split('\\t')\n entity2id[qid] = int(eid)\n\n features = []\n for (ex_index, example) in enumerate(examples):\n\n ex_text_a = example.text_a[0]\n h, t = example.text_a[1]\n h_name = ex_text_a[h[1]:h[2]]\n t_name = ex_text_a[t[1]:t[2]]\n #ex_text_a = ex_text_a.replace(h_name, \"# \"+h_name+\" #\", 1)\n #ex_text_a = ex_text_a.replace(t_name, \"$ \"+t_name+\" $\", 1)\n # Add [HD] and [TL], which are \"#\" and \"$\" respectively.\n if h[1] < t[1]:\n ex_text_a = ex_text_a[:h[1]] + \"# \"+h_name+\" #\" + ex_text_a[h[2]:t[1]] + \"$ \"+t_name+\" $\" + ex_text_a[t[2]:]\n else:\n ex_text_a = ex_text_a[:t[1]] + \"$ \"+t_name+\" $\" + ex_text_a[t[2]:h[1]] + \"# \"+h_name+\" #\" + ex_text_a[h[2]:]\n\n ent_pos = [x for x in example.text_b if x[-1]>threshold]\n for x in ent_pos:\n cnt = 0\n if x[1] > h[2]:\n cnt += 2\n if x[1] >= h[1]:\n cnt += 2\n if x[1] >= t[1]:\n cnt += 2\n if x[1] > t[2]:\n cnt += 2\n 
x[1] += cnt\n x[2] += cnt\n tokens_a, entities_a = tokenizer.tokenize(ex_text_a, ent_pos)\n '''\n cnt = 0\n for x in entities_a:\n if x != \"UNK\":\n cnt += 1\n if cnt != len(ent_pos) and ent_pos[0][0] != 'Q46809':\n print(cnt, len(ent_pos))\n print(ex_text_a)\n print(ent_pos)\n for x in ent_pos:\n print(ex_text_a[x[1]:x[2]])\n exit(1)\n '''\n\n tokens_b = None\n if False:\n tokens_b, entities_b = tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n entities_a = entities_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n ents = [\"UNK\"] + entities_a + [\"UNK\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n ents += entities_b + [\"UNK\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_ent = []\n ent_mask = []\n for ent in ents:\n if ent != \"UNK\" and ent in entity2id:\n input_ent.append(entity2id[ent])\n ent_mask.append(1)\n else:\n input_ent.append(-1)\n ent_mask.append(0)\n ent_mask[0] = 1\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n padding_ = [-1] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n input_ent += padding_\n ent_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(input_ent) == max_seq_length\n assert len(ent_mask) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"ents: %s\" % \" \".join(\n [str(x) for x in ents]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n input_ent=input_ent,\n ent_mask=ent_mask,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n ents_a.pop()\n else:\n tokens_b.pop()\n ents_b.pop()\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 1.0\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--ernie_model\", default=None, type=str, required=True,\n help=\"Ernie pre-trained model\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n default=False,\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n default=False,\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n default=False,\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.0,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n default=False,\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n default=False,\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--threshold', type=float, default=.3)\n parser.add_argument(\"--vec_file\",\n default=None,\n type=str,\n required=True,\n help=\"File with embeddings\")\n parser.add_argument(\"--use_lim_ents\",\n default=None,\n type=str,\n required=True,\n help=\"Whether to use limited entities\")\n\n args = parser.parse_args()\n\n processors = TacredProcessor\n\n num_labels_task = 80\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n os.makedirs(args.output_dir, exist_ok=True)\n\n\n processor = processors()\n label_list = None\n\n tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)\n\n train_examples = None\n num_train_steps = None\n train_examples, label_list = processor.get_train_examples(args.data_dir)\n num_labels = len(label_list)\n \n num_train_steps = int(\n len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)\n\n # Prepare model\n model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,\n cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),\n num_labels = num_labels)\n # if args.fp16:\n # model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']\n param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n if 
args.local_rank != -1:\n t_total = t_total // torch.distributed.get_world_size()\n if args.fp16:\n try:\n from apex import amp\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=True)\n if args.loss_scale == 0:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O2\")\n # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O2\", loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=t_total)\n global_step = 0\n if args.do_train:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n\n # check for limited ents\n lim_ents = {}\n lim_check = (args.use_lim_ents == \"y\")\n if lim_check:\n lim_ents = lim_ent_map(1, \"kg_embeddings/dbp_eid_2_wd_eid.txt\")\n logger.info(\"Limited entities flag is on. Count of unique entities considered: \" + str(len(lim_ents)))\n\n vecs = []\n vecs.append([0] * 100) # CLS\n lineindex = 1\n logger.info(\"Reading embeddings file from: \" + str(args.vec_file))\n with open(args.vec_file, 'r') as fin:\n for line in fin:\n vec = line.strip().split('\\t')\n if (lim_check and (lineindex in lim_ents)) or not lim_check:\n vec = [float(x) for x in vec]\n else:\n vec = vecs[0]\n vecs.append(vec)\n # increment line index\n lineindex = lineindex + 1\n embed = torch.FloatTensor(vecs)\n embed = torch.nn.Embedding.from_pretrained(embed)\n #embed = torch.nn.Embedding(5041175, 100)\n\n logger.info(\"Shape of entity embedding: \"+str(embed.weight.size()))\n del vecs\n\n # zeros = [0 for _ in range(args.max_seq_length)]\n # zeros_ent = [0 for _ in range(100)]\n # zeros_ent = [zeros_ent for _ in range(args.max_seq_length)]\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)\n all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n output_loss_file = os.path.join(args.output_dir, \"loss\")\n loss_fout = open(output_loss_file, 'w')\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))\n input_ids, 
input_mask, segment_ids, input_ent, ent_mask, label_ids = batch\n input_ent = embed(input_ent+1).to(device) # -1 -> 0\n loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n loss_fout.write(\"{}\\n\".format(loss.item()))\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n # modify learning rate with special warm up BERT uses\n lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n model_to_save = model.module if hasattr(model, 'module') else model\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin_{}\".format(global_step))\n torch.save(model_to_save.state_dict(), output_model_file)\n\n # Save a trained model\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin\")\n torch.save(model_to_save.state_dict(), output_model_file)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torch.nn.Embedding.from_pretrained",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.tensor",
"numpy.argmax",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"numpy.sum",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
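
The run_tacred_new.py row above builds its entity features by reading TSV vectors into a frozen torch.nn.Embedding whose row 0 is reserved for "no entity", then shifting padded ids of -1 up by one so they land on that zero row (the embed(input_ent + 1) call). A minimal sketch of that lookup pattern, with invented toy vectors in place of the repository's kg_embed files:

import torch

dim = 100
entity_vecs = torch.randn(5, dim)                      # stand-in for vectors read from a TSV file
table = torch.cat([torch.zeros(1, dim), entity_vecs])  # index 0 reserved for padding / "UNK"
embed = torch.nn.Embedding.from_pretrained(table)      # frozen lookup table (freeze=True by default)

input_ent = torch.tensor([[2, 0, -1, -1]])             # -1 marks positions with no entity
ent_vectors = embed(input_ent + 1)                     # shift ids so -1 maps to the zero row
assert ent_vectors.shape == (1, 4, dim)
assert bool(torch.all(ent_vectors[0, 2] == 0))         # padded slots get the all-zero vector
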
codesankalp/Indian-Road-Safety | [
"0892da419707a8ccc1d42d6f68aa20ab085f41f9"
] | [
"webapp/api/mlmodel.py"
] | [
"from .utils.label_map import convert_label_map_to_categories, load_labelmap, create_category_index\nfrom .utils.visualize import *\nimport os\nfrom django.conf import settings\nimport numpy as np\nimport sys\nimport tarfile\nimport tensorflow.compat.v1 as tf\nimport zipfile\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport base64\nimport io\n\n\ntf.disable_v2_behavior()\n\nmodel_path = settings.BASE_DIR / settings.ML_MODEL\nlabel_path = settings.BASE_DIR / settings.ML_LABEL\nNUM_CLASSES = 8\nIMAGE_SIZE = (12, 10)\n\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\nlabel_map = load_labelmap(label_path)\ncategories = convert_label_map_to_categories(\n label_map, max_num_classes=NUM_CLASSES, use_display_name=True\n)\ncategory_index = create_category_index(categories)\n\n\ndef detect(image_path):\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name(\n 'detection_scores:0'\n )\n detection_classes = detection_graph.get_tensor_by_name(\n 'detection_classes:0'\n )\n num_detections = detection_graph.get_tensor_by_name(\n 'num_detections:0'\n )\n image = Image.open(image_path)\n# print(image)\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n image_np = load_image_into_numpy_array(image)\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [\n detection_boxes,\n detection_scores,\n detection_classes,\n num_detections\n ],\n feed_dict={image_tensor: image_np_expanded}\n )\n # print(np.squeeze(boxes),\n # np.squeeze(classes).astype(np.int32),\n # np.squeeze(scores), num, category_index, sep=\"\\n\")\n # Visualization of the results of a detection.\n _, ls = visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n min_score_thresh=0.3,\n use_normalized_coordinates=True,\n line_thickness=8\n )\n avg = sum(ls)/len(ls)\n plt.figure(figsize=IMAGE_SIZE)\n plt.axis('off')\n plt.imshow(image_np)\n pic_IObytes = io.BytesIO()\n img = io.BytesIO()\n plt.savefig(img, format='png')\n plt.savefig(pic_IObytes, format='png')\n pic_IObytes.seek(0)\n pic_hash = base64.b64encode(pic_IObytes.read())\n return (boxes, scores, classes, num, pic_hash, img, avg)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"tensorflow.compat.v1.import_graph_def",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.squeeze",
"matplotlib.pyplot.savefig",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.gfile.GFile",
"matplotlib.pyplot.axis",
"tensorflow.compat.v1.GraphDef",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
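
The mlmodel.py row above follows the classic TF1 frozen-graph workflow through tensorflow.compat.v1: parse a serialized GraphDef, import it into a fresh Graph, then fetch the standard object-detection tensors inside a Session. The condensed sketch below assumes a conventional object-detection export; the .pb path argument, the tensor names, and the load_frozen_graph/run_detection helpers are illustrative, not values taken from the repository.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

def load_frozen_graph(pb_path):
    """Deserialize a frozen GraphDef file and import it into a new tf.Graph."""
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    return graph

def run_detection(graph, image_np):
    """Run one HxWx3 uint8 image through the imported detection graph."""
    with tf.Session(graph=graph) as sess:
        image_tensor = graph.get_tensor_by_name('image_tensor:0')
        fetches = [graph.get_tensor_by_name(name) for name in (
            'detection_boxes:0', 'detection_scores:0',
            'detection_classes:0', 'num_detections:0')]
        # The exported model expects a leading batch dimension: [1, H, W, 3].
        return sess.run(fetches, feed_dict={image_tensor: np.expand_dims(image_np, 0)})
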
xlnwel/g2rl | [
"e1261fdd2ce70724a99ddd174616cf013917b241",
"e1261fdd2ce70724a99ddd174616cf013917b241"
] | [
"nn/rnns/gru.py",
"core/optimizer.py"
] | [
"import tensorflow as tf\nfrom tensorflow.keras import layers, activations, initializers, regularizers, constraints\nfrom tensorflow.keras.mixed_precision import global_policy\n\nfrom core.module import Module\nfrom nn.registry import rnn_registry\nfrom nn.typing import GRUState\nfrom utility.tf_utils import assert_rank\n\n\nrnn_registry.register('gru')(layers.GRU)\n\n\nclass MGRUCell(layers.Layer):\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_update_bias=True,\n use_ln=False,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super().__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n self.use_ln = use_ln\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_update_bias = unit_update_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.state_size = GRUState(h=self.units)\n self.output_size = self.units\n\n def build(self, input_shapes):\n input_dim = input_shapes[0][-1]\n self.kernel = self.add_weight(\n shape=(input_dim + self.state_size[0], self.units * 3),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n # self.recurrent_kernel = self.add_weight(\n # shape=(self.units, self.units * 3),\n # name='recurrent_kernel',\n # initializer=self.recurrent_initializer,\n # regularizer=self.recurrent_regularizer,\n # constraint=self.recurrent_constraint)\n\n if self.use_bias:\n if self.unit_update_bias:\n def bias_initializer(_, *args, **kwargs):\n return tf.concat([\n self.bias_initializer((self.units * 2,), *args, **kwargs),\n -initializers.Ones()((self.units,), *args, **kwargs),\n ], -1)\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 3,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n if self.use_ln:\n self.x_ln = layers.LayerNormalization(name='x_ln')\n # self.h_ln = layers.LayerNormalization(name='h_ln')\n else:\n self.x_ln = lambda x: x\n # self.h_ln = lambda x: x\n\n def call(self, x, states):\n x, mask = tf.nest.flatten(x)\n h = states[0]\n assert_rank([x, h, mask], 2)\n if mask is not None:\n h = h * mask\n \n # it sigfinicantly increases the running time when separate normalizations are applied to x and h\n x = self.x_ln(tf.matmul(tf.concat([x, h], -1), self.kernel))\n # x = self.x_ln(tf.matmul(x, self.kernel)) + self.h_ln(tf.matmul(h, self.recurrent_kernel))\n if self.use_bias:\n x = tf.nn.bias_add(x, self.bias)\n r, c, z = tf.split(x, 3, 1)\n r, z = 
self.recurrent_activation(r), self.recurrent_activation(z)\n c = self.activation(c)\n h = z * c + (1-z) * h\n\n return h, GRUState(h)\n \n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n state_size = self.state_size\n if inputs is not None:\n assert batch_size is None or batch_size == tf.shape(inputs)[0]\n batch_size = tf.shape(inputs)[0]\n if dtype is None:\n dtype = global_policy().compute_dtype\n return GRUState(h=tf.zeros([batch_size, state_size[0]], dtype))\n\n\n@rnn_registry.register('mgru')\nclass MGRU(Module):\n def __init__(self, name='mgru', **config):\n super().__init__(name=name)\n config = config.copy()\n self._state_mask = config.pop('state_mask', True)\n cell = MGRUCell(**config)\n self._rnn = layers.RNN(cell, return_sequences=True, return_state=True)\n self.state_type = GRUState\n\n def call(self, x, state, mask, additional_input=[]):\n xs = [x] + additional_input\n mask = tf.expand_dims(mask, axis=-1)\n assert_rank(xs + [mask], 3)\n if not self._state_mask:\n # mask out inputs\n for i, v in enumerate(xs):\n xs[i] *= tf.cast(mask, v.dtype)\n x = tf.concat(xs, axis=-1) if len(xs) > 1 else xs[0]\n if not mask.dtype.is_compatible_with(global_policy().compute_dtype):\n mask = tf.cast(mask, global_policy().compute_dtype)\n x = self._rnn((x, mask), initial_state=state)\n x, state = x[0], GRUState(x[1])\n return x, state\n\n def reset_states(self, states=None):\n self._rnn.reset_states(states)\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n if inputs is None:\n assert batch_size is not None\n inputs = tf.zeros([batch_size, 1, 1])\n return GRUState(*self._rnn.cell.get_initial_state(inputs, dtype=dtype))\n\n @property\n def state_size(self):\n return self._rnn.cell.state_size\n\n @property\n def state_keys(self):\n return GRUState(*GRUState._fields)\n\n\nif __name__ == '__main__':\n from utility.timer import timeit\n # inputs\n shape = (32, 16, 256)\n x0 = tf.random.normal(shape)\n m = tf.random.uniform(shape[:2], 0, 2, dtype=tf.int32)\n em = tf.cast(m[..., None], tf.float32)\n run_times = 1000\n assert x0.shape.ndims == em.shape.ndims\n # keras lstm\n c = tf.keras.layers.GRUCell(256)\n l = tf.keras.layers.RNN(c, return_sequences=True, return_state=True)\n opt = tf.keras.optimizers.Adam(5e-5)\n lv = l.variables\n\n # def keras_gru_call():\n # for _ in range(run_times):\n # with tf.GradientTape() as tape:\n # x = l(x0, initial_state=None)\n # x, s = x[0], x[1:]\n # y = tf.ones_like(x)\n # loss = tf.reduce_mean((y-x)**2)\n # gs = tape.gradient(loss, lv)\n # opt.apply_gradients(zip(gs, lv))\n\n # timeit(keras_gru_call, to_print=True)\n\n # # custom lstm\n # c = MGRUCell(256)\n # l = tf.keras.layers.RNN(c, return_sequences=True, return_state=True)\n # opt = tf.keras.optimizers.Adam(5e-5)\n\n # def custom_gru_cell_call():\n # for _ in range(run_times):\n # with tf.GradientTape() as tape:\n # x = l((x0, em), initial_state=None)\n # x, s = x[0], x[1:]\n # y = tf.ones_like(x)\n # loss = tf.reduce_mean((y-x)**2)\n # gs = tape.gradient(loss, lv)\n # opt.apply_gradients(zip(gs, lv))\n \n # timeit(custom_gru_cell_call, to_print=True)\n\n # l = MGRU({'units': 256})\n # opt = tf.keras.optimizers.Adam(5e-5)\n\n # def custom_gru_call():\n # for _ in range(run_times):\n # with tf.GradientTape() as tape:\n # x, s = l(x0, None, m)\n # y = tf.ones_like(x)\n # loss = tf.reduce_mean((y-x)**2)\n # gs = tape.gradient(loss, lv)\n # opt.apply_gradients(zip(gs, lv))\n \n # timeit(custom_gru_call, to_print=True)\n\n c = MGRUCell(256, use_ln=True)\n l = 
tf.keras.layers.RNN(c, return_sequences=True, return_state=True)\n opt = tf.keras.optimizers.Adam(5e-5)\n\n def custom_gruln_call():\n for _ in range(run_times):\n with tf.GradientTape() as tape:\n x = l((x0, em), initial_state=None)\n x, s = x[0], x[1:]\n y = tf.ones_like(x)\n loss = tf.reduce_mean((y-x)**2)\n gs = tape.gradient(loss, lv)\n opt.apply_gradients(zip(gs, lv))\n \n timeit(custom_gruln_call, to_print=True)\n\n",
"import re\nimport logging\nimport tensorflow as tf\nfrom tensorflow.keras import mixed_precision as prec\n\nfrom core.log import do_logging\nfrom utility.schedule import TFPiecewiseSchedule\n\n\nlogger = logging.getLogger(__name__)\n\ndef select_optimizer(name):\n # add custom optimizers here\n opts = dict(\n adam=tf.keras.optimizers.Adam,\n rmsprop=tf.keras.optimizers.RMSprop,\n )\n if isinstance(name, str):\n return opts[name.lower()]\n return name\n\n\ndef create_optimizer(modules, config):\n if config.pop('schedule_lr', False):\n if not isinstance(config['lr'], (list, tuple)) \\\n or not isinstance(config['lr'][0], (list, tuple)):\n raise ValueError(f\"Require a list of tuples to schedule learning rate, but get lr={config['lr']}\")\n config['lr'] = TFPiecewiseSchedule(config['lr'])\n do_logging(f'The optimizer for modules{tuple(m.name for m in modules)} is constructed with arguments:', logger=logger)\n do_logging(config, prefix='\\t', logger=logger)\n opt = Optimizer(modules, **config)\n return opt\n\n\nclass Optimizer(tf.Module):\n def __init__(self, modules, *, opt_name='adam', lr, \n clip_norm=None, weight_decay=None, l2_reg=None,\n wdpattern=r'.*', scales=None, return_grads=False, \n **kwargs):\n self._modules = modules if isinstance(modules, (list, tuple)) else [modules]\n self._clip_norm = clip_norm\n self._weight_decay = weight_decay\n self._l2_reg = l2_reg\n self._wdpattern = wdpattern\n if scales is not None:\n assert isinstance(scales, (list, tuple)), scales\n assert len(scales) == len(self._modules), (len(scales), len(self._modules))\n self._scales = scales\n self._opt = select_optimizer(opt_name)(lr, **kwargs)\n self._return_grads = return_grads\n # useful for mixed precision training on GPUs to\n # avoid numerical underflow caused by using float16 gradients\n prec_policy = prec.global_policy()\n self._mpt = prec_policy.compute_dtype != prec_policy.variable_dtype\n if self._mpt:\n do_logging(\n 'Mixed precision training will be performed', \n logger=logger)\n self._opt = prec.LossScaleOptimizer(self._opt)\n # we do not initialize variables here as modules may not be initialized at this point\n self._variables = None\n\n def get_weights(self):\n return self._opt.get_weights()\n \n def set_weights(self, weights):\n self._opt.set_weights(weights)\n\n @property\n def variables(self):\n return self._opt.variables()\n \n def get_transformed_grads(self, var_list=[]):\n assert hasattr(self._opt, 'get_transformed_grads'), f'{self._opt} does not support \"get_transformed_grads\"'\n return self._opt.get_transformed_grads(var_list or self._variables)\n\n def __call__(self, tape=None, loss=None, grads=None, output_gradients=None):\n if loss is None and grads is None:\n raise ValueError('Neither loss nor grads is provided')\n if loss is not None and grads is not None:\n raise ValueError('Both loss and grads are provvided')\n if isinstance(loss, tf.Tensor) and loss.shape != ():\n raise ValueError(f'loss is expected to be a scalar Tensor, but get {loss}')\n\n if self._variables is None:\n variables = [m.trainable_variables for m in self._modules]\n for v, m in zip(variables, self._modules):\n do_logging(f'Found {len(v)} parameters for {m.name}', logger=logger)\n self._variables = tf.nest.flatten(variables)\n if self._scales is not None:\n scales = [[self._scales[i] for _ in m.trainable_variables] \n for i, m in enumerate(self._modules)]\n self._scales = tf.nest.flatten(scales)\n\n if grads is None:\n if tape is None:\n raise ValueError('tf.GradientTape is ')\n if self._l2_reg:\n loss = 
self._add_l2_regularization(loss)\n if self._mpt:\n with tape:\n loss = self._opt.get_scaled_loss(loss)\n grads = tape.gradient(loss, self._variables, output_gradients=output_gradients)\n if None in grads:\n raise ValueError(f'No grads for {self._variables[grads.index(None)].name}')\n if self._mpt:\n grads = self._opt.get_unscaled_gradients(grads)\n if self._scales is not None:\n assert len(grads) == len(self._scales), (len(grads), len(self._scales))\n grads = [g * s for g, s in zip(grads, self._scales)]\n norm = tf.linalg.global_norm(grads)\n if self._clip_norm:\n grads, _ = tf.clip_by_global_norm(grads, self._clip_norm, norm)\n if self._weight_decay:\n self._apply_weight_decay()\n self.grads = grads\n self._opt.apply_gradients(zip(grads, self._variables))\n\n if self._return_grads:\n return norm, {v.name: g for v, g in zip(self._variables, grads)}\n else:\n return norm\n \n def _add_l2_regularization(self, loss):\n do_logging(f'Apply L2 regularization with coefficient: {self._l2_reg}\\n\" \\\n \"Wait, are you sure you want to apply l2 regularization instead of weight decay?',\n logger=logger)\n for var in self._variables:\n loss += self._l2_reg * tf.nn.l2_loss(var)\n return loss\n\n def _apply_weight_decay(self):\n do_logging(f'Apply weight decay with coefficient: {self._weight_decay}',\n logger=logger)\n for var in self._variables:\n if re.search(self._wdpattern, var.name):\n print(var.name, self._weight_decay)\n var.assign((1 - self._weight_decay) * var)\n\nif __name__ == '__main__':\n l = tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(.01))\n tf.random.set_seed(0)\n opt = Optimizer('adam', l, 1, weight_decay=.1)\n x = tf.random.normal((32, 2))\n with tf.GradientTape() as t:\n y = l(x)\n loss = tf.reduce_mean((y - 1)**2)\n opt(t, loss)\n print(l.variables)\n "
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.nest.flatten",
"tensorflow.keras.initializers.Ones",
"tensorflow.keras.mixed_precision.global_policy",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.GRUCell",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.split",
"tensorflow.GradientTape",
"tensorflow.nn.bias_add",
"tensorflow.keras.layers.RNN",
"tensorflow.keras.constraints.get",
"tensorflow.reduce_mean",
"tensorflow.keras.regularizers.get",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.activations.get",
"tensorflow.random.normal"
],
[
"tensorflow.keras.mixed_precision.LossScaleOptimizer",
"tensorflow.reduce_mean",
"tensorflow.keras.regularizers.l2",
"tensorflow.linalg.global_norm",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.l2_loss",
"tensorflow.keras.mixed_precision.global_policy",
"tensorflow.nest.flatten",
"tensorflow.random.normal",
"tensorflow.random.set_seed",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
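
The core/optimizer.py row above centers its update step on tf.linalg.global_norm and tf.clip_by_global_norm, optionally wrapped in a mixed-precision LossScaleOptimizer. A small self-contained sketch of just the clipping step, on an invented toy layer and loss (loss scaling, weight decay and the module bookkeeping are left out):

import tensorflow as tf

layer = tf.keras.layers.Dense(1)
opt = tf.keras.optimizers.Adam(1e-3)
clip_norm = 0.5

x = tf.random.normal((32, 2))
y = tf.ones((32, 1))

with tf.GradientTape() as tape:
    loss = tf.reduce_mean((layer(x) - y) ** 2)

grads = tape.gradient(loss, layer.trainable_variables)
norm = tf.linalg.global_norm(grads)                    # pre-clip global norm, reported back to the caller
grads, _ = tf.clip_by_global_norm(grads, clip_norm, use_norm=norm)
opt.apply_gradients(zip(grads, layer.trainable_variables))
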
hwaipy/InteractionFreeNode | [
"88642b68430f57b028fd0f276a5709f89279e30d",
"88642b68430f57b028fd0f276a5709f89279e30d",
"88642b68430f57b028fd0f276a5709f89279e30d"
] | [
"runtime/python/Lib/site-packages/numpy/fft/_pocketfft.py",
"runtime/python/Lib/site-packages/numpy/core/tests/test_mem_overlap.py",
"runtime/python/Lib/site-packages/numpy/lib/scimath.py"
] | [
"\"\"\"\r\nDiscrete Fourier Transforms\r\n\r\nRoutines in this module:\r\n\r\nfft(a, n=None, axis=-1, norm=\"backward\")\r\nifft(a, n=None, axis=-1, norm=\"backward\")\r\nrfft(a, n=None, axis=-1, norm=\"backward\")\r\nirfft(a, n=None, axis=-1, norm=\"backward\")\r\nhfft(a, n=None, axis=-1, norm=\"backward\")\r\nihfft(a, n=None, axis=-1, norm=\"backward\")\r\nfftn(a, s=None, axes=None, norm=\"backward\")\r\nifftn(a, s=None, axes=None, norm=\"backward\")\r\nrfftn(a, s=None, axes=None, norm=\"backward\")\r\nirfftn(a, s=None, axes=None, norm=\"backward\")\r\nfft2(a, s=None, axes=(-2,-1), norm=\"backward\")\r\nifft2(a, s=None, axes=(-2, -1), norm=\"backward\")\r\nrfft2(a, s=None, axes=(-2,-1), norm=\"backward\")\r\nirfft2(a, s=None, axes=(-2, -1), norm=\"backward\")\r\n\r\ni = inverse transform\r\nr = transform of purely real data\r\nh = Hermite transform\r\nn = n-dimensional transform\r\n2 = 2-dimensional transform\r\n(Note: 2D routines are just nD routines with different default\r\nbehavior.)\r\n\r\n\"\"\"\r\n__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',\r\n 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']\r\n\r\nimport functools\r\n\r\nfrom numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt\r\nfrom . import _pocketfft_internal as pfi\r\nfrom numpy.core.multiarray import normalize_axis_index\r\nfrom numpy.core import overrides\r\n\r\n\r\narray_function_dispatch = functools.partial(\r\n overrides.array_function_dispatch, module='numpy.fft')\r\n\r\n\r\n# `inv_norm` is a float by which the result of the transform needs to be\r\n# divided. This replaces the original, more intuitive 'fct` parameter to avoid\r\n# divisions by zero (or alternatively additional checks) in the case of\r\n# zero-length axes during its computation.\r\ndef _raw_fft(a, n, axis, is_real, is_forward, inv_norm):\r\n axis = normalize_axis_index(axis, a.ndim)\r\n if n is None:\r\n n = a.shape[axis]\r\n\r\n fct = 1/inv_norm\r\n\r\n if a.shape[axis] != n:\r\n s = list(a.shape)\r\n index = [slice(None)]*len(s)\r\n if s[axis] > n:\r\n index[axis] = slice(0, n)\r\n a = a[tuple(index)]\r\n else:\r\n index[axis] = slice(0, s[axis])\r\n s[axis] = n\r\n z = zeros(s, a.dtype.char)\r\n z[tuple(index)] = a\r\n a = z\r\n\r\n if axis == a.ndim-1:\r\n r = pfi.execute(a, is_real, is_forward, fct)\r\n else:\r\n a = swapaxes(a, axis, -1)\r\n r = pfi.execute(a, is_real, is_forward, fct)\r\n r = swapaxes(r, axis, -1)\r\n return r\r\n\r\n\r\ndef _get_forward_norm(n, norm):\r\n if n < 1:\r\n raise ValueError(f\"Invalid number of FFT data points ({n}) specified.\")\r\n\r\n if norm is None or norm == \"backward\":\r\n return 1\r\n elif norm == \"ortho\":\r\n return sqrt(n)\r\n elif norm == \"forward\":\r\n return n\r\n raise ValueError(f'Invalid norm value {norm}; should be \"backward\",'\r\n '\"ortho\" or \"forward\".')\r\n\r\n\r\ndef _get_backward_norm(n, norm):\r\n if n < 1:\r\n raise ValueError(f\"Invalid number of FFT data points ({n}) specified.\")\r\n\r\n if norm is None or norm == \"backward\":\r\n return n\r\n elif norm == \"ortho\":\r\n return sqrt(n)\r\n elif norm == \"forward\":\r\n return 1\r\n raise ValueError(f'Invalid norm value {norm}; should be \"backward\", '\r\n '\"ortho\" or \"forward\".')\r\n\r\n\r\n_SWAP_DIRECTION_MAP = {\"backward\": \"forward\", None: \"forward\",\r\n \"ortho\": \"ortho\", \"forward\": \"backward\"}\r\n\r\n\r\ndef _swap_direction(norm):\r\n try:\r\n return _SWAP_DIRECTION_MAP[norm]\r\n except KeyError:\r\n raise ValueError(f'Invalid norm value {norm}; should 
be \"backward\", '\r\n '\"ortho\" or \"forward\".') from None\r\n\r\n\r\ndef _fft_dispatcher(a, n=None, axis=None, norm=None):\r\n return (a,)\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef fft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Compute the one-dimensional discrete Fourier Transform.\r\n\r\n This function computes the one-dimensional *n*-point discrete Fourier\r\n Transform (DFT) with the efficient Fast Fourier Transform (FFT)\r\n algorithm [CT].\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex.\r\n n : int, optional\r\n Length of the transformed axis of the output.\r\n If `n` is smaller than the length of the input, the input is cropped.\r\n If it is larger, the input is padded with zeros. If `n` is not given,\r\n the length of the input along the axis specified by `axis` is used.\r\n axis : int, optional\r\n Axis over which to compute the FFT. If not given, the last axis is\r\n used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `axis` is not a valid axis of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : for definition of the DFT and conventions used.\r\n ifft : The inverse of `fft`.\r\n fft2 : The two-dimensional FFT.\r\n fftn : The *n*-dimensional FFT.\r\n rfftn : The *n*-dimensional FFT of real input.\r\n fftfreq : Frequency bins for given FFT parameters.\r\n\r\n Notes\r\n -----\r\n FFT (Fast Fourier Transform) refers to a way the discrete Fourier\r\n Transform (DFT) can be calculated efficiently, by using symmetries in the\r\n calculated terms. The symmetry is highest when `n` is a power of 2, and\r\n the transform is therefore most efficient for these sizes.\r\n\r\n The DFT is defined, with the conventions used in this implementation, in\r\n the documentation for the `numpy.fft` module.\r\n\r\n References\r\n ----------\r\n .. [CT] Cooley, James W., and John W. Tukey, 1965, \"An algorithm for the\r\n machine calculation of complex Fourier series,\" *Math. 
Comput.*\r\n 19: 297-301.\r\n\r\n Examples\r\n --------\r\n >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))\r\n array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,\r\n 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,\r\n -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,\r\n 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])\r\n\r\n In this example, real input has an FFT which is Hermitian, i.e., symmetric\r\n in the real part and anti-symmetric in the imaginary part, as described in\r\n the `numpy.fft` documentation:\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> t = np.arange(256)\r\n >>> sp = np.fft.fft(np.sin(t))\r\n >>> freq = np.fft.fftfreq(t.shape[-1])\r\n >>> plt.plot(freq, sp.real, freq, sp.imag)\r\n [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = a.shape[axis]\r\n inv_norm = _get_forward_norm(n, norm)\r\n output = _raw_fft(a, n, axis, False, True, inv_norm)\r\n return output\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef ifft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Compute the one-dimensional inverse discrete Fourier Transform.\r\n\r\n This function computes the inverse of the one-dimensional *n*-point\r\n discrete Fourier transform computed by `fft`. In other words,\r\n ``ifft(fft(a)) == a`` to within numerical accuracy.\r\n For a general description of the algorithm and definitions,\r\n see `numpy.fft`.\r\n\r\n The input should be ordered in the same way as is returned by `fft`,\r\n i.e.,\r\n\r\n * ``a[0]`` should contain the zero frequency term,\r\n * ``a[1:n//2]`` should contain the positive-frequency terms,\r\n * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in\r\n increasing order starting from the most negative frequency.\r\n\r\n For an even number of input points, ``A[n//2]`` represents the sum of\r\n the values at the positive and negative Nyquist frequencies, as the two\r\n are aliased together. See `numpy.fft` for details.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex.\r\n n : int, optional\r\n Length of the transformed axis of the output.\r\n If `n` is smaller than the length of the input, the input is cropped.\r\n If it is larger, the input is padded with zeros. If `n` is not given,\r\n the length of the input along the axis specified by `axis` is used.\r\n See notes about padding issues.\r\n axis : int, optional\r\n Axis over which to compute the inverse DFT. If not given, the last\r\n axis is used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. 
versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `axis` is not a valid axis of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : An introduction, with definitions and general explanations.\r\n fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse\r\n ifft2 : The two-dimensional inverse FFT.\r\n ifftn : The n-dimensional inverse FFT.\r\n\r\n Notes\r\n -----\r\n If the input parameter `n` is larger than the size of the input, the input\r\n is padded by appending zeros at the end. Even though this is the common\r\n approach, it might lead to surprising results. If a different padding is\r\n desired, it must be performed before calling `ifft`.\r\n\r\n Examples\r\n --------\r\n >>> np.fft.ifft([0, 4, 0, 0])\r\n array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary\r\n\r\n Create and plot a band-limited signal with random phases:\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> t = np.arange(400)\r\n >>> n = np.zeros((400,), dtype=complex)\r\n >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))\r\n >>> s = np.fft.ifft(n)\r\n >>> plt.plot(t, s.real, label='real')\r\n [<matplotlib.lines.Line2D object at ...>]\r\n >>> plt.plot(t, s.imag, '--', label='imaginary')\r\n [<matplotlib.lines.Line2D object at ...>]\r\n >>> plt.legend()\r\n <matplotlib.legend.Legend object at ...>\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = a.shape[axis]\r\n inv_norm = _get_backward_norm(n, norm)\r\n output = _raw_fft(a, n, axis, False, False, inv_norm)\r\n return output\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef rfft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Compute the one-dimensional discrete Fourier Transform for real input.\r\n\r\n This function computes the one-dimensional *n*-point discrete Fourier\r\n Transform (DFT) of a real-valued array by means of an efficient algorithm\r\n called the Fast Fourier Transform (FFT).\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array\r\n n : int, optional\r\n Number of points along transformation axis in the input to use.\r\n If `n` is smaller than the length of the input, the input is cropped.\r\n If it is larger, the input is padded with zeros. If `n` is not given,\r\n the length of the input along the axis specified by `axis` is used.\r\n axis : int, optional\r\n Axis over which to compute the FFT. If not given, the last axis is\r\n used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. 
versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n If `n` is even, the length of the transformed axis is ``(n/2)+1``.\r\n If `n` is odd, the length is ``(n+1)/2``.\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `axis` is not a valid axis of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : For definition of the DFT and conventions used.\r\n irfft : The inverse of `rfft`.\r\n fft : The one-dimensional FFT of general (complex) input.\r\n fftn : The *n*-dimensional FFT.\r\n rfftn : The *n*-dimensional FFT of real input.\r\n\r\n Notes\r\n -----\r\n When the DFT is computed for purely real input, the output is\r\n Hermitian-symmetric, i.e. the negative frequency terms are just the complex\r\n conjugates of the corresponding positive-frequency terms, and the\r\n negative-frequency terms are therefore redundant. This function does not\r\n compute the negative frequency terms, and the length of the transformed\r\n axis of the output is therefore ``n//2 + 1``.\r\n\r\n When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains\r\n the zero-frequency term 0*fs, which is real due to Hermitian symmetry.\r\n\r\n If `n` is even, ``A[-1]`` contains the term representing both positive\r\n and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely\r\n real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains\r\n the largest positive frequency (fs/2*(n-1)/n), and is complex in the\r\n general case.\r\n\r\n If the input `a` contains an imaginary part, it is silently discarded.\r\n\r\n Examples\r\n --------\r\n >>> np.fft.fft([0, 1, 0, 0])\r\n array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary\r\n >>> np.fft.rfft([0, 1, 0, 0])\r\n array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary\r\n\r\n Notice how the final element of the `fft` output is the complex conjugate\r\n of the second element, for real input. For `rfft`, this symmetry is\r\n exploited to compute only the non-negative frequency terms.\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = a.shape[axis]\r\n inv_norm = _get_forward_norm(n, norm)\r\n output = _raw_fft(a, n, axis, True, True, inv_norm)\r\n return output\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef irfft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Computes the inverse of `rfft`.\r\n\r\n This function computes the inverse of the one-dimensional *n*-point\r\n discrete Fourier Transform of real input computed by `rfft`.\r\n In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical\r\n accuracy. (See Notes below for why ``len(a)`` is necessary here.)\r\n\r\n The input is expected to be in the form returned by `rfft`, i.e. the\r\n real zero-frequency term followed by the complex positive frequency terms\r\n in order of increasing frequency. Since the discrete Fourier Transform of\r\n real input is Hermitian-symmetric, the negative frequency terms are taken\r\n to be the complex conjugates of the corresponding positive frequency terms.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n The input array.\r\n n : int, optional\r\n Length of the transformed axis of the output.\r\n For `n` output points, ``n//2+1`` input points are necessary. If the\r\n input is longer than this, it is cropped. If it is shorter than this,\r\n it is padded with zeros. 
If `n` is not given, it is taken to be\r\n ``2*(m-1)`` where ``m`` is the length of the input along the axis\r\n specified by `axis`.\r\n axis : int, optional\r\n Axis over which to compute the inverse FFT. If not given, the last\r\n axis is used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n The length of the transformed axis is `n`, or, if `n` is not given,\r\n ``2*(m-1)`` where ``m`` is the length of the transformed axis of the\r\n input. To get an odd number of output points, `n` must be specified.\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `axis` is not a valid axis of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : For definition of the DFT and conventions used.\r\n rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.\r\n fft : The one-dimensional FFT.\r\n irfft2 : The inverse of the two-dimensional FFT of real input.\r\n irfftn : The inverse of the *n*-dimensional FFT of real input.\r\n\r\n Notes\r\n -----\r\n Returns the real valued `n`-point inverse discrete Fourier transform\r\n of `a`, where `a` contains the non-negative frequency terms of a\r\n Hermitian-symmetric sequence. `n` is the length of the result, not the\r\n input.\r\n\r\n If you specify an `n` such that `a` must be zero-padded or truncated, the\r\n extra/removed values will be added/removed at high frequencies. One can\r\n thus resample a series to `m` points via Fourier interpolation by:\r\n ``a_resamp = irfft(rfft(a), m)``.\r\n\r\n The correct interpretation of the hermitian input depends on the length of\r\n the original data, as given by `n`. This is because each input shape could\r\n correspond to either an odd or even length signal. By default, `irfft`\r\n assumes an even output length which puts the last entry at the Nyquist\r\n frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,\r\n the value is thus treated as purely real. To avoid losing information, the\r\n correct length of the real input **must** be given.\r\n\r\n Examples\r\n --------\r\n >>> np.fft.ifft([1, -1j, -1, 1j])\r\n array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary\r\n >>> np.fft.irfft([1, -1j, -1])\r\n array([0., 1., 0., 0.])\r\n\r\n Notice how the last term in the input to the ordinary `ifft` is the\r\n complex conjugate of the second term, and the output has zero imaginary\r\n part everywhere. When calling `irfft`, the negative frequencies are not\r\n specified, and the output array is purely real.\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = (a.shape[axis] - 1) * 2\r\n inv_norm = _get_backward_norm(n, norm)\r\n output = _raw_fft(a, n, axis, True, False, inv_norm)\r\n return output\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef hfft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Compute the FFT of a signal that has Hermitian symmetry, i.e., a real\r\n spectrum.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n The input array.\r\n n : int, optional\r\n Length of the transformed axis of the output. 
For `n` output\r\n points, ``n//2 + 1`` input points are necessary. If the input is\r\n longer than this, it is cropped. If it is shorter than this, it is\r\n padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``\r\n where ``m`` is the length of the input along the axis specified by\r\n `axis`.\r\n axis : int, optional\r\n Axis over which to compute the FFT. If not given, the last\r\n axis is used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n The length of the transformed axis is `n`, or, if `n` is not given,\r\n ``2*m - 2`` where ``m`` is the length of the transformed axis of\r\n the input. To get an odd number of output points, `n` must be\r\n specified, for instance as ``2*m - 1`` in the typical case,\r\n\r\n Raises\r\n ------\r\n IndexError\r\n If `axis` is not a valid axis of `a`.\r\n\r\n See also\r\n --------\r\n rfft : Compute the one-dimensional FFT for real input.\r\n ihfft : The inverse of `hfft`.\r\n\r\n Notes\r\n -----\r\n `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the\r\n opposite case: here the signal has Hermitian symmetry in the time\r\n domain and is real in the frequency domain. So here it's `hfft` for\r\n which you must supply the length of the result if it is to be odd.\r\n\r\n * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,\r\n * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.\r\n\r\n The correct interpretation of the hermitian input depends on the length of\r\n the original data, as given by `n`. This is because each input shape could\r\n correspond to either an odd or even length signal. By default, `hfft`\r\n assumes an even output length which puts the last entry at the Nyquist\r\n frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,\r\n the value is thus treated as purely real. 
To avoid losing information, the\r\n shape of the full signal **must** be given.\r\n\r\n Examples\r\n --------\r\n >>> signal = np.array([1, 2, 3, 4, 3, 2])\r\n >>> np.fft.fft(signal)\r\n array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary\r\n >>> np.fft.hfft(signal[:4]) # Input first half of signal\r\n array([15., -4., 0., -1., 0., -4.])\r\n >>> np.fft.hfft(signal, 6) # Input entire signal and truncate\r\n array([15., -4., 0., -1., 0., -4.])\r\n\r\n\r\n >>> signal = np.array([[1, 1.j], [-1.j, 2]])\r\n >>> np.conj(signal.T) - signal # check Hermitian symmetry\r\n array([[ 0.-0.j, -0.+0.j], # may vary\r\n [ 0.+0.j, 0.-0.j]])\r\n >>> freq_spectrum = np.fft.hfft(signal)\r\n >>> freq_spectrum\r\n array([[ 1., 1.],\r\n [ 2., -2.]])\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = (a.shape[axis] - 1) * 2\r\n new_norm = _swap_direction(norm)\r\n output = irfft(conjugate(a), n, axis, norm=new_norm)\r\n return output\r\n\r\n\r\n@array_function_dispatch(_fft_dispatcher)\r\ndef ihfft(a, n=None, axis=-1, norm=None):\r\n \"\"\"\r\n Compute the inverse FFT of a signal that has Hermitian symmetry.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array.\r\n n : int, optional\r\n Length of the inverse FFT, the number of points along\r\n transformation axis in the input to use. If `n` is smaller than\r\n the length of the input, the input is cropped. If it is larger,\r\n the input is padded with zeros. If `n` is not given, the length of\r\n the input along the axis specified by `axis` is used.\r\n axis : int, optional\r\n Axis over which to compute the inverse FFT. If not given, the last\r\n axis is used.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axis\r\n indicated by `axis`, or the last one if `axis` is not specified.\r\n The length of the transformed axis is ``n//2 + 1``.\r\n\r\n See also\r\n --------\r\n hfft, irfft\r\n\r\n Notes\r\n -----\r\n `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the\r\n opposite case: here the signal has Hermitian symmetry in the time\r\n domain and is real in the frequency domain. 
So here it's `hfft` for\r\n which you must supply the length of the result if it is to be odd:\r\n\r\n * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,\r\n * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.\r\n\r\n Examples\r\n --------\r\n >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])\r\n >>> np.fft.ifft(spectrum)\r\n array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary\r\n >>> np.fft.ihfft(spectrum)\r\n array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n if n is None:\r\n n = a.shape[axis]\r\n new_norm = _swap_direction(norm)\r\n output = conjugate(rfft(a, n, axis, norm=new_norm))\r\n return output\r\n\r\n\r\ndef _cook_nd_args(a, s=None, axes=None, invreal=0):\r\n if s is None:\r\n shapeless = 1\r\n if axes is None:\r\n s = list(a.shape)\r\n else:\r\n s = take(a.shape, axes)\r\n else:\r\n shapeless = 0\r\n s = list(s)\r\n if axes is None:\r\n axes = list(range(-len(s), 0))\r\n if len(s) != len(axes):\r\n raise ValueError(\"Shape and axes have different lengths.\")\r\n if invreal and shapeless:\r\n s[-1] = (a.shape[axes[-1]] - 1) * 2\r\n return s, axes\r\n\r\n\r\ndef _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):\r\n a = asarray(a)\r\n s, axes = _cook_nd_args(a, s, axes)\r\n itl = list(range(len(axes)))\r\n itl.reverse()\r\n for ii in itl:\r\n a = function(a, n=s[ii], axis=axes[ii], norm=norm)\r\n return a\r\n\r\n\r\ndef _fftn_dispatcher(a, s=None, axes=None, norm=None):\r\n return (a,)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef fftn(a, s=None, axes=None, norm=None):\r\n \"\"\"\r\n Compute the N-dimensional discrete Fourier Transform.\r\n\r\n This function computes the *N*-dimensional discrete Fourier Transform over\r\n any number of axes in an *M*-dimensional array by means of the Fast Fourier\r\n Transform (FFT).\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex.\r\n s : sequence of ints, optional\r\n Shape (length of each transformed axis) of the output\r\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\r\n This corresponds to ``n`` for ``fft(x, n)``.\r\n Along any axis, if the given shape is smaller than that of the input,\r\n the input is cropped. If it is larger, the input is padded with zeros.\r\n if `s` is not given, the shape of the input along the axes specified\r\n by `axes` is used.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the FFT. If not given, the last ``len(s)``\r\n axes are used, or all axes if `s` is also not specified.\r\n Repeated indices in `axes` means that the transform over that axis is\r\n performed multiple times.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. 
versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or by a combination of `s` and `a`,\r\n as explained in the parameters section above.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\r\n and conventions used.\r\n ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.\r\n fft : The one-dimensional FFT, with definitions and conventions used.\r\n rfftn : The *n*-dimensional FFT of real input.\r\n fft2 : The two-dimensional FFT.\r\n fftshift : Shifts zero-frequency terms to centre of array\r\n\r\n Notes\r\n -----\r\n The output, analogously to `fft`, contains the term for zero frequency in\r\n the low-order corner of all axes, the positive frequency terms in the\r\n first half of all axes, the term for the Nyquist frequency in the middle\r\n of all axes and the negative frequency terms in the second half of all\r\n axes, in order of decreasingly negative frequency.\r\n\r\n See `numpy.fft` for details, definitions and conventions used.\r\n\r\n Examples\r\n --------\r\n >>> a = np.mgrid[:3, :3, :3][0]\r\n >>> np.fft.fftn(a, axes=(1, 2))\r\n array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary\r\n [ 0.+0.j, 0.+0.j, 0.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\r\n [[ 9.+0.j, 0.+0.j, 0.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\r\n [[18.+0.j, 0.+0.j, 0.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j]]])\r\n >>> np.fft.fftn(a, (2, 2), axes=(0, 1))\r\n array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary\r\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\r\n [[-2.+0.j, -2.+0.j, -2.+0.j],\r\n [ 0.+0.j, 0.+0.j, 0.+0.j]]])\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,\r\n ... 2 * np.pi * np.arange(200) / 34)\r\n >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)\r\n >>> FS = np.fft.fftn(S)\r\n >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))\r\n <matplotlib.image.AxesImage object at 0x...>\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n return _raw_fftnd(a, s, axes, fft, norm)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef ifftn(a, s=None, axes=None, norm=None):\r\n \"\"\"\r\n Compute the N-dimensional inverse discrete Fourier Transform.\r\n\r\n This function computes the inverse of the N-dimensional discrete\r\n Fourier Transform over any number of axes in an M-dimensional array by\r\n means of the Fast Fourier Transform (FFT). In other words,\r\n ``ifftn(fftn(a)) == a`` to within numerical accuracy.\r\n For a description of the definitions and conventions used, see `numpy.fft`.\r\n\r\n The input, analogously to `ifft`, should be ordered in the same way as is\r\n returned by `fftn`, i.e. 
it should have the term for zero frequency\r\n in all axes in the low-order corner, the positive frequency terms in the\r\n first half of all axes, the term for the Nyquist frequency in the middle\r\n of all axes and the negative frequency terms in the second half of all\r\n axes, in order of decreasingly negative frequency.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex.\r\n s : sequence of ints, optional\r\n Shape (length of each transformed axis) of the output\r\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\r\n This corresponds to ``n`` for ``ifft(x, n)``.\r\n Along any axis, if the given shape is smaller than that of the input,\r\n the input is cropped. If it is larger, the input is padded with zeros.\r\n if `s` is not given, the shape of the input along the axes specified\r\n by `axes` is used. See notes for issue on `ifft` zero padding.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the IFFT. If not given, the last ``len(s)``\r\n axes are used, or all axes if `s` is also not specified.\r\n Repeated indices in `axes` means that the inverse transform over that\r\n axis is performed multiple times.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or by a combination of `s` or `a`,\r\n as explained in the parameters section above.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\r\n and conventions used.\r\n fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.\r\n ifft : The one-dimensional inverse FFT.\r\n ifft2 : The two-dimensional inverse FFT.\r\n ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning\r\n of array.\r\n\r\n Notes\r\n -----\r\n See `numpy.fft` for definitions and conventions used.\r\n\r\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\r\n the input along the specified dimension. Although this is the common\r\n approach, it might lead to surprising results. 
If another form of zero\r\n padding is desired, it must be performed before `ifftn` is called.\r\n\r\n Examples\r\n --------\r\n >>> a = np.eye(4)\r\n >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))\r\n array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary\r\n [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],\r\n [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\r\n [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])\r\n\r\n\r\n Create and plot an image with band-limited frequency content:\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> n = np.zeros((200,200), dtype=complex)\r\n >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))\r\n >>> im = np.fft.ifftn(n).real\r\n >>> plt.imshow(im)\r\n <matplotlib.image.AxesImage object at 0x...>\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n return _raw_fftnd(a, s, axes, ifft, norm)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef fft2(a, s=None, axes=(-2, -1), norm=None):\r\n \"\"\"\r\n Compute the 2-dimensional discrete Fourier Transform.\r\n\r\n This function computes the *n*-dimensional discrete Fourier Transform\r\n over any axes in an *M*-dimensional array by means of the\r\n Fast Fourier Transform (FFT). By default, the transform is computed over\r\n the last two axes of the input array, i.e., a 2-dimensional FFT.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex\r\n s : sequence of ints, optional\r\n Shape (length of each transformed axis) of the output\r\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\r\n This corresponds to ``n`` for ``fft(x, n)``.\r\n Along each axis, if the given shape is smaller than that of the input,\r\n the input is cropped. If it is larger, the input is padded with zeros.\r\n if `s` is not given, the shape of the input along the axes specified\r\n by `axes` is used.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the FFT. If not given, the last two\r\n axes are used. A repeated index in `axes` means the transform over\r\n that axis is performed multiple times. A one-element sequence means\r\n that a one-dimensional FFT is performed.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. 
versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or the last two axes if `axes` is not given.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length, or `axes` not given and\r\n ``len(s) != 2``.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\r\n and conventions used.\r\n ifft2 : The inverse two-dimensional FFT.\r\n fft : The one-dimensional FFT.\r\n fftn : The *n*-dimensional FFT.\r\n fftshift : Shifts zero-frequency terms to the center of the array.\r\n For two-dimensional input, swaps first and third quadrants, and second\r\n and fourth quadrants.\r\n\r\n Notes\r\n -----\r\n `fft2` is just `fftn` with a different default for `axes`.\r\n\r\n The output, analogously to `fft`, contains the term for zero frequency in\r\n the low-order corner of the transformed axes, the positive frequency terms\r\n in the first half of these axes, the term for the Nyquist frequency in the\r\n middle of the axes and the negative frequency terms in the second half of\r\n the axes, in order of decreasingly negative frequency.\r\n\r\n See `fftn` for details and a plotting example, and `numpy.fft` for\r\n definitions and conventions used.\r\n\r\n\r\n Examples\r\n --------\r\n >>> a = np.mgrid[:5, :5][0]\r\n >>> np.fft.fft2(a)\r\n array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary\r\n 0. +0.j , 0. +0.j ],\r\n [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,\r\n 0. +0.j , 0. +0.j ],\r\n [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,\r\n 0. +0.j , 0. +0.j ],\r\n [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,\r\n 0. +0.j , 0. +0.j ],\r\n [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,\r\n 0. +0.j , 0. +0.j ]])\r\n\r\n \"\"\"\r\n return _raw_fftnd(a, s, axes, fft, norm)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef ifft2(a, s=None, axes=(-2, -1), norm=None):\r\n \"\"\"\r\n Compute the 2-dimensional inverse discrete Fourier Transform.\r\n\r\n This function computes the inverse of the 2-dimensional discrete Fourier\r\n Transform over any number of axes in an M-dimensional array by means of\r\n the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``\r\n to within numerical accuracy. By default, the inverse transform is\r\n computed over the last two axes of the input array.\r\n\r\n The input, analogously to `ifft`, should be ordered in the same way as is\r\n returned by `fft2`, i.e. it should have the term for zero frequency\r\n in the low-order corner of the two axes, the positive frequency terms in\r\n the first half of these axes, the term for the Nyquist frequency in the\r\n middle of the axes and the negative frequency terms in the second half of\r\n both axes, in order of decreasingly negative frequency.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, can be complex.\r\n s : sequence of ints, optional\r\n Shape (length of each axis) of the output (``s[0]`` refers to axis 0,\r\n ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.\r\n Along each axis, if the given shape is smaller than that of the input,\r\n the input is cropped. If it is larger, the input is padded with zeros.\r\n if `s` is not given, the shape of the input along the axes specified\r\n by `axes` is used. 
See notes for issue on `ifft` zero padding.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the FFT. If not given, the last two\r\n axes are used. A repeated index in `axes` means the transform over\r\n that axis is performed multiple times. A one-element sequence means\r\n that a one-dimensional FFT is performed.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or the last two axes if `axes` is not given.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length, or `axes` not given and\r\n ``len(s) != 2``.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\r\n and conventions used.\r\n fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.\r\n ifftn : The inverse of the *n*-dimensional FFT.\r\n fft : The one-dimensional FFT.\r\n ifft : The one-dimensional inverse FFT.\r\n\r\n Notes\r\n -----\r\n `ifft2` is just `ifftn` with a different default for `axes`.\r\n\r\n See `ifftn` for details and a plotting example, and `numpy.fft` for\r\n definition and conventions used.\r\n\r\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\r\n the input along the specified dimension. Although this is the common\r\n approach, it might lead to surprising results. If another form of zero\r\n padding is desired, it must be performed before `ifft2` is called.\r\n\r\n Examples\r\n --------\r\n >>> a = 4 * np.eye(4)\r\n >>> np.fft.ifft2(a)\r\n array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary\r\n [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],\r\n [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\r\n [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])\r\n\r\n \"\"\"\r\n return _raw_fftnd(a, s, axes, ifft, norm)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef rfftn(a, s=None, axes=None, norm=None):\r\n \"\"\"\r\n Compute the N-dimensional discrete Fourier Transform for real input.\r\n\r\n This function computes the N-dimensional discrete Fourier Transform over\r\n any number of axes in an M-dimensional real array by means of the Fast\r\n Fourier Transform (FFT). By default, all axes are transformed, with the\r\n real transform performed over the last axis, while the remaining\r\n transforms are complex.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array, taken to be real.\r\n s : sequence of ints, optional\r\n Shape (length along each transformed axis) to use from the input.\r\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\r\n The final element of `s` corresponds to `n` for ``rfft(x, n)``, while\r\n for the remaining axes, it corresponds to `n` for ``fft(x, n)``.\r\n Along any axis, if the given shape is smaller than that of the input,\r\n the input is cropped. If it is larger, the input is padded with zeros.\r\n if `s` is not given, the shape of the input along the axes specified\r\n by `axes` is used.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the FFT. 
If not given, the last ``len(s)``\r\n axes are used, or all axes if `s` is also not specified.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : complex ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or by a combination of `s` and `a`,\r\n as explained in the parameters section above.\r\n The length of the last axis transformed will be ``s[-1]//2+1``,\r\n while the remaining transformed axes will have lengths according to\r\n `s`, or unchanged from the input.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT\r\n of real input.\r\n fft : The one-dimensional FFT, with definitions and conventions used.\r\n rfft : The one-dimensional FFT of real input.\r\n fftn : The n-dimensional FFT.\r\n rfft2 : The two-dimensional FFT of real input.\r\n\r\n Notes\r\n -----\r\n The transform for real input is performed over the last transformation\r\n axis, as by `rfft`, then the transform over the remaining axes is\r\n performed as by `fftn`. The order of the output is as for `rfft` for the\r\n final transformation axis, and as for `fftn` for the remaining\r\n transformation axes.\r\n\r\n See `fft` for details, definitions and conventions used.\r\n\r\n Examples\r\n --------\r\n >>> a = np.ones((2, 2, 2))\r\n >>> np.fft.rfftn(a)\r\n array([[[8.+0.j, 0.+0.j], # may vary\r\n [0.+0.j, 0.+0.j]],\r\n [[0.+0.j, 0.+0.j],\r\n [0.+0.j, 0.+0.j]]])\r\n\r\n >>> np.fft.rfftn(a, axes=(2, 0))\r\n array([[[4.+0.j, 0.+0.j], # may vary\r\n [4.+0.j, 0.+0.j]],\r\n [[0.+0.j, 0.+0.j],\r\n [0.+0.j, 0.+0.j]]])\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n s, axes = _cook_nd_args(a, s, axes)\r\n a = rfft(a, s[-1], axes[-1], norm)\r\n for ii in range(len(axes)-1):\r\n a = fft(a, s[ii], axes[ii], norm)\r\n return a\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef rfft2(a, s=None, axes=(-2, -1), norm=None):\r\n \"\"\"\r\n Compute the 2-dimensional FFT of a real array.\r\n\r\n Parameters\r\n ----------\r\n a : array\r\n Input array, taken to be real.\r\n s : sequence of ints, optional\r\n Shape of the FFT.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the FFT.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The result of the real 2-D FFT.\r\n\r\n See Also\r\n --------\r\n rfftn : Compute the N-dimensional discrete Fourier Transform for real\r\n input.\r\n\r\n Notes\r\n -----\r\n This is really just `rfftn` with different default behavior.\r\n For more details see `rfftn`.\r\n\r\n Examples\r\n --------\r\n >>> a = np.mgrid[:5, :5][0]\r\n >>> np.fft.rfft2(a)\r\n array([[ 50. +0.j , 0. +0.j , 0. 
+0.j ],\r\n [-12.5+17.20477401j, 0. +0.j , 0. +0.j ],\r\n [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ],\r\n [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ],\r\n [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])\r\n \"\"\"\r\n return rfftn(a, s, axes, norm)\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef irfftn(a, s=None, axes=None, norm=None):\r\n \"\"\"\r\n Computes the inverse of `rfftn`.\r\n\r\n This function computes the inverse of the N-dimensional discrete\r\n Fourier Transform for real input over any number of axes in an\r\n M-dimensional array by means of the Fast Fourier Transform (FFT). In\r\n other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical\r\n accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,\r\n and for the same reason.)\r\n\r\n The input should be ordered in the same way as is returned by `rfftn`,\r\n i.e. as for `irfft` for the final transformation axis, and as for `ifftn`\r\n along all the other axes.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n Input array.\r\n s : sequence of ints, optional\r\n Shape (length of each transformed axis) of the output\r\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the\r\n number of input points used along this axis, except for the last axis,\r\n where ``s[-1]//2+1`` points of the input are used.\r\n Along any axis, if the shape indicated by `s` is smaller than that of\r\n the input, the input is cropped. If it is larger, the input is padded\r\n with zeros. If `s` is not given, the shape of the input along the axes\r\n specified by axes is used. Except for the last axis which is taken to\r\n be ``2*(m-1)`` where ``m`` is the length of the input along that axis.\r\n axes : sequence of ints, optional\r\n Axes over which to compute the inverse FFT. If not given, the last\r\n `len(s)` axes are used, or all axes if `s` is also not specified.\r\n Repeated indices in `axes` means that the inverse transform over that\r\n axis is performed multiple times.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The truncated or zero-padded input, transformed along the axes\r\n indicated by `axes`, or by a combination of `s` or `a`,\r\n as explained in the parameters section above.\r\n The length of each transformed axis is as given by the corresponding\r\n element of `s`, or the length of the input in every axis except for the\r\n last one if `s` is not given. In the final transformed axis the length\r\n of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the\r\n length of the final transformed axis of the input. 
To get an odd\r\n number of output points in the final axis, `s` must be specified.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `s` and `axes` have different length.\r\n IndexError\r\n If an element of `axes` is larger than than the number of axes of `a`.\r\n\r\n See Also\r\n --------\r\n rfftn : The forward n-dimensional FFT of real input,\r\n of which `ifftn` is the inverse.\r\n fft : The one-dimensional FFT, with definitions and conventions used.\r\n irfft : The inverse of the one-dimensional FFT of real input.\r\n irfft2 : The inverse of the two-dimensional FFT of real input.\r\n\r\n Notes\r\n -----\r\n See `fft` for definitions and conventions used.\r\n\r\n See `rfft` for definitions and conventions used for real input.\r\n\r\n The correct interpretation of the hermitian input depends on the shape of\r\n the original data, as given by `s`. This is because each input shape could\r\n correspond to either an odd or even length signal. By default, `irfftn`\r\n assumes an even output length which puts the last entry at the Nyquist\r\n frequency; aliasing with its symmetric counterpart. When performing the\r\n final complex to real transform, the last value is thus treated as purely\r\n real. To avoid losing information, the correct shape of the real input\r\n **must** be given.\r\n\r\n Examples\r\n --------\r\n >>> a = np.zeros((3, 2, 2))\r\n >>> a[0, 0, 0] = 3 * 2 * 2\r\n >>> np.fft.irfftn(a)\r\n array([[[1., 1.],\r\n [1., 1.]],\r\n [[1., 1.],\r\n [1., 1.]],\r\n [[1., 1.],\r\n [1., 1.]]])\r\n\r\n \"\"\"\r\n a = asarray(a)\r\n s, axes = _cook_nd_args(a, s, axes, invreal=1)\r\n for ii in range(len(axes)-1):\r\n a = ifft(a, s[ii], axes[ii], norm)\r\n a = irfft(a, s[-1], axes[-1], norm)\r\n return a\r\n\r\n\r\n@array_function_dispatch(_fftn_dispatcher)\r\ndef irfft2(a, s=None, axes=(-2, -1), norm=None):\r\n \"\"\"\r\n Computes the inverse of `rfft2`.\r\n\r\n Parameters\r\n ----------\r\n a : array_like\r\n The input array\r\n s : sequence of ints, optional\r\n Shape of the real output to the inverse FFT.\r\n axes : sequence of ints, optional\r\n The axes over which to compute the inverse fft.\r\n Default is the last two axes.\r\n norm : {\"backward\", \"ortho\", \"forward\"}, optional\r\n .. versionadded:: 1.10.0\r\n\r\n Normalization mode (see `numpy.fft`). Default is \"backward\".\r\n Indicates which direction of the forward/backward pair of transforms\r\n is scaled and with what normalization factor.\r\n\r\n .. versionadded:: 1.20.0\r\n\r\n The \"backward\", \"forward\" values were added.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n The result of the inverse real 2-D FFT.\r\n\r\n See Also\r\n --------\r\n rfft2 : The forward two-dimensional FFT of real input,\r\n of which `irfft2` is the inverse.\r\n rfft : The one-dimensional FFT for real input.\r\n irfft : The inverse of the one-dimensional FFT of real input.\r\n irfftn : Compute the inverse of the N-dimensional FFT of real input.\r\n\r\n Notes\r\n -----\r\n This is really `irfftn` with different defaults.\r\n For more details see `irfftn`.\r\n\r\n Examples\r\n --------\r\n >>> a = np.mgrid[:5, :5][0]\r\n >>> A = np.fft.rfft2(a)\r\n >>> np.fft.irfft2(A, s=a.shape)\r\n array([[0., 0., 0., 0., 0.],\r\n [1., 1., 1., 1., 1.],\r\n [2., 2., 2., 2., 2.],\r\n [3., 3., 3., 3., 3.],\r\n [4., 4., 4., 4., 4.]])\r\n \"\"\"\r\n return irfftn(a, s, axes, norm)\r\n",
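The real-FFT docstrings above repeatedly stress that inverting a real-input transform is ambiguous unless the original length (or shape, for the n-dimensional variants) is supplied. The short sketch below is not part of the numpy source reproduced above; it only uses the public `np.fft` API to show the ambiguity concretely: a length-4 and a length-5 signal produce spectra of the same size, so `irfft` can only recover the odd-length signal when `n` is given.

import numpy as np

x4 = np.array([1.0, 2.0, 3.0, 4.0])       # even length: 4
x5 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])  # odd length: 5

s4 = np.fft.rfft(x4)   # 4//2 + 1 = 3 complex bins
s5 = np.fft.rfft(x5)   # 5//2 + 1 = 3 complex bins -- same spectrum length

# With no `n`, irfft assumes an even output length 2*(m-1) = 4 ...
assert np.allclose(np.fft.irfft(s4), x4)   # even case round-trips
assert np.fft.irfft(s5).shape == (4,)      # odd case comes back too short

# ... so the true length must be passed to recover the odd-length signal.
assert np.allclose(np.fft.irfft(s5, n=5), x5)

# The same rule applies per axis for the n-dimensional variants.
a = np.arange(15.0).reshape(3, 5)
assert np.allclose(np.fft.irfftn(np.fft.rfftn(a), s=a.shape), a)

The `norm` keyword follows the same pairing described in the docstrings: `np.fft.irfft(np.fft.rfft(x4, norm="ortho"), norm="ortho")` also round-trips, since "ortho" splits the normalization factor symmetrically between the forward and inverse transforms.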
"import itertools\r\nimport pytest\r\n\r\nimport numpy as np\r\nfrom numpy.core._multiarray_tests import solve_diophantine, internal_overlap\r\nfrom numpy.core import _umath_tests\r\nfrom numpy.lib.stride_tricks import as_strided\r\nfrom numpy.testing import (\r\n assert_, assert_raises, assert_equal, assert_array_equal\r\n )\r\n\r\n\r\nndims = 2\r\nsize = 10\r\nshape = tuple([size] * ndims)\r\n\r\nMAY_SHARE_BOUNDS = 0\r\nMAY_SHARE_EXACT = -1\r\n\r\n\r\ndef _indices_for_nelems(nelems):\r\n \"\"\"Returns slices of length nelems, from start onwards, in direction sign.\"\"\"\r\n\r\n if nelems == 0:\r\n return [size // 2] # int index\r\n\r\n res = []\r\n for step in (1, 2):\r\n for sign in (-1, 1):\r\n start = size // 2 - nelems * step * sign // 2\r\n stop = start + nelems * step * sign\r\n res.append(slice(start, stop, step * sign))\r\n\r\n return res\r\n\r\n\r\ndef _indices_for_axis():\r\n \"\"\"Returns (src, dst) pairs of indices.\"\"\"\r\n\r\n res = []\r\n for nelems in (0, 2, 3):\r\n ind = _indices_for_nelems(nelems)\r\n res.extend(itertools.product(ind, ind)) # all assignments of size \"nelems\"\r\n\r\n return res\r\n\r\n\r\ndef _indices(ndims):\r\n \"\"\"Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs.\"\"\"\r\n\r\n ind = _indices_for_axis()\r\n return itertools.product(ind, repeat=ndims)\r\n\r\n\r\ndef _check_assignment(srcidx, dstidx):\r\n \"\"\"Check assignment arr[dstidx] = arr[srcidx] works.\"\"\"\r\n\r\n arr = np.arange(np.product(shape)).reshape(shape)\r\n\r\n cpy = arr.copy()\r\n\r\n cpy[dstidx] = arr[srcidx]\r\n arr[dstidx] = arr[srcidx]\r\n\r\n assert_(np.all(arr == cpy),\r\n 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))\r\n\r\n\r\ndef test_overlapping_assignments():\r\n # Test automatically generated assignments which overlap in memory.\r\n\r\n inds = _indices(ndims)\r\n\r\n for ind in inds:\r\n srcidx = tuple([a[0] for a in ind])\r\n dstidx = tuple([a[1] for a in ind])\r\n\r\n _check_assignment(srcidx, dstidx)\r\n\r\n\r\[email protected]\r\ndef test_diophantine_fuzz():\r\n # Fuzz test the diophantine solver\r\n rng = np.random.RandomState(1234)\r\n\r\n max_int = np.iinfo(np.intp).max\r\n\r\n for ndim in range(10):\r\n feasible_count = 0\r\n infeasible_count = 0\r\n\r\n min_count = 500//(ndim + 1)\r\n\r\n while min(feasible_count, infeasible_count) < min_count:\r\n # Ensure big and small integer problems\r\n A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6\r\n U_max = rng.randint(0, 11, dtype=np.intp)**6\r\n\r\n A_max = min(max_int, A_max)\r\n U_max = min(max_int-1, U_max)\r\n\r\n A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))\r\n for j in range(ndim))\r\n U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))\r\n for j in range(ndim))\r\n\r\n b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))\r\n b = rng.randint(-1, b_ub+2, dtype=np.intp)\r\n\r\n if ndim == 0 and feasible_count < min_count:\r\n b = 0\r\n\r\n X = solve_diophantine(A, U, b)\r\n\r\n if X is None:\r\n # Check the simplified decision problem agrees\r\n X_simplified = solve_diophantine(A, U, b, simplify=1)\r\n assert_(X_simplified is None, (A, U, b, X_simplified))\r\n\r\n # Check no solution exists (provided the problem is\r\n # small enough so that brute force checking doesn't\r\n # take too long)\r\n ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))\r\n\r\n size = 1\r\n for r in ranges:\r\n size *= len(r)\r\n if size < 100000:\r\n assert_(not any(sum(w) == b for w in itertools.product(*ranges)))\r\n infeasible_count += 1\r\n else:\r\n # Check the simplified 
decision problem agrees\r\n X_simplified = solve_diophantine(A, U, b, simplify=1)\r\n assert_(X_simplified is not None, (A, U, b, X_simplified))\r\n\r\n # Check validity\r\n assert_(sum(a*x for a, x in zip(A, X)) == b)\r\n assert_(all(0 <= x <= ub for x, ub in zip(X, U)))\r\n feasible_count += 1\r\n\r\n\r\ndef test_diophantine_overflow():\r\n # Smoke test integer overflow detection\r\n max_intp = np.iinfo(np.intp).max\r\n max_int64 = np.iinfo(np.int64).max\r\n\r\n if max_int64 <= max_intp:\r\n # Check that the algorithm works internally in 128-bit;\r\n # solving this problem requires large intermediate numbers\r\n A = (max_int64//2, max_int64//2 - 10)\r\n U = (max_int64//2, max_int64//2 - 10)\r\n b = 2*(max_int64//2) - 10\r\n\r\n assert_equal(solve_diophantine(A, U, b), (1, 1))\r\n\r\n\r\ndef check_may_share_memory_exact(a, b):\r\n got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)\r\n\r\n assert_equal(np.may_share_memory(a, b),\r\n np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))\r\n\r\n a.fill(0)\r\n b.fill(0)\r\n a.fill(1)\r\n exact = b.any()\r\n\r\n err_msg = \"\"\r\n if got != exact:\r\n err_msg = \" \" + \"\\n \".join([\r\n \"base_a - base_b = %r\" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),\r\n \"shape_a = %r\" % (a.shape,),\r\n \"shape_b = %r\" % (b.shape,),\r\n \"strides_a = %r\" % (a.strides,),\r\n \"strides_b = %r\" % (b.strides,),\r\n \"size_a = %r\" % (a.size,),\r\n \"size_b = %r\" % (b.size,)\r\n ])\r\n\r\n assert_equal(got, exact, err_msg=err_msg)\r\n\r\n\r\ndef test_may_share_memory_manual():\r\n # Manual test cases for may_share_memory\r\n\r\n # Base arrays\r\n xs0 = [\r\n np.zeros([13, 21, 23, 22], dtype=np.int8),\r\n np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]\r\n ]\r\n\r\n # Generate all negative stride combinations\r\n xs = []\r\n for x in xs0:\r\n for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):\r\n xp = x[ss]\r\n xs.append(xp)\r\n\r\n for x in xs:\r\n # The default is a simple extent check\r\n assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))\r\n assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))\r\n\r\n # Exact checks\r\n check_may_share_memory_exact(x[:,0,:], x[:,1,:])\r\n check_may_share_memory_exact(x[:,::7], x[:,3::3])\r\n\r\n try:\r\n xp = x.ravel()\r\n if xp.flags.owndata:\r\n continue\r\n xp = xp.view(np.int16)\r\n except ValueError:\r\n continue\r\n\r\n # 0-size arrays cannot overlap\r\n check_may_share_memory_exact(x.ravel()[6:6],\r\n xp.reshape(13, 21, 23, 11)[:,::7])\r\n\r\n # Test itemsize is dealt with\r\n check_may_share_memory_exact(x[:,::7],\r\n xp.reshape(13, 21, 23, 11))\r\n check_may_share_memory_exact(x[:,::7],\r\n xp.reshape(13, 21, 23, 11)[:,3::3])\r\n check_may_share_memory_exact(x.ravel()[6:7],\r\n xp.reshape(13, 21, 23, 11)[:,::7])\r\n\r\n # Check unit size\r\n x = np.zeros([1], dtype=np.int8)\r\n check_may_share_memory_exact(x, x)\r\n check_may_share_memory_exact(x, x.copy())\r\n\r\n\r\ndef iter_random_view_pairs(x, same_steps=True, equal_size=False):\r\n rng = np.random.RandomState(1234)\r\n\r\n if equal_size and same_steps:\r\n raise ValueError()\r\n\r\n def random_slice(n, step):\r\n start = rng.randint(0, n+1, dtype=np.intp)\r\n stop = rng.randint(start, n+1, dtype=np.intp)\r\n if rng.randint(0, 2, dtype=np.intp) == 0:\r\n stop, start = start, stop\r\n step *= -1\r\n return slice(start, stop, step)\r\n\r\n def random_slice_fixed_size(n, step, size):\r\n start = rng.randint(0, n+1 - size*step)\r\n stop = start + (size-1)*step + 1\r\n if 
rng.randint(0, 2) == 0:\r\n stop, start = start-1, stop-1\r\n if stop < 0:\r\n stop = None\r\n step *= -1\r\n return slice(start, stop, step)\r\n\r\n # First a few regular views\r\n yield x, x\r\n for j in range(1, 7, 3):\r\n yield x[j:], x[:-j]\r\n yield x[...,j:], x[...,:-j]\r\n\r\n # An array with zero stride internal overlap\r\n strides = list(x.strides)\r\n strides[0] = 0\r\n xp = as_strided(x, shape=x.shape, strides=strides)\r\n yield x, xp\r\n yield xp, xp\r\n\r\n # An array with non-zero stride internal overlap\r\n strides = list(x.strides)\r\n if strides[0] > 1:\r\n strides[0] = 1\r\n xp = as_strided(x, shape=x.shape, strides=strides)\r\n yield x, xp\r\n yield xp, xp\r\n\r\n # Then discontiguous views\r\n while True:\r\n steps = tuple(rng.randint(1, 11, dtype=np.intp)\r\n if rng.randint(0, 5, dtype=np.intp) == 0 else 1\r\n for j in range(x.ndim))\r\n s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))\r\n\r\n t1 = np.arange(x.ndim)\r\n rng.shuffle(t1)\r\n\r\n if equal_size:\r\n t2 = t1\r\n else:\r\n t2 = np.arange(x.ndim)\r\n rng.shuffle(t2)\r\n\r\n a = x[s1]\r\n\r\n if equal_size:\r\n if a.size == 0:\r\n continue\r\n\r\n steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))\r\n if rng.randint(0, 5) == 0 else 1\r\n for p, s, pa in zip(x.shape, s1, a.shape))\r\n s2 = tuple(random_slice_fixed_size(p, s, pa)\r\n for p, s, pa in zip(x.shape, steps2, a.shape))\r\n elif same_steps:\r\n steps2 = steps\r\n else:\r\n steps2 = tuple(rng.randint(1, 11, dtype=np.intp)\r\n if rng.randint(0, 5, dtype=np.intp) == 0 else 1\r\n for j in range(x.ndim))\r\n\r\n if not equal_size:\r\n s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))\r\n\r\n a = a.transpose(t1)\r\n b = x[s2].transpose(t2)\r\n\r\n yield a, b\r\n\r\n\r\ndef check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):\r\n # Check that overlap problems with common strides are solved with\r\n # little work.\r\n x = np.zeros([17,34,71,97], dtype=np.int16)\r\n\r\n feasible = 0\r\n infeasible = 0\r\n\r\n pair_iter = iter_random_view_pairs(x, same_steps)\r\n\r\n while min(feasible, infeasible) < min_count:\r\n a, b = next(pair_iter)\r\n\r\n bounds_overlap = np.may_share_memory(a, b)\r\n may_share_answer = np.may_share_memory(a, b)\r\n easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))\r\n exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)\r\n\r\n if easy_answer != exact_answer:\r\n # assert_equal is slow...\r\n assert_equal(easy_answer, exact_answer)\r\n\r\n if may_share_answer != bounds_overlap:\r\n assert_equal(may_share_answer, bounds_overlap)\r\n\r\n if bounds_overlap:\r\n if exact_answer:\r\n feasible += 1\r\n else:\r\n infeasible += 1\r\n\r\n\r\[email protected]\r\ndef test_may_share_memory_easy_fuzz():\r\n # Check that overlap problems with common strides are always\r\n # solved with little work.\r\n\r\n check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,\r\n same_steps=True,\r\n min_count=2000)\r\n\r\n\r\[email protected]\r\ndef test_may_share_memory_harder_fuzz():\r\n # Overlap problems with not necessarily common strides take more\r\n # work.\r\n #\r\n # The work bound below can't be reduced much. 
Harder problems can\r\n # also exist but not be detected here, as the set of problems\r\n # comes from RNG.\r\n\r\n check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,\r\n same_steps=False,\r\n min_count=2000)\r\n\r\n\r\ndef test_shares_memory_api():\r\n x = np.zeros([4, 5, 6], dtype=np.int8)\r\n\r\n assert_equal(np.shares_memory(x, x), True)\r\n assert_equal(np.shares_memory(x, x.copy()), False)\r\n\r\n a = x[:,::2,::3]\r\n b = x[:,::3,::2]\r\n assert_equal(np.shares_memory(a, b), True)\r\n assert_equal(np.shares_memory(a, b, max_work=None), True)\r\n assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)\r\n\r\n\r\ndef test_may_share_memory_bad_max_work():\r\n x = np.zeros([1])\r\n assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)\r\n assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)\r\n\r\n\r\ndef test_internal_overlap_diophantine():\r\n def check(A, U, exists=None):\r\n X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)\r\n\r\n if exists is None:\r\n exists = (X is not None)\r\n\r\n if X is not None:\r\n assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))\r\n assert_(all(0 <= x <= u for x, u in zip(X, U)))\r\n assert_(any(x != u//2 for x, u in zip(X, U)))\r\n\r\n if exists:\r\n assert_(X is not None, repr(X))\r\n else:\r\n assert_(X is None, repr(X))\r\n\r\n # Smoke tests\r\n check((3, 2), (2*2, 3*2), exists=True)\r\n check((3*2, 2), (15*2, (3-1)*2), exists=False)\r\n\r\n\r\ndef test_internal_overlap_slices():\r\n # Slicing an array never generates internal overlap\r\n\r\n x = np.zeros([17,34,71,97], dtype=np.int16)\r\n\r\n rng = np.random.RandomState(1234)\r\n\r\n def random_slice(n, step):\r\n start = rng.randint(0, n+1, dtype=np.intp)\r\n stop = rng.randint(start, n+1, dtype=np.intp)\r\n if rng.randint(0, 2, dtype=np.intp) == 0:\r\n stop, start = start, stop\r\n step *= -1\r\n return slice(start, stop, step)\r\n\r\n cases = 0\r\n min_count = 5000\r\n\r\n while cases < min_count:\r\n steps = tuple(rng.randint(1, 11, dtype=np.intp)\r\n if rng.randint(0, 5, dtype=np.intp) == 0 else 1\r\n for j in range(x.ndim))\r\n t1 = np.arange(x.ndim)\r\n rng.shuffle(t1)\r\n s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))\r\n a = x[s1].transpose(t1)\r\n\r\n assert_(not internal_overlap(a))\r\n cases += 1\r\n\r\n\r\ndef check_internal_overlap(a, manual_expected=None):\r\n got = internal_overlap(a)\r\n\r\n # Brute-force check\r\n m = set()\r\n ranges = tuple(range(n) for n in a.shape)\r\n for v in itertools.product(*ranges):\r\n offset = sum(s*w for s, w in zip(a.strides, v))\r\n if offset in m:\r\n expected = True\r\n break\r\n else:\r\n m.add(offset)\r\n else:\r\n expected = False\r\n\r\n # Compare\r\n if got != expected:\r\n assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))\r\n if manual_expected is not None and expected != manual_expected:\r\n assert_equal(expected, manual_expected)\r\n return got\r\n\r\n\r\ndef test_internal_overlap_manual():\r\n # Stride tricks can construct arrays with internal overlap\r\n\r\n # We don't care about memory bounds, the array is not\r\n # read/write accessed\r\n x = np.arange(1).astype(np.int8)\r\n\r\n # Check low-dimensional special cases\r\n\r\n check_internal_overlap(x, False) # 1-dim\r\n check_internal_overlap(x.reshape([]), False) # 0-dim\r\n\r\n a = as_strided(x, strides=(3, 4), shape=(4, 4))\r\n check_internal_overlap(a, False)\r\n\r\n a = as_strided(x, strides=(3, 4), shape=(5, 4))\r\n check_internal_overlap(a, 
True)\r\n\r\n a = as_strided(x, strides=(0,), shape=(0,))\r\n check_internal_overlap(a, False)\r\n\r\n a = as_strided(x, strides=(0,), shape=(1,))\r\n check_internal_overlap(a, False)\r\n\r\n a = as_strided(x, strides=(0,), shape=(2,))\r\n check_internal_overlap(a, True)\r\n\r\n a = as_strided(x, strides=(0, -9993), shape=(87, 22))\r\n check_internal_overlap(a, True)\r\n\r\n a = as_strided(x, strides=(0, -9993), shape=(1, 22))\r\n check_internal_overlap(a, False)\r\n\r\n a = as_strided(x, strides=(0, -9993), shape=(0, 22))\r\n check_internal_overlap(a, False)\r\n\r\n\r\ndef test_internal_overlap_fuzz():\r\n # Fuzz check; the brute-force check is fairly slow\r\n\r\n x = np.arange(1).astype(np.int8)\r\n\r\n overlap = 0\r\n no_overlap = 0\r\n min_count = 100\r\n\r\n rng = np.random.RandomState(1234)\r\n\r\n while min(overlap, no_overlap) < min_count:\r\n ndim = rng.randint(1, 4, dtype=np.intp)\r\n\r\n strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)\r\n for j in range(ndim))\r\n shape = tuple(rng.randint(1, 30, dtype=np.intp)\r\n for j in range(ndim))\r\n\r\n a = as_strided(x, strides=strides, shape=shape)\r\n result = check_internal_overlap(a)\r\n\r\n if result:\r\n overlap += 1\r\n else:\r\n no_overlap += 1\r\n\r\n\r\ndef test_non_ndarray_inputs():\r\n # Regression check for gh-5604\r\n\r\n class MyArray:\r\n def __init__(self, data):\r\n self.data = data\r\n\r\n @property\r\n def __array_interface__(self):\r\n return self.data.__array_interface__\r\n\r\n class MyArray2:\r\n def __init__(self, data):\r\n self.data = data\r\n\r\n def __array__(self):\r\n return self.data\r\n\r\n for cls in [MyArray, MyArray2]:\r\n x = np.arange(5)\r\n\r\n assert_(np.may_share_memory(cls(x[::2]), x[1::2]))\r\n assert_(not np.shares_memory(cls(x[::2]), x[1::2]))\r\n\r\n assert_(np.shares_memory(cls(x[1::3]), x[::2]))\r\n assert_(np.may_share_memory(cls(x[1::3]), x[::2]))\r\n\r\n\r\ndef view_element_first_byte(x):\r\n \"\"\"Construct an array viewing the first byte of each element of `x`\"\"\"\r\n from numpy.lib.stride_tricks import DummyArray\r\n interface = dict(x.__array_interface__)\r\n interface['typestr'] = '|b1'\r\n interface['descr'] = [('', '|b1')]\r\n return np.asarray(DummyArray(interface, x))\r\n\r\n\r\ndef assert_copy_equivalent(operation, args, out, **kwargs):\r\n \"\"\"\r\n Check that operation(*args, out=out) produces results\r\n equivalent to out[...] = operation(*args, out=out.copy())\r\n \"\"\"\r\n\r\n kwargs['out'] = out\r\n kwargs2 = dict(kwargs)\r\n kwargs2['out'] = out.copy()\r\n\r\n out_orig = out.copy()\r\n out[...] = operation(*args, **kwargs2)\r\n expected = out.copy()\r\n out[...] 
= out_orig\r\n\r\n got = operation(*args, **kwargs).copy()\r\n\r\n if (got != expected).any():\r\n assert_equal(got, expected)\r\n\r\n\r\nclass TestUFunc:\r\n \"\"\"\r\n Test ufunc call memory overlap handling\r\n \"\"\"\r\n\r\n def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,\r\n count=5000):\r\n shapes = [7, 13, 8, 21, 29, 32]\r\n\r\n rng = np.random.RandomState(1234)\r\n\r\n for ndim in range(1, 6):\r\n x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)\r\n\r\n it = iter_random_view_pairs(x, same_steps=False, equal_size=True)\r\n\r\n min_count = count // (ndim + 1)**2\r\n\r\n overlapping = 0\r\n while overlapping < min_count:\r\n a, b = next(it)\r\n\r\n a_orig = a.copy()\r\n b_orig = b.copy()\r\n\r\n if get_out_axis_size is None:\r\n assert_copy_equivalent(operation, [a], out=b)\r\n\r\n if np.shares_memory(a, b):\r\n overlapping += 1\r\n else:\r\n for axis in itertools.chain(range(ndim), [None]):\r\n a[...] = a_orig\r\n b[...] = b_orig\r\n\r\n # Determine size for reduction axis (None if scalar)\r\n outsize, scalarize = get_out_axis_size(a, b, axis)\r\n if outsize == 'skip':\r\n continue\r\n\r\n # Slice b to get an output array of the correct size\r\n sl = [slice(None)] * ndim\r\n if axis is None:\r\n if outsize is None:\r\n sl = [slice(0, 1)] + [0]*(ndim - 1)\r\n else:\r\n sl = [slice(0, outsize)] + [0]*(ndim - 1)\r\n else:\r\n if outsize is None:\r\n k = b.shape[axis]//2\r\n if ndim == 1:\r\n sl[axis] = slice(k, k + 1)\r\n else:\r\n sl[axis] = k\r\n else:\r\n assert b.shape[axis] >= outsize\r\n sl[axis] = slice(0, outsize)\r\n b_out = b[tuple(sl)]\r\n\r\n if scalarize:\r\n b_out = b_out.reshape([])\r\n\r\n if np.shares_memory(a, b_out):\r\n overlapping += 1\r\n\r\n # Check result\r\n assert_copy_equivalent(operation, [a], out=b_out, axis=axis)\r\n\r\n @pytest.mark.slow\r\n def test_unary_ufunc_call_fuzz(self):\r\n self.check_unary_fuzz(np.invert, None, np.int16)\r\n\r\n @pytest.mark.slow\r\n def test_unary_ufunc_call_complex_fuzz(self):\r\n # Complex typically has a smaller alignment than itemsize\r\n self.check_unary_fuzz(np.negative, None, np.complex128, count=500)\r\n\r\n def test_binary_ufunc_accumulate_fuzz(self):\r\n def get_out_axis_size(a, b, axis):\r\n if axis is None:\r\n if a.ndim == 1:\r\n return a.size, False\r\n else:\r\n return 'skip', False # accumulate doesn't support this\r\n else:\r\n return a.shape[axis], False\r\n\r\n self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,\r\n dtype=np.int16, count=500)\r\n\r\n def test_binary_ufunc_reduce_fuzz(self):\r\n def get_out_axis_size(a, b, axis):\r\n return None, (axis is None or a.ndim == 1)\r\n\r\n self.check_unary_fuzz(np.add.reduce, get_out_axis_size,\r\n dtype=np.int16, count=500)\r\n\r\n def test_binary_ufunc_reduceat_fuzz(self):\r\n def get_out_axis_size(a, b, axis):\r\n if axis is None:\r\n if a.ndim == 1:\r\n return a.size, False\r\n else:\r\n return 'skip', False # reduceat doesn't support this\r\n else:\r\n return a.shape[axis], False\r\n\r\n def do_reduceat(a, out, axis):\r\n if axis is None:\r\n size = len(a)\r\n step = size//len(out)\r\n else:\r\n size = a.shape[axis]\r\n step = a.shape[axis] // out.shape[axis]\r\n idx = np.arange(0, size, step)\r\n return np.add.reduceat(a, idx, out=out, axis=axis)\r\n\r\n self.check_unary_fuzz(do_reduceat, get_out_axis_size,\r\n dtype=np.int16, count=500)\r\n\r\n def test_binary_ufunc_reduceat_manual(self):\r\n def check(ufunc, a, ind, out):\r\n c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())\r\n c2 = ufunc.reduceat(a, ind, 
out=out)\r\n assert_array_equal(c1, c2)\r\n\r\n # Exactly same input/output arrays\r\n a = np.arange(10000, dtype=np.int16)\r\n check(np.add, a, a[::-1].copy(), a)\r\n\r\n # Overlap with index\r\n a = np.arange(10000, dtype=np.int16)\r\n check(np.add, a, a[::-1], a)\r\n\r\n @pytest.mark.slow\r\n def test_unary_gufunc_fuzz(self):\r\n shapes = [7, 13, 8, 21, 29, 32]\r\n gufunc = _umath_tests.euclidean_pdist\r\n\r\n rng = np.random.RandomState(1234)\r\n\r\n for ndim in range(2, 6):\r\n x = rng.rand(*shapes[:ndim])\r\n\r\n it = iter_random_view_pairs(x, same_steps=False, equal_size=True)\r\n\r\n min_count = 500 // (ndim + 1)**2\r\n\r\n overlapping = 0\r\n while overlapping < min_count:\r\n a, b = next(it)\r\n\r\n if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:\r\n continue\r\n\r\n # Ensure the shapes are so that euclidean_pdist is happy\r\n if b.shape[-1] > b.shape[-2]:\r\n b = b[...,0,:]\r\n else:\r\n b = b[...,:,0]\r\n\r\n n = a.shape[-2]\r\n p = n * (n - 1) // 2\r\n if p <= b.shape[-1] and p > 0:\r\n b = b[...,:p]\r\n else:\r\n n = max(2, int(np.sqrt(b.shape[-1]))//2)\r\n p = n * (n - 1) // 2\r\n a = a[...,:n,:]\r\n b = b[...,:p]\r\n\r\n # Call\r\n if np.shares_memory(a, b):\r\n overlapping += 1\r\n\r\n with np.errstate(over='ignore', invalid='ignore'):\r\n assert_copy_equivalent(gufunc, [a], out=b)\r\n\r\n def test_ufunc_at_manual(self):\r\n def check(ufunc, a, ind, b=None):\r\n a0 = a.copy()\r\n if b is None:\r\n ufunc.at(a0, ind.copy())\r\n c1 = a0.copy()\r\n ufunc.at(a, ind)\r\n c2 = a.copy()\r\n else:\r\n ufunc.at(a0, ind.copy(), b.copy())\r\n c1 = a0.copy()\r\n ufunc.at(a, ind, b)\r\n c2 = a.copy()\r\n assert_array_equal(c1, c2)\r\n\r\n # Overlap with index\r\n a = np.arange(10000, dtype=np.int16)\r\n check(np.invert, a[::-1], a)\r\n\r\n # Overlap with second data array\r\n a = np.arange(100, dtype=np.int16)\r\n ind = np.arange(0, 100, 2, dtype=np.int16)\r\n check(np.add, a, ind, a[25:75])\r\n\r\n def test_unary_ufunc_1d_manual(self):\r\n # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)\r\n\r\n def check(a, b):\r\n a_orig = a.copy()\r\n b_orig = b.copy()\r\n\r\n b0 = b.copy()\r\n c1 = ufunc(a, out=b0)\r\n c2 = ufunc(a, out=b)\r\n assert_array_equal(c1, c2)\r\n\r\n # Trigger \"fancy ufunc loop\" code path\r\n mask = view_element_first_byte(b).view(np.bool_)\r\n\r\n a[...] = a_orig\r\n b[...] = b_orig\r\n c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()\r\n\r\n a[...] = a_orig\r\n b[...] = b_orig\r\n c2 = ufunc(a, out=b, where=mask.copy()).copy()\r\n\r\n # Also, mask overlapping with output\r\n a[...] = a_orig\r\n b[...] 
= b_orig\r\n c3 = ufunc(a, out=b, where=mask).copy()\r\n\r\n assert_array_equal(c1, c2)\r\n assert_array_equal(c1, c3)\r\n\r\n dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,\r\n np.float64, np.complex64, np.complex128]\r\n dtypes = [np.dtype(x) for x in dtypes]\r\n\r\n for dtype in dtypes:\r\n if np.issubdtype(dtype, np.integer):\r\n ufunc = np.invert\r\n else:\r\n ufunc = np.reciprocal\r\n\r\n n = 1000\r\n k = 10\r\n indices = [\r\n np.index_exp[:n],\r\n np.index_exp[k:k+n],\r\n np.index_exp[n-1::-1],\r\n np.index_exp[k+n-1:k-1:-1],\r\n np.index_exp[:2*n:2],\r\n np.index_exp[k:k+2*n:2],\r\n np.index_exp[2*n-1::-2],\r\n np.index_exp[k+2*n-1:k-1:-2],\r\n ]\r\n\r\n for xi, yi in itertools.product(indices, indices):\r\n v = np.arange(1, 1 + n*2 + k, dtype=dtype)\r\n x = v[xi]\r\n y = v[yi]\r\n\r\n with np.errstate(all='ignore'):\r\n check(x, y)\r\n\r\n # Scalar cases\r\n check(x[:1], y)\r\n check(x[-1:], y)\r\n check(x[:1].reshape([]), y)\r\n check(x[-1:].reshape([]), y)\r\n\r\n def test_unary_ufunc_where_same(self):\r\n # Check behavior at wheremask overlap\r\n ufunc = np.invert\r\n\r\n def check(a, out, mask):\r\n c1 = ufunc(a, out=out.copy(), where=mask.copy())\r\n c2 = ufunc(a, out=out, where=mask)\r\n assert_array_equal(c1, c2)\r\n\r\n # Check behavior with same input and output arrays\r\n x = np.arange(100).astype(np.bool_)\r\n check(x, x, x)\r\n check(x, x.copy(), x)\r\n check(x, x, x.copy())\r\n\r\n @pytest.mark.slow\r\n def test_binary_ufunc_1d_manual(self):\r\n ufunc = np.add\r\n\r\n def check(a, b, c):\r\n c0 = c.copy()\r\n c1 = ufunc(a, b, out=c0)\r\n c2 = ufunc(a, b, out=c)\r\n assert_array_equal(c1, c2)\r\n\r\n for dtype in [np.int8, np.int16, np.int32, np.int64,\r\n np.float32, np.float64, np.complex64, np.complex128]:\r\n # Check different data dependency orders\r\n\r\n n = 1000\r\n k = 10\r\n\r\n indices = []\r\n for p in [1, 2]:\r\n indices.extend([\r\n np.index_exp[:p*n:p],\r\n np.index_exp[k:k+p*n:p],\r\n np.index_exp[p*n-1::-p],\r\n np.index_exp[k+p*n-1:k-1:-p],\r\n ])\r\n\r\n for x, y, z in itertools.product(indices, indices, indices):\r\n v = np.arange(6*n).astype(dtype)\r\n x = v[x]\r\n y = v[y]\r\n z = v[z]\r\n\r\n check(x, y, z)\r\n\r\n # Scalar cases\r\n check(x[:1], y, z)\r\n check(x[-1:], y, z)\r\n check(x[:1].reshape([]), y, z)\r\n check(x[-1:].reshape([]), y, z)\r\n check(x, y[:1], z)\r\n check(x, y[-1:], z)\r\n check(x, y[:1].reshape([]), z)\r\n check(x, y[-1:].reshape([]), z)\r\n\r\n def test_inplace_op_simple_manual(self):\r\n rng = np.random.RandomState(1234)\r\n x = rng.rand(200, 200) # bigger than bufsize\r\n\r\n x += x.T\r\n assert_array_equal(x - x.T, 0)\r\n",
"\"\"\"\r\nWrapper functions to more user-friendly calling of certain math functions\r\nwhose output data-type is different than the input data-type in certain\r\ndomains of the input.\r\n\r\nFor example, for functions like `log` with branch cuts, the versions in this\r\nmodule provide the mathematically valid answers in the complex plane::\r\n\r\n >>> import math\r\n >>> from numpy.lib import scimath\r\n >>> scimath.log(-math.exp(1)) == (1+1j*math.pi)\r\n True\r\n\r\nSimilarly, `sqrt`, other base logarithms, `power` and trig functions are\r\ncorrectly handled. See their respective docstrings for specific examples.\r\n\r\nFunctions\r\n---------\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n sqrt\r\n log\r\n log2\r\n logn\r\n log10\r\n power\r\n arccos\r\n arcsin\r\n arctanh\r\n\r\n\"\"\"\r\nimport numpy.core.numeric as nx\r\nimport numpy.core.numerictypes as nt\r\nfrom numpy.core.numeric import asarray, any\r\nfrom numpy.core.overrides import array_function_dispatch\r\nfrom numpy.lib.type_check import isreal\r\n\r\n\r\n__all__ = [\r\n 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',\r\n 'arctanh'\r\n ]\r\n\r\n\r\n_ln2 = nx.log(2.0)\r\n\r\n\r\ndef _tocomplex(arr):\r\n \"\"\"Convert its input `arr` to a complex array.\r\n\r\n The input is returned as a complex array of the smallest type that will fit\r\n the original data: types like single, byte, short, etc. become csingle,\r\n while others become cdouble.\r\n\r\n A copy of the input is always made.\r\n\r\n Parameters\r\n ----------\r\n arr : array\r\n\r\n Returns\r\n -------\r\n array\r\n An array with the same input data as the input but in complex form.\r\n\r\n Examples\r\n --------\r\n\r\n First, consider an input of type short:\r\n\r\n >>> a = np.array([1,2,3],np.short)\r\n\r\n >>> ac = np.lib.scimath._tocomplex(a); ac\r\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\r\n\r\n >>> ac.dtype\r\n dtype('complex64')\r\n\r\n If the input is of type double, the output is correspondingly of the\r\n complex double type as well:\r\n\r\n >>> b = np.array([1,2,3],np.double)\r\n\r\n >>> bc = np.lib.scimath._tocomplex(b); bc\r\n array([1.+0.j, 2.+0.j, 3.+0.j])\r\n\r\n >>> bc.dtype\r\n dtype('complex128')\r\n\r\n Note that even if the input was complex to begin with, a copy is still\r\n made, since the astype() method always copies:\r\n\r\n >>> c = np.array([1,2,3],np.csingle)\r\n\r\n >>> cc = np.lib.scimath._tocomplex(c); cc\r\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\r\n\r\n >>> c *= 2; c\r\n array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)\r\n\r\n >>> cc\r\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\r\n \"\"\"\r\n if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,\r\n nt.ushort, nt.csingle)):\r\n return arr.astype(nt.csingle)\r\n else:\r\n return arr.astype(nt.cdouble)\r\n\r\n\r\ndef _fix_real_lt_zero(x):\r\n \"\"\"Convert `x` to complex if it has real, negative components.\r\n\r\n Otherwise, output is just the array version of the input (via asarray).\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n\r\n Returns\r\n -------\r\n array\r\n\r\n Examples\r\n --------\r\n >>> np.lib.scimath._fix_real_lt_zero([1,2])\r\n array([1, 2])\r\n\r\n >>> np.lib.scimath._fix_real_lt_zero([-1,2])\r\n array([-1.+0.j, 2.+0.j])\r\n\r\n \"\"\"\r\n x = asarray(x)\r\n if any(isreal(x) & (x < 0)):\r\n x = _tocomplex(x)\r\n return x\r\n\r\n\r\ndef _fix_int_lt_zero(x):\r\n \"\"\"Convert `x` to double if it has real, negative components.\r\n\r\n Otherwise, output is just the array version of the 
input (via asarray).\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n\r\n Returns\r\n -------\r\n array\r\n\r\n Examples\r\n --------\r\n >>> np.lib.scimath._fix_int_lt_zero([1,2])\r\n array([1, 2])\r\n\r\n >>> np.lib.scimath._fix_int_lt_zero([-1,2])\r\n array([-1., 2.])\r\n \"\"\"\r\n x = asarray(x)\r\n if any(isreal(x) & (x < 0)):\r\n x = x * 1.0\r\n return x\r\n\r\n\r\ndef _fix_real_abs_gt_1(x):\r\n \"\"\"Convert `x` to complex if it has real components x_i with abs(x_i)>1.\r\n\r\n Otherwise, output is just the array version of the input (via asarray).\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n\r\n Returns\r\n -------\r\n array\r\n\r\n Examples\r\n --------\r\n >>> np.lib.scimath._fix_real_abs_gt_1([0,1])\r\n array([0, 1])\r\n\r\n >>> np.lib.scimath._fix_real_abs_gt_1([0,2])\r\n array([0.+0.j, 2.+0.j])\r\n \"\"\"\r\n x = asarray(x)\r\n if any(isreal(x) & (abs(x) > 1)):\r\n x = _tocomplex(x)\r\n return x\r\n\r\n\r\ndef _unary_dispatcher(x):\r\n return (x,)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef sqrt(x):\r\n \"\"\"\r\n Compute the square root of x.\r\n\r\n For negative input elements, a complex value is returned\r\n (unlike `numpy.sqrt` which returns NaN).\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The input value(s).\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The square root of `x`. If `x` was a scalar, so is `out`,\r\n otherwise an array is returned.\r\n\r\n See Also\r\n --------\r\n numpy.sqrt\r\n\r\n Examples\r\n --------\r\n For real, non-negative inputs this works just like `numpy.sqrt`:\r\n\r\n >>> np.lib.scimath.sqrt(1)\r\n 1.0\r\n >>> np.lib.scimath.sqrt([1, 4])\r\n array([1., 2.])\r\n\r\n But it automatically handles negative inputs:\r\n\r\n >>> np.lib.scimath.sqrt(-1)\r\n 1j\r\n >>> np.lib.scimath.sqrt([-1,4])\r\n array([0.+1.j, 2.+0.j])\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n return nx.sqrt(x)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef log(x):\r\n \"\"\"\r\n Compute the natural logarithm of `x`.\r\n\r\n Return the \"principal value\" (for a description of this, see `numpy.log`)\r\n of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``\r\n returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the\r\n complex principle value is returned.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The value(s) whose log is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The log of the `x` value(s). If `x` was a scalar, so is `out`,\r\n otherwise an array is returned.\r\n\r\n See Also\r\n --------\r\n numpy.log\r\n\r\n Notes\r\n -----\r\n For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`\r\n (note, however, that otherwise `numpy.log` and this `log` are identical,\r\n i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,\r\n notably, the complex principle value if ``x.imag != 0``).\r\n\r\n Examples\r\n --------\r\n >>> np.emath.log(np.exp(1))\r\n 1.0\r\n\r\n Negative arguments are handled \"correctly\" (recall that\r\n ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):\r\n\r\n >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)\r\n True\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n return nx.log(x)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef log10(x):\r\n \"\"\"\r\n Compute the logarithm base 10 of `x`.\r\n\r\n Return the \"principal value\" (for a description of this, see\r\n `numpy.log10`) of :math:`log_{10}(x)`. 
For real `x > 0`, this\r\n is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``\r\n returns ``inf``). Otherwise, the complex principle value is returned.\r\n\r\n Parameters\r\n ----------\r\n x : array_like or scalar\r\n The value(s) whose log base 10 is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,\r\n otherwise an array object is returned.\r\n\r\n See Also\r\n --------\r\n numpy.log10\r\n\r\n Notes\r\n -----\r\n For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`\r\n (note, however, that otherwise `numpy.log10` and this `log10` are\r\n identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,\r\n and, notably, the complex principle value if ``x.imag != 0``).\r\n\r\n Examples\r\n --------\r\n\r\n (We set the printing precision so the example can be auto-tested)\r\n\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.emath.log10(10**1)\r\n 1.0\r\n\r\n >>> np.emath.log10([-10**1, -10**2, 10**2])\r\n array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n return nx.log10(x)\r\n\r\n\r\ndef _logn_dispatcher(n, x):\r\n return (n, x,)\r\n\r\n\r\n@array_function_dispatch(_logn_dispatcher)\r\ndef logn(n, x):\r\n \"\"\"\r\n Take log base n of x.\r\n\r\n If `x` contains negative inputs, the answer is computed and returned in the\r\n complex domain.\r\n\r\n Parameters\r\n ----------\r\n n : array_like\r\n The integer base(s) in which the log is taken.\r\n x : array_like\r\n The value(s) whose log base `n` is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The log base `n` of the `x` value(s). If `x` was a scalar, so is\r\n `out`, otherwise an array is returned.\r\n\r\n Examples\r\n --------\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.lib.scimath.logn(2, [4, 8])\r\n array([2., 3.])\r\n >>> np.lib.scimath.logn(2, [-4, -8, 8])\r\n array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n n = _fix_real_lt_zero(n)\r\n return nx.log(x)/nx.log(n)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef log2(x):\r\n \"\"\"\r\n Compute the logarithm base 2 of `x`.\r\n\r\n Return the \"principal value\" (for a description of this, see\r\n `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is\r\n a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns\r\n ``inf``). Otherwise, the complex principle value is returned.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The value(s) whose log base 2 is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The log base 2 of the `x` value(s). 
If `x` was a scalar, so is `out`,\r\n otherwise an array is returned.\r\n\r\n See Also\r\n --------\r\n numpy.log2\r\n\r\n Notes\r\n -----\r\n For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`\r\n (note, however, that otherwise `numpy.log2` and this `log2` are\r\n identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,\r\n and, notably, the complex principle value if ``x.imag != 0``).\r\n\r\n Examples\r\n --------\r\n We set the printing precision so the example can be auto-tested:\r\n\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.emath.log2(8)\r\n 3.0\r\n >>> np.emath.log2([-4, -8, 8])\r\n array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n return nx.log2(x)\r\n\r\n\r\ndef _power_dispatcher(x, p):\r\n return (x, p)\r\n\r\n\r\n@array_function_dispatch(_power_dispatcher)\r\ndef power(x, p):\r\n \"\"\"\r\n Return x to the power p, (x**p).\r\n\r\n If `x` contains negative values, the output is converted to the\r\n complex domain.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The input value(s).\r\n p : array_like of ints\r\n The power(s) to which `x` is raised. If `x` contains multiple values,\r\n `p` has to either be a scalar, or contain the same number of values\r\n as `x`. In the latter case, the result is\r\n ``x[0]**p[0], x[1]**p[1], ...``.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The result of ``x**p``. If `x` and `p` are scalars, so is `out`,\r\n otherwise an array is returned.\r\n\r\n See Also\r\n --------\r\n numpy.power\r\n\r\n Examples\r\n --------\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.lib.scimath.power([2, 4], 2)\r\n array([ 4, 16])\r\n >>> np.lib.scimath.power([2, 4], -2)\r\n array([0.25 , 0.0625])\r\n >>> np.lib.scimath.power([-2, 4], 2)\r\n array([ 4.-0.j, 16.+0.j])\r\n\r\n \"\"\"\r\n x = _fix_real_lt_zero(x)\r\n p = _fix_int_lt_zero(p)\r\n return nx.power(x, p)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef arccos(x):\r\n \"\"\"\r\n Compute the inverse cosine of x.\r\n\r\n Return the \"principal value\" (for a description of this, see\r\n `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that\r\n `abs(x) <= 1`, this is a real number in the closed interval\r\n :math:`[0, \\\\pi]`. Otherwise, the complex principle value is returned.\r\n\r\n Parameters\r\n ----------\r\n x : array_like or scalar\r\n The value(s) whose arccos is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so\r\n is `out`, otherwise an array object is returned.\r\n\r\n See Also\r\n --------\r\n numpy.arccos\r\n\r\n Notes\r\n -----\r\n For an arccos() that returns ``NAN`` when real `x` is not in the\r\n interval ``[-1,1]``, use `numpy.arccos`.\r\n\r\n Examples\r\n --------\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.emath.arccos(1) # a scalar is returned\r\n 0.0\r\n\r\n >>> np.emath.arccos([1,2])\r\n array([0.-0.j , 0.-1.317j])\r\n\r\n \"\"\"\r\n x = _fix_real_abs_gt_1(x)\r\n return nx.arccos(x)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef arcsin(x):\r\n \"\"\"\r\n Compute the inverse sine of x.\r\n\r\n Return the \"principal value\" (for a description of this, see\r\n `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that\r\n `abs(x) <= 1`, this is a real number in the closed interval\r\n :math:`[-\\\\pi/2, \\\\pi/2]`. 
Otherwise, the complex principle value is\r\n returned.\r\n\r\n Parameters\r\n ----------\r\n x : array_like or scalar\r\n The value(s) whose arcsin is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The inverse sine(s) of the `x` value(s). If `x` was a scalar, so\r\n is `out`, otherwise an array object is returned.\r\n\r\n See Also\r\n --------\r\n numpy.arcsin\r\n\r\n Notes\r\n -----\r\n For an arcsin() that returns ``NAN`` when real `x` is not in the\r\n interval ``[-1,1]``, use `numpy.arcsin`.\r\n\r\n Examples\r\n --------\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> np.emath.arcsin(0)\r\n 0.0\r\n\r\n >>> np.emath.arcsin([0,1])\r\n array([0. , 1.5708])\r\n\r\n \"\"\"\r\n x = _fix_real_abs_gt_1(x)\r\n return nx.arcsin(x)\r\n\r\n\r\n@array_function_dispatch(_unary_dispatcher)\r\ndef arctanh(x):\r\n \"\"\"\r\n Compute the inverse hyperbolic tangent of `x`.\r\n\r\n Return the \"principal value\" (for a description of this, see\r\n `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that\r\n ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is\r\n complex, the result is complex. Finally, `x = 1` returns``inf`` and\r\n ``x=-1`` returns ``-inf``.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n The value(s) whose arctanh is (are) required.\r\n\r\n Returns\r\n -------\r\n out : ndarray or scalar\r\n The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was\r\n a scalar so is `out`, otherwise an array is returned.\r\n\r\n\r\n See Also\r\n --------\r\n numpy.arctanh\r\n\r\n Notes\r\n -----\r\n For an arctanh() that returns ``NAN`` when real `x` is not in the\r\n interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does\r\n return +/-inf for ``x = +/-1``).\r\n\r\n Examples\r\n --------\r\n >>> np.set_printoptions(precision=4)\r\n\r\n >>> from numpy.testing import suppress_warnings\r\n >>> with suppress_warnings() as sup:\r\n ... sup.filter(RuntimeWarning)\r\n ... np.emath.arctanh(np.eye(2))\r\n array([[inf, 0.],\r\n [ 0., inf]])\r\n >>> np.emath.arctanh([1j])\r\n array([0.+0.7854j])\r\n\r\n \"\"\"\r\n x = _fix_real_abs_gt_1(x)\r\n return nx.arctanh(x)\r\n"
] | [
[
"numpy.core.swapaxes",
"numpy.core.zeros",
"numpy.core.take",
"numpy.core.conjugate",
"numpy.core.asarray",
"numpy.core.multiarray.normalize_axis_index",
"numpy.core.sqrt"
],
[
"numpy.product",
"numpy.sqrt",
"numpy.issubdtype",
"numpy.dtype",
"numpy.lib.stride_tricks.as_strided",
"numpy.all",
"numpy.iinfo",
"numpy.core._multiarray_tests.solve_diophantine",
"numpy.testing.assert_equal",
"numpy.may_share_memory",
"numpy.arange",
"numpy.core._multiarray_tests.internal_overlap",
"numpy.zeros",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.random.RandomState",
"numpy.add.reduceat",
"numpy.shares_memory",
"numpy.testing.assert_array_equal",
"numpy.lib.stride_tricks.DummyArray"
],
[
"numpy.core.numeric.arccos",
"numpy.lib.type_check.isreal",
"numpy.core.numeric.log",
"numpy.core.numeric.log2",
"numpy.core.numeric.power",
"numpy.core.numeric.log10",
"numpy.core.numeric.asarray",
"numpy.core.numeric.sqrt",
"numpy.core.numeric.arctanh",
"numpy.core.overrides.array_function_dispatch",
"numpy.core.numeric.arcsin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
li-yong/tushare | [
"26da8129fb770e26128b9c2cebc7ef72c9491243"
] | [
"tushare/stock/trading.py"
] | [
"# -*- coding:utf-8 -*- \r\n\"\"\"\r\n交易数据接口 \r\nCreated on 2014/07/31\r\n@author: Jimmy Liu\r\n@group : waditu\r\n@contact: [email protected]\r\n\"\"\"\r\nfrom __future__ import division\r\n\r\nimport time\r\nimport json\r\nimport lxml.html\r\nfrom lxml import etree\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tushare.stock import cons as ct\r\nimport re\r\nfrom pandas.compat import StringIO\r\nfrom tushare.util import dateu as du\r\nfrom tushare.util.formula import MA\r\nimport os\r\ntry:\r\n from urllib.request import urlopen, Request\r\nexcept ImportError:\r\n from urllib2 import urlopen, Request\r\n\r\n\r\ndef get_hist_data(code=None, start=None, end=None,\r\n ktype='D', retry_count=3,\r\n pause=0.001):\r\n \"\"\"\r\n 获取个股历史交易记录\r\n Parameters\r\n ------\r\n code:string\r\n 股票代码 e.g. 600848\r\n start:string\r\n 开始日期 format:YYYY-MM-DD 为空时取到API所提供的最早日期数据\r\n end:string\r\n 结束日期 format:YYYY-MM-DD 为空时取到最近一个交易日数据\r\n ktype:string\r\n 数据类型,D=日k线 W=周 M=月 5=5分钟 15=15分钟 30=30分钟 60=60分钟,默认为D\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数 \r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n return\r\n -------\r\n DataFrame\r\n 属性:日期 ,开盘价, 最高价, 收盘价, 最低价, 成交量, 价格变动 ,涨跌幅,5日均价,10日均价,20日均价,5日均量,10日均量,20日均量,换手率\r\n \"\"\"\r\n symbol = ct._code_to_symbol(code)\r\n url = ''\r\n if ktype.upper() in ct.K_LABELS:\r\n url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],\r\n ct.K_TYPE[ktype.upper()], symbol)\r\n elif ktype in ct.K_MIN_LABELS:\r\n url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],\r\n symbol, ktype)\r\n else:\r\n raise TypeError('ktype input error.')\r\n \r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n request = Request(url)\r\n lines = urlopen(request, timeout = 10).read()\r\n if len(lines) < 15: #no data\r\n return None\r\n except Exception as e:\r\n print(e)\r\n else:\r\n js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)\r\n cols = []\r\n if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):\r\n cols = ct.INX_DAY_PRICE_COLUMNS\r\n else:\r\n cols = ct.DAY_PRICE_COLUMNS\r\n if len(js['record'][0]) == 14:\r\n cols = ct.INX_DAY_PRICE_COLUMNS\r\n df = pd.DataFrame(js['record'], columns=cols)\r\n if ktype.upper() in ['D', 'W', 'M']:\r\n df = df.applymap(lambda x: x.replace(u',', u''))\r\n df[df==''] = 0\r\n for col in cols[1:]:\r\n df[col] = df[col].astype(float)\r\n if start is not None:\r\n df = df[df.date >= start]\r\n if end is not None:\r\n df = df[df.date <= end]\r\n if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):\r\n df = df.drop('turnover', axis=1)\r\n df = df.set_index('date')\r\n df = df.sort_index(ascending = False)\r\n return df\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef _parsing_dayprice_json(types=None, page=1):\r\n \"\"\"\r\n 处理当日行情分页数据,格式为json\r\n Parameters\r\n ------\r\n pageNum:页码\r\n return\r\n -------\r\n DataFrame 当日所有股票交易数据(DataFrame)\r\n \"\"\"\r\n ct._write_console()\r\n request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],\r\n ct.PAGES['jv'], types, page))\r\n text = urlopen(request, timeout=10).read()\r\n if text == 'null':\r\n return None\r\n reg = re.compile(r'\\,(.*?)\\:') \r\n text = reg.sub(r',\"\\1\":', text.decode('gbk') if ct.PY3 else text) \r\n text = text.replace('\"{symbol', '{\"symbol')\r\n text = text.replace('{symbol', '{\"symbol\"')\r\n if ct.PY3:\r\n jstr = json.dumps(text)\r\n else:\r\n jstr = json.dumps(text, encoding='GBK')\r\n js = json.loads(jstr)\r\n df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),\r\n 
columns=ct.DAY_TRADING_COLUMNS)\r\n df = df.drop('symbol', axis=1)\r\n# df = df.ix[df.volume > 0]\r\n return df\r\n\r\n\r\ndef get_tick_data(code=None, date=None, retry_count=3, pause=0.001,\r\n src='sn'):\r\n \"\"\"\r\n 获取分笔数据\r\n Parameters\r\n ------\r\n code:string\r\n 股票代码 e.g. 600848\r\n date:string\r\n 日期 format: YYYY-MM-DD\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数\r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n src : 数据源选择,可输入sn(新浪)、tt(腾讯)、nt(网易),默认sn\r\n return\r\n -------\r\n DataFrame 当日所有股票交易数据(DataFrame)\r\n 属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型\r\n \"\"\"\r\n if (src.strip() not in ct.TICK_SRCS):\r\n print(ct.TICK_SRC_ERROR)\r\n return None\r\n symbol = ct._code_to_symbol(code)\r\n symbol_dgt = ct._code_to_symbol_dgt(code)\r\n datestr = date.replace('-', '')\r\n url = {\r\n ct.TICK_SRCS[0] : ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],\r\n date, symbol),\r\n ct.TICK_SRCS[1] : ct.TICK_PRICE_URL_TT % (ct.P_TYPE['http'], ct.DOMAINS['tt'], ct.PAGES['idx'],\r\n symbol, datestr),\r\n ct.TICK_SRCS[2] : ct.TICK_PRICE_URL_NT % (ct.P_TYPE['http'], ct.DOMAINS['163'], date[0:4], \r\n datestr, symbol_dgt)\r\n }\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n if src == ct.TICK_SRCS[2]:\r\n df = pd.read_excel(url[src])\r\n df.columns = ct.TICK_COLUMNS\r\n else:\r\n re = Request(url[src])\r\n lines = urlopen(re, timeout=10).read()\r\n lines = lines.decode('GBK') \r\n if len(lines) < 20:\r\n return None\r\n df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,\r\n skiprows=[0]) \r\n except Exception as e:\r\n print(e)\r\n else:\r\n return df\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef get_sina_dd(code=None, date=None, vol=400, retry_count=3, pause=0.001):\r\n \"\"\"\r\n 获取sina大单数据\r\n Parameters\r\n ------\r\n code:string\r\n 股票代码 e.g. 600848\r\n date:string\r\n 日期 format:YYYY-MM-DD\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数\r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n return\r\n -------\r\n DataFrame 当日所有股票交易数据(DataFrame)\r\n 属性:股票代码 股票名称 交易时间 价格 成交量 前一笔价格 类型(买、卖、中性盘)\r\n \"\"\"\r\n if code is None or len(code)!=6 or date is None:\r\n return None\r\n symbol = ct._code_to_symbol(code)\r\n vol = vol*100\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n re = Request(ct.SINA_DD % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['sinadd'],\r\n symbol, vol, date))\r\n lines = urlopen(re, timeout=10).read()\r\n lines = lines.decode('GBK') \r\n if len(lines) < 100:\r\n return None\r\n df = pd.read_csv(StringIO(lines), names=ct.SINA_DD_COLS,\r\n skiprows=[0]) \r\n if df is not None:\r\n df['code'] = df['code'].map(lambda x: x[2:])\r\n except Exception as e:\r\n print(e)\r\n else:\r\n return df\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef get_today_ticks(code=None, retry_count=3, pause=0.001):\r\n \"\"\"\r\n 获取当日分笔明细数据\r\n Parameters\r\n ------\r\n code:string\r\n 股票代码 e.g. 
600848\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数\r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n return\r\n -------\r\n DataFrame 当日所有股票交易数据(DataFrame)\r\n 属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型\r\n \"\"\"\r\n if code is None or len(code)!=6 :\r\n return None\r\n symbol = ct._code_to_symbol(code)\r\n date = du.today()\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],\r\n ct.PAGES['jv'], date,\r\n symbol))\r\n data_str = urlopen(request, timeout=10).read()\r\n data_str = data_str.decode('GBK')\r\n data_str = data_str[1:-1]\r\n data_str = eval(data_str, type('Dummy', (dict,), \r\n dict(__getitem__ = lambda s, n:n))())\r\n data_str = json.dumps(data_str)\r\n data_str = json.loads(data_str)\r\n pages = len(data_str['detailPages'])\r\n data = pd.DataFrame()\r\n ct._write_head()\r\n for pNo in range(1, pages+1):\r\n data = data.append(_today_ticks(symbol, date, pNo,\r\n retry_count, pause), ignore_index=True)\r\n except Exception as er:\r\n print(str(er))\r\n else:\r\n return data\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef _today_ticks(symbol, tdate, pageNo, retry_count, pause):\r\n ct._write_console()\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],\r\n ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],\r\n symbol, tdate, pageNo\r\n )) \r\n res = html.xpath('//table[@id=\\\"datatbl\\\"]/tbody/tr')\r\n if ct.PY3:\r\n sarr = [etree.tostring(node).decode('utf-8') for node in res]\r\n else:\r\n sarr = [etree.tostring(node) for node in res]\r\n sarr = ''.join(sarr)\r\n sarr = '<table>%s</table>'%sarr\r\n sarr = sarr.replace('--', '0')\r\n df = pd.read_html(StringIO(sarr), parse_dates=False)[0]\r\n df.columns = ct.TODAY_TICK_COLUMNS\r\n df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))\r\n except Exception as e:\r\n print(e)\r\n else:\r\n return df\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n \r\n \r\ndef get_today_all():\r\n \"\"\"\r\n 一次性获取最近一个日交易日所有股票的交易数据\r\n return\r\n -------\r\n DataFrame\r\n 属性:代码,名称,涨跌幅,现价,开盘价,最高价,最低价,最日收盘价,成交量,换手率,成交额,市盈率,市净率,总市值,流通市值\r\n \"\"\"\r\n ct._write_head()\r\n df = _parsing_dayprice_json('hs_a', 1)\r\n if df is not None:\r\n for i in range(2, ct.PAGE_NUM[1]):\r\n newdf = _parsing_dayprice_json('hs_a', i)\r\n df = df.append(newdf, ignore_index=True)\r\n df = df.append(_parsing_dayprice_json('shfxjs', 1),\r\n ignore_index=True)\r\n return df\r\n\r\n\r\ndef get_realtime_quotes(symbols=None):\r\n \"\"\"\r\n 获取实时交易数据 getting real time quotes data\r\n 用于跟踪交易情况(本次执行的结果-上一次执行的数据)\r\n Parameters\r\n ------\r\n symbols : string, array-like object (list, tuple, Series).\r\n \r\n return\r\n -------\r\n DataFrame 实时交易数据\r\n 属性:0:name,股票名字\r\n 1:open,今日开盘价\r\n 2:pre_close,昨日收盘价\r\n 3:price,当前价格\r\n 4:high,今日最高价\r\n 5:low,今日最低价\r\n 6:bid,竞买价,即“买一”报价\r\n 7:ask,竞卖价,即“卖一”报价\r\n 8:volumn,成交量 maybe you need do volumn/100\r\n 9:amount,成交金额(元 CNY)\r\n 10:b1_v,委买一(笔数 bid volume)\r\n 11:b1_p,委买一(价格 bid price)\r\n 12:b2_v,“买二”\r\n 13:b2_p,“买二”\r\n 14:b3_v,“买三”\r\n 15:b3_p,“买三”\r\n 16:b4_v,“买四”\r\n 17:b4_p,“买四”\r\n 18:b5_v,“买五”\r\n 19:b5_p,“买五”\r\n 20:a1_v,委卖一(笔数 ask volume)\r\n 21:a1_p,委卖一(价格 ask price)\r\n ...\r\n 30:date,日期;\r\n 31:time,时间;\r\n \"\"\"\r\n symbols_list = ''\r\n if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):\r\n for code in symbols:\r\n symbols_list += ct._code_to_symbol(code) + 
','\r\n else:\r\n symbols_list = ct._code_to_symbol(symbols)\r\n \r\n symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list \r\n request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],\r\n _random(), symbols_list))\r\n text = urlopen(request,timeout=10).read()\r\n text = text.decode('GBK')\r\n reg = re.compile(r'\\=\"(.*?)\\\";')\r\n data = reg.findall(text)\r\n regSym = re.compile(r'(?:sh|sz)(.*?)\\=')\r\n syms = regSym.findall(text)\r\n data_list = []\r\n syms_list = []\r\n for index, row in enumerate(data):\r\n if len(row)>1:\r\n data_list.append([astr for astr in row.split(',')])\r\n syms_list.append(syms[index])\r\n if len(syms_list) == 0:\r\n return None\r\n df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)\r\n df = df.drop('s', axis=1)\r\n df['code'] = syms_list\r\n ls = [cls for cls in df.columns if '_v' in cls]\r\n for txt in ls:\r\n df[txt] = df[txt].map(lambda x : x[:-2])\r\n return df\r\n\r\n\r\ndef get_h_data(code, start=None, end=None, autype='qfq',\r\n index=False, retry_count=3, pause=0.001, drop_factor=True):\r\n '''\r\n 获取历史复权数据\r\n Parameters\r\n ------\r\n code:string\r\n 股票代码 e.g. 600848\r\n start:string\r\n 开始日期 format:YYYY-MM-DD 为空时取当前日期\r\n end:string\r\n 结束日期 format:YYYY-MM-DD 为空时取去年今日\r\n autype:string\r\n 复权类型,qfq-前复权 hfq-后复权 None-不复权,默认为qfq\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数 \r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n drop_factor : bool, 默认 True\r\n 是否移除复权因子,在分析过程中可能复权因子意义不大,但是如需要先储存到数据库之后再分析的话,有该项目会更加灵活\r\n return\r\n -------\r\n DataFrame\r\n date 交易日期 (index)\r\n open 开盘价\r\n high 最高价\r\n close 收盘价\r\n low 最低价\r\n volume 成交量\r\n amount 成交金额\r\n '''\r\n \r\n start = du.today_last_year() if start is None else start\r\n end = du.today() if end is None else end\r\n qs = du.get_quarts(start, end)\r\n qt = qs[0]\r\n ct._write_head()\r\n data = _parse_fq_data(_get_index_url(index, code, qt), index,\r\n retry_count, pause)\r\n if data is None:\r\n data = pd.DataFrame()\r\n if len(qs)>1:\r\n for d in range(1, len(qs)):\r\n qt = qs[d]\r\n ct._write_console()\r\n df = _parse_fq_data(_get_index_url(index, code, qt), index,\r\n retry_count, pause)\r\n if df is None: # 可能df为空,退出循环\r\n break\r\n else:\r\n data = data.append(df, ignore_index = True)\r\n if len(data) == 0 or len(data[(data.date >= start) & (data.date <= end)]) == 0:\r\n return pd.DataFrame()\r\n data = data.drop_duplicates('date')\r\n if index:\r\n data = data[(data.date >= start) & (data.date <= end)]\r\n data = data.set_index('date')\r\n data = data.sort_index(ascending = False)\r\n return data\r\n if autype == 'hfq':\r\n if drop_factor:\r\n data = data.drop('factor', axis=1)\r\n data = data[(data.date >= start) & (data.date <= end)]\r\n for label in ['open', 'high', 'close', 'low']:\r\n data[label] = data[label].map(ct.FORMAT)\r\n data[label] = data[label].astype(float)\r\n data = data.set_index('date')\r\n data = data.sort_index(ascending = False)\r\n return data\r\n else:\r\n if autype == 'qfq':\r\n if drop_factor:\r\n data = data.drop('factor', axis = 1)\r\n df = _parase_fq_factor(code, start, end)\r\n df = df.drop_duplicates('date')\r\n df = df.sort_values('date', ascending = False)\r\n firstDate = data.head(1)['date']\r\n frow = df[df.date == firstDate[0]]\r\n rt = get_realtime_quotes(code)\r\n if rt is None:\r\n return pd.DataFrame()\r\n if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):\r\n preClose = float(rt['pre_close'])\r\n else:\r\n if du.is_holiday(du.today()):\r\n preClose = float(rt['price'])\r\n else:\r\n 
if (du.get_hour() > 9) & (du.get_hour() < 18):\r\n preClose = float(rt['pre_close'])\r\n else:\r\n preClose = float(rt['price'])\r\n \r\n rate = float(frow['factor']) / preClose\r\n data = data[(data.date >= start) & (data.date <= end)]\r\n for label in ['open', 'high', 'low', 'close']:\r\n data[label] = data[label] / rate\r\n data[label] = data[label].map(ct.FORMAT)\r\n data[label] = data[label].astype(float)\r\n data = data.set_index('date')\r\n data = data.sort_index(ascending = False)\r\n return data\r\n else:\r\n for label in ['open', 'high', 'close', 'low']:\r\n data[label] = data[label] / data['factor']\r\n if drop_factor:\r\n data = data.drop('factor', axis=1)\r\n data = data[(data.date >= start) & (data.date <= end)]\r\n for label in ['open', 'high', 'close', 'low']:\r\n data[label] = data[label].map(ct.FORMAT)\r\n data = data.set_index('date')\r\n data = data.sort_index(ascending = False)\r\n data = data.astype(float)\r\n return data\r\n\r\n\r\ndef _parase_fq_factor(code, start, end):\r\n symbol = ct._code_to_symbol(code)\r\n request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],\r\n ct.DOMAINS['vsf'], symbol))\r\n text = urlopen(request, timeout=10).read()\r\n text = text[1:len(text)-1]\r\n text = text.decode('utf-8') if ct.PY3 else text\r\n text = text.replace('{_', '{\"')\r\n text = text.replace('total', '\"total\"')\r\n text = text.replace('data', '\"data\"')\r\n text = text.replace(':\"', '\":\"')\r\n text = text.replace('\",_', '\",\"')\r\n text = text.replace('_', '-')\r\n text = json.loads(text)\r\n df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})\r\n df['date'] = df['date'].map(_fun_except) # for null case\r\n if df['date'].dtypes == np.object:\r\n df['date'] = pd.to_datetime(df['date'])\r\n df = df.drop_duplicates('date')\r\n df['factor'] = df['factor'].astype(float)\r\n return df\r\n\r\n\r\ndef _fun_except(x):\r\n if len(x) > 10:\r\n return x[-10:]\r\n else:\r\n return x\r\n\r\n\r\ndef _parse_fq_data(url, index, retry_count, pause):\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n request = Request(url)\r\n text = urlopen(request, timeout=10).read()\r\n text = text.decode('GBK')\r\n html = lxml.html.parse(StringIO(text))\r\n res = html.xpath('//table[@id=\\\"FundHoldSharesTable\\\"]')\r\n if ct.PY3:\r\n sarr = [etree.tostring(node).decode('utf-8') for node in res]\r\n else:\r\n sarr = [etree.tostring(node) for node in res]\r\n sarr = ''.join(sarr)\r\n if sarr == '':\r\n return None\r\n df = pd.read_html(sarr, skiprows = [0, 1])[0]\r\n if len(df) == 0:\r\n return pd.DataFrame()\r\n if index:\r\n df.columns = ct.HIST_FQ_COLS[0:7]\r\n else:\r\n df.columns = ct.HIST_FQ_COLS\r\n if df['date'].dtypes == np.object:\r\n df['date'] = pd.to_datetime(df['date'])\r\n df = df.drop_duplicates('date')\r\n except ValueError as e:\r\n # 时间较早,已经读不到数据\r\n return None\r\n except Exception as e:\r\n print(e)\r\n else:\r\n return df\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef get_index():\r\n \"\"\"\r\n 获取大盘指数行情\r\n return\r\n -------\r\n DataFrame\r\n code:指数代码\r\n name:指数名称\r\n change:涨跌幅\r\n open:开盘价\r\n preclose:昨日收盘价\r\n close:收盘价\r\n high:最高价\r\n low:最低价\r\n volume:成交量(手)\r\n amount:成交金额(亿元)\r\n \"\"\"\r\n request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],\r\n ct.DOMAINS['sinahq']))\r\n text = urlopen(request, timeout=10).read()\r\n text = text.decode('GBK')\r\n text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')\r\n text = text.replace('\";', '').replace('\"', '').replace('=', 
',')\r\n text = '%s%s'%(ct.INDEX_HEADER, text)\r\n df = pd.read_csv(StringIO(text), sep=',', thousands=',')\r\n df['change'] = (df['close'] / df['preclose'] - 1 ) * 100\r\n df['amount'] = df['amount'] / 100000000\r\n df['change'] = df['change'].map(ct.FORMAT)\r\n df['amount'] = df['amount'].map(ct.FORMAT4)\r\n df = df[ct.INDEX_COLS]\r\n df['code'] = df['code'].map(lambda x:str(x).zfill(6))\r\n df['change'] = df['change'].astype(float)\r\n df['amount'] = df['amount'].astype(float)\r\n return df\r\n \r\n\r\ndef _get_index_url(index, code, qt):\r\n if index:\r\n url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],\r\n code, qt[0], qt[1])\r\n else:\r\n url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],\r\n code, qt[0], qt[1])\r\n return url\r\n\r\n\r\ndef get_k_data(code=None, start='', end='',\r\n ktype='D', autype='qfq', \r\n index=False,\r\n retry_count=3,\r\n pause=0.001):\r\n \"\"\"\r\n 获取k线数据\r\n ---------\r\n Parameters:\r\n code:string\r\n 股票代码 e.g. 600848\r\n start:string\r\n 开始日期 format:YYYY-MM-DD 为空时取上市首日\r\n end:string\r\n 结束日期 format:YYYY-MM-DD 为空时取最近一个交易日\r\n autype:string\r\n 复权类型,qfq-前复权 hfq-后复权 None-不复权,默认为qfq\r\n ktype:string\r\n 数据类型,D=日k线 W=周 M=月 5=5分钟 15=15分钟 30=30分钟 60=60分钟,默认为D\r\n retry_count : int, 默认 3\r\n 如遇网络等问题重复执行的次数 \r\n pause : int, 默认 0\r\n 重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题\r\n return\r\n -------\r\n DataFrame\r\n date 交易日期 (index)\r\n open 开盘价\r\n high 最高价\r\n close 收盘价\r\n low 最低价\r\n volume 成交量\r\n amount 成交额\r\n turnoverratio 换手率\r\n code 股票代码\r\n \"\"\"\r\n symbol = ct.INDEX_SYMBOL[code] if index else ct._code_to_symbol(code)\r\n url = ''\r\n dataflag = ''\r\n autype = '' if autype is None else autype\r\n if (start is not None) & (start != ''):\r\n end = du.today() if end is None or end == '' else end\r\n if ktype.upper() in ct.K_LABELS:\r\n fq = autype if autype is not None else ''\r\n if code[:1] in ('1', '5') or index:\r\n fq = ''\r\n kline = '' if autype is None else 'fq'\r\n if (start is None or start == '') & (end is None or end == ''):\r\n urls = [ct.KLINE_TT_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],\r\n kline, fq, symbol, \r\n ct.TT_K_TYPE[ktype.upper()], start, end,\r\n fq, _random(17))]\r\n else:\r\n years = du.tt_dates(start, end)\r\n urls = []\r\n for year in years:\r\n startdate = str(year) + '-01-01'\r\n enddate = str(year+1) + '-12-31'\r\n url = ct.KLINE_TT_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],\r\n kline, fq+str(year), symbol, \r\n ct.TT_K_TYPE[ktype.upper()], startdate, enddate,\r\n fq, _random(17))\r\n urls.append(url)\r\n dataflag = '%s%s'%(fq, ct.TT_K_TYPE[ktype.upper()])\r\n elif ktype in ct.K_MIN_LABELS:\r\n urls = [ct.KLINE_TT_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],\r\n symbol, ktype, ktype,\r\n _random(16))]\r\n dataflag = 'm%s'%ktype\r\n else:\r\n raise TypeError('ktype input error.')\r\n data = pd.DataFrame()\r\n for url in urls:\r\n data = data.append(_get_k_data(url, dataflag, \r\n symbol, code,\r\n index, ktype,\r\n retry_count, pause), \r\n ignore_index=True)\r\n if ktype not in ct.K_MIN_LABELS:\r\n if ((start is not None) & (start != '')) & ((end is not None) & (end != '')):\r\n if data.empty==False: \r\n data = data[(data.date >= start) & (data.date <= end)]\r\n return data\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n \r\n\r\ndef _get_k_data(url, dataflag='',\r\n symbol='',\r\n code = '',\r\n index = False,\r\n ktype = '',\r\n retry_count=3,\r\n pause=0.001):\r\n for _ in range(retry_count):\r\n time.sleep(pause)\r\n try:\r\n request = Request(url)\r\n lines = urlopen(request, timeout = 
10).read()\r\n if len(lines) < 100: #no data\r\n return None\r\n except Exception as e:\r\n print(e)\r\n else:\r\n lines = lines.decode('utf-8') if ct.PY3 else lines\r\n lines = lines.split('=')[1]\r\n reg = re.compile(r',{\"nd.*?}') \r\n lines = re.subn(reg, '', lines) \r\n js = json.loads(lines[0])\r\n dataflag = dataflag if dataflag in list(js['data'][symbol].keys()) else ct.TT_K_TYPE[ktype.upper()]\r\n if len(js['data'][symbol][dataflag]) == 0:\r\n return None\r\n if len(js['data'][symbol][dataflag][0]) == 6:\r\n df = pd.DataFrame(js['data'][symbol][dataflag], \r\n columns = ct.KLINE_TT_COLS_MINS)\r\n else:\r\n df = pd.DataFrame(js['data'][symbol][dataflag], \r\n columns = ct.KLINE_TT_COLS)\r\n df['code'] = symbol if index else code\r\n if ktype in ct.K_MIN_LABELS:\r\n df['date'] = df['date'].map(lambda x: '%s-%s-%s %s:%s'%(x[0:4], x[4:6], \r\n x[6:8], x[8:10], \r\n x[10:12]))\r\n for col in df.columns[1:6]:\r\n df[col] = df[col].astype(float)\r\n return df\r\n\r\ndef get_hists(symbols, start=None, end=None,\r\n ktype='D', retry_count=3,\r\n pause=0.001):\r\n \"\"\"\r\n 批量获取历史行情数据,具体参数和返回数据类型请参考get_hist_data接口\r\n \"\"\"\r\n df = pd.DataFrame()\r\n if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):\r\n for symbol in symbols:\r\n data = get_hist_data(symbol, start=start, end=end,\r\n ktype=ktype, retry_count=retry_count,\r\n pause=pause)\r\n data['code'] = symbol\r\n df = df.append(data, ignore_index=True)\r\n return df\r\n else:\r\n return None\r\n \r\n \r\ndef get_day_all(date=None):\r\n \"\"\"\r\n 获取每日收盘行情\r\n Parameters:\r\n -------------\r\n date:交易日期,格式:YYYY-MM-DD\r\n \r\n Return:\r\n -------------\r\n DataFrame\r\n code 代码, name 名称, p_change 涨幅%,\r\n price 现价, change 涨跌, open 今开, high 最高,\r\n low 最低, preprice 昨收, pe 市盈(动),\r\n volratio 量比, turnover 换手%, range 振幅%%,\r\n volume 总量, selling 内盘, buying 外盘,\r\n amount 总金额, totals 总股本(万), industry 细分行业,\r\n area 地区, floats 流通股本(万), fvalues 流通市值,\r\n abvalues AB股总市值, avgprice 均价, strength 强弱度%,\r\n activity 活跃度, avgturnover 笔换手, attack 攻击波%,\r\n interval3 近3月涨幅 ,interval 近6月涨幅\r\n \"\"\"\r\n wdate = du.last_tddate() if date is None else date\r\n wdate = wdate.replace('-', '')\r\n if wdate < '20170614':\r\n return None\r\n datepre = '' if date is None else wdate[0:4] + wdate[4:6] + '/'\r\n df = pd.read_csv(ct.ALL_DAY_FILE%(datepre, \\\r\n 'hq' if date is None else wdate), \\\r\n dtype={'code':'object'})\r\n return df\r\n \r\n\r\ndef bar(code, conn=None, start_date=None, end_date=None, freq='D', asset='E', \r\n market='',\r\n adj = None,\r\n ma = [],\r\n factors = [],\r\n retry_count = 3):\r\n \"\"\"\r\n BAR数据\r\n Parameters:\r\n ------------\r\n code:证券代码,支持股票,ETF/LOF,期货/期权,港股\r\n con:服务器连接 ,通过ts.api()或者ts.xpi()获得\r\n start_date:开始日期 YYYY-MM-DD/YYYYMMDD\r\n end_date:结束日期 YYYY-MM-DD/YYYYMMDD\r\n freq:支持1/5/15/30/60分钟,周/月/季/年\r\n asset:证券类型 E:股票和交易所基金,INDEX:沪深指数,X:期货/期权/港股/中概美国/中证指数/国际指数\r\n market:市场代码,通过ts.get_markets()获取\r\n adj:复权类型,None不复权,qfq:前复权,hfq:后复权\r\n ma:均线,支持自定义均线频度,如:ma5/ma10/ma20/ma60/maN\r\n factors因子数据,目前支持以下两种:\r\n vr:量比,默认不返回,返回需指定:factor=['vr']\r\n tor:换手率,默认不返回,返回需指定:factor=['tor']\r\n 以上两种都需要:factor=['vr', 'tor']\r\n retry_count:网络重试次数\r\n \r\n Return\r\n ----------\r\n DataFrame\r\n code:代码\r\n open:开盘close/high/low/vol成交量/amount成交额/maN均价/vr量比/tor换手率\r\n \r\n 期货(asset='X')\r\n code/open/close/high/low/avg_price:均价 position:持仓量 vol:成交总量\r\n \"\"\"\r\n code = code.strip().upper()\r\n for _ in range(retry_count):\r\n try:\r\n if conn is None:\r\n 
print(ct.MSG_NOT_CONNECTED)\r\n return None\r\n api, xapi = conn\r\n ktype = freq.strip().upper()\r\n asset = asset.strip().upper()\r\n mkcode = _get_mkcode(code, asset=asset, xapi=xapi) if market == '' else market\r\n if asset in['E', 'INDEX']:\r\n func = getattr(api, ct.ASSET[asset])\r\n else:\r\n ktype = 'XD' if ktype == 'D' else ktype\r\n func = getattr(xapi, ct.ASSET['X'])\r\n if ktype in ct.KTYPE_LOW_COLS:\r\n data = pd.DataFrame()\r\n for i in range(100): \r\n ds = func(ct.KTYPE[ktype], mkcode, code, i * 800, 800)\r\n df = api.to_df(ds)\r\n data = data.append(df) if i == 0 else df.append(data, ignore_index=True)\r\n if len(ds) < 800:\r\n break\r\n data['datetime'] = data['datetime'].apply(lambda x: str(x[0:10]))\r\n if ktype in ct.KTYPE_ARR:\r\n data = pd.DataFrame()\r\n for i in range(100): \r\n ds = func(ct.KTYPE[ktype], mkcode, code, i * 800, 800)\r\n df = api.to_df(ds)\r\n data = data.append(df) if i == 0 else df.append(data, ignore_index=True)\r\n if len(ds) < 800:\r\n break\r\n data['datetime'] = pd.to_datetime(data['datetime'])\r\n data = data.assign(code=str(code)) \\\r\n .set_index('datetime', drop=True, inplace=False) \\\r\n .drop(ct.T_DROP_COLS, axis=1)[ None if start_date == '' else start_date : \r\n None if end_date == '' else end_date]\r\n data = data.sort_index(ascending=False)\r\n if asset in['E', 'INDEX']:\r\n data = data[ct.BAR_E_COLS]\r\n if ktype in ct.KTYPE_ARR:\r\n data['vol'] = data['vol'] / 100\r\n else:\r\n data = data[ct.BAR_X_COLS]\r\n if mkcode in [28, 29, 30, 47, 60]:\r\n data.columns = ct.BAR_X_FUTURE_COLS\r\n data = data[ct.BAR_X_FUTURE_RL_COLS]\r\n else:\r\n data = data.drop(['price', 'position'], axis=1)\r\n data.columns = ct.BAR_X_OTHER_COLS\r\n if asset == 'E':\r\n if adj is not None:\r\n df = factor_adj(code)\r\n if ktype in ct.KTYPE_LOW_COLS: \r\n data = data.merge(df, left_index=True, right_index=True)\r\n data['adj_factor'] = data['adj_factor'].fillna(method='bfill')\r\n else:\r\n def get_val(day):\r\n return df.ix[day]['adj_factor']\r\n data['adj_factor'] = data.index.map(lambda x: get_val(str(x)[0:10]))\r\n for col in ct.BAR_E_COLS[1:5]:\r\n if adj == 'hfq':\r\n data[col] = data[col] * data['adj_factor']\r\n else:\r\n data[col] = data[col] * data['adj_factor'] / float(df['adj_factor'][0])\r\n data[col] = data[col].map(ct.FORMAT)\r\n data = data.drop('adj_factor', axis=1)\r\n if factors is not None and len(factors) >0 :\r\n if 'tor' in factors:\r\n df = factor_shares(code)\r\n if ktype in ct.KTYPE_LOW_COLS: \r\n data = data.merge(df, left_index=True, right_index=True)\r\n data['floats'] = data['floats'].fillna(method='bfill')\r\n else:\r\n def get_val(day):\r\n return df.ix[day]['floats']\r\n data['floats'] = data.index.map(lambda x: get_val(str(x)[0:10]))\r\n data['tor'] = data['vol'] / data['floats'] \r\n data['tor'] = data['tor'].map(ct.FORMAT)\r\n data['tor'] = data['tor'].astype(float)\r\n data = data.drop('floats', axis=1)\r\n if 'vr' in factors:\r\n data['vol5'] = MA(data['vol'], 5)\r\n data['mean'] = data['vol5'].shift(-5)\r\n data['vr'] = (data['vol'] / data['mean']).map(ct.FORMAT)\r\n data['vr'] = data['vr'].astype(float)\r\n data = data.drop(['vol5', 'mean'], axis=1)\r\n if ma is not None and len(ma) > 0:\r\n for a in ma:\r\n if isinstance(a, int):\r\n data['ma%s'%a] = MA(data['close'], a).map(ct.FORMAT).shift(-(a-1))\r\n data['ma%s'%a] = data['ma%s'%a].astype(float)\r\n for col in ['open', 'high', 'low', 'close']:\r\n data[col] = data[col].astype(float)\r\n return data\r\n except Exception as e:\r\n print(e)\r\n else:\r\n return 
data\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\ndef _get_mkcode(code='', asset='E', xapi=None):\r\n mkcode = ''\r\n if asset == 'E':\r\n mkcode = ct._market_code(code)\r\n elif asset == 'INDEX':\r\n mkcode = ct._idx_market_code(code)\r\n else:\r\n if os.path.exists(ct.INST_PLK_F):\r\n mks = pd.read_pickle(ct.INST_PLK_F)\r\n else:\r\n mks = get_instrument(xapi)\r\n mks.to_pickle(ct.INST_PLK_F)\r\n mkcode = mks[mks.code == code]['market'].values[0]\r\n return mkcode\r\n\r\n\r\ndef tick(code, conn=None, date='', asset='E', market='', retry_count = 3):\r\n \"\"\"\r\n tick数据\r\n Parameters:\r\n ------------\r\n code:证券代码,支持股票,ETF/LOF,期货/期权,港股\r\n conn:服务器连接 ,通过ts.api()或者ts.xpi()获得\r\n date:日期\r\n asset:证券品种,E:沪深交易所股票和基金, INDEX:沪深交易所指数, X:其他证券品种,大致如下:\r\n 支持的扩展行情包括(asset='X'):\r\n 郑州商品期权 OZ 大连商品期权 OD 上海商品期权 OS\r\n 上海个股期权 QQ 香港指数 FH 郑州商品 QZ 大连商品 QD 上海期货 QS\r\n 香港主板 KH 香港权证 KR 开放式基金 FU 货币型基金 FB\r\n 招商理财产品 LC 招商货币产品 LB 国际指数 FW 国内宏观指标 HG 中国概念股 CH\r\n 美股知名公司 MG B股转H股 HB 股份转让 SB 股指期货 CZ 香港创业板 KG 香港信托基金 KT\r\n 国债预发行 GY 主力期货合约 MA\r\n 中证指数 ZZ 港股通 GH\r\n market:市场代码,通过ts.get_markets()获取\r\n \r\n Return\r\n ----------\r\n DataFrame\r\n date:日期\r\n time:时间\r\n price:成交价\r\n vol:成交量\r\n type:买卖方向,0-买入 1-卖出 2-集合竞价成交\r\n 期货 0:开仓 1:多开 -1:空开\r\n 期货多一列数据oi_change:增仓数据\r\n\r\n \"\"\"\r\n code = code.strip().upper()\r\n date = int(date.replace('-', ''))\r\n today = int(str(du.today()).replace('-', ''))\r\n for _ in range(retry_count):\r\n try:\r\n if conn is None:\r\n print(ct.MSG_NOT_CONNECTED)\r\n return None\r\n api, xapi = conn\r\n data = pd.DataFrame()\r\n mkcode = _get_mkcode(code, asset=asset, xapi=xapi) if market == '' else market\r\n con = api if asset in['E', 'INDEX'] else xapi\r\n for i in range(200):\r\n if date == today:\r\n ds = con.get_transaction_data(market=mkcode, code=code, start=i * 300, count=300)\r\n else:\r\n ds = con.get_history_transaction_data(market=mkcode, code=code, date=date, start=i * 300, count=300)\r\n df = api.to_df(ds)\r\n data = data.append(df) if i == 0 else df.append(data, ignore_index=True)\r\n if len(ds) < 300:\r\n break\r\n if asset in['E', 'INDEX']:\r\n data['date'] = date\r\n data['date'] = data['date'].map(lambda x: '%s-%s-%s '%(str(x)[0:4], str(x)[4:6], str(x)[6:8]))\r\n data['datetime'] = data['date'] + data['time']\r\n data = data[['datetime', 'price', 'vol', 'buyorsell']]\r\n data.columns = ['datetime', 'price', 'vol', 'type']\r\n else:\r\n if mkcode in [31, 71]:\r\n if date == today:\r\n data = data.drop(['hour', 'minute', 'nature_name', 'zengcang', 'direction', \r\n 'second', 'nature_mark', 'nature_value'], axis=1)\r\n else:\r\n data = data.drop(['hour', 'minute', 'nature_name', 'zengcang', 'direction'], axis=1)\r\n data.loc[data.nature== 512, 'nature' ] = 2\r\n data.loc[data.nature== 256, 'nature' ] = 1\r\n data = data.sort_values('date')\r\n data.columns = ['date', 'price', 'vol', 'type']\r\n elif mkcode in [28, 29, 30, 47, 60]:\r\n if date == today:\r\n data = data.drop(['hour', 'minute', 'nature', 'direction', \r\n 'second', 'nature_mark', 'nature_value'], axis=1)\r\n else:\r\n data = data.drop(['hour', 'minute', 'nature', 'direction'], axis=1)\r\n data.columns = ['date', 'price', 'vol', 'oi_change', 'type']\r\n else:\r\n data = data.drop(['hour', 'minute', 'nature_name', 'zengcang', 'direction', 'nature'], axis=1)\r\n \r\n except Exception as e:\r\n print(e)\r\n else:\r\n return data\r\n\r\n\r\n\r\ndef quotes(symbols, conn=None, asset='E', market=[], retry_count = 3):\r\n \"\"\"\r\n 获取实时快照\r\n Parameters\r\n ------\r\n symbols : string, array-like 
object (list, tuple, Series).\r\n \r\n return\r\n -------\r\n DataFrame 实时快照,5档行情\r\n \"\"\"\r\n for _ in range(retry_count):\r\n try:\r\n if conn is None:\r\n print(ct.MSG_NOT_CONNECTED)\r\n return None\r\n api, xapi = conn\r\n data = pd.DataFrame()\r\n if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):\r\n for code in symbols:\r\n mkcode = _get_mkcode(code, asset=asset, xapi=xapi)\r\n if asset == 'E':\r\n df = api.to_df(api.get_security_quotes([(mkcode, code)]))\r\n elif asset == 'INDEX':\r\n df = api.to_df(api.get_security_quotes([(mkcode, code)]))\r\n else:\r\n df = xapi.to_df(api.get_instrument_quote(mkcode, code))\r\n data = data.append(df)\r\n else:\r\n mkcode = _get_mkcode(symbols, asset=asset, xapi=xapi)\r\n if asset == 'E':\r\n data = api.to_df(api.get_security_quotes([(mkcode, symbols)]))\r\n elif asset == 'INDEX':\r\n data = api.to_df(api.get_security_quotes([(mkcode, symbols)]))\r\n else:\r\n data = xapi.to_df(xapi.get_instrument_quote(mkcode, symbols))\r\n if asset in ['E', 'INDEX']:\r\n data = data.drop(['market', 'active1', 'active2', 'reversed_bytes0', 'reversed_bytes1', 'reversed_bytes2',\r\n 'reversed_bytes3',\r\n 'reversed_bytes4',\r\n 'reversed_bytes5',\r\n 'reversed_bytes6',\r\n 'reversed_bytes7',\r\n 'reversed_bytes8',\r\n 'reversed_bytes9'], axis=1)\r\n else:\r\n data = data.drop(['market'], axis=1)\r\n except Exception as e:\r\n print(e)\r\n else:\r\n return data\r\n raise IOError(ct.NETWORK_URL_ERROR_MSG)\r\n\r\n\r\n\r\ndef get_security(api):\r\n \"\"\"\r\n 获取股票列表\r\n \"\"\"\r\n data = []\r\n for p in range(100):\r\n ds = api.get_security_list(0, p*1000)\r\n data += ds\r\n if len(ds) < 1000:\r\n break\r\n data = api.to_df(data)\r\n return data\r\n\r\n\r\ndef reset_instrument(xapi=None):\r\n \"\"\"\r\n 重新设置本地证券列表\r\n \"\"\"\r\n import tushare.util.conns as cs \r\n xapi = cs.xapi_x() if xapi is None else xapi\r\n data=[]\r\n for i in range(200): \r\n ds = xapi.get_instrument_info(i * 300, 300)\r\n data += ds\r\n if len(ds) < 300:\r\n break\r\n data = xapi.to_df(data)\r\n data.to_pickle(ct.INST_PLK_F)\r\n return data\r\n\r\n\r\n\r\ndef get_instrument(xapi=None):\r\n \"\"\"\r\n 获取证券列表\r\n \"\"\"\r\n import tushare.util.conns as cs \r\n xapi = cs.xapi_x() if xapi is None else xapi\r\n if xapi is None:\r\n print(ct.MSG_NOT_CONNECTED)\r\n return None\r\n data=[]\r\n for i in range(200): # range for python2/3\r\n ds = xapi.get_instrument_info(i * 300, 300)\r\n data += ds\r\n if len(ds) < 300:\r\n break\r\n data = xapi.to_df(data)\r\n return data\r\n\r\n\r\ndef get_markets(xapi=None):\r\n \"\"\"\r\n 获取市场代码\r\n \"\"\"\r\n if xapi is None:\r\n print(ct.MSG_NOT_CONNECTED)\r\n return None\r\n data = xapi.get_markets()\r\n data = xapi.to_df(data)\r\n return data\r\n \r\n \r\ndef factor_adj(code):\r\n df = pd.read_csv(ct.ADJ_FAC_URL%(ct.P_TYPE['http'],\r\n ct.DOMAINS['oss'], code))\r\n df = df.set_index('datetime')\r\n return df\r\n\r\n\r\ndef factor_shares(code):\r\n df = pd.read_csv(ct.SHS_FAC_URL%(ct.P_TYPE['http'],\r\n ct.DOMAINS['oss'], code))[['datetime', 'floats']]\r\n df = df.set_index('datetime')\r\n return df\r\n\r\n\r\ndef _random(n=13):\r\n from random import randint\r\n start = 10**(n-1)\r\n end = (10**n)-1\r\n return str(randint(start, end))\r\n\r\n\r\n\r\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.read_excel",
"pandas.compat.StringIO",
"pandas.DataFrame",
"pandas.read_html",
"pandas.read_json",
"pandas.read_pickle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Sherry-XLL/RecVAE | [
"e734dad3b59dd3b8101aa40c8ce849a992328e12"
] | [
"preprocessing.py"
] | [
"# based on https://github.com/dawenl/vae_cf\n\nimport os\nimport sys\n\nimport numpy as np\nfrom scipy import sparse\nimport pandas as pd\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str)\nparser.add_argument('--output_dir', type=str)\nparser.add_argument('--threshold', type=float)\nparser.add_argument('--min_items_per_user', type=int, default=5)\nparser.add_argument('--min_users_per_item', type=int, default=0)\nparser.add_argument('--heldout_users', type=int)\n\nargs = parser.parse_args()\n\ndataset = args.dataset\noutput_dir = args.output_dir\nthreshold = args.threshold\nmin_uc = args.min_items_per_user\nmin_sc = args.min_users_per_item\nn_heldout_users = args.heldout_users\n\nraw_data = pd.read_csv(dataset, header=0)\nraw_data = raw_data[raw_data['rating'] > threshold]\nraw_data.head()\n\n\ndef get_count(tp, id):\n playcount_groupbyid = tp[[id]].groupby(id, as_index=False)\n count = playcount_groupbyid.size()\n return count\n\n\ndef filter_triplets(tp, min_uc=min_uc, min_sc=min_sc): \n if min_sc > 0:\n itemcount = get_count(tp, 'movieId')\n tp = tp[tp['movieId'].isin(itemcount.index[itemcount['size'] >= min_sc])]\n \n if min_uc > 0:\n usercount = get_count(tp, 'userId')\n print(usercount.head())\n tp = tp[tp['userId'].isin(usercount.index[usercount['size'] >= min_uc])]\n \n usercount, itemcount = get_count(tp, 'userId'), get_count(tp, 'movieId') \n return tp, usercount, itemcount\n\n\nraw_data, user_activity, item_popularity = filter_triplets(raw_data)\n\nsparsity = 1. * raw_data.shape[0] / (user_activity.shape[0] * item_popularity.shape[0])\n\nprint(\"After filtering, there are %d watching events from %d users and %d movies (sparsity: %.3f%%)\" % \n (raw_data.shape[0], user_activity.shape[0], item_popularity.shape[0], sparsity * 100))\n\nunique_uid = user_activity.index\n\nnp.random.seed(98765)\nidx_perm = np.random.permutation(unique_uid.size)\nunique_uid = unique_uid[idx_perm]\n\nn_users = unique_uid.size\n\ntr_users = unique_uid[:(n_users - n_heldout_users * 2)]\nvd_users = unique_uid[(n_users - n_heldout_users * 2): (n_users - n_heldout_users)]\nte_users = unique_uid[(n_users - n_heldout_users):]\n\ntrain_plays = raw_data.loc[raw_data['userId'].isin(tr_users)]\n\nunique_sid = pd.unique(train_plays['movieId'])\n\nshow2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))\nprofile2id = dict((pid, i) for (i, pid) in enumerate(unique_uid))\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\nwith open(os.path.join(output_dir, 'unique_sid.txt'), 'w') as f:\n for sid in unique_sid:\n f.write('%s\\n' % sid)\n \nwith open(os.path.join(output_dir, 'unique_uid.txt'), 'w') as f:\n for uid in unique_uid:\n f.write('%s\\n' % uid)\n\n\ndef split_train_test_proportion(data, test_prop=0.2):\n data_grouped_by_user = data.groupby('userId')\n tr_list, te_list = list(), list()\n\n np.random.seed(98765)\n\n for i, (_, group) in enumerate(data_grouped_by_user):\n n_items_u = len(group)\n\n if n_items_u >= 5:\n idx = np.zeros(n_items_u, dtype='bool')\n idx[np.random.choice(n_items_u, size=int(test_prop * n_items_u), replace=False).astype('int64')] = True\n\n tr_list.append(group[np.logical_not(idx)])\n te_list.append(group[idx])\n else:\n tr_list.append(group)\n\n if i % 1000 == 0:\n print(\"%d users sampled\" % i)\n sys.stdout.flush()\n\n data_tr = pd.concat(tr_list)\n data_te = pd.concat(te_list)\n \n return data_tr, data_te\n\n\nvad_plays = raw_data.loc[raw_data['userId'].isin(vd_users)]\nvad_plays = 
vad_plays.loc[vad_plays['movieId'].isin(unique_sid)]\n\nvad_plays_tr, vad_plays_te = split_train_test_proportion(vad_plays)\n\ntest_plays = raw_data.loc[raw_data['userId'].isin(te_users)]\ntest_plays = test_plays.loc[test_plays['movieId'].isin(unique_sid)]\n\ntest_plays_tr, test_plays_te = split_train_test_proportion(test_plays)\n\ndef numerize(tp):\n uid = list(map(lambda x: profile2id[x], tp['userId']))\n sid = list(map(lambda x: show2id[x], tp['movieId']))\n return pd.DataFrame(data={'uid': uid, 'sid': sid}, columns=['uid', 'sid'])\n\n\ntrain_data = numerize(train_plays)\ntrain_data.to_csv(os.path.join(output_dir, 'train.csv'), index=False)\n\nvad_data_tr = numerize(vad_plays_tr)\nvad_data_tr.to_csv(os.path.join(output_dir, 'validation_tr.csv'), index=False)\n\nvad_data_te = numerize(vad_plays_te)\nvad_data_te.to_csv(os.path.join(output_dir, 'validation_te.csv'), index=False)\n\ntest_data_tr = numerize(test_plays_tr)\ntest_data_tr.to_csv(os.path.join(output_dir, 'test_tr.csv'), index=False)\n\ntest_data_te = numerize(test_plays_te)\ntest_data_te.to_csv(os.path.join(output_dir, 'test_te.csv'), index=False)\n\n"
] | [
[
"numpy.logical_not",
"pandas.concat",
"pandas.read_csv",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.permutation",
"pandas.unique",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
martindurant/xarray | [
"98a05f11c6f38489c82e86c9e9df796e7fb65fd2",
"98a05f11c6f38489c82e86c9e9df796e7fb65fd2"
] | [
"xarray/backends/memory.py",
"xarray/tests/test_utils.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport copy\n\nimport numpy as np\n\nfrom ..core.variable import Variable\nfrom ..core.pycompat import OrderedDict\n\nfrom .common import AbstractWritableDataStore\n\n\nclass InMemoryDataStore(AbstractWritableDataStore):\n \"\"\"\n Stores dimensions, variables and attributes in ordered dictionaries, making\n this store fast compared to stores which save to disk.\n\n This store exists purely for internal testing purposes.\n \"\"\"\n def __init__(self, variables=None, attributes=None, writer=None):\n self._variables = OrderedDict() if variables is None else variables\n self._attributes = OrderedDict() if attributes is None else attributes\n super(InMemoryDataStore, self).__init__(writer)\n\n def get_attrs(self):\n return self._attributes\n\n def get_variables(self):\n return self._variables\n\n def prepare_variable(self, k, v, *args, **kwargs):\n new_var = Variable(v.dims, np.empty_like(v), v.attrs)\n # we copy the variable and stuff all encodings in the\n # attributes to imitate what happens when writing to disk.\n new_var.attrs.update(v.encoding)\n self._variables[k] = new_var\n return new_var, v.data\n\n def set_attribute(self, k, v):\n # copy to imitate writing to disk.\n self._attributes[k] = copy.deepcopy(v)\n\n def set_dimension(self, d, l):\n # in this model, dimensions are accounted for in the variables\n pass\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport pickle\nimport pytest\n\nimport numpy as np\nimport pandas as pd\n\nfrom xarray.core import duck_array_ops, utils\nfrom xarray.core.pycompat import OrderedDict\nfrom . import TestCase\n\n\nclass TestAlias(TestCase):\n def test(self):\n def new_method():\n pass\n old_method = utils.alias(new_method, 'old_method')\n assert 'deprecated' in old_method.__doc__\n with self.assertWarns('deprecated'):\n old_method()\n\n\nclass TestSafeCastToIndex(TestCase):\n def test(self):\n dates = pd.date_range('2000-01-01', periods=10)\n x = np.arange(5)\n td = x * np.timedelta64(1, 'D')\n for expected, array in [\n (dates, dates.values),\n (pd.Index(x, dtype=object), x.astype(object)),\n (pd.Index(td), td),\n (pd.Index(td, dtype=object), td.astype(object)),\n ]:\n actual = utils.safe_cast_to_index(array)\n self.assertArrayEqual(expected, actual)\n self.assertEqual(expected.dtype, actual.dtype)\n\n\ndef test_multiindex_from_product_levels():\n result = utils.multiindex_from_product_levels([['b', 'a'], [1, 3, 2]])\n np.testing.assert_array_equal(\n result.labels, [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])\n np.testing.assert_array_equal(result.levels[0], ['b', 'a'])\n np.testing.assert_array_equal(result.levels[1], [1, 3, 2])\n\n other = pd.MultiIndex.from_product([['b', 'a'], [1, 3, 2]])\n np.testing.assert_array_equal(result.values, other.values)\n\n\nclass TestArrayEquiv(TestCase):\n def test_0d(self):\n # verify our work around for pd.isnull not working for 0-dimensional\n # object arrays\n self.assertTrue(\n duck_array_ops.array_equiv(0, np.array(0, dtype=object)))\n self.assertTrue(\n duck_array_ops.array_equiv(np.nan, np.array(np.nan, dtype=object)))\n self.assertFalse(\n duck_array_ops.array_equiv(0, np.array(1, dtype=object)))\n\n\nclass TestDictionaries(TestCase):\n def setUp(self):\n self.x = {'a': 'A', 'b': 'B'}\n self.y = {'c': 'C', 'b': 'B'}\n self.z = {'a': 'Z'}\n\n def test_equivalent(self):\n self.assertTrue(utils.equivalent(0, 0))\n self.assertTrue(utils.equivalent(np.nan, np.nan))\n self.assertTrue(utils.equivalent(0, np.array(0.0)))\n self.assertTrue(utils.equivalent([0], np.array([0])))\n self.assertTrue(utils.equivalent(np.array([0]), [0]))\n self.assertTrue(utils.equivalent(np.arange(3), 1.0 * np.arange(3)))\n self.assertFalse(utils.equivalent(0, np.zeros(3)))\n\n def test_safe(self):\n # should not raise exception:\n utils.update_safety_check(self.x, self.y)\n\n def test_unsafe(self):\n with self.assertRaises(ValueError):\n utils.update_safety_check(self.x, self.z)\n\n def test_ordered_dict_intersection(self):\n self.assertEqual({'b': 'B'},\n utils.ordered_dict_intersection(self.x, self.y))\n self.assertEqual({}, utils.ordered_dict_intersection(self.x, self.z))\n\n def test_dict_equiv(self):\n x = OrderedDict()\n x['a'] = 3\n x['b'] = np.array([1, 2, 3])\n y = OrderedDict()\n y['b'] = np.array([1.0, 2.0, 3.0])\n y['a'] = 3\n self.assertTrue(utils.dict_equiv(x, y)) # two nparrays are equal\n y['b'] = [1, 2, 3] # np.array not the same as a list\n self.assertTrue(utils.dict_equiv(x, y)) # nparray == list\n x['b'] = [1.0, 2.0, 3.0]\n self.assertTrue(utils.dict_equiv(x, y)) # list vs. 
list\n x['c'] = None\n self.assertFalse(utils.dict_equiv(x, y)) # new key in x\n x['c'] = np.nan\n y['c'] = np.nan\n self.assertTrue(utils.dict_equiv(x, y)) # as intended, nan is nan\n x['c'] = np.inf\n y['c'] = np.inf\n self.assertTrue(utils.dict_equiv(x, y)) # inf == inf\n y = dict(y)\n self.assertTrue(utils.dict_equiv(x, y)) # different dictionary types are fine\n y['b'] = 3 * np.arange(3)\n self.assertFalse(utils.dict_equiv(x, y)) # not equal when arrays differ\n\n def test_frozen(self):\n x = utils.Frozen(self.x)\n with self.assertRaises(TypeError):\n x['foo'] = 'bar'\n with self.assertRaises(TypeError):\n del x['a']\n with self.assertRaises(AttributeError):\n x.update(self.y)\n self.assertEqual(x.mapping, self.x)\n self.assertIn(repr(x), (\"Frozen({'a': 'A', 'b': 'B'})\",\n \"Frozen({'b': 'B', 'a': 'A'})\"))\n\n def test_sorted_keys_dict(self):\n x = {'a': 1, 'b': 2, 'c': 3}\n y = utils.SortedKeysDict(x)\n self.assertItemsEqual(y, ['a', 'b', 'c'])\n self.assertEqual(repr(utils.SortedKeysDict()),\n \"SortedKeysDict({})\")\n\n def test_chain_map(self):\n m = utils.ChainMap({'x': 0, 'y': 1}, {'x': -100, 'z': 2})\n self.assertIn('x', m)\n self.assertIn('y', m)\n self.assertIn('z', m)\n self.assertEqual(m['x'], 0)\n self.assertEqual(m['y'], 1)\n self.assertEqual(m['z'], 2)\n m['x'] = 100\n self.assertEqual(m['x'], 100)\n self.assertEqual(m.maps[0]['x'], 100)\n self.assertItemsEqual(['x', 'y', 'z'], m)\n\n\nclass Test_is_uniform_and_sorted(TestCase):\n\n def test_sorted_uniform(self):\n self.assertTrue(utils.is_uniform_spaced(np.arange(5)))\n\n def test_sorted_not_uniform(self):\n self.assertEqual(False, utils.is_uniform_spaced([-2, 1, 89]))\n\n def test_not_sorted_uniform(self):\n self.assertEqual(False, utils.is_uniform_spaced([1, -1, 3]))\n\n def test_not_sorted_not_uniform(self):\n self.assertEqual(False, utils.is_uniform_spaced([4, 1, 89]))\n\n def test_two_numbers(self):\n self.assertTrue(utils.is_uniform_spaced([0, 1.7]))\n\n def test_relative_tolerance(self):\n self.assertTrue(utils.is_uniform_spaced([0, 0.97, 2], rtol=0.1))\n\n\nclass Test_hashable(TestCase):\n\n def test_hashable(self):\n for v in [False, 1, (2, ), (3, 4), 'four']:\n self.assertTrue(utils.hashable(v))\n for v in [[5, 6], ['seven', '8'], {9: 'ten'}]:\n self.assertFalse(utils.hashable(v))\n"
] | [
[
"numpy.empty_like"
],
[
"numpy.arange",
"pandas.Index",
"numpy.testing.assert_array_equal",
"numpy.timedelta64",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
WangYongzhao/OpenNMT-tf | [
"b1c51e8a3560aa96c3359c5dfa3312d9771c3a32"
] | [
"opennmt/layers/transformer.py"
] | [
"\"\"\"Define layers related to the Google's Transformer model.\"\"\"\n\nimport tensorflow as tf\n\nfrom opennmt.layers import common\nfrom opennmt.utils import compat\n\n\ndef tile_sequence_length(sequence_length, num_heads):\n \"\"\"Tiles lengths :obj:`num_heads` times.\n\n Args:\n sequence_length: The sequence length.\n num_heads: The number of heads.\n\n Returns:\n A ``tf.Tensor`` where each length is replicated :obj:`num_heads` times.\n \"\"\"\n sequence_length = tf.tile(sequence_length, [num_heads])\n sequence_length = tf.reshape(sequence_length, [num_heads, -1])\n sequence_length = tf.transpose(sequence_length, perm=[1, 0])\n sequence_length = tf.reshape(sequence_length, [-1])\n return sequence_length\n\ndef build_sequence_mask(sequence_length,\n num_heads=None,\n maximum_length=None,\n dtype=tf.float32):\n \"\"\"Builds the dot product mask.\n\n Args:\n sequence_length: The sequence length.\n num_heads: The number of heads.\n maximum_length: Optional size of the returned time dimension. Otherwise\n it is the maximum of :obj:`sequence_length`.\n dtype: The type of the mask tensor.\n\n Returns:\n A broadcastable ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[batch_size, 1, 1, max_length]``.\n \"\"\"\n mask = tf.sequence_mask(sequence_length, maxlen=maximum_length, dtype=dtype)\n mask = tf.expand_dims(mask, axis=1)\n if num_heads is not None:\n mask = tf.expand_dims(mask, axis=1)\n return mask\n\ndef _lower_triangle_mask(sequence_length, maximum_length=None, dtype=tf.float32):\n batch_size = tf.shape(sequence_length)[0]\n if maximum_length is None:\n maximum_length = tf.reduce_max(sequence_length)\n mask = tf.ones([batch_size, maximum_length, maximum_length], dtype=dtype)\n mask = compat.tf_compat(v2=\"linalg.band_part\", v1=\"matrix_band_part\")(mask, -1, 0)\n return mask\n\ndef build_future_mask(sequence_length,\n num_heads=None,\n maximum_length=None,\n dtype=tf.float32):\n \"\"\"Builds the dot product mask for future positions.\n\n Args:\n sequence_length: The sequence length.\n num_heads: The number of heads.\n maximum_length: Optional size of the returned time dimension. Otherwise\n it is the maximum of :obj:`sequence_length`.\n dtype: The type of the mask tensor.\n\n Returns:\n A broadcastable ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[batch_size, 1, max_length, max_length]``.\n \"\"\"\n sequence_mask = tf.sequence_mask(sequence_length, maxlen=maximum_length, dtype=dtype)\n mask = _lower_triangle_mask(sequence_length, maximum_length=maximum_length, dtype=dtype)\n mask *= tf.expand_dims(sequence_mask, axis=1)\n if num_heads is not None:\n mask = tf.expand_dims(mask, axis=1)\n return mask\n\ndef cumulative_average_mask(sequence_length, maximum_length=None, dtype=tf.float32):\n \"\"\"Builds the mask to compute the cumulative average as described in\n https://arxiv.org/abs/1805.00631.\n\n Args:\n sequence_length: The sequence length.\n maximum_length: Optional size of the returned time dimension. 
Otherwise\n it is the maximum of :obj:`sequence_length`.\n dtype: The type of the mask tensor.\n\n Returns:\n A ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[batch_size, max_length, max_length]``.\n \"\"\"\n sequence_mask = tf.sequence_mask(sequence_length, maxlen=maximum_length, dtype=dtype)\n mask = _lower_triangle_mask(sequence_length, maximum_length=maximum_length, dtype=dtype)\n mask *= tf.expand_dims(sequence_mask, axis=2)\n weight = tf.range(1, tf.cast(tf.shape(mask)[1] + 1, dtype), dtype=dtype)\n mask /= tf.expand_dims(weight, 1)\n return mask\n\ndef cumulative_average(inputs, mask_or_step, cache=None):\n \"\"\"Computes the cumulative average as described in\n https://arxiv.org/abs/1805.00631.\n\n Args:\n inputs: The sequence to average. A tensor of shape :math:`[B, T, D]`.\n mask_or_step: If :obj:`cache` is set, this is assumed to be the current step\n of the dynamic decoding. Otherwise, it is the mask matrix used to compute\n the cumulative average.\n cache: A dictionnary containing the cumulative average of the previous step.\n\n Returns:\n The cumulative average, a tensor of the same shape and type as :obj:`inputs`.\n \"\"\"\n if cache is not None:\n step = tf.cast(mask_or_step, inputs.dtype)\n aa = (inputs + step * cache[\"prev_g\"]) / (step + 1.0)\n cache[\"prev_g\"] = aa\n return aa\n else:\n mask = mask_or_step\n return tf.matmul(mask, inputs)\n\ndef fused_projection(inputs, num_units, num_outputs=1):\n \"\"\"Projects the same input into multiple output spaces.\n\n Args:\n inputs: The inputs to project.\n num_units: The number of output units of each space.\n num_outputs: The number of output spaces.\n\n Returns:\n :obj:`num_outputs` ``tf.Tensor`` of depth :obj:`num_units`.\n \"\"\"\n return tf.split(\n tf.layers.conv1d(inputs, num_units * num_outputs, 1), num_outputs, axis=2)\n\ndef split_heads(inputs, num_heads):\n \"\"\"Splits a tensor in depth.\n\n Args:\n inputs: A ``tf.Tensor`` of shape :math:`[B, T, D]`.\n num_heads: The number of heads :math:`H`.\n\n Returns:\n A ``tf.Tensor`` of shape :math:`[B, H, T, D / H]`.\n \"\"\"\n static_shape = inputs.get_shape().as_list()\n depth = static_shape[-1]\n outputs = tf.reshape(\n inputs, [tf.shape(inputs)[0], tf.shape(inputs)[1], num_heads, depth // num_heads])\n outputs = tf.transpose(outputs, perm=[0, 2, 1, 3])\n return outputs\n\ndef combine_heads(inputs):\n \"\"\"Concatenates heads.\n\n Args:\n inputs: A ``tf.Tensor`` of shape :math:`[B, H, T, D]`.\n\n Returns:\n A ``tf.Tensor`` of shape :math:`[B, T, D * H]`.\n \"\"\"\n static_shape = inputs.get_shape().as_list()\n depth = static_shape[-1]\n num_heads = static_shape[1]\n outputs = tf.transpose(inputs, perm=[0, 2, 1, 3])\n outputs = tf.reshape(outputs, [tf.shape(outputs)[0], tf.shape(outputs)[1], depth * num_heads])\n return outputs\n\ndef dot_product_attention(queries,\n keys,\n values,\n mode,\n mask=None,\n dropout=0.0):\n \"\"\"Computes the dot product attention.\n\n Args:\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n keys: The sequence use to calculate attention scores. A tensor of shape\n :math:`[B, T_2, ...]`.\n values: The sequence to attend. 
A tensor of shape :math:`[B, T_2, ...]`.\n mode: A ``tf.estimator.ModeKeys`` mode.\n mask: A ``tf.Tensor`` applied to the dot product.\n dropout: The probability to drop units from the inputs.\n\n Returns:\n A tuple ``(context vector, attention vector)``.\n \"\"\"\n # Dot product between queries and keys.\n dot = tf.matmul(queries, keys, transpose_b=True)\n\n if mask is not None:\n dot = tf.cast(tf.cast(dot, tf.float32) * mask + ((1.0 - mask) * tf.float32.min), dot.dtype)\n\n # Compute attention weights.\n attn = tf.cast(tf.nn.softmax(tf.cast(dot, tf.float32)), dot.dtype)\n drop_attn = tf.layers.dropout(\n attn,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Compute attention context.\n context = tf.matmul(drop_attn, values)\n\n return context, attn\n\ndef _generate_relative_positions_matrix(length_q, length_k,\n max_relative_position,\n cache=False):\n \"\"\"Generates matrix of relative positions between inputs.\"\"\"\n if not cache:\n if length_q == length_k:\n range_vec_q = range_vec_k = tf.range(length_q)\n else:\n range_vec_k = tf.range(length_k)\n range_vec_q = range_vec_k[-length_q:]\n distance_mat = range_vec_k[None, :] - range_vec_q[:, None]\n else:\n distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0)\n distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,\n max_relative_position)\n # Shift values to be >= 0. Each integer still uniquely identifies a relative\n # position difference.\n final_mat = distance_mat_clipped + max_relative_position\n return final_mat\n\n\ndef _generate_relative_positions_embeddings(length_q, length_k, depth,\n max_relative_position, name,\n cache=False):\n \"\"\"Generates tensor of size [1 if cache else length_q, length_k, depth].\"\"\"\n with tf.variable_scope(name):\n relative_positions_matrix = _generate_relative_positions_matrix(\n length_q, length_k, max_relative_position, cache=cache)\n vocab_size = max_relative_position * 2 + 1\n # Generates embedding for each relative position of dimension depth.\n embeddings_table = tf.get_variable(\"embeddings\", [vocab_size, depth])\n embeddings = tf.gather(embeddings_table, relative_positions_matrix)\n return embeddings\n\n\ndef _relative_attention_inner(x, y, z, transpose):\n \"\"\"Relative position-aware dot-product attention inner calculation.\n\n This batches matrix multiply calculations to avoid unnecessary broadcasting.\n\n Args:\n x: Tensor with shape [batch_size, heads, length or 1, length or depth].\n y: Tensor with shape [batch_size, heads, length or 1, depth].\n z: Tensor with shape [length or 1, length, depth].\n transpose: Whether to transpose inner matrices of y and z. 
Should be true if\n last dimension of x is depth, not length.\n\n Returns:\n A Tensor with shape [batch_size, heads, length, length or depth].\n \"\"\"\n batch_size = tf.shape(x)[0]\n heads = x.get_shape().as_list()[1]\n length = tf.shape(x)[2]\n\n # xy_matmul is [batch_size, heads, length or 1, length or depth]\n xy_matmul = tf.matmul(x, y, transpose_b=transpose)\n # x_t is [length or 1, batch_size, heads, length or depth]\n x_t = tf.transpose(x, [2, 0, 1, 3])\n # x_t_r is [length or 1, batch_size * heads, length or depth]\n x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1])\n # x_tz_matmul is [length or 1, batch_size * heads, length or depth]\n x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)\n # x_tz_matmul_r is [length or 1, batch_size, heads, length or depth]\n x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1])\n # x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth]\n x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])\n return xy_matmul + x_tz_matmul_r_t\n\ndef shape_list(x):\n \"\"\"Return list of dims, statically where possible.\"\"\"\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i, dim in enumerate(static):\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret\n\n\ndef dot_product_attention_relative(q, k, v, mode, mask=None, dropout=0.0,max_relative_position=1):\n\n if not max_relative_position:\n raise ValueError(\"Max relative position (%s) should be > 0 when using \"\n \"relative self attention.\" % (max_relative_position))\n with tf.variable_scope(\n None, default_name=\"dot_product_attention_relative\",\n values=[q, k, v]) as scope:\n\n # Use separate embeddings suitable for keys and values.\n depth = k.get_shape().as_list()[3]\n length_k = shape_list(k)[2]\n length_q = shape_list(q)[2]\n relations_keys = _generate_relative_positions_embeddings(\n length_q, length_k, depth, max_relative_position,\n \"relative_positions_keys\")\n relations_values = _generate_relative_positions_embeddings(\n length_q, length_k, depth, max_relative_position,\n \"relative_positions_values\")\n\n # Compute self attention considering the relative position embeddings.\n logits = _relative_attention_inner(q, k, relations_keys, True)\n weights = tf.nn.softmax(logits, name=\"attention_weights\")\n weights_drop = tf.layers.dropout(\n weights,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n\n return _relative_attention_inner(weights_drop, v, relations_values, False), weights,relations_keys\n\ndef multi_head_attention(num_heads,\n queries,\n memory,\n mode,\n num_units=None,\n mask=None,\n cache=None,\n dropout=0.0,\n return_attention=False,\n max_relative_positions=0):\n \"\"\"Computes the multi-head attention as described in\n https://arxiv.org/abs/1706.03762.\n\n Args:\n num_heads: The number of attention heads.\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n memory: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.\n If ``None``, computes self-attention.\n mode: A ``tf.estimator.ModeKeys`` mode.\n num_units: The number of hidden units. 
If not set, it is set to the input\n dimension.\n mask: A ``tf.Tensor`` applied to the dot product.\n cache: A dictionary containing pre-projected keys and values.\n dropout: The probability to drop units from the inputs.\n return_attention: Return the attention head probabilities in addition to the\n context.\n\n Returns:\n The concatenated attention context of each head and the attention\n probabilities (if :obj:`return_attention` is set).\n \"\"\"\n num_units = num_units or queries.get_shape().as_list()[-1]\n\n if num_units % num_heads != 0:\n raise ValueError(\"Multi head attention requires that num_units is a\"\n \" multiple of {}\".format(num_heads))\n\n if memory is None:\n queries, keys, values = fused_projection(queries, num_units, num_outputs=3)\n\n keys = split_heads(keys, num_heads)\n values = split_heads(values, num_heads)\n\n if cache is not None:\n keys = tf.concat([cache[\"self_keys\"], keys], axis=2)\n values = tf.concat([cache[\"self_values\"], values], axis=2)\n cache[\"self_keys\"] = keys\n cache[\"self_values\"] = values\n else:\n queries = tf.layers.conv1d(queries, num_units, 1)\n\n if cache is not None:\n def _project_and_split():\n k, v = fused_projection(memory, num_units, num_outputs=2)\n return split_heads(k, num_heads), split_heads(v, num_heads)\n\n keys, values = tf.cond(\n tf.equal(tf.shape(cache[\"memory_keys\"])[2], 0),\n true_fn=_project_and_split,\n false_fn=lambda: (cache[\"memory_keys\"], cache[\"memory_values\"]))\n cache[\"memory_keys\"] = keys\n cache[\"memory_values\"] = values\n else:\n keys, values = fused_projection(memory, num_units, num_outputs=2)\n keys = split_heads(keys, num_heads)\n values = split_heads(values, num_heads)\n\n queries = split_heads(queries, num_heads)\n queries *= (num_units // num_heads)**-0.5\n\n if max_relative_positions == 0:\n heads, attn = dot_product_attention(\n queries,\n keys,\n values,\n mode,\n mask=mask,\n dropout=dropout)\n else:\n heads, attn = dot_product_attention_relative(\n queries,\n keys,\n values,\n mode,\n mask=mask,\n dropout=dropout,\n max_relative_positions= max_relative_positions)\n\n # Concatenate all heads output.\n combined = combine_heads(heads)\n outputs = tf.layers.conv1d(combined, num_units, 1)\n\n if not return_attention:\n return outputs\n return outputs, attn\n\ndef feed_forward(x, inner_dim, mode, dropout=0.0):\n \"\"\"Implements the Transformer's \"Feed Forward\" layer.\n\n .. 
math::\n\n ffn(x) = max(0, x*W_1 + b_1)*W_2 + b_2\n\n Args:\n x: The input.\n inner_dim: The number of units of the inner linear transformation.\n mode: A ``tf.estimator.ModeKeys`` mode.\n dropout: The probability to drop units from the inner transformation.\n\n Returns:\n The transformed input.\n \"\"\"\n input_dim = x.get_shape().as_list()[-1]\n\n inner = tf.layers.conv1d(x, inner_dim, 1, activation=tf.nn.relu)\n inner = tf.layers.dropout(\n inner,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n outer = tf.layers.conv1d(inner, input_dim, 1)\n\n return outer\n\ndef norm(inputs):\n \"\"\"Layer normalizes :obj:`inputs`.\"\"\"\n return tf.contrib.layers.layer_norm(inputs, begin_norm_axis=-1)\n\ndef drop_and_add(inputs,\n outputs,\n mode,\n dropout=0.1):\n \"\"\"Drops units in the outputs and adds the previous values.\n\n Args:\n inputs: The input of the previous layer.\n outputs: The output of the previous layer.\n mode: A ``tf.estimator.ModeKeys`` mode.\n dropout: The probability to drop units in :obj:`outputs`.\n\n Returns:\n The residual and normalized output.\n \"\"\"\n outputs = tf.layers.dropout(\n outputs,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n\n input_dim = inputs.get_shape().as_list()[-1]\n output_dim = outputs.get_shape().as_list()[-1]\n\n if input_dim == output_dim:\n outputs += inputs\n return outputs\n\n\nclass FeedForwardNetwork(tf.keras.layers.Layer):\n \"\"\"Implements the Transformer's \"Feed Forward\" layer.\n\n .. math::\n\n ffn(x) = max(0, x*W_1 + b_1)*W_2 + b_2\n\n Note:\n Object-oriented implementation for TensorFlow 2.0.\n \"\"\"\n\n def __init__(self,\n inner_dim,\n output_dim,\n dropout=0.1,\n activation=tf.nn.relu,\n **kwargs):\n \"\"\"Initializes this layer.\n\n Args:\n inner_dim: The number of units of the inner linear transformation.\n output_dim: The number of units of the ouput linear transformation.\n dropout: The probability to drop units from the activation output.\n activation: The activation function to apply between the two linear\n transformations.\n kwargs: Additional layer arguments.\n \"\"\"\n super(FeedForwardNetwork, self).__init__(**kwargs)\n self.inner = tf.keras.layers.Dense(inner_dim, activation=activation, name=\"inner\")\n self.outer = tf.keras.layers.Dense(output_dim, name=\"outer\")\n self.dropout = dropout\n\n def call(self, inputs, training=None): # pylint: disable=arguments-differ\n \"\"\"Runs the layer.\"\"\"\n inner = self.inner(inputs)\n inner = common.dropout(inner, self.dropout, training=training)\n return self.outer(inner)\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n \"\"\"Computes the multi-head attention as described in\n https://arxiv.org/abs/1706.03762.\n\n Note:\n Object-oriented implementation for TensorFlow 2.0.\n \"\"\"\n\n def __init__(self,\n num_heads,\n num_units,\n dropout=0.1,\n return_attention=False,\n **kwargs):\n \"\"\"Initializes this layers.\n\n Args:\n num_heads: The number of attention heads.\n num_units: The number of hidden units.\n dropout: The probability to drop units from the inputs.\n return_attention: If ``True``, also return the attention weights of the\n first head.\n kwargs: Additional layer arguments.\n \"\"\"\n super(MultiHeadAttention, self).__init__(**kwargs)\n if num_units % num_heads != 0:\n raise ValueError(\"Multi head attention requires that num_units is a\"\n \" multiple of %s\" % num_heads)\n self.num_heads = num_heads\n self.num_units = num_units\n self.linear_queries = tf.keras.layers.Dense(num_units, name=\"linear_queries\")\n 
self.linear_keys = tf.keras.layers.Dense(num_units, name=\"linear_keys\")\n self.linear_values = tf.keras.layers.Dense(num_units, name=\"linear_values\")\n self.linear_output = tf.keras.layers.Dense(num_units, name=\"linear_output\")\n self.dropout = dropout\n self.return_attention = return_attention\n\n def call(self, inputs, memory=None, mask=None, cache=None, training=None): # pylint: disable=arguments-differ\n \"\"\"Runs the layer.\n\n Args:\n inputs: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n memory: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.\n If ``None``, computes self-attention.\n mask: A ``tf.Tensor`` applied to the dot product.\n cache: A dictionary containing pre-projected keys and values.\n training: Run in training mode.\n\n Returns:\n A tuple with the attention context, the updated cache and the attention\n probabilities of the first head (if :obj:`return_attention` is ``True``).\n \"\"\"\n\n def _compute_kv(x):\n keys = self.linear_keys(x)\n keys = split_heads(keys, self.num_heads)\n values = self.linear_values(x)\n values = split_heads(values, self.num_heads)\n return keys, values\n\n # Compute queries.\n queries = self.linear_queries(inputs)\n queries = split_heads(queries, self.num_heads)\n queries *= (self.num_units // self.num_heads)**-0.5\n\n # Compute keys and values.\n if memory is None:\n keys, values = _compute_kv(inputs)\n if cache:\n keys = tf.concat([cache[0], keys], axis=2)\n values = tf.concat([cache[1], values], axis=2)\n else:\n if cache:\n if not self.linear_keys.built:\n # Ensure that the variable names are not impacted by the tf.cond name\n # scope if the layers have not already been built.\n with tf.name_scope(self.linear_keys.name):\n self.linear_keys.build(memory.shape)\n with tf.name_scope(self.linear_values.name):\n self.linear_values.build(memory.shape)\n keys, values = tf.cond(\n tf.equal(tf.shape(cache[0])[2], 0),\n true_fn=lambda: _compute_kv(memory),\n false_fn=lambda: cache)\n else:\n keys, values = _compute_kv(memory)\n\n cache = (keys, values)\n\n # Dot product attention.\n dot = tf.matmul(queries, keys, transpose_b=True)\n if mask is not None:\n mask = tf.expand_dims(tf.cast(mask, tf.float32), 1) # Broadcast on heads dimension.\n dot = tf.cast(tf.cast(dot, tf.float32) * mask + ((1.0 - mask) * tf.float32.min), dot.dtype)\n attn = tf.cast(tf.nn.softmax(tf.cast(dot, tf.float32)), dot.dtype)\n drop_attn = common.dropout(attn, self.dropout, training=training)\n heads = tf.matmul(drop_attn, values)\n\n # Concatenate all heads output.\n combined = combine_heads(heads)\n outputs = self.linear_output(combined)\n if self.return_attention:\n return outputs, cache, attn\n return outputs, cache\n\n\nclass TransformerLayerWrapper(common.LayerWrapper):\n \"\"\"Layer wrapper that applies a standard Transformer preprocessing and\n postprocessing:\n\n .. code-block:: text\n\n y = layer_norm(x)\n y = dropout(layer(y)) + x\n \"\"\"\n\n def __init__(self, layer, output_dropout, **kwargs):\n \"\"\"Initializes the wrapper.\n\n Args:\n layer: The Transformer layer to wrap.\n output_dropout: The dropout to apply on the layer output.\n **kwargs: Additional layer arguments.\n \"\"\"\n super(TransformerLayerWrapper, self).__init__(\n layer,\n normalize_input=True,\n output_dropout=output_dropout,\n residual_connection=True,\n **kwargs)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.layers.conv1d",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.layers.dropout",
"tensorflow.cast",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.sequence_mask",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
joaohenggeler/software-vulnerability-collection-scripts | [
"4719f8c279ebd6b879b7d8bce2a2789cd28c6929"
] | [
"Scripts/modules/project.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\n\tThis module defines a class that represents a C/C++ project and that contains methods for interfacing with its vulnerabilities and source files.\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport random\nimport re\nimport sys\nfrom collections import defaultdict, namedtuple\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport bs4 # type: ignore\nimport clang.cindex # type: ignore\nimport git # type: ignore\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\nfrom clang.cindex import CursorKind, TranslationUnitLoadError # type: ignore\n\nfrom .common import log, GLOBAL_CONFIG, DEBUG_ENABLED, DEBUG_CONFIG, CURRENT_TIMESTAMP, change_datetime_string_format, deserialize_json_container, format_unix_timestamp, join_and_normalize_paths\nfrom .cve import Cve\nfrom .scraping import ScrapingManager, ScrapingRegex\n\n####################################################################################################\n\nCLANG_INDEX: clang.cindex.Index\n\ntry:\n\tclang_lib_path = GLOBAL_CONFIG['clang_lib_path']\n\tlog.info(f'Loading libclang from \"{clang_lib_path}\".')\n\t\n\ttry:\n\t\tclang.cindex.Config.set_library_path(clang_lib_path)\n\t\tCLANG_INDEX = clang.cindex.Index.create()\n\texcept Exception as error:\n\t\tclang.cindex.Config.set_library_file(clang_lib_path)\n\t\tCLANG_INDEX = clang.cindex.Index.create()\n\n\tlog.info(f'Loaded libclang successfully.')\n\nexcept Exception as error:\n\tlog.error(f'Failed to load libclang with the error: {repr(error)}')\n\n####################################################################################################\n\nclass Project:\n\t\"\"\" Represents a software project, its repository, and the vulnerabilities it's affected by. \"\"\"\n\n\tfull_name: str\n\tshort_name: str\n\tdatabase_id: int\n\tdatabase_name: str\n\tgithub_data_name: str\n\tvendor_id: int\n\tproduct_id: int\n\turl_pattern: str\n\trepository_path: str\n\trepository_base_name: str\n\tmaster_branch: str\n\tlanguage: str\n\tinclude_directory_path: Optional[str]\n\n\tSOURCE_FILE_EXTENSIONS: list = ['c', 'cpp', 'cc', 'cxx', 'c++', 'cp', 'h', 'hpp', 'hh', 'hxx']\n\tSOURCE_FILE_EXTENSIONS_WITH_WILDCARDS: list = ['*.' 
+ extension for extension in SOURCE_FILE_EXTENSIONS] \n\n\trepository: git.Repo\n\n\toutput_directory_path: str\n\tscrape_all_branches: bool\n\n\tdef __init__(self, project_name: str, project_info: dict):\n\t\t\n\t\tself.full_name = project_name\n\t\tfor key, value in project_info.items():\n\t\t\tsetattr(self, key, value)\n\n\t\tself.repository_base_name = os.path.basename(self.repository_path)\n\n\t\tself.output_directory_path = os.path.join(self.output_directory_path, self.short_name)\n\t\tself.output_directory_path = os.path.abspath(self.output_directory_path)\n\n\t\ttry:\n\t\t\tself.repository = git.Repo(self.repository_path)\n\t\t\tlog.info(f'Loaded the project \"{self}\" located in \"{self.repository_path}\".')\n\t\texcept Exception as error:\n\t\t\tself.repository = None\n\t\t\tlog.error(f'Failed to get the repository for the project \"{self}\"\" with the error: {repr(error)}')\n\t\t\n\t\tif self.include_directory_path is not None:\n\t\t\tself.include_directory_path = join_and_normalize_paths(self.repository_path, self.include_directory_path)\n\n\tdef __str__(self):\n\t\treturn self.full_name\n\n\t####################################################################################################\n\n\t\"\"\"\n\t\tMethods used to initialize or perform basic operations used by all projects.\n\t\"\"\"\n\n\t@staticmethod\n\tdef get_project_list_from_config(config: dict = GLOBAL_CONFIG) -> list:\n\t\t\"\"\" Creates a list of projects given the current configuration. \"\"\"\n\n\t\toutput_directory_path = config['output_directory_path']\n\t\tscrape_all_branches = config['scrape_all_branches']\n\t\tproject_config = config['projects']\n\n\t\tproject_list = []\n\t\tfor full_name, info in project_config.items():\n\n\t\t\tshort_name = info['short_name']\n\n\t\t\tshould_be_allowed = GLOBAL_CONFIG['allowed_projects'].get(short_name)\n\t\t\tif not should_be_allowed:\n\t\t\t\tlog.info(f'Ignoring the project \"{full_name}\" ({short_name}).')\n\t\t\t\tcontinue\n\n\t\t\tinfo['output_directory_path'] = output_directory_path\n\t\t\tinfo['scrape_all_branches'] = scrape_all_branches\n\t\t\tproject: Project\n\t\t\n\t\t\tlog.info(f'Loading the project \"{full_name}\" ({short_name}) with the following configurations: {info}')\n\n\t\t\tif short_name == 'mozilla':\n\t\t\t\tproject = MozillaProject(full_name, info)\n\t\t\telif short_name == 'xen':\n\t\t\t\tproject = XenProject(full_name, info)\n\t\t\telif short_name == 'apache':\n\t\t\t\tproject = ApacheProject(full_name, info)\n\t\t\telif short_name == 'glibc':\n\t\t\t\tproject = GlibcProject(full_name, info)\n\t\t\telse:\n\t\t\t\tproject = Project(full_name, info)\n\n\t\t\tproject_list.append(project)\n\n\t\treturn project_list\n\n\t@staticmethod\n\tdef debug_ensure_all_project_repositories_were_loaded(project_list: list):\n\t\t\"\"\" Terminates the program if one or more projects are missing their repositories. This method does nothing outside debug mode. \"\"\"\n\n\t\tif DEBUG_ENABLED:\n\t\t\tfor project in project_list:\n\t\t\t\tif project.repository is None:\n\t\t\t\t\tlog.critical(f'The repository for project \"{project}\" was not loaded correctly.')\n\t\t\t\t\tsys.exit(1)\n\n\tdef get_base_output_csv_path(self, prefix: str) -> str:\n\t\t\"\"\" Creates the base output path for a CSV file with a given prefix. For example, using the prefix \"cve\" for the Mozilla project,\n\t\tthe file path would be: \"cve-1-mozilla-master-branch-20210401212440.csv\". 
\"\"\"\n\t\tused_branches = 'all-branches' if self.scrape_all_branches else 'master-branch'\n\t\tfilename = prefix + f'-{self.database_id}-{self.short_name}-{used_branches}-{CURRENT_TIMESTAMP}.csv'\n\t\treturn os.path.join(self.output_directory_path, filename)\n\n\tdef find_output_csv_files(self, prefix: str, subdirectory: Optional[str] = None, sort_key: Optional[Callable] = None) -> List[str]:\n\t\t\"\"\" Finds the paths to any CSV files that belong to this project by looking at their prefix. \"\"\"\n\t\t\n\t\tcsv_path = self.output_directory_path\n\n\t\tif subdirectory is not None:\n\t\t\tcsv_path = os.path.join(csv_path, subdirectory)\n\n\t\tcsv_path = os.path.join(csv_path, fr'{prefix}*-{self.database_id}-{self.short_name}-*')\n\t\tcsv_file_list = glob.glob(csv_path)\n\t\tcsv_file_list = sorted(csv_file_list, key=sort_key)\n\n\t\treturn csv_file_list\n\n\tdef create_output_subdirectory(self, subdirectory: str = '') -> None:\n\t\t\"\"\" Creates a subdirectory in the project's output directory. \"\"\"\n\t\tpath = os.path.join(self.output_directory_path, subdirectory)\n\t\tos.makedirs(path, exist_ok=True)\n\n\t####################################################################################################\n\n\t\"\"\"\n\t\tMethods used to interface with a project's repository.\n\t\"\"\"\n\n\tdef get_absolute_path_in_repository(self, relative_path: str) -> str:\n\t\t\"\"\" Converts the relative path of a file in the project's repository into an absolute one. \"\"\"\n\t\tfull_path = os.path.join(self.repository_path, relative_path)\n\t\treturn os.path.normpath(full_path)\n\n\tdef get_relative_path_in_repository(self, full_path: str) -> str:\n\t\t\"\"\" Converts the absolute path of a file in the project's repository into a relative one. \"\"\"\n\n\t\tpath = full_path.replace('\\\\', '/')\n\n\t\ttry:\n\t\t\t_, path = path.split(self.repository_base_name + '/', 1)\t\t\t\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\treturn path\n\n\tdef find_full_git_commit_hash(self, short_commit_hash: str) -> Optional[str]:\n\t\t\"\"\" Finds the full Git commit hash given the short hash. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\t# git show --format=\"%H\" --no-patch [SHORT HASH]\n\t\t\tfull_commit_hash = self.repository.git.show(short_commit_hash, format='%H', no_patch=True)\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tfull_commit_hash = None\n\t\t\tlog.error(f'Failed to find the full version of the commit hash \"{short_commit_hash}\" with the error: {repr(error)}')\n\n\t\treturn full_commit_hash\n\n\tdef find_git_commit_hashes_from_pattern(self, grep_pattern: str) -> list:\n\t\t\"\"\" Finds any Git commit hashes whose title and message match a given regex pattern. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn []\n\n\t\ttry:\n\t\t\t# git log --all --format=\"%H\" --grep=\"[REGEX]\" --regexp-ignore-case --extended-regexp\n\t\t\t# The --extended-regexp option enables the following special characters: ? + { | ( )\n\t\t\tlog_result = self.repository.git.log(all=True, format='%H', grep=grep_pattern, regexp_ignore_case=True, extended_regexp=True)\n\t\t\thash_list = log_result.splitlines()\n\t\texcept git.exc.GitCommandError as error:\n\t\t\thash_list = []\n\t\t\tlog.error(f'Failed to find commit hashes using the pattern \"{grep_pattern}\" with the error: {repr(error)}')\n\n\t\treturn hash_list\n\n\tdef is_git_commit_hash_valid(self, commit_hash: str) -> bool:\n\t\t\"\"\" Checks if a Git commit hash exists in the repository. 
\"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn False\n\n\t\ttry:\n\t\t\t# git branch --contains [HASH]\n\t\t\tself.repository.git.branch(contains=commit_hash)\n\t\t\tis_valid = True\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tis_valid = False\n\n\t\treturn is_valid\t\n\n\tdef remove_invalid_git_commit_hashes(self, cve: Cve):\n\t\t\"\"\" Removes any invalid Git commit hashes from a CVE. \"\"\"\n\n\t\tif self.repository is not None:\n\t\t\tcve.git_commit_hashes = [hash for hash in cve.git_commit_hashes if self.is_git_commit_hash_valid(hash)]\n\n\tdef is_git_commit_hash_in_master_branch(self, commit_hash: str) -> bool:\n\t\t\"\"\" Checks if a Git commit hash exists in the repository's master branch. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn False\n\n\t\tis_master = False\n\n\t\ttry:\n\t\t\t# git branch --contains [HASH] --format=\"%(refname:short)\"\n\t\t\tbranch_result = self.repository.git.branch(contains=commit_hash, format='%(refname:short)')\n\t\t\tis_master = self.master_branch in branch_result.splitlines()\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\t# If there's no such commit in the repository.\n\t\t\tpass\n\n\t\treturn is_master\n\t\n\tdef remove_git_commit_hashes_by_branch(self, cve: Cve):\n\t\t\"\"\" Removes any Git commit hashes from a CVE that do not exist in the master branch. If the configuration file specified every branch,\n\t\tthis method does nothing. \"\"\"\n\n\t\tif self.repository is not None and not self.scrape_all_branches:\n\t\t\tcve.git_commit_hashes = [hash for hash in cve.git_commit_hashes if self.is_git_commit_hash_in_master_branch(hash)]\n\n\tdef sort_git_commit_hashes_topologically(self, hash_list: List[str]) -> List[str]:\n\t\t\"\"\" Sorts a list of Git commit hashes topologically from oldest to newest. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn []\n\n\t\tif len(hash_list) <= 1:\n\t\t\treturn hash_list\n\n\t\ttry:\n\t\t\t# git rev-list --topo-order --reverse --no-walk=sorted [HASH 1] [...] [HASH N]\n\t\t\trev_list_result = self.repository.git.rev_list(*hash_list, topo_order=True, reverse=True, no_walk='sorted')\n\t\t\thash_list = rev_list_result.splitlines()\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\t# If there's no such commit in the repository.\n\t\t\tlog.error(f'Found one or more invalid commits while trying to sort the commit hashes topologically with the error: {repr(error)}')\n\t\t\thash_list = []\n\n\t\treturn hash_list\n\n\tdef filter_git_commit_hashes_by_source_file_extensions(self, hash_list: List[str]) -> List[str]:\n\t\t\"\"\" Filters a list of Git commit hashes so that only commits related to C/C++ files remain.\"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn []\n\n\t\ttry:\n\t\t\t# git rev-list [HASH 1] [...] [HASH N] -- [FILE EXTENSION 1] [...] 
[FILE EXTENSION N]\n\t\t\trev_list_result = self.repository.git.rev_list(*hash_list, '--', *Project.SOURCE_FILE_EXTENSIONS_WITH_WILDCARDS, no_walk='unsorted')\n\t\t\thash_list = rev_list_result.splitlines()\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\thash_list = []\n\t\t\tlog.error(f'Failed to filter the commit hashes with the error: {repr(error)}')\n\t\t\t\n\t\treturn hash_list\n\n\tdef find_changed_source_files_and_lines_between_git_commits(self, from_commit: str, to_commit: str) -> Iterator[ Tuple[str, List[List[int]], List[List[int]]] ]:\n\t\t\"\"\" Finds the paths and modified lines of any C/C++ source files that were changed between two commits.\"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn\n\n\t\ttry:\n\t\t\t# git diff --unified=0 [HASH FROM] [HASH TO] -- [FILE EXTENSION 1] [...] [FILE EXTENSION N]\n\t\t\t# For the parent commit: git diff --unified=0 [HASH]^ [HASH] -- [FILE EXTENSION 1] [...] [FILE EXTENSION N]\n\t\t\tdiff_result = self.repository.git.diff(from_commit, to_commit, '--', *Project.SOURCE_FILE_EXTENSIONS_WITH_WILDCARDS, unified=0)\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tlog.error(f'Failed to find the changed sources files and lines from the commit {from_commit} to {to_commit} with the error: {repr(error)}')\n\t\t\treturn\n\n\t\tlast_file_path: Optional[str] = None\n\t\tlast_from_lines_list: List[List[int]] = []\n\t\tlast_to_lines_list: List[List[int]] = []\n\t\n\t\tdef yield_last_file_if_it_exists() -> Iterator[ Tuple[str, List[List[int]], List[List[int]]] ]:\n\t\t\t\"\"\" Yields the previously found file path and its changed lines. \"\"\"\n\n\t\t\tnonlocal last_file_path, last_from_lines_list, last_to_lines_list\n\n\t\t\tif last_file_path is not None:\t\t\t\n\t\t\t\tyield (last_file_path, last_from_lines_list, last_to_lines_list)\n\t\t\t\tlast_file_path = None\n\t\t\t\tlast_from_lines_list = []\n\t\t\t\tlast_to_lines_list = []\n\n\t\tfor line in diff_result.splitlines():\n\n\t\t\t# E.g. \"+++ b/embedding/components/windowwatcher/src/nsPrompt.cpp\"\n\t\t\tif line.startswith('+++ '):\n\n\t\t\t\tyield from yield_last_file_if_it_exists()\n\t\t\t\t_, last_file_path = line.split('/', 1)\n\n\t\t\t\tif last_file_path == 'dev/null':\n\t\t\t\t\tlast_file_path = None\n\t\t\t\t\n\t\t\t# E.g. \"@@ -451,2 +428,2 @@ MakeDialogText(nsIChannel* aChannel, nsIAuthInformation* aAuthInfo,\"\n\t\t\t# E.g. 
\"@@ -263 +255,0 @@ do_test (int argc, char *argv[])\"\n\t\t\telif last_file_path is not None and line.startswith('@@'):\n\n\t\t\t\tmatch = ScrapingRegex.GIT_DIFF_LINE_NUMBERS.search(line)\n\t\t\t\tif match:\n\n\t\t\t\t\tdef append_line_numbers(line_list: List[List[int]], begin_group_name: str, total_group_name: str) -> None:\n\n\t\t\t\t\t\tline_begin = int(match.group(begin_group_name)) # type: ignore[union-attr]\n\n\t\t\t\t\t\tif line_begin == 0:\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\ttotal_lines = match.group(total_group_name) # type: ignore[union-attr]\n\t\t\t\t\t\ttotal_lines = int(total_lines) if total_lines is not None else 1\n\n\t\t\t\t\t\tline_end = line_begin + max(total_lines - 1, 0)\n\n\t\t\t\t\t\tline_list.append( [line_begin, line_end] )\n\n\t\t\t\t\tappend_line_numbers(last_from_lines_list, 'from_begin', 'from_total')\n\t\t\t\t\tappend_line_numbers(last_to_lines_list, 'to_begin', 'to_total')\n\n\t\t\t\telse:\n\t\t\t\t\tlog.error(f'Could not find the line number information for the file \"{last_file_path}\" (from {from_commit} to {to_commit}) in the diff line: \"{line}\".')\n\n\t\tyield from yield_last_file_if_it_exists()\n\n\t\t\"\"\"\n\t\t\tE.g. for Mozilla: git diff --unified=0 a714da4a56957c826a7cafa381c4d8df832172f2 a714da4a56957c826a7cafa381c4d8df832172f2^\n\n\t\t\tdiff --git a/embedding/components/windowwatcher/src/nsPrompt.cpp b/embedding/components/windowwatcher/src/nsPrompt.cpp\n\t\t\tindex a782689cc853..f95e19ed7c97 100644\n\t\t\t--- a/embedding/components/windowwatcher/src/nsPrompt.cpp\n\t\t\t+++ b/embedding/components/windowwatcher/src/nsPrompt.cpp\n\t\t\t@@ -58,3 +57,0 @@\n\t\t\t-#include \"nsIPrefService.h\"\n\t\t\t-#include \"nsIPrefLocalizedString.h\"\n\t\t\t-\n\t\t\t@@ -424,20 +420,0 @@ MakeDialogText(nsIChannel* aChannel, nsIAuthInformation* aAuthInfo,\n\t\t\t- // Trim obnoxiously long realms.\n\t\t\t- if (realm.Length() > 150) {\n\t\t\t- [...]\n\t\t\t- }\n\t\t\t@@ -451,2 +428,2 @@ MakeDialogText(nsIChannel* aChannel, nsIAuthInformation* aAuthInfo,\n\t\t\t- NS_NAMED_LITERAL_STRING(proxyText, \"EnterLoginForProxy\");\n\t\t\t- NS_NAMED_LITERAL_STRING(originText, \"EnterLoginForRealm\");\n\t\t\t+ NS_NAMED_LITERAL_STRING(proxyText, \"EnterUserPasswordForProxy\");\n\t\t\t+ NS_NAMED_LITERAL_STRING(originText, \"EnterUserPasswordForRealm\");\n\t\t\"\"\"\n\n\tdef find_changed_source_files_and_lines_since_parent_git_commit(self, commit_hash: str) -> Iterator[ Tuple[str, List[List[int]], List[List[int]]] ]:\n\t\t\"\"\" Finds the paths and modified lines of any C/C++ source files that were changed since the previous commit.\"\"\"\n\t\tyield from self.find_changed_source_files_and_lines_between_git_commits(commit_hash + '^', commit_hash)\n\n\tdef find_changed_source_files_in_parent_git_commit(self, commit_hash: str) -> Iterator[str]:\n\t\t\"\"\"\" Finds the paths of any C/C++ source files that were changed since the previous commit.\"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn\n\n\t\ttry:\n\t\t\t# git diff --name-only [HASH]^ [HASH] -- [FILE EXTENSION 1] [...] 
[FILE EXTENSION N]\n\t\t\tdiff_result = self.repository.git.diff(commit_hash + '^', commit_hash, '--', *Project.SOURCE_FILE_EXTENSIONS_WITH_WILDCARDS, name_only=True)\n\t\t\t\n\t\t\tfor file_path in diff_result.splitlines():\n\t\t\t\tyield file_path\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tlog.error(f'Failed to find the changed sources files from the commit \"{commit_hash}\" with the error: {repr(error)}')\n\t\t\treturn\n\n\tdef list_all_source_file_git_commit_hashes(self) -> List[str]:\n\t\t\"\"\" Lists all Git commit hashes between two dates where at least one C/C++ file was changed. This list is ordered topologically from oldest to newest. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn []\n\n\t\tafter_date = GLOBAL_CONFIG['neutral_after_author_date']\n\t\tbefore_date = GLOBAL_CONFIG['neutral_before_author_date']\n\t\t\n\t\thash_list = []\n\n\t\ttry:\n\t\t\t# git log --topo-order --reverse --do-walk --format=\"%H %as\" -- [FILE EXTENSION 1] [...] [FILE EXTENSION N]\n\t\t\tlog_result = self.repository.git.log('--', *Project.SOURCE_FILE_EXTENSIONS_WITH_WILDCARDS, topo_order=True, reverse=True, do_walk=True, format='%H %as')\n\t\t\t\n\t\t\tfor line in log_result.splitlines():\n\t\t\t\t\n\t\t\t\t# We have to do this manually instead of using the --after and --before options since those use\n\t\t\t\t# the commit date, and not the author date. The dates we compare use the YYYY-MM-DD format.\n\t\t\t\tcommit_hash, date = line.split(maxsplit=1)\n\t\t\t\tif after_date <= date <= before_date:\n\t\t\t\t\thash_list.append(commit_hash)\n\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tlog.error(f'Failed to list all commit hashes between \"{after_date}\" and \"{before_date}\" with the error: {repr(error)}')\n\n\t\treturn hash_list\n\n\tdef find_first_git_commit_hash(self) -> Optional[str]:\n\t\t\"\"\" Finds the first Git commit hash in a repository. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\t# git log --topo-order --reverse --do-walk --format=\"%H\" --\n\t\t\tlog_result = self.repository.git.log('--', topo_order=True, reverse=True, do_walk=True, format='%H')\n\t\t\tcommit_hash = log_result.splitlines()[0]\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tcommit_hash = None\n\t\t\tlog.error(f'Failed to find the first commit hash with the error: {repr(error)}')\n\n\t\treturn commit_hash\n\n\tdef find_last_changed_git_commit_hashes(self, commit_hash: str, file_path: str) -> List[str]:\n\t\t\"\"\" Finds any previous Git commit hashes where a given file was last changed. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn []\n\n\t\ttry:\n\t\t\t# git log [HASH] --parents --max-count=1 --format=\"%P\" -- [FILE PATH]\n\t\t\tcommit_list = self.repository.git.log(commit_hash, '--', file_path, parents=True, max_count=1, format='%P')\n\t\t\tcommit_list = commit_list.split()\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tcommit_list = []\n\t\t\tlog.error(f'Failed to find the parent of the commit hash \"{commit_hash}\" with the error: {repr(error)}')\n\n\t\treturn commit_list\n\n\tdef find_parent_git_commit_hashes(self, commit_hash: str) -> List[str]:\n\t\t\"\"\" Finds any previous Git commit hashes. \"\"\"\n\t\treturn self.find_last_changed_git_commit_hashes(commit_hash, '.')\n\n\tdef find_tag_name_from_git_commit_hash(self, commit_hash: str) -> Optional[str]:\n\t\t\"\"\" Finds the tag name associated with a Git commit hash. 
\"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\t# git name-rev --tags --name-only [HASH]\n\t\t\t# E.g. \"v4.4-rc6~22^2~24\" or \"v2.6.39-rc3^0\" or \"undefined\"\n\t\t\tname_rev_result = self.repository.git.name_rev(commit_hash, tags=True, name_only=True)\n\t\t\ttag_name = re.split(r'~|\\^', name_rev_result, 1)[0]\n\t\texcept git.exc.GitCommandError as error:\n\t\t\ttag_name = None\n\t\t\tlog.error(f'Failed to find the tag name for the commit hash \"{commit_hash}\" with the error: {repr(error)}')\n\n\t\treturn tag_name\n\n\tdef find_author_date_from_git_commit_hash(self, commit_hash: str) -> Optional[str]:\n\t\t\"\"\" Finds the author date (not the commit date) associated with a Git commit hash. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\t# git log --format=\"%ad\" --date=\"unix\" [HASH]\n\t\t\tlog_result = self.repository.git.log(commit_hash, format='%ad', date='unix')\n\t\t\ttimestamp = log_result.split('\\n', 1)[0]\n\t\t\tdate = format_unix_timestamp(timestamp)\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tdate = None\n\t\t\tlog.error(f'Failed to find the author date for the commit hash \"{commit_hash}\" with the error: {repr(error)}')\n\n\t\treturn date\n\n\tdef checkout_files_in_git_commit(self, commit_hash: str, file_path_list: list) -> bool:\n\t\t\"\"\" Performs the Git checkout operation on a specific list of files in a given Git commit. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn False\n\n\t\tsuccess = False\n\n\t\ttry:\n\t\t\t# git checkout [COMMIT] -- [FILE PATH 1] [FILE PATH 2] [...] [FILE PATH N]\n\t\t\tself.repository.git.checkout(commit_hash, '--', *file_path_list)\n\t\t\tsuccess = True\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tlog.error(f'Failed to checkout the files in commit \"{commit_hash}\" with the error: {repr(error)}')\n\t\t\t\n\t\treturn success\n\n\tdef checkout_entire_git_commit(self, commit_hash: str) -> bool:\n\t\t\"\"\" Performs the Git checkout operation for every file in a given Git commit. \"\"\"\n\t\treturn self.checkout_files_in_git_commit(commit_hash, ['.'])\n\n\tdef hard_reset_git_head(self):\n\t\t\"\"\" Performs a hard reset operation to the project's repository. \"\"\"\n\n\t\tif self.repository is None:\n\t\t\treturn\n\n\t\ttry:\n\t\t\t# git reset --hard\n\t\t\tself.repository.git.reset(hard=True)\n\t\texcept git.exc.GitCommandError as error:\n\t\t\tlog.error(f'Failed to hard reset the current HEAD with the error: {repr(error)}')\n\n\t####################################################################################################\n\n\t\"\"\"\n\t\tMethods used to scrape vulnerability metadata from sources like online databases, bug trackers,\n\t\tsecurity advisories, and the project's version control system.\n\t\"\"\"\n\n\tdef scrape_additional_information_from_security_advisories(self, cve: Cve):\n\t\t\"\"\" Scrapes any additional information from the project's security advisories. This method should be overriden by a project's subclass. \"\"\"\n\t\tpass\n\n\tdef scrape_additional_information_from_version_control(self, cve: Cve):\n\t\t\"\"\" Scrapes any additional information from the project's version control system. This method should be overriden by a project's subclass. \"\"\"\n\t\tpass\n\n\tdef scrape_vulnerabilities_from_cve_details(self) -> Iterator[Cve]:\n\t\t\"\"\" Scrapes any vulnerabilities related to this project from the CVE Details website. 
\"\"\"\n\n\t\tlog.info(f'Collecting the vulnerabilities for the \"{self}\" project ({self.vendor_id}, {self.product_id}):')\n\t\tresponse = Cve.CVE_DETAILS_SCRAPING_MANAGER.download_page('https://www.cvedetails.com/vulnerability-list.php', {'vendor_id': self.vendor_id, 'product_id': self.product_id})\n\n\t\tif response is None:\n\t\t\tlog.error('Could not download the first hub page. No vulnerabilities will be scraped for this project.')\n\t\t\treturn\n\t\t\n\t\tmain_soup = bs4.BeautifulSoup(response.text, 'html.parser')\n\n\t\tpage_div = main_soup.find('div', id='pagingb')\n\t\tpage_a_list = page_div.find_all('a', title=ScrapingRegex.PAGE_TITLE)\n\t\tpage_url_list = ['https://www.cvedetails.com' + page_a['href'] for page_a in page_a_list]\n\n\t\tif DEBUG_ENABLED:\n\t\t\tprevious_len = len(page_url_list)\n\t\t\tif previous_len > DEBUG_CONFIG['min_hub_pages']:\n\t\t\t\tpage_url_list = page_url_list[::DEBUG_CONFIG['hub_page_step']]\n\t\t\t\n\t\t\tlog.debug(f'Reduced the number of hub pages from {previous_len} to {len(page_url_list)}.')\n\n\t\telse:\n\t\t\tfirst_page = GLOBAL_CONFIG.get('start_at_cve_hub_page')\n\t\t\tif first_page is not None:\n\t\t\t\tlog.info(f'Starting at hub page {first_page} at the user\\'s request.')\n\t\t\t\tpage_url_list = page_url_list[first_page-1:]\n\n\t\tfor i, page_url in enumerate(page_url_list):\n\n\t\t\tlog.info(f'Scraping hub page {i+1} of {len(page_url_list)}...')\n\t\t\tpage_response = Cve.CVE_DETAILS_SCRAPING_MANAGER.download_page(page_url)\n\t\t\tif page_response is None:\n\t\t\t\tlog.error(f'Failed to download hub page {i+1}.')\n\t\t\t\tcontinue\n\t\n\t\t\tpage_soup = bs4.BeautifulSoup(page_response.text, 'html.parser')\n\t\t\tvulnerability_table = page_soup.find('table', id='vulnslisttable')\n\t\t\tcve_a_list = vulnerability_table.find_all('a', title=ScrapingRegex.CVE)\n\t\t\t\n\t\t\t# Test a random sample of CVEs from each page.\n\t\t\tif DEBUG_ENABLED:\n\t\t\t\tprevious_len = len(cve_a_list)\n\t\t\t\tif DEBUG_CONFIG['use_random_sampling']:\n\t\t\t\t\tcve_a_list = random.sample(cve_a_list, DEBUG_CONFIG['max_cves_per_hub_page'])\n\t\t\t\telse:\n\t\t\t\t\tcve_a_list = cve_a_list[:DEBUG_CONFIG['max_cves_per_hub_page']]\n\t\t\t\tlog.debug(f'Reduced the number of CVE pages from {previous_len} to {len(cve_a_list)}.')\n\n\t\t\tfor j, cve_a in enumerate(cve_a_list):\n\n\t\t\t\tcve_id = cve_a.get_text(strip=True)\n\t\t\t\tcve = Cve(cve_id, self)\n\n\t\t\t\tlog.info(f'Scraping the CVE page {j+1} of {len(cve_a_list)}: \"{cve.id}\" from \"{cve.url}\"...')\n\t\t\t\tdownload_success = cve.download_cve_details_page()\n\t\t\t\t\n\t\t\t\tif download_success:\n\t\t\t\t\tcve.scrape_dates_from_page()\n\t\t\t\t\tcve.scrape_basic_attributes_from_page()\n\t\t\t\t\tcve.scrape_affected_product_versions_from_page()\n\t\t\t\t\tcve.scrape_references_from_page()\n\n\t\t\t\t\tself.scrape_additional_information_from_security_advisories(cve)\n\t\t\t\t\tself.scrape_additional_information_from_version_control(cve)\n\n\t\t\t\t\tcve.remove_duplicated_values()\n\t\t\t\t\tself.remove_invalid_git_commit_hashes(cve)\n\t\t\t\t\tself.remove_git_commit_hashes_by_branch(cve)\n\t\t\t\telse:\n\t\t\t\t\tlog.error(f'Failed to download the page for {cve}.')\n\n\t\t\t\tyield cve\n\n\t####################################################################################################\n\n\t\"\"\"\n\t\tMethods used to find any files, functions, and classes affected by a project's vulnerabilities.\n\t\"\"\"\n\n\tdef find_code_units_in_file(self, file_path: str) -> Tuple[ List[dict], List[dict] 
]:\n\t\t\"\"\" Lists any functions and classes in a source file in the project's repository. \"\"\"\n\n\t\tfunction_list: List[dict] = []\n\t\tclass_list: List[dict] = []\n\n\t\tsource_file_path = self.get_absolute_path_in_repository(file_path)\n\t\tsource_file_name = os.path.basename(source_file_path)\n\n\t\ttry:\n\t\t\twith open(source_file_path, 'r', encoding='utf-8', errors='replace') as source_file:\n\t\t\t\tsource_contents = source_file.read()\n\t\t\t\tif self.language == 'c++':\n\t\t\t\t\t# @Hack: This is a hacky way of getting clang to report C++ methods that belong to a class\n\t\t\t\t\t# that is not defined in the file that we're processing. Although we tell clang where to\n\t\t\t\t\t# look for the header files that define these classes, this wouldn't work for the Mozilla's\n\t\t\t\t\t# repository structure. By removing the \"<Class Name>::\" pattern from a function's definition,\n\t\t\t\t\t# we're essentially telling clang to consider them regular C-style functions. This works for\n\t\t\t\t\t# our purposes since we only care about a function's name and its beginning and ending line\n\t\t\t\t\t# numbers.\n\t\t\t\t\tsource_contents = re.sub(r'\\S+::', '', source_contents)\n\n\t\texcept Exception as error:\n\t\t\tlog.error(f'Failed to read the source file \"{source_file_path}\" with the error: {repr(error)}')\n\t\t\treturn (function_list, class_list)\n\n\t\ttry:\n\t\t\tclang_arguments = ['--language', self.language]\n\t\t\t\n\t\t\tif self.include_directory_path is not None:\n\t\t\t\tclang_arguments.extend(['--include-directory', self.include_directory_path])\n\n\t\t\tglobal CLANG_INDEX\n\t\t\ttu = CLANG_INDEX.parse(source_file_name, args=clang_arguments, unsaved_files=[ (source_file_name, source_contents) ])\n\t\t\t\n\t\t\tif DEBUG_ENABLED:\n\t\t\t\tfor diagnostic in tu.diagnostics:\n\t\t\t\t\tlog.debug(f'Diagnostic: {diagnostic}')\n\n\t\t\tFUNCTION_KINDS = [\tCursorKind.FUNCTION_DECL, CursorKind.CXX_METHOD, CursorKind.CONSTRUCTOR, CursorKind.DESTRUCTOR,\n\t\t\t\t\t\t\t\tCursorKind.CONVERSION_FUNCTION, CursorKind.FUNCTION_TEMPLATE]\n\n\t\t\tCLASS_KINDS = [CursorKind.STRUCT_DECL, CursorKind.UNION_DECL, CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE]\n\n\t\t\tKINDS_TO_NAME = {CursorKind.STRUCT_DECL: 'Struct', CursorKind.UNION_DECL: 'Union', CursorKind.CLASS_DECL: 'Class', CursorKind.CLASS_TEMPLATE: 'Class'}\n\n\t\t\tfor node in tu.cursor.walk_preorder():\n\n\t\t\t\t# This should have the same behavior as clang_Location_isFromMainFile().\n\t\t\t\tif node.location.file is not None and node.location.file.name == source_file_name and node.is_definition():\n\n\t\t\t\t\tdef add_to_list(code_unit_list: List[dict]):\n\t\t\t\t\t\t\"\"\" Helper method that adds the code unit's properties to the resulting list. 
\"\"\"\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\tunit_lines = [node.extent.start.line, node.extent.end.line]\n\t\t\t\t\t\tcode_unit_info = {'Name': node.spelling, 'Signature': node.displayname, 'Lines': unit_lines}\n\n\t\t\t\t\t\tkind_name = KINDS_TO_NAME.get(node.kind)\n\t\t\t\t\t\tif kind_name is not None:\n\t\t\t\t\t\t\tcode_unit_info.update({'Kind': kind_name})\n\n\t\t\t\t\t\tcode_unit_list.append(code_unit_info)\n\n\t\t\t\t\tif node.kind in FUNCTION_KINDS:\n\t\t\t\t\t\tadd_to_list(function_list)\n\t\t\t\t\telif node.kind in CLASS_KINDS:\n\t\t\t\t\t\tadd_to_list(class_list)\n\n\t\texcept TranslationUnitLoadError as error:\n\t\t\tlog.error(f'Failed to parse the source file \"{source_file_path}\" with the error: {repr(error)}')\n\n\t\treturn (function_list, class_list)\n\n\tdef iterate_and_checkout_file_timeline_in_repository(self, csv_file_path: str) -> Iterator[tuple]:\n\t\t\"\"\" Iterates over and performs a Git checkout operation on a list of files affected by the project's vulnerabilities.\n\t\t\n\t\tFor each neutral-vulnerable commit pair, the commit hash and vulnerability status are different, but the file list is the same\n\t\tsince it only uses the information relative to the neutral commit, even for the vulnerable one.\"\"\"\n\n\t\ttimeline = pd.read_csv(csv_file_path, usecols=[\t'File Path', 'Topological Index', 'Affected', 'Vulnerable',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'Commit Hash', 'Affected Functions', 'Affected Classes', 'CVEs'], dtype=str)\n\n\t\ttimeline = timeline.replace({np.nan: None})\n\t\ttimeline['Topological Index'] = pd.to_numeric(timeline['Topological Index'])\n\t\t\n\t\tif GLOBAL_CONFIG['start_at_checkout_commit_index'] is not None:\n\t\t\t\n\t\t\tis_allowed_commit = timeline['Topological Index'] >= GLOBAL_CONFIG['start_at_checkout_commit_index']\n\t\t\ttimeline = timeline[is_allowed_commit]\n\n\t\tfilter_commit_using_config = {}\n\t\tif GLOBAL_CONFIG['checkout_commit_index_list'] is not None:\n\n\t\t\tfilter_commit_using_config = {topological_index: True for topological_index in GLOBAL_CONFIG['checkout_commit_index_list']}\n\n\t\t\tallowed_commit_list = []\n\t\t\tfor topological_index in GLOBAL_CONFIG['checkout_commit_index_list']:\n\t\t\t\tallowed_commit_list.append(topological_index)\n\t\t\t\tallowed_commit_list.append(topological_index + 1)\n\t\t\t\tallowed_commit_list.append(topological_index + 2)\n\n\t\t\tis_allowed_commit = timeline['Topological Index'].isin(allowed_commit_list)\n\t\t\ttimeline = timeline[is_allowed_commit]\n\n\t\tgrouped_files = timeline.groupby(by=['Topological Index', 'Affected', 'Vulnerable', 'Commit Hash', 'CVEs'], dropna=False)\n\n\t\tChangedFiles = namedtuple('ChangedFiles', [\t'TopologicalIndex', 'Affected', 'Vulnerable', 'CommitHash', 'Cves',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'AbsoluteFilePaths', 'RelativeFilePaths', 'FilePathToFunctions', 'FilePathToClasses'])\n\n\t\tfor (topological_index, affected, vulnerable, commit_hash, cves), group_df in grouped_files:\n\n\t\t\tif filter_commit_using_config and not filter_commit_using_config.get(topological_index):\n\t\t\t\tcontinue\n\n\t\t\t# For any file in an affected commit (vulnerable or neutral), we know that their paths exist in that particular commit.\n\t\t\t# When we look at the files that weren't affected, we are now dealing with multiple changes across different commits.\n\t\t\t# Because of this, we must checkout the next commit (i.e. the next vulnerable commit) so that we can guarantee that\n\t\t\t# those files exist. 
For example, if we checked out the first commit in the project, we would be missing any files\n\t\t\t# that were added or changed between that commit and the next vulnerable one.\n\n\t\t\taffected = (affected == 'Yes')\n\n\t\t\tif affected:\n\t\t\t\tcommit_hash_to_checkout = commit_hash\n\n\t\t\telse:\n\t\t\t\tis_next_commit = (timeline['Topological Index'] == topological_index + 1) | (timeline['Topological Index'] == topological_index + 2)\n\t\t\t\t\n\t\t\t\tif is_next_commit.any():\n\t\t\t\t\tnext_group = timeline[is_next_commit].iloc[0]\n\t\t\t\t\tcommit_hash_to_checkout = next_group['Commit Hash']\n\t\t\t\telse:\n\t\t\t\t\tlog.warning(f'Defaulting to the current commit hash {commit_hash}.')\n\t\t\t\t\tcommit_hash_to_checkout = commit_hash\n\n\t\t\tcheckout_success = self.checkout_entire_git_commit(commit_hash_to_checkout)\n\t\t\tif checkout_success:\n\n\t\t\t\tvulnerable = (vulnerable == 'Yes')\n\t\t\t\tif pd.isna(cves):\n\t\t\t\t\tcves = None\n\n\t\t\t\trelative_file_path_list: list = group_df['File Path'].tolist()\n\t\t\t\tabsolute_file_path_list = [self.get_absolute_path_in_repository(file_path) for file_path in relative_file_path_list]\n\t\t\t\t\n\t\t\t\taffected_function_list = group_df['Affected Functions'].tolist()\n\t\t\t\taffected_function_list = [deserialize_json_container(function_list) for function_list in affected_function_list]\n\n\t\t\t\taffected_class_list = group_df['Affected Classes'].tolist()\n\t\t\t\taffected_class_list = [deserialize_json_container(class_list) for class_list in affected_class_list]\n\n\t\t\t\tdef map_file_paths_to_code_units(code_unit_list: list) -> dict:\n\t\t\t\t\t\"\"\" Maps the relative file paths in the repository to their code units. \"\"\"\n\n\t\t\t\t\t# It's possible that the SATs generate metrics or alerts related to files that we're not currently\n\t\t\t\t\t# iterating over (e.g. the header files of the current C/C++ source file). In those cases, we won't\n\t\t\t\t\t# have a list of code units.\n\t\t\t\t\tfile_path_to_code_units = defaultdict(lambda: [])\n\t\t\t\t\tfor file_path, units in zip(relative_file_path_list, code_unit_list):\n\t\t\t\t\t\tif units is not None:\n\t\t\t\t\t\t\tfile_path_to_code_units[file_path] = units\n\n\t\t\t\t\treturn file_path_to_code_units\n\n\t\t\t\tfile_path_to_functions = map_file_paths_to_code_units(affected_function_list)\n\t\t\t\tfile_path_to_classes = map_file_paths_to_code_units(affected_class_list)\n\n\t\t\t\tyield ChangedFiles(\ttopological_index, affected, vulnerable, commit_hash, cves,\n\t\t\t\t\t\t\t\t\tabsolute_file_path_list, relative_file_path_list, file_path_to_functions, file_path_to_classes)\n\n\t\t\telse:\n\t\t\t\tlog.error(f'Failed to checkout the commit {commit_hash_to_checkout} in the CSV file \"{csv_file_path}\".')\n\t\t\n\t\tself.hard_reset_git_head()\n\n####################################################################################################\n\nclass MozillaProject(Project):\n\t\"\"\" Represents the Mozilla project. 
\"\"\"\n\n\tMOZILLA_SCRAPING_MANAGER: ScrapingManager = ScrapingManager('https://www.mozilla.org')\n\n\tdef __init__(self, project_name: str, project_info: dict):\n\t\tsuper().__init__(project_name, project_info)\n\n\tdef scrape_additional_information_from_security_advisories(self, cve: Cve):\n\n\t\t# Download and extract information from any referenced Mozilla Foundation Security Advisories (MFSA) pages.\n\t\tfor mfsa_id, mfsa_url in zip(cve.advisory_ids, cve.advisory_urls):\n\n\t\t\tmfsa_info = {}\n\t\t\tlog.info(f'Scraping additional information from advisory page {mfsa_id}: \"{mfsa_url}\"...')\n\n\t\t\tmfsa_response = MozillaProject.MOZILLA_SCRAPING_MANAGER.download_page(mfsa_url)\n\t\t\tif mfsa_response is None:\n\t\t\t\tlog.error(f'Could not download the page for {mfsa_id}.')\n\t\t\t\tcontinue\n\n\t\t\tmfsa_soup = bs4.BeautifulSoup(mfsa_response.text, 'html.parser')\n\n\t\t\t\"\"\"\n\t\t\t[MFSA 2005-01 until (present)]\n\t\t\t<dl class=\"summary\">\n\t\t\t\t<dt>Announced</dt>\n\t\t\t\t<dd>November 20, 2012</dd>\n\t\t\t\t<dt>Reporter</dt>\n\t\t\t\t<dd>Mariusz Mlynski</dd>\n\t\t\t\t<dt>Impact</dt>\n\t\t\t\t<dd><span class=\"level critical\">Critical</span></dd>\n\t\t\t\t<dt>Products</dt>\n\t\t\t\t<dd>Firefox, Firefox ESR</dd>\n\t\t\t\t<dt>Fixed in</dt>\n\t\t\t\t<dd>\n\t\t\t\t\t<ul>\n\t\t\t\t\t\t<li>Firefox 17</li>\n\t\t\t\t\t\t<li>Firefox ESR 10.0.11</li>\n\t\t\t\t\t</ul>\n\t\t\t\t</dd>\n\t\t\t</dl>\n\n\t\t\tMFSA 2005-01 until MFSA 2016-84]\n\t\t\t<h3>References</h3>\n\n\t\t\t<p>Crashes referencing removed nodes (Jesse Ruderman, Martijn Wargers)</p>\n\t\t\t<ul>\n\t\t\t\t<li><a href=\"https://bugzilla.mozilla.org/show_bug.cgi?id=338391\">https://bugzilla.mozilla.org/show_bug.cgi?id=338391</a></li>\n\t\t\t\t<li><a href=\"https://bugzilla.mozilla.org/show_bug.cgi?id=340733\">https://bugzilla.mozilla.org/show_bug.cgi?id=340733</a></li>\n\t\t\t\t<li><a href=\"https://bugzilla.mozilla.org/show_bug.cgi?id=338129\">https://bugzilla.mozilla.org/show_bug.cgi?id=338129</a></li>\n\t\t\t</ul>\n\n\t\t\t<p>crypto.generateCRMFRequest callback can run on deleted context (shutdown)</p>\n\t\t\t<ul>\n\t\t\t\t<li>\n\t\t\t\t\t<a href=\"https://bugzilla.mozilla.org/show_bug.cgi?id=337462\">https://bugzilla.mozilla.org/show_bug.cgi?id=337462</a>\n\t\t\t\t\t<br>CVE-2006-3811\n\t\t\t\t</li>\n\t\t\t</ul>\n\n\t\t\t[MFSA 2016-85 until (present)]\n\t\t\t<section class=\"cve\">\n\t\t\t\t<h4 id=\"CVE-2018-12359\" class=\"level-heading\">\n\t\t\t\t\t<a href=\"#CVE-2018-12359\"><span class=\"anchor\">#</span>CVE-2018-12359: Buffer overflow using computed size of canvas element</a>\n\t\t\t\t</h4>\n\t\t\t\t<dl class=\"summary\">\n\t\t\t\t\t<dt>Reporter</dt>\n\t\t\t\t\t<dd>Nils</dd>\n\t\t\t\t\t<dt>Impact</dt>\n\t\t\t\t\t<dd><span class=\"level critical\">critical</span></dd>\n\t\t\t\t</dl>\n\t\t\t\t<h5>Description</h5>\n\t\t\t\t<p>A buffer overflow can occur when rendering canvas content while adjusting the height and width of the <code><canvas></code> element dynamically, causing data to be written outside of the currently computed boundaries. 
This results in a potentially exploitable crash.</p>\n\t\t\t\t<h5>References</h5>\n\t\t\t\t<ul>\n\t\t\t\t\t<li><a href=\"https://bugzilla.mozilla.org/show_bug.cgi?id=1459162\">Bug 1459162</a></li>\n\t\t\t\t</ul>\n\t\t\t</section>\n\n\t\t\t<section class=\"cve\">\n\t\t\t\t[...]\n\t\t\t</section>\t\t\t\t\t\t\t\t\n\t\t\t\"\"\"\n\n\t\t\t# Get the basic information for all MFSA layout versions.\n\t\t\tdl_summary = mfsa_soup.find('dl', class_='summary')\n\t\t\tif dl_summary is not None:\n\n\t\t\t\tdt_list = dl_summary.find_all('dt')\n\t\t\t\tdd_list = dl_summary.find_all('dd')\n\t\t\t\tfor dt, dd in zip(dt_list, dd_list):\n\n\t\t\t\t\tkey = dt.get_text(strip=True)\n\t\t\t\t\tvalue = dd.get_text(strip=True)\n\n\t\t\t\t\t# Change the format of specific fields so they're consistent with the rest of the CSV file.\n\t\t\t\t\tif key == 'Announced':\n\t\t\t\t\t\tvalue = change_datetime_string_format(value, '%B %d, %Y', '%Y-%m-%d', 'en_US.UTF-8')\n\t\t\t\t\telif key == 'Impact':\n\t\t\t\t\t\tvalue = value.title()\n\t\t\t\t\telif key == 'Products':\n\t\t\t\t\t\tvalue = [product.strip() for product in value.split(',')]\n\t\t\t\t\telif key == 'Fixed in':\n\t\t\t\t\t\tvalue = [li.get_text(strip=True) for li in dd.find_all('li')]\n\t\t\t\t\t\n\t\t\t\t\tkey = key.title()\n\t\t\t\t\tmfsa_info[key] = value\n\t\t\telse:\n\t\t\t\tlog.warning(f'No summary description list found for {mfsa_id}.')\n\n\t\t\t# Get the CVE information for all MFSA layout versions.\n\t\t\tcve_list = []\n\n\t\t\t# --> For MFSA 2005-01 until MFSA 2016-84.\n\t\t\th3_list = mfsa_soup.find_all('h3')\n\t\t\tfor h3 in h3_list:\n\n\t\t\t\th3_text = h3.get_text(strip=True)\n\t\t\t\tif h3_text == 'References':\n\n\t\t\t\t\tfor li in h3.find_all_next('li'):\n\t\t\t\t\t\t\n\t\t\t\t\t\tli_text = li.get_text(strip=True)\n\t\t\t\t\t\tmatch = ScrapingRegex.CVE.search(li_text)\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tcve_list.append(match.group(1))\n\n\t\t\t# --> For MFSA 2005-01 until the latest page.\n\t\t\tsection_list = mfsa_soup.find_all('section', class_='cve')\n\t\t\tfor section in section_list:\n\t\t\t\th4_cve = section.find('h4', id=ScrapingRegex.CVE)\n\t\t\t\tif h4_cve is not None:\n\t\t\t\t\tcve_list.append(h4_cve['id'])\n\n\t\t\tif cve_list:\n\t\t\t\tmfsa_info['CVEs'] = cve_list\n\n\t\t\tcve.advisory_info[mfsa_id] = mfsa_info\n\n\tdef scrape_additional_information_from_version_control(self, cve: Cve):\n\t\tfor id in cve.bugzilla_ids:\n\t\t\t# E.g. \" Bug 945192 - Followup to support Older SDKs in loaddlls.cpp. r=bbondy a=Sylvestre\"\n\t\t\tregex_id = re.escape(id)\n\t\t\tgrep_pattern = fr'^Bug \\b{regex_id}\\b'\n\t\t\thashes = self.find_git_commit_hashes_from_pattern(grep_pattern)\n\t\t\tcve.git_commit_hashes.extend(hashes)\n\n####################################################################################################\n\nclass XenProject(Project):\n\t\"\"\" Represents the Xen project. 
\"\"\"\n\n\tXEN_SCRAPING_MANAGER: ScrapingManager = ScrapingManager('https://xenbits.xen.org')\n\n\tdef __init__(self, project_name: str, project_info: dict):\n\t\tsuper().__init__(project_name, project_info)\n\n\tdef scrape_additional_information_from_security_advisories(self, cve: Cve):\n\n\t\t# Download and extract information from any referenced Xen Security Advisories (XSA) pages.\n\t\tfor xsa_full_id, xsa_url in zip(cve.advisory_ids, cve.advisory_urls):\n\t\t\t\n\t\t\txsa_info = {}\n\t\t\txsa_id = xsa_full_id.rsplit('-')[-1]\n\t\t\tlog.info(f'Scraping additional information from advisory page {xsa_full_id}: \"{xsa_url}\"...')\n\t\t\t\n\t\t\txsa_response = XenProject.XEN_SCRAPING_MANAGER.download_page(xsa_url)\n\t\t\tif xsa_response is not None:\n\n\t\t\t\txsa_soup = bs4.BeautifulSoup(xsa_response.text, 'html.parser')\n\n\t\t\t\t\"\"\"\n\t\t\t\t<table>\n\t\t\t\t\t<tbody>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>Advisory</th>\n\t\t\t\t\t\t\t<td><a href=\"advisory-55.html\">XSA-55</a></td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>Public release</th>\n\t\t\t\t\t\t\t<td>2013-06-03 16:18</td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>Updated</th>\n\t\t\t\t\t\t\t<td>2013-06-20 10:26</td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>Version</th>\n\t\t\t\t\t\t\t<td>5</td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>CVE(s)</th>\n\t\t\t\t\t\t\t<td><a href=\"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2194\">CVE-2013-2194</a> <a href=\"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2195\">CVE-2013-2195</a> <a href=\"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2196\">CVE-2013-2196</a></td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<th>Title</th>\n\t\t\t\t\t\t\t<td>Multiple vulnerabilities in libelf PV kernel handling</td>\n\t\t\t\t\t\t</tr>\n\t\t\t\t\t</tbody>\n\t\t\t\t</table>\n\t\t\t\t\"\"\"\n\n\t\t\t\txsa_info_table = xsa_soup.find('table')\n\t\t\t\tif xsa_info_table is not None:\n\n\t\t\t\t\txsa_info_th = xsa_info_table.find_all('th')\n\t\t\t\t\txsa_info_td = xsa_info_table.find_all('td')\n\t\t\t\t\tfor th, td in zip(xsa_info_th, xsa_info_td):\n\n\t\t\t\t\t\tkey = th.get_text(strip=True)\n\t\t\t\t\t\tvalue = td.get_text(strip=True)\n\n\t\t\t\t\t\t# Change the format of specific fields so they're consistent with the rest of the CSV file.\n\t\t\t\t\t\tif key == 'Advisory':\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif key == 'CVE(s)':\n\t\t\t\t\t\t\tkey = 'CVEs'\n\t\t\t\t\t\t\tvalue = [cve_a.get_text(strip=True) for cve_a in td.find_all('a')]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkey = key.title()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\txsa_info[key] = value\n\n\t\t\t\t\tcve.advisory_info[xsa_full_id] = xsa_info\n\n\t\t\t\telse:\n\t\t\t\t\tlog.warning(f'No information table found for {xsa_full_id}.')\n\n\t\t\telse:\n\t\t\t\tlog.error(f'Could not download the page for {xsa_full_id}.')\n\n\t\t\t##################################################\n\n\t\t\t# Download an additional page that contains this XSA's Git commit hashes.\n\t\t\txsa_meta_url = f'https://xenbits.xen.org/xsa/xsa{xsa_id}.meta'\n\t\t\tlog.info(f'Scraping commit hashes from the metadata file related to {xsa_full_id}: \"{xsa_meta_url}\"...')\n\t\t\t\n\t\t\txsa_meta_response = XenProject.XEN_SCRAPING_MANAGER.download_page(xsa_meta_url)\n\t\t\tif xsa_meta_response is not None:\n\n\t\t\t\t\"\"\"\n\t\t\t\t\"Recipes\":\n\t\t\t\t{\n\t\t\t\t\t\"4.5\":\n\t\t\t\t\t{\n\t\t\t\t\t\t\"XenVersion\": 
\"4.5\",\n\t\t\t\t\t\t\"Recipes\":\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"xen\":\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"StableRef\": \"83724d9f3ae21a3b96362742e2f052b19d9f559a\",\n\t\t\t\t\t\t\t\t\"Prereqs\": [],\n\t\t\t\t\t\t\t\t\"Patches\": [\"xsa237-4.5/*\"]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\n\t\t\t\t\t[...]\n\t\t\t\t}\n\t\t\t\t\"\"\"\n\n\t\t\t\ttry:\n\t\t\t\t\txsa_metadata = json.loads(xsa_meta_response.text)\n\t\t\t\texcept json.decoder.JSONDecodeError as error:\n\t\t\t\t\txsa_metadata = None\n\t\t\t\t\tlog.error(f'Failed to parse the JSON metadata for {xsa_full_id} with the error: {repr(error)}')\n\n\t\t\t\tdef nested_get(dictionary: dict, key_list: list):\n\t\t\t\t\t\"\"\" Tries to get a value from variously nested dictionaries by following a sequence of keys in a given order.\n\t\t\t\t\tIf any intermediate dictionary doesn't exist, this method returns None. \"\"\"\n\n\t\t\t\t\tvalue = None\n\t\t\t\t\tfor key in key_list:\n\t\t\t\t\t\tvalue = dictionary.get(key)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif value is None:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif isinstance(value, dict):\n\t\t\t\t\t\t\tdictionary = value\n\n\t\t\t\t\treturn value\n\n\t\t\t\tif xsa_metadata is not None:\n\n\t\t\t\t\t# Find every commit hash in the 'Recipes' dictionary.\n\t\t\t\t\tfor reciple_key, recipe_value in xsa_metadata['Recipes'].items():\n\n\t\t\t\t\t\tcommit_hash = nested_get(recipe_value, ['Recipes', 'xen', 'StableRef'])\n\n\t\t\t\t\t\tif commit_hash is not None:\n\t\t\t\t\t\t\tcve.git_commit_hashes.append(commit_hash)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlog.error(f'Could not find any commit hash for {xsa_full_id} in the \"{reciple_key}\" branch.')\n\n\t\t\telse:\n\t\t\t\tlog.error(f'Could not download the metadata file for {xsa_full_id}.')\n\n\tdef scrape_additional_information_from_version_control(self, cve: Cve):\n\t\tfor id in cve.advisory_ids:\n\t\t\t# E.g. \"This is CVE-2015-4164 / XSA-136.\"\n\t\t\t# E.g. \"This is XSA-136 / CVE-2015-4164.\"\n\t\t\t# E.g. \"This is XSA-215.\"\n\t\t\tregex_cve = re.escape(str(cve))\n\t\t\tregex_id = re.escape(id)\n\t\t\tgrep_pattern = fr'This is.*\\b({regex_cve}|{regex_id})\\b'\n\t\t\thashes = self.find_git_commit_hashes_from_pattern(grep_pattern)\n\t\t\tcve.git_commit_hashes.extend(hashes)\n\n####################################################################################################\n\nclass ApacheProject(Project):\n\t\"\"\" Represents the Apache HTTP Server project. \"\"\"\n\n\tdef __init__(self, project_name: str, project_info: dict):\n\t\tsuper().__init__(project_name, project_info)\n\n\tdef scrape_additional_information_from_version_control(self, cve: Cve):\n\t\t# E.g. \"SECURITY: CVE-2017-3167 (cve.mitre.org)\"\n\t\t# E.g. \"Merge r1642499 from trunk: *) SECURITY: CVE-2014-8109 (cve.mitre.org)\"\n\t\tregex_cve = re.escape(str(cve))\n\t\tgrep_pattern = fr'SECURITY:.*\\b{regex_cve}\\b'\n\t\thashes = self.find_git_commit_hashes_from_pattern(grep_pattern)\n\t\tcve.git_commit_hashes.extend(hashes)\n\n####################################################################################################\n\nclass GlibcProject(Project):\n\t\"\"\" Represents the GNU C Library (glibc) project. \"\"\"\n\n\tdef __init__(self, project_name: str, project_info: dict):\n\t\tsuper().__init__(project_name, project_info)\n\n\tdef scrape_additional_information_from_version_control(self, cve: Cve):\n\t\tfor id in cve.bugzilla_ids:\n\t\t\t# E.g. \"Don't ignore too long lines in nss_files (BZ #17079)\"\n\t\t\t# E.g. 
\"Fix integer overflows in internal memalign and malloc [BZ #22343] [BZ #22774]\"\n\t\t\t# E.g. \"Fix nan functions handling of payload strings (bug 16961, bug 16962).\"\n\t\t\t# E.g. Don't ignore too long lines in nss_files (BZ17079, CVE-2015-5277) Tested:\n\t\t\tregex_id = re.escape(id)\n\t\t\tgrep_pattern = fr'((BZ|Bug).*\\b{regex_id}\\b)|(\\bBZ{regex_id}\\b)'\n\t\t\thashes = self.find_git_commit_hashes_from_pattern(grep_pattern)\n\t\t\tcve.git_commit_hashes.extend(hashes)\n\nif __name__ == '__main__':\n\tpass"
] | [
[
"pandas.isna",
"pandas.read_csv",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
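The row above lists pandas.read_csv, pandas.to_numeric and pandas.isna as the pandas calls used by the timeline-checkout code. Below is a minimal, self-contained sketch of those three calls on a toy CSV; the CSV text and column names are illustrative stand-ins, not values taken from the dataset.

import io
import numpy as np
import pandas as pd

csv_text = (
    "File Path,Topological Index,Affected,Vulnerable,Commit Hash,CVEs\n"
    "src/a.c,1,Yes,No,abc123,CVE-2020-0001\n"
    "src/b.c,2,No,No,def456,\n"
)

# Read everything as strings, as the timeline code in the row above does.
timeline = pd.read_csv(io.StringIO(csv_text), dtype=str)
timeline = timeline.replace({np.nan: None})

# Convert the topological index back to numbers for ordering comparisons.
timeline["Topological Index"] = pd.to_numeric(timeline["Topological Index"])

# pandas.isna flags the missing CVEs cell of the second row.
print(timeline[pd.isna(timeline["CVEs"])])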
jefpadfi/pdfkivygui | [
"2fc529065f723fe917065a7b4b5e3b6293dd9e1e"
] | [
"pdfkivygui/garden/matplotlib/backend_kivyagg.py"
] | [
"'''\nBackend KivyAgg\n=====\n\n.. image:: images/backend_agg_example.jpg\n :align: right\n\nThe :class:`FigureCanvasKivyAgg` widget is used to create a matplotlib graph.\nThe render will cover the whole are of the widget unless something different is\nspecified using a :meth:`blit`.\nWhen you are creating a FigureCanvasKivyAgg widget, you must at least\ninitialize it with a matplotlib figure object. This class uses agg to get a\nstatic image of the plot and then the image is render using a\n:class:`~kivy.graphics.texture.Texture`. See backend_kivy documentation for\nmore information since both backends can be used in the exact same way.\n\n\nExamples\n--------\n\nExample of a simple Hello world matplotlib App::\n\n fig, ax = plt.subplots()\n ax.text(0.6, 0.5, \"hello\", size=50, rotation=30.,\n ha=\"center\", va=\"center\",\n bbox=dict(boxstyle=\"round\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n ax.text(0.5, 0.4, \"world\", size=50, rotation=-30.,\n ha=\"right\", va=\"top\",\n bbox=dict(boxstyle=\"square\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n canvas = FigureCanvasKivyAgg(figure=fig)\n\nThe object canvas can be added as a widget into the kivy tree widget.\nIf a change is done on the figure an update can be performed using\n:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.draw`.::\n\n # update graph\n canvas.draw()\n\nThe plot can be exported to png with\n:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.print_png`, as an\nargument receives the `filename`.::\n\n # export to png\n canvas.print_png(\"my_plot.png\")\n\n\nBackend KivyAgg Events\n-----------------------\n\nThe events available are the same events available from Backend Kivy.::\n\n def my_callback(event):\n print('press released from test', event.x, event.y, event.button)\n\n fig.canvas.mpl_connect('mpl_event', my_callback)\n\n'''\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\n__all__ = ('FigureCanvasKivyAgg')\n\nfrom matplotlib.backend_bases import register_backend, ShowBase\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\n\ntry:\n import kivy\nexcept ImportError:\n raise ImportError(\"this backend requires Kivy to be installed.\")\n\nfrom kivy.app import App\nfrom kivy.graphics.texture import Texture\nfrom kivy.graphics import Rectangle, Color\nfrom kivy.properties import ObjectProperty\nfrom kivy.base import EventLoop\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.core.image import Image\nfrom pdfkivygui.garden.matplotlib.backend_kivy import FigureCanvasKivy, FigureManagerKivy, NavigationToolbar2Kivy\n\nregister_backend('png', 'backend_kivyagg', 'PNG File Format')\n\ntoolbar = None\nmy_canvas = None\n\n\ndef new_figure_manager(num, *args, **kwargs):\n \"\"\" Create a new figure manager instance for the figure given. \"\"\"\n # if a main-level app must be created, this (and\n # new_figure_manager_given_figure) is the usual place to\n # do it -- see backend_wx, backend_wxagg and backend_tkagg for\n # examples. Not all GUIs require explicit instantiation of a\n # main-level app (egg backend_gtk, backend_gtkagg) for pylab\n FigureClass = kwargs.pop('FigureClass', Figure)\n thisFig = FigureClass(*args, **kwargs)\n return new_figure_manager_given_figure(num, thisFig)\n\n\ndef new_figure_manager_given_figure(num, figure):\n \"\"\" Create a new figure manager instance and a new figure canvas instance\n for the given figure. 
\"\"\"\n canvas = FigureCanvasKivyAgg(figure)\n manager = FigureManagerKivy(canvas, num)\n global my_canvas\n global toolbar\n toolbar = manager.toolbar.actionbar if manager.toolbar else None\n my_canvas = canvas\n return manager\n\n\nclass MPLKivyApp(App):\n \"\"\" Creates the App initializing a FloatLayout with a figure and toolbar\n widget. \"\"\"\n figure = ObjectProperty(None)\n toolbar = ObjectProperty(None)\n\n def build(self):\n EventLoop.ensure_window()\n layout = FloatLayout()\n if self.figure:\n self.figure.size_hint_y = 0.9\n layout.add_widget(self.figure)\n if self.toolbar:\n self.toolbar.size_hint_y = 0.1\n layout.add_widget(self.toolbar)\n return layout\n\n\nclass Show(ShowBase):\n \"\"\" mainloop needs to be overwritten to define the show() behavior for kivy\n framework. \"\"\"\n\n def mainloop(self):\n global my_canvas\n global toolbar\n app = App.get_running_app()\n if app is None:\n app = MPLKivyApp(figure=my_canvas, toolbar=toolbar)\n app.run()\n\n\nshow = Show()\n\n\nclass FigureCanvasKivyAgg(FigureCanvasKivy, FigureCanvasAgg):\n '''FigureCanvasKivyAgg class. See module documentation for more\n information.\n '''\n\n def __init__(self, figure, **kwargs):\n self.figure = figure\n self.bind(size=self._on_size_changed)\n super(FigureCanvasKivyAgg, self).__init__(figure=self.figure, **kwargs)\n self.img_texture = None\n self.img_rect = None\n self.blit()\n\n def draw(self):\n '''\n Draw the figure using the agg renderer\n '''\n self.canvas.clear()\n FigureCanvasAgg.draw(self)\n if self.blitbox is None:\n l, b, w, h = self.figure.bbox.bounds\n w, h = int(w), int(h)\n buf_rgba = self.get_renderer().buffer_rgba()\n else:\n bbox = self.blitbox\n l, b, r, t = bbox.extents\n w = int(r) - int(l)\n h = int(t) - int(b)\n t = int(b) + h\n reg = self.copy_from_bbox(bbox)\n buf_rgba = reg.to_string()\n texture = Texture.create(size=(w, h))\n texture.flip_vertical()\n color = self.figure.get_facecolor()\n with self.canvas:\n Color(*color)\n Rectangle(pos=self.pos, size=(w, h))\n Color(1.0, 1.0, 1.0, 1.0)\n self.img_rect = Rectangle(texture=texture, pos=self.pos,\n size=(w, h))\n texture.blit_buffer(bytes(buf_rgba), colorfmt='rgba', bufferfmt='ubyte')\n self.img_texture = texture\n\n filetypes = FigureCanvasKivy.filetypes.copy()\n filetypes['png'] = 'Portable Network Graphics'\n\n def _on_pos_changed(self, *args):\n if self.img_rect is not None:\n self.img_rect.pos = self.pos\n\n def _print_image(self, filename, *args, **kwargs):\n '''Write out format png. The image is saved with the filename given.\n '''\n l, b, w, h = self.figure.bbox.bounds\n img = None\n if self.img_texture is None:\n texture = Texture.create(size=(w, h))\n texture.blit_buffer(bytes(self.get_renderer().buffer_rgba()),\n colorfmt='rgba', bufferfmt='ubyte')\n texture.flip_vertical()\n img = Image(texture)\n else:\n img = Image(self.img_texture)\n img.save(filename)\n\n\n''' Standard names that backend.__init__ is expecting '''\nFigureCanvas = FigureCanvasKivyAgg\nFigureManager = FigureManagerKivy\nNavigationToolbar = NavigationToolbar2Kivy\nshow = show\n"
] | [
[
"matplotlib.backend_bases.register_backend",
"matplotlib.backends.backend_agg.FigureCanvasAgg.draw"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
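The row above records a Kivy Agg backend file whose listed matplotlib calls are register_backend and FigureCanvasAgg.draw. The following is a minimal sketch, with no Kivy involved, of the offscreen Agg rendering step that such a backend builds on; the plotted data is illustrative, and buffer_rgba() returning a memoryview assumes a reasonably recent Matplotlib.

from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

fig = Figure(figsize=(4, 3), dpi=100)
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(111)
ax.plot([0, 1, 2], [0, 1, 0])

canvas.draw()                           # render the figure with the Agg rasterizer
width, height = canvas.get_width_height()
rgba = canvas.buffer_rgba()             # RGBA pixel buffer of the rendered figure

print(width, height, len(bytes(rgba)))  # width * height * 4 bytes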
MaybeShewill-CV/image-classification-tensorflow | [
"1587fa7acfaba6d33fb07c2c25248570c5d41927"
] | [
"tools/freeze_model.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/3/26 下午4:10\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/image-classification-tensorflow\n# @File : freeze_model.py\n# @IDE: PyCharm\n\"\"\"\nfreeze ckpt model file into pb model\n\"\"\"\nimport os.path as ops\nimport argparse\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\n\nimport cls_model_zoo\nfrom local_utils import config_utils\n\n\ndef init_args():\n \"\"\"\n\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--net', type=str, help='The network you used', default='xception')\n parser.add_argument('--dataset', type=str, help='The dataset', default='ilsvrc_2012')\n parser.add_argument('--weights_path', type=str, help='The ckpt weights file path')\n parser.add_argument('--pb_save_path', type=str, help='The converted pb file save path')\n\n return parser.parse_args()\n\n\ndef stats_graph(graph):\n \"\"\"\n\n :param graph:\n :return:\n \"\"\"\n flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())\n params = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())\n print('FLOPs: {}; Trainable params: {}'.format(flops.total_float_ops, params.total_parameters))\n\n return\n\n\ndef freeze_model():\n \"\"\"\n\n :return:\n \"\"\"\n args = init_args()\n\n net_name = args.net\n dataset_name = args.dataset\n config_file_name = '{:s}_{:s}.yaml'.format(dataset_name, net_name)\n config_file_path = ops.join('./config', config_file_name)\n if not ops.exists(config_file_path):\n raise ValueError('Config file path: {:s} not exist'.format(config_file_path))\n\n cfg = config_utils.get_config(config_file_path=config_file_path)\n net = cls_model_zoo.get_model(cfg=cfg, phase='test')\n\n # construct compute graph\n input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 224, 224, 3], name='input_tensor')\n logits = net.inference(input_tensor=input_tensor, name=cfg.MODEL.MODEL_NAME, reuse=False)\n prob_score = tf.nn.softmax(logits, name='output_tensor')\n\n # define moving average version of the learned variables for eval\n with tf.variable_scope(name_or_scope='moving_avg'):\n variable_averages = tf.train.ExponentialMovingAverage(\n cfg.SOLVER.MOVING_AVE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n\n # define saver\n saver = tf.train.Saver(variables_to_restore)\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.per_process_gpu_memory_fraction = 0.85\n sess_config.gpu_options.allow_growth = False\n sess_config.gpu_options.allocator_type = 'BFC'\n\n sess = tf.Session(config=sess_config)\n\n with sess.as_default():\n saver.restore(sess, args.weights_path) # variables\n\n stats_graph(sess.graph)\n\n # generate protobuf\n converted_graph_def = graph_util.convert_variables_to_constants(\n sess,\n input_graph_def=sess.graph.as_graph_def(),\n output_node_names=[\"output_tensor\"]\n )\n\n with tf.gfile.GFile(args.pb_save_path, \"wb\") as f:\n f.write(converted_graph_def.SerializeToString())\n\n print('Convert completed!!!')\n\n return\n\n\nif __name__ == '__main__':\n \"\"\"\n main func\n \"\"\"\n freeze_model()\n"
] | [
[
"tensorflow.nn.softmax",
"tensorflow.gfile.GFile",
"tensorflow.profiler.ProfileOptionBuilder.trainable_variables_parameter",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.variable_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.profiler.ProfileOptionBuilder.float_operation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
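The row above is a TensorFlow 1.x model-freezing script (the version column hints at TF 1.10). Below is a minimal sketch, assuming a TensorFlow 1.x installation, of how a frozen .pb produced by such a script could be loaded back and run; the file path is hypothetical, while the tensor names 'input_tensor:0' and 'output_tensor:0' mirror the names defined in that script.

import numpy as np
import tensorflow as tf

pb_path = 'xception_ilsvrc_2012.pb'  # hypothetical output of --pb_save_path

# Parse the serialized GraphDef from disk.
graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_path, 'rb') as f:
    graph_def.ParseFromString(f.read())

# Import the frozen graph into a fresh Graph object.
graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    input_tensor = graph.get_tensor_by_name('input_tensor:0')
    output_tensor = graph.get_tensor_by_name('output_tensor:0')
    dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
    scores = sess.run(output_tensor, feed_dict={input_tensor: dummy})
    print(scores.shape)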
mortarsynth/Audio-Signal-Processing-for-Music-Applications | [
"4674d9e15885401d69d4a468e3ad756ea2600523",
"4674d9e15885401d69d4a468e3ad756ea2600523"
] | [
"A7/test3.py",
"A3/A3Part4.py"
] | [
"import numpy as np\nfrom scipy.signal import get_window\nfrom scipy.fftpack import fft\nimport sys, os, math\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))\nimport dftModel as DFT\nimport utilFunctions as UF\nimport harmonicModel as HM\nimport matplotlib.pyplot as plt\n\n## hprModel Demo\nfilename = '260559__roganderrick__liquor-bottle-pour-01.wav'\n#filename ='speech-female.wav'\n(fs, x) = UF.wavread(filename)\npin = 40000\nM = 4001\nN = 4096\nt = -100\nminf0 = 50\nmaxf0 = 300\nf0et = 5\nnH = 60\nharmDevSlope = .001\n\nw = get_window('blackman', M)\nhM1 = int(math.floor((M+1)/2))\nhM2 = int(math.floor(M/2))\n\nx1 = x[pin-hM1:pin+hM2]\nmX, pX = DFT.dftAnal(x1, w, N)\nploc = UF.peakDetection(mX, t)\niploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)\nipfreq = fs*iploc/N\nf0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, 0)\nhfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, [], fs, harmDevSlope)\n\nNs = 512\nhNs = 256\nYh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)\n\nwr = get_window('blackmanharris', Ns)\nxw2 = x[pin-hNs-1:pin+hNs-1] * wr / sum(wr)\nfftbuffer = np.zeros(Ns)\nfftbuffer[:hNs] = xw2[hNs:]\nfftbuffer[hNs:] = xw2[:hNs]\nX2 = fft(fftbuffer)\nXr = X2 - Yh\n\n# plt.plot(x1)\n# plt.plot(mX)\n# plt.plot(abs(Yh))\n#plt.plot(20*np.log10(abs(Yh[:70])))\n#plt.plot(20*np.log10(abs(X2[:70])))\n#plt.plot(20*np.log10(abs(Xr[:70])))\n#plt.show()\n",
"import sys\nsys.path.append('../../software/models/')\nfrom dftModel import dftAnal, dftSynth\nfrom scipy.signal import get_window\nimport numpy as np\n\"\"\"\nA3-Part-4: Suppressing frequency components using DFT model\n\nGiven a frame of the signal, write a function that uses the dftModel functions to suppress all the \nfrequency components <= 70Hz in the signal and returns the output of the dftModel \nwith and without filtering. \n\nYou will use the DFT model to implement a very basic form of filtering to suppress frequency components. \nWhen working close to mains power lines, there is a 50/60 Hz hum that can get introduced into the \naudio signal. You will try to remove that using a basic DFT model based filter. You will work on just \none frame of a synthetic audio signal to see the effect of filtering. \n\nYou can use the functions dftAnal and dftSynth provided by the dftModel file of sms-tools. Use dftAnal \nto obtain the magnitude spectrum (in dB) and phase spectrum of the audio signal. Set the values of \nthe magnitude spectrum that correspond to frequencies <= 70 Hz to -120dB (there may not be a bin \ncorresponding exactly to 70 Hz, choose the nearest bin of equal or higher frequency, e.g., using np.ceil()).\nIf you have doubts converting from frequency (Hz) to bins, you can review the beginning of theory lecture 2T1.\n\nUse dftSynth to synthesize the filtered output signal and return the output. The function should also return the \noutput of dftSynth without any filtering (without altering the magnitude spectrum in any way). \nYou will use a hamming window to smooth the signal. Hence, do not forget to scale the output signals \nby the sum of the window values (as done in sms-tools/software/models_interface/dftModel_function.py). \nTo understand the effect of filtering, you can plot both the filtered output and non-filtered output \nof the dftModel. \n\nPlease note that this question is just for illustrative purposes and filtering is not usually done \nthis way - such sharp cutoffs introduce artifacts in the output. \n\nThe input is a M length input signal x that contains undesired frequencies below 70 Hz, sampling \nfrequency fs and the FFT size N. The output is a tuple with two elements (y, yfilt), where y is the \noutput of dftModel with the unaltered original signal and yfilt is the filtered output of the dftModel.\n\nCaveat: In python (as well as numpy) variable assignment is by reference. if you assign B = A, and \nmodify B, the value of A also gets modified. If you do not want this to happen, consider using B = A.copy(). \nThis creates a copy of A and assigns it to B, and hence, you can modify B without affecting A.\n\nTest case 1: For an input signal with 40 Hz, 100 Hz, 200 Hz, 1000 Hz components, yfilt will only contain\n100 Hz, 200 Hz and 1000 Hz components. \n\nTest case 2: For an input signal with 23 Hz, 36 Hz, 230 Hz, 900 Hz, 2300 Hz components, yfilt will only contain\n230 Hz, 900 Hz and 2300 Hz components. \n\"\"\"\ndef suppressFreqDFTmodel(x, fs, N):\n \"\"\"\n Inputs:\n x (numpy array) = input signal of length M (odd)\n fs (float) = sampling frequency (Hz)\n N (positive integer) = FFT size\n Outputs:\n The function should return a tuple (y, yfilt)\n y (numpy array) = Output of the dftSynth() without filtering (M samples long)\n yfilt (numpy array) = Output of the dftSynth() with filtering (M samples long)\n The first few lines of the code have been written for you, do not modify it. 
\n \"\"\"\n M = len(x)\n w = get_window('hamming', M)\n outputScaleFactor = sum(w)\n \n ## Your code here\n mX, pX = dftAnal(x,w,N)\n mX_filt = mX.copy()\n thresh_sample = int(np.ceil(70 / fs * N))\n mX_filt[:thresh_sample+1] = -120.0\n signal_reconst = dftSynth(mX, pX, w.size) * outputScaleFactor\n signal_reconst_filt = dftSynth(mX_filt, pX, w.size) * outputScaleFactor\n return (signal_reconst, signal_reconst_filt)\n"
] | [
[
"scipy.fftpack.fft",
"numpy.zeros",
"scipy.signal.get_window"
],
[
"numpy.ceil",
"scipy.signal.get_window"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
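The row above pairs two sms-tools exercises whose listed calls are scipy.signal.get_window, scipy.fftpack.fft and numpy.ceil. The sketch below reproduces the core steps on a synthetic frame: window, zero-pad, take the DFT, and locate the nearest bin at or above 70 Hz; the test signal and sizes are illustrative.

import numpy as np
from scipy.signal import get_window
from scipy.fftpack import fft

fs = 10000.0          # sampling rate in Hz
M = 255               # odd frame length
N = 1024              # FFT size

n = np.arange(M)
x = np.cos(2 * np.pi * 40.0 * n / fs) + np.cos(2 * np.pi * 200.0 * n / fs)

w = get_window('hamming', M)
frame = np.zeros(N)
frame[:M] = x * w     # zero-pad the windowed frame to the FFT size
X = fft(frame)

# Nearest bin of equal or higher frequency than 70 Hz, as in A3Part4.
cutoff_bin = int(np.ceil(70.0 / fs * N))
print(cutoff_bin, np.abs(X[cutoff_bin]))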
greaseuniverse/greaseterminator | [
"ed00a63a5306d9020a8f9941815f6e85cd01c83c"
] | [
"interventions/vision.py"
] | [
"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\nimport sched, time\n\nfrom text.text_filter import *\nfrom visual.inpainting import *\nfrom visual.template_matching import *\nfrom visual.darken import *\nfrom visual.video_obscenity import *\nfrom PIL import Image\n\n\ndef run(image_in = '../relay/img/screen.png', image_prev = '../relay/img/screen_prev.png', image_tmpout = '../relay/img/screen.png', image_out = '../relay/img/square_img.png'):\n \n dark_state = {\n 'current_level' : 0,\n 'min_level' : 0,\n 'max_level' : 255,\n 'iterator' : 50,\n 'similarity_list' : [],\n 'similarity_count' : 10,\n }\n trigger = False\n \n while True:\n\n # settings\n hate_speech_text_censor = True\n oneshot_bbox = True\n visual = False\n scroll_darken = True\n video_obscenity_censor = False\n\n # Screen capture\n os.system(\"adb exec-out screencap -p > \"+str(image_in))\n time.sleep(100/1000)\n # remove status bar & nav bar\n img = cv2.cvtColor(cv2.imread(image_in, cv2.IMREAD_COLOR), cv2.COLOR_RGB2RGBA)\n# img_prev = cv2.cvtColor(cv2.imread(image_in, cv2.IMREAD_COLOR), cv2.COLOR_RGB2RGBA)\n height, width, channels = img.shape\n crop_img = img[int(height*0.04):int(height*0.925), ]\n cv2.imwrite(image_tmpout, crop_img)\n\n # transparency\n img = cv2.cvtColor(cv2.imread(image_in, cv2.IMREAD_COLOR), cv2.COLOR_RGB2RGBA)\n height, width, channels = img.shape\n img_prev = img.copy()\n img = opacity(img, 255)\n\n # mask layout\n mask= np.zeros((height, width, 3), np.uint8)\n mask = cv2.cvtColor(mask, cv2.COLOR_RGB2RGBA)\n mask = opacity(mask, 0)\n if hate_speech_text_censor == True:\n band_mask, reshaped_boxes = content_filtering(image_in)\n band_mask = cv2.cvtColor(band_mask, cv2.COLOR_RGB2RGBA)\n band_mask = opacity(band_mask, 128)\n print(reshaped_boxes)\n if visual == True:\n inpainted_mask = oneshot_inpainting('visual/masks/', image_in, 0.8)\n inpainted_mask = cv2.cvtColor(inpainted_mask, cv2.COLOR_RGB2RGBA)\n inpainted_mask = opacity(inpainted_mask, 128)\n \n\n # generates square image for Android\n x = height if height > width else width\n y = height if height > width else width\n square= np.zeros((height,height,3), np.uint8)\n square = cv2.cvtColor(square, cv2.COLOR_RGB2RGBA)\n square = opacity(square, 0)\n if hate_speech_text_censor == True:\n square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = band_mask\n square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = cv2.GaussianBlur(square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)], (23, 23), 30)\n# for obs_box in reshaped_boxes:\n# square[int((y-height)/2+obs_box[1]):int((y-height)/2+obs_box[3]), int((x-width)/2+obs_box[0]):int((x-width)/2+obs_box[2]), 3] = 255\n if visual == True:\n square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = inpainted_mask\n if oneshot_bbox == True:\n detections = oneshot_templatematching(image_in, mask_dir = './visual/masks/')\n for detection in detections:\n margin_control = 0.2\n cv2.rectangle(\n square,\n (int(detection[\"TOP_LEFT_X\"]+height/4-height*margin_control), int(detection[\"TOP_LEFT_Y\"]-height*margin_control)),\n (int(detection[\"BOTTOM_RIGHT_X\"]+height/4+height*margin_control), int(detection[\"BOTTOM_RIGHT_Y\"]+height*margin_control)),\n detection[\"COLOR\"],\n 2,\n ) \n square[detection[\"TOP_LEFT_Y\"]:detection[\"BOTTOM_RIGHT_Y\"], int(detection[\"TOP_LEFT_X\"]+height/4):int(detection[\"BOTTOM_RIGHT_X\"]+height/4), 3] = 255\n\n if scroll_darken == True:\n if 
os.path.exists(image_prev) == False:\n cv2.imwrite(image_prev, img_prev)\n if os.path.exists(image_prev) == True:\n # Insert trigger conditions here -- time / scroll\n print(dark_state['similarity_list'])\n similarity = ret_similar(image_in, image_prev, size_threshold = 0.25)\n if len(dark_state['similarity_list']) < 10:\n dark_state['similarity_list'].append(similarity)\n if len(dark_state['similarity_list']) == 10:\n dark_state['similarity_list'] = dark_state['similarity_list'][1:]\n dark_state['similarity_list'].append(similarity)\n if set(dark_state['similarity_list']) == set([True]):\n trigger = True\n# if set(dark_state['similarity_list']) != set([True]):\n# dark_state['current_level'] = 0\n square, dark_state = darken(square, trigger, dark_state)\n cv2.imwrite(image_prev, img_prev)\n trigger = False\n\n if video_obscenity_censor == True:\n obscenity_mask, coords = image_censor(image_in)\n obscenity_mask = cv2.cvtColor(obscenity_mask, cv2.COLOR_RGB2RGBA)\n obscenity_mask = opacity(obscenity_mask, 128)\n square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = obscenity_mask\n for obs_box in coords:\n square[int((y-height)/2+obs_box[1]):int((y-height)/2+obs_box[3]), int((x-width)/2+obs_box[0]):int((x-width)/2+obs_box[2]), 3] = 255\n# square[obs_box[1]:obs_box[3], obs_box[0]:obs_box[2], 3] = 255\n cv2.GaussianBlur(square, (23, 23), 11)\n\n cv2.imwrite(image_out, square)\n \n\n# main\nrun(image_in = '../relay/img/screen.png', image_out = '../relay/img/square_img.png')"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
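The row above is a screen-overlay script whose only listed call is numpy.zeros, used there to allocate transparent RGBA masks that are then drawn on and blurred with OpenCV. A minimal sketch of that overlay pattern follows; it assumes opencv-python (cv2) is installed, and the canvas size and rectangle coordinates are made up for illustration.

import numpy as np
import cv2

height, width = 400, 300
overlay = np.zeros((height, width, 4), np.uint8)   # fully transparent RGBA canvas

# Mark an illustrative region as opaque.
x0, y0, x1, y1 = 50, 100, 250, 180
cv2.rectangle(overlay, (x0, y0), (x1, y1), (0, 0, 0, 255), thickness=-1)

# Blur only the marked region, as the screen-masking code in the row above does.
overlay[y0:y1, x0:x1] = cv2.GaussianBlur(overlay[y0:y1, x0:x1], (23, 23), 30)

cv2.imwrite('overlay.png', overlay)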
jcbird/apogee | [
"a4aac39fe52d1ca8ba8a790678e9b330f9462d49"
] | [
"apogee/tools/path.py"
] | [
"##################################################################################\n#\n# apogee.tools.path: return the path of various APOGEE data files\n#\n# This file depends on various environment variables that should be set:\n#\n# - SDSS_LOCAL_SAS_MIRROR: top-level directory with data\n# - RESULTS_VERS: APOGEE reduction version (e.g., v304 for DR10)\n# - APOGEE_APOKASC_REDUX: APOKASC catalog version\n#\n# contains:\n# \n# - allStarPath: the path of the allStar file\n# - allVisitPath: the path of the allStar file\n# - apogeeDesignPath: path of the apogeeDesign file\n# - apogeeFieldPath: path of the apogeeField file\n# - apogeeObjectPath: path of an apogeeObject file\n# - apogeePlatePlate: path of the apogeePlate file\n# - apokascPath: path of the APOKASC catalog\n# - distPath: path of the file that has APOGEE distances\n# - obslogPath: path of the observation log\n# - rcsamplePath: path of the red clump sample file\n# - apStarPath: path of a apStar file\n# - aspcapStarPath: path of a aspcapStar file\n# - apallPath: the path of the apall file (an early version of \n# allStar by JB, now deprecated)\n#\n##################################################################################\nimport os, os.path\nimport numpy\nimport warnings\n_APOGEE_DATA= os.getenv('SDSS_LOCAL_SAS_MIRROR')\nif _APOGEE_DATA is None:\n # Try old method\n _APOGEE_DATA= os.getenv('APOGEE_DATA')\n if _APOGEE_DATA is None:\n raise RuntimeError(\"SDSS_LOCAL_SAS_MIRROR environment variable needs to be set to use the 'apogee' module\")\n else:\n warnings.warn(\"APOGEE_DATA environment variable is deprecated in favor of SDSS_LOCAL_SAS_MIRROR; please update your environment\",DeprecationWarning)\n_APOGEE_REDUX= os.getenv('RESULTS_VERS')\nif _APOGEE_REDUX is None:\n _APOGEE_REDUX= os.getenv('APOGEE_REDUX')\n if _APOGEE_REDUX is None:\n raise RuntimeError(\"RESULTS_VERS environment variable needs to be set to use the 'apogee' module\")\n else:\n warnings.warn(\"APOGEE_REDUX environment variable is deprecated in favor of RESULTS_VERS; please update your environment\",DeprecationWarning)\n_APOGEE_ASPCAP_REDUX= os.getenv('APOGEE_ASPCAP_REDUX')\n_APOGEE_APOKASC_REDUX= os.getenv('APOGEE_APOKASC_REDUX')\n# Reductions\n_DR10REDUX='v304'\n_DR11REDUX='v402'\n_DR12REDUX='v603'\n_DR13REDUX='l30e.2'\n_CURRENTREDUX='current'\nif _APOGEE_REDUX is None:\n _APOGEE_REDUX= _DR12REDUX\nif _APOGEE_APOKASC_REDUX is None:\n _APOGEE_APOKASC_REDUX= 'v7.3'\nif _APOGEE_ASPCAP_REDUX is None: #deprecated\n _APOGEE_ASPCAP_REDUX= 'v0.4'\n_ASPCAP= True\n_CODEV= '1'\ndef apallPath(visit=False):\n \"\"\"\n NAME:\n apallPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n visit= if True, return the allVisit file, rather than the allStar file\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-05-30 - Edited for ASPCAP - Bovy (IAS)\n \"\"\"\n if _CODEV == '1':\n if _ASPCAP:\n return os.path.join(_APOGEE_DATA,\n 'apall-1d-'+_APOGEE_REDUX\n +'-aspcap-'+_APOGEE_ASPCAP_REDUX+'.fits')\n else:\n return os.path.join(_APOGEE_DATA,\n 'apall-'+_APOGEE_REDUX+'.fits')\n elif _CODEV == '2':\n if visit:\n pass\n else:\n return os.path.join(_APOGEE_DATA,\n 'allStar-'+_APOGEE_ASPCAP_REDUX+'.fits')\n\ndef allStarPath(dr=None,_old=False):\n \"\"\"\n NAME:\n allStarPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n dr= return the path corresponding to this data release \n 
OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-05-30 - Edited for ASPCAP - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n redux= _redux_dr(dr=dr)\n if _old:\n return os.path.join(_APOGEE_DATA,\n 'allStar-%s.fits' % redux)\n else:\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return os.path.join(specReduxPath,'r3','s3','a3',\n _redux_dr(dr=dr),'allStar-%s.fits' % redux)\n elif dr == '12':\n return os.path.join(specReduxPath,'r5','stars','l25_6d',\n _redux_dr(dr=dr),'allStar-%s.fits' % redux)\n elif dr == '13':\n return os.path.join(specReduxPath,'r6','stars','l30e',\n _redux_dr(dr=dr),'allStar-%s.fits' % redux)\n elif dr == 'current':\n return os.path.join(specReduxPath,'current','stars','l25_6d',\n _redux_dr(dr=dr),'allStar-%s.fits' % redux)\n\ndef allVisitPath(dr=None,_old=False):\n \"\"\"\n NAME:\n allVisitPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n dr= return the path corresponding to this data release \n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-05-30 - Edited for ASPCAP - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n redux= _redux_dr(dr=dr)\n if _old:\n return os.path.join(_APOGEE_DATA,\n 'allVisit-%s.fits' % redux)\n else:\n return allStarPath(dr=dr,_old=_old).replace('allStar','allVisit')\n\ndef apokascPath():\n \"\"\"\n NAME:\n apokascPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n (none)\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOKASC_REDUX with the current reduction version (e.g., v6.2)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-09-10 - Edited for APOKASC - Bovy (IAS)\n \"\"\"\n if _APOGEE_APOKASC_REDUX[1] == '7':\n return os.path.join(_APOGEE_DATA,\n 'APOKASC_Catalog.'+_APOGEE_APOKASC_REDUX+'.fits')\n else:\n return os.path.join(_APOGEE_DATA,\n 'APOKASC_cat_'+_APOGEE_APOKASC_REDUX+'.fits')\n\ndef distPath(dr=None):\n \"\"\"\n NAME:\n distPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n (none)\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-05-30 - Edited for ASPCAP - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n redux= _redux_dr(dr=dr)\n if redux.lower() == _DR12REDUX:\n return os.path.join(_APOGEE_DATA,\n 'apogee-distances_DR12_v1.fits')\n elif redux.lower() == _DR11REDUX:\n return os.path.join(_APOGEE_DATA,\n 'allStar+-v402.130103.fits')\n elif redux.lower() == 'v302' or redux.lower() == _DR10REDUX:\n return os.path.join(_APOGEE_DATA,\n 'distmagall-'+redux+'.fits')\n\ndef rcsamplePath(dr=None,_old=False):\n \"\"\"\n NAME:\n rcsamplePath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-10-08 - Edited for rcsample - Bovy 
(IAS)\n \"\"\"\n if dr is None:\n if _APOGEE_REDUX == 'v402': dr= '11'\n elif _APOGEE_REDUX == 'v603': dr= '12'\n elif _APOGEE_REDUX == 'l30e.2': dr= '13'\n elif _APOGEE_REDUX == 'current': \n return os.path.join(_APOGEE_DATA,'apogee-rc-current.fits')\n else: raise IOError('No RC catalog available for the %s reduction' % _APOGEE_REDUX)\n if _old:\n return os.path.join(_APOGEE_DATA,\n 'apogee-rc-DR%s.fits' % dr)\n else:\n if dr == '11' or dr == '12':\n return os.path.join(_APOGEE_DATA,'dr12','apogee','vac','apogee-rc',\n 'cat','apogee-rc-DR%s.fits' % dr)\n elif dr == '13':\n return os.path.join(_APOGEE_DATA,'dr13','apogee','vac','apogee-rc',\n 'cat','apogee-rc-DR%s.fits' % dr)\n\ndef obslogPath(year=None):\n \"\"\"\n NAME:\n obslogPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n year= read up to this year (None)\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for obslog - Bovy (IAS)\n \"\"\"\n if year is None:\n if _APOGEE_REDUX == 'v402': year= 2\n elif _APOGEE_REDUX == 'v603': year= 3\n else: raise IOError('No default year available for APOGEE_REDUX %s, need to set it by hand' % _APOGEE_REDUX)\n if year == 1 or year == 2:\n return os.path.join(_APOGEE_DATA,\n 'obs-summary-year12.csv')\n elif year == 3:\n return os.path.join(_APOGEE_DATA,\n 'obs-summary-year123.csv')\n\ndef apogeeTargetDirPath(dr=None):\n \"\"\"\n NAME:\n apogeeTargetDirPath\n PURPOSE:\n returns the path of the relevant directory\n INPUT:\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for apogeeTargetDir - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n return os.path.join(_APOGEE_DATA,'dr%s' % dr,\n 'apogee','target','apogee_DR'+dr)\n \ndef apogeePlatePath(dr=None):\n \"\"\"\n NAME:\n apogeePlatePath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for apogeePlate - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '11' or dr == '12':\n platename= 'apogeePlate.fits'\n else:\n platename= 'apogeePlate_DR%s.fits' % dr\n return os.path.join(apogeeTargetDirPath(dr=dr),\n platename)\n\ndef apogeeDesignPath(dr=None):\n \"\"\"\n NAME:\n apogeeDesignPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for apogeePlate - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '11' or dr == '12':\n platename= 'apogeeDesign.fits'\n else:\n platename= 'apogeeDesign_DR%s.fits' % dr\n return os.path.join(apogeeTargetDirPath(dr=dr),\n platename)\n\ndef apogeeFieldPath(dr=None):\n \"\"\"\n NAME:\n apogeeFieldPath\n PURPOSE:\n returns the path of the 
relevant file\n INPUT:\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for apogeePlate - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '11' or dr == '12':\n platename= 'apogeeField.fits'\n else:\n platename= 'apogeeField_DR%s.fits' % dr\n return os.path.join(apogeeTargetDirPath(dr=dr),\n platename)\n\ndef apogeeObjectPath(field_name,dr=None):\n \"\"\"\n NAME:\n apogeeObjectPath\n PURPOSE:\n returns the path of the relevant file\n INPUT:\n field_name - name of the field\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n REQUIREMENTS:\n environment variables APOGEE_DATA pointing to the data directory\n APOGEE_REDUX with the current reduction version (e.g., v0.91)\n HISTORY:\n 2012-01-02 - Written - Bovy (IAS)\n 2012-11-04 - Edited for apogeeObject - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '11' or dr == '12':\n filename= 'apogeeObject_%s.fits' % field_name.strip()\n else:\n filename= 'apogeeObject_DR%s_%s.fits' % (dr,field_name.strip())\n return os.path.join(apogeeTargetDirPath(dr=dr),\n filename)\n\ndef aspcapStarPath(loc_id,apogee_id,dr=None):\n \"\"\"\n NAME:\n aspcapStarPath\n PURPOSE:\n returns the path of the aspcapStar file\n INPUT:\n loc_id - location ID (field for 1m targets)\n apogee_id - APOGEE ID of the star\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n HISTORY:\n 2014-11-25 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return os.path.join(specReduxPath,'r3','s3','a3',\n _redux_dr(dr=dr),'%i' % loc_id,\n 'aspcapStar-%s-%s.fits' % (_redux_dr(dr=dr),\n apogee_id))\n elif dr == '12':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'r5','stars','l25_6d',\n _redux_dr(dr=dr),loc_id.strip(),\n 'aspcapStar-r5-%s-%s.fits' % (_redux_dr(dr=dr),\n apogee_id.strip()))\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'r5','stars','l25_6d',\n _redux_dr(dr=dr),'%i' % loc_id,\n 'aspcapStar-r5-%s-%s.fits' % (_redux_dr(dr=dr),\n apogee_id)) \n elif dr == '13':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'r6','stars','l30e',\n _redux_dr(dr=dr),loc_id.strip(),\n 'aspcapStar-r6-%s-%s.fits' % (_redux_dr(dr=dr),\n apogee_id.strip()))\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'r6','stars','l30e',\n _redux_dr(dr=dr),'%i' % loc_id,\n 'aspcapStar-r6-%s-%s.fits' % (_redux_dr(dr=dr),\n apogee_id))\n elif dr == 'current':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'current','stars','l25_6d',\n _redux_dr(dr=dr),loc_id.strip(),\n 'aspcapStar-current-%s-%s.fits' \\\n % (_redux_dr(dr=dr),\n apogee_id.strip()))\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'current','stars','l25_6d',\n _redux_dr(dr=dr),'%i' % loc_id,\n 'aspcapStar-current-%s-%s.fits' \\\n % (_redux_dr(dr=dr),\n apogee_id))\n \ndef apStarPath(loc_id,apogee_id,dr=None):\n \"\"\"\n NAME:\n apStarPath\n PURPOSE:\n returns the path of the apStar file\n INPUT:\n 
loc_id - location ID (field for 1m targets)\n apogee_id - APOGEE ID of the star\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n HISTORY:\n 2015-01-13 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return os.path.join(specReduxPath,'r3','s3',\n '%i' % loc_id,\n 'apStar-s3-%s.fits' % apogee_id)\n elif dr == '12':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'r5','stars','apo1m',\n loc_id.strip(),\n 'apStar-r5-%s.fits' % apogee_id.strip())\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'r5','stars','apo25m',\n '%i' % loc_id,\n 'apStar-r5-%s.fits' % apogee_id)\n elif dr == '13':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'r6','stars','apo1m',\n loc_id.strip(),\n 'apStar-r6-%s.fits' % apogee_id.strip())\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'r6','stars','apo25m',\n '%i' % loc_id,\n 'apStar-r6-%s.fits' % apogee_id)\n elif dr == 'current':\n if isinstance(loc_id,str): #1m\n return os.path.join(specReduxPath,'current','stars','apo1m',\n loc_id.strip(),\n 'apStar-current-%s.fits' % apogee_id.strip())\n elif loc_id ==1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n return os.path.join(specReduxPath,'current','stars','apo25m',\n '%i' % loc_id,\n 'apStar-current-%s.fits' % apogee_id)\n\ndef apVisitPath(loc_id, mjd, fiberid, dr=None):\n \"\"\"\n NAME:\n apVisitPath\n PURPOSE:\n returns the path of the apVisit file\n INPUT:\n loc_id = 4-digit location ID (field for 1m targets)\n mjd = 5-digit MJD\n fiberid = 3-digit fiber ID\n dr = return the path corresponding to this data release (general default)\n OUTPUT:\n path string\n HISTORY:\n 2016-11 - Meredith Rawls\n 2016-11-29 - Bovy (UofT) - Edited inputs\n TODO: \n automatically find all apVisit files for a given apogee ID and download them\n \"\"\"\n mjd = str(mjd).strip()\n if not isinstance(fiberid,str):\n fiberid= '%03i' % fiberid\n if dr is None:\n dr = _default_dr()\n specReduxPath = apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return os.path.join(specReduxPath, 'r3', 's3', loc_id, mjd,\n 'apVisit-s3-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif dr == '12':\n if isinstance(loc_id, str): #1m\n return os.path.join(specReduxPath, 'r5', 'apo1m', loc_id, mjd,\n 'apVisit-r5-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif loc_id == 1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n loc_id = str(loc_id).strip()\n return os.path.join(specReduxPath, 'r5', 'apo25m', loc_id, mjd,\n 'apVisit-r5-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif dr == '13':\n if isinstance(loc_id, str): #1m\n return os.path.join(specReduxPath, 'r6', 'apo1m', loc_id, mjd,\n 'apVisit-r6-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif loc_id == 1:\n raise IOError('For 1m targets, give the FIELD instead of the location ID')\n else:\n loc_id = str(loc_id).strip()\n return os.path.join(specReduxPath, 'r6', 'apo25m', loc_id, mjd,\n 'apVisit-r6-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif dr == 'current':\n if isinstance(loc_id, str): #1m\n return os.path.join(specReduxPath, 'current', 'apo1m', loc_id, mjd,\n 'apVisit-current-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n elif loc_id == 1:\n raise IOError('For 1m targets, give the FIELD 
instead of the location ID')\n else:\n loc_id = str(loc_id).strip()\n return os.path.join(specReduxPath, 'current', 'apo25m', loc_id, mjd,\n 'apVisit-current-%s-%s-%s.fits' % (loc_id, mjd, fiberid))\n\ndef modelSpecPath(lib='GK',teff=4500,logg=2.5,metals=0.,\n cfe=0.,nfe=0.,afe=0.,vmicro=2.,\n dr=None):\n \"\"\"\n NAME:\n modelSpecPath\n PURPOSE:\n returns the path of a model spectrum file\n INPUT:\n lib= ('GK') spectral library\n teff= (4500) grid-point Teff\n logg= (2.5) grid-point logg\n metals= (0.) grid-point metallicity\n cfe= (0.) grid-point carbon-enhancement\n nfe= (0.) grid-point nitrogen-enhancement\n afe= (0.) grid-point alpha-enhancement\n vmicro= (2.) grid-point microturbulence\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n HISTORY:\n 2015-01-20 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n modelSpecLibPath= apogeeModelSpectroLibraryDirPath(dr=dr,lib=lib)\n if dr == '10':\n raise IOError('Loading model spectra for DR10 is not supported at this time')\n elif dr == '12':\n # Find closest grid-points for cfe, nfe, afe, and vmicro\n cfegrid= numpy.linspace(-1.,1.,9)\n nfegrid= numpy.linspace(-1.,1.,5)\n afegrid= numpy.linspace(-1.,1.,9)\n vmicrogrid= numpy.array([0.5,1.,2.,4.,8.])\n cfep= cfegrid[numpy.argmin(numpy.fabs(cfegrid-cfe))]\n nfep= nfegrid[numpy.argmin(numpy.fabs(nfegrid-nfe))]\n afep= afegrid[numpy.argmin(numpy.fabs(afegrid-afe))]\n vmp= vmicrogrid[numpy.argmin(numpy.fabs(vmicrogrid-vmicro))]\n # Create strings\n if cfep >= 0.:\n cfestr= 'cp%i%i' % (int(cfep),int(round((cfep % 1)*10.)))\n else:\n cfestr= 'cm%i%i' % (int(-cfep),int(round((-cfep % 1)*10.)))\n if nfep >= 0.:\n nfestr= 'np%i%i' % (int(nfep),int(round((nfep % 1)*10.)))\n else:\n nfestr= 'nm%i%i' % (int(-nfep),int(round((-nfep % 1)*10.)))\n if afep >= 0.:\n afestr= 'ap%i%i' % (int(afep),int(round((afep % 1)*10.)))\n else:\n afestr= 'am%i%i' % (int(-afep),int(round((-afep % 1)*10.)))\n if vmp >= 0.:\n vmstr= 'vp%i%i' % (int(vmp),int(round((vmp % 1)*10.)))\n else:\n vmstr= 'cm%i%i' % (int(-vmp),int(round((-vmp % 1)*10.)))\n return os.path.join(specReduxPath,modelSpecLibPath,\n afestr+cfestr+nfestr+vmstr+'.fits')\n \ndef ferreModelLibraryPath(lib='GK',pca=True,sixd=True,unf=False,dr=None,\n header=False):\n \"\"\"\n NAME:\n ferreModelLibraryPath\n PURPOSE:\n returns the path of a model library\n INPUT:\n lib= ('GK') spectral library\n dr= return the path corresponding to this data release\n pca= (True) if True, return path of the PCA compressed library\n sixd= (True) if True, return path of the 6D library (w/o vmicro)\n unf= (False) if True, return path of the binary library (otherwise ascii)\n header= (False) if True, return the path of the header file\n OUTPUT:\n path string\n HISTORY:\n 2015-01-21 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n modelSpecLibPath= apogeeModelSpectroLibraryDirPath(dr=dr,lib=lib)\n if dr == '10':\n raise IOError('Loading model libraries for DR10 is not supported at this time')\n elif dr == '12':\n if pca and sixd:\n filename= 'p6_aps'\n elif pca:\n filename= 'p_aps'\n else:\n filename= 'f_'\n filename+= 'as%s_131216_lsfcombo5v6_w123.' 
% lib.upper()\n if header:\n filename+= 'hdr'\n elif unf:\n filename+= 'unf'\n else:\n filename+= 'dat'\n return os.path.join(specReduxPath,modelSpecLibPath,filename)\n elif dr == 'current':\n if pca and sixd:\n filename= 'p6_aps'\n elif pca:\n filename= 'p_aps'\n else:\n filename= 'f_'\n if 'ms' in lib:\n filename+= '%s_140529_lsfcombo5v6_w123.' % lib\n else:\n filename+= 'as%s_131216_lsfcombo5v6_w123.' % lib.upper()\n if header:\n filename+= 'hdr'\n elif unf:\n filename+= 'unf'\n else:\n filename+= 'dat'\n return os.path.join(specReduxPath,modelSpecLibPath,filename)\n \ndef modelAtmospherePath(lib='kurucz_filled',teff=4500,logg=2.5,metals=0.,\n cfe=0.,afe=0.,vmicro=2.,dr=None):\n \"\"\"\n NAME:\n modelAtmospherePath\n PURPOSE:\n returns the path of a model spectrum file\n INPUT:\n lib= ('kurucz_filled') atmosphere library\n teff= (4500) grid-point Teff\n logg= (2.5) grid-point logg\n metals= (0.) grid-point metallicity\n cfe= (0.) grid-point carbon-enhancement\n afe= (0.) grid-point alpha-enhancement\n vmicro= (2.) grid-point microturbulence\n dr= return the path corresponding to this data release\n OUTPUT:\n path string\n HISTORY:\n 2015-02-13 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= 'current'\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n modelAtmosphereLibPath= apogeeModelAtmosphereLibraryDirPath(dr=dr,lib=lib)\n if dr == '10':\n raise IOError('Loading model atmospheres for DR10 is not supported at this time')\n elif dr == '12' or dr == 'current':\n # Create directory + filename\n if lib.lower() == 'kurucz_filled':\n metalsstr= _modelAtmKurucz_metalsString(metals)\n cfestr= _modelAtmKurucz_cfeString(cfe,metals)\n afestr= _modelAtmKurucz_afeString(afe,metals)\n dirname= os.path.join(specReduxPath,modelAtmosphereLibPath,\n metalsstr+cfestr+afestr)\n filename= 'a'+metalsstr+cfestr+afestr\n teffstr= _modelAtmKurucz_teffString(teff)\n loggstr= _modelAtmKurucz_loggString(logg,teff)\n filename+= teffstr+loggstr+'v20.mod'\n return os.path.join(dirname,filename)\n \ndef linelistPath(linelist,dr=None):\n \"\"\"\n NAME:\n linelistPath\n PURPOSE:\n returns the path of a linelist\n INPUT:\n linelist - name of the linelist\n OUTPUT:\n path string\n HISTORY:\n 2015-02-13 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= 'current'\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n return os.path.join(specReduxPath,'speclib','linelists',linelist)\n \ndef apWavePath(chip,dr=None):\n \"\"\"\n NAME:\n apWavePath\n PURPOSE:\n returns the path of an apWave file\n INPUT:\n chip - chip 'a', 'b', or 'c'\n dr= return the path corresponding to this data release \n OUTPUT:\n path string\n HISTORY:\n 2015-02-27 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return os.path.join(specReduxPath,'r3','cal','wave',\n 'apWave-%s-02420038.fits' % chip)\n elif dr == '12':\n return os.path.join(specReduxPath,'r5','cal','wave',\n 'apWave-%s-02420038.fits' % chip)\n elif dr == '13' or dr == 'current':\n return os.path.join(specReduxPath,'r6','cal','wave',\n 'apWave-%s-02420038.fits' % chip)\n \ndef apLSFPath(chip,dr=None):\n \"\"\"\n NAME:\n apLSFPath\n PURPOSE:\n returns the path of an apLSF file\n INPUT:\n chip - chip 'a', 'b', or 'c'\n dr= return the path corresponding to this data release \n OUTPUT:\n path string\n HISTORY:\n 2015-03-12 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n specReduxPath= apogeeSpectroReduxDirPath(dr=dr)\n if dr == '10':\n return 
os.path.join(specReduxPath,'r3','cal','lsf',\n 'apLSF-%s-02490024.fits' % chip)\n elif dr == '12':\n return os.path.join(specReduxPath,'r5','cal','lsf',\n 'apLSF-%s-02490024.fits' % chip)\n elif dr == '13' or dr == 'current':\n return os.path.join(specReduxPath,'r6','cal','lsf',\n 'apLSF-%s-05440020.fits' % chip)\n \ndef apogeeSpectroReduxDirPath(dr=None):\n \"\"\"\n NAME:\n apogeeSpectroReduxDirPath\n PURPOSE:\n returns the path of the spectro dir\n INPUT:\n dr= return the path corresponding to this data release \n OUTPUT:\n path string\n HISTORY:\n 2014-11-25 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr.lower() == 'current':\n return os.path.join(_APOGEE_DATA,'apogeework',\n 'apogee','spectro','redux')\n else:\n return os.path.join(_APOGEE_DATA,'dr%s' % dr,\n 'apogee','spectro','redux')\n \ndef apogeeModelSpectroLibraryDirPath(dr=None,lib='GK'):\n \"\"\"\n NAME:\n apogeeModelSpectroLibraryDirPath\n PURPOSE:\n returns the path of the model spectra within the spectral reduction directory\n INPUT:\n dr= return the path corresponding to this data release \n lib= ('GK') spectral library\n OUTPUT:\n path string\n HISTORY:\n 2015-01-20 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '12':\n if lib.lower() == 'gk':\n return os.path.join('speclib','asset','kurucz_filled',\n 'solarisotopes','asGK_131216_lsfcombo5v6')\n elif lib.lower() == 'f':\n return os.path.join('speclib','asset','kurucz_filled',\n 'solarisotopes','asF_131216_lsfcombo5v6')\n elif dr == 'current':\n if lib.lower() == 'msgk':\n return os.path.join('speclib','moog','kurucz_filled',\n 'solarisotopes','msGK_140529_lsfcombo5v6')\n \ndef apogeeModelAtmosphereLibraryDirPath(dr=None,lib='kurucz_filled'):\n \"\"\"\n NAME:\n apogeeModelAtmosphereLibraryDirPath\n PURPOSE:\n returns the path of the model atmospheres within the spectral reduction directory\n INPUT:\n dr= return the path corresponding to this data release \n lib= ('kurucz_filled') spectral library\n OUTPUT:\n path string\n HISTORY:\n 2015-02-13 - Written - Bovy (IAS)\n \"\"\"\n if dr is None: dr= _default_dr()\n if dr == '12' or dr == 'current':\n if lib.lower() == 'kurucz_filled':\n return os.path.join('speclib','kurucz_filled')\n elif 'marcs' in lib.lower():\n return os.path.join('speclib','marcs',lib)\n \ndef _default_dr():\n if _APOGEE_REDUX == _DR10REDUX: dr= '10'\n elif _APOGEE_REDUX == _DR11REDUX: dr= '11'\n elif _APOGEE_REDUX == _DR12REDUX: dr= '12'\n elif _APOGEE_REDUX == _DR13REDUX: dr= '13'\n elif _APOGEE_REDUX == _CURRENTREDUX: dr= 'current'\n else: raise IOError('No default dr available for APOGEE_REDUX %s, need to set it by hand' % _APOGEE_REDUX)\n return dr\n\ndef _redux_dr(dr=None):\n if dr is None: dr= _default_dr()\n if dr == '10': return _DR10REDUX\n elif dr == '11': return _DR11REDUX\n elif dr == '12': return _DR12REDUX\n elif dr == '13': return _DR13REDUX\n elif dr == 'current': return _CURRENTREDUX\n else: raise IOError('No reduction available for DR%s, need to set it by hand' % dr)\n\n# Functions that give the correct string values for model atmosphere files\n# [M/H]\n_modelAtmKurucz_fehgrid= numpy.array([-5.,-4.5,-4.,-3.5,-3.,-2.75,-2.5,\n -2.25,-2.,-1.75,-1.5,-1.25,-1.,\n -0.75,-0.5,-0.25,0.,0.25,0.5,\n 1.,1.5]) \ndef _py2_round(fl):\n # Bad ... 
always round 0.5 up, like in python 2 (not 3!)\n if fl % 1 >= 0.5:\n return numpy.ceil(fl)\n else:\n return numpy.floor(fl)\n\ndef _modelAtmKurucz_metalsString(metals):\n metalsp= _modelAtmKurucz_fehgrid[numpy.argmin(numpy.fabs(_modelAtmKurucz_fehgrid-metals))]\n if metalsp >= 0.:\n metalsstr= 'mp%i%i' % (int(metalsp),int(_py2_round((metalsp % 1)*10.)))\n else:\n metalsstr= 'mm%i%i' % (int(-metalsp),int(_py2_round((-metalsp % 1)*10.)))\n return metalsstr\n\n# [C/Fe]\n_modelAtmKurucz_cfegrid_lowm= numpy.linspace(-1.,1.,5)\n_modelAtmKurucz_cfegrid_midm= numpy.linspace(-1.5,1.,11)\n_modelAtmKurucz_cfegrid_him= numpy.linspace(-1.5,1.,6)\ndef _modelAtmKurucz_cfeString(cfe,metals):\n if metals <= -3.5:\n tgrid= _modelAtmKurucz_cfegrid_lowm\n elif metals >= 1.:\n tgrid= _modelAtmKurucz_cfegrid_him\n else:\n tgrid= _modelAtmKurucz_cfegrid_midm\n cfep= tgrid[numpy.argmin(numpy.fabs(tgrid-cfe))]\n if cfep >= 0.:\n cfestr= 'cp%i%i' % (int(cfep),int(_py2_round((cfep % 1)*10.)))\n else:\n cfestr= 'cm%i%i' % (int(-cfep),int(_py2_round((-cfep % 1)*10.)))\n return cfestr\n\n# [alpha/Fe]\n_modelAtmKurucz_afegrid_lowm= numpy.linspace(-1.,1.,5)\n_modelAtmKurucz_afegrid_midm= numpy.linspace(-1.5,1.,11)\n_modelAtmKurucz_afegrid_him= numpy.linspace(-1.5,1.,6)\ndef _modelAtmKurucz_afeString(afe,metals):\n if metals <= -3.5:\n tgrid= _modelAtmKurucz_afegrid_lowm\n elif metals >= 1.:\n tgrid= _modelAtmKurucz_afegrid_him\n else:\n tgrid= _modelAtmKurucz_afegrid_midm\n afep= tgrid[numpy.argmin(numpy.fabs(tgrid-afe))]\n if afep >= 0.:\n afestr= 'op%i%i' % (int(afep),int(_py2_round((afep % 1)*10.)))\n else:\n afestr= 'om%i%i' % (int(-afep),int(_py2_round((-afep % 1)*10.)))\n return afestr\n\n# Teff\n_modelAtmKurucz_teffgrid= numpy.array([3500,3750,4000,4250,4500,\n 4750,5000,5250,5500,5750,\n 6000,6250,6500,6750,7000,\n 7250,7500,7750,8000,8250,\n 8500,8750,9000,9250,9500,\n 9750,10000,10250,10500,\n 10750,11000,11250,11500,11750,\n 12000,12500,13000,13500,14000,\n 14500,15000,15500,16000,16500,\n 17000,17500,18000,18500,19000,\n 19500,20000,21000,22000,23000,\n 24000,25000,26000,27000,28000,\n 29000,30000],dtype='int')\ndef _modelAtmKurucz_teffString(teff):\n teffp= _modelAtmKurucz_teffgrid[numpy.argmin(numpy.fabs(_modelAtmKurucz_teffgrid-teff))]\n return 't%i' % teffp\n\n# log g\n_modelAtmKurucz_logggrid_G= numpy.linspace(0.,5.,11)\n_modelAtmKurucz_logggrid_F= numpy.linspace(1.,5.,9)\n_modelAtmKurucz_logggrid_A= numpy.linspace(2.,5.,7)\n_modelAtmKurucz_logggrid_B= numpy.linspace(3.,5.,5)\n_modelAtmKurucz_logggrid_O= numpy.linspace(4.,5.,3)\ndef _modelAtmKurucz_loggString(logg,teff):\n if teff <= 6000.:\n tgrid= _modelAtmKurucz_logggrid_G\n elif teff <= 8000.:\n tgrid= _modelAtmKurucz_logggrid_F\n elif teff <= 12000.:\n tgrid= _modelAtmKurucz_logggrid_A\n elif teff <= 20000.:\n tgrid= _modelAtmKurucz_logggrid_B\n else:\n tgrid= _modelAtmKurucz_logggrid_O\n loggp= tgrid[numpy.argmin(numpy.fabs(tgrid-logg))]\n return 'g%i%i' % (int(loggp),int(_py2_round((loggp % 1)*10.)))\n\n\n"
] | [
[
"numpy.linspace",
"numpy.ceil",
"numpy.floor",
"numpy.array",
"numpy.fabs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
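For the record above, the listed numpy calls (linspace, array, fabs, ceil, floor) all serve one pattern: snap a requested stellar parameter to the nearest grid point and turn it into a filename token such as 'mm05'. A minimal sketch of that pattern, using an illustrative grid and value rather than the module's actual ones:

import numpy as np

def py2_round(fl):
    # Round halves up, as in Python 2 (built from numpy.ceil / numpy.floor).
    return np.ceil(fl) if fl % 1 >= 0.5 else np.floor(fl)

def grid_token(value, grid, pos_prefix, neg_prefix):
    # Snap value to the closest grid point via numpy.fabs + argmin,
    # then format it as a sign-prefixed two-digit token.
    snapped = grid[np.argmin(np.fabs(grid - value))]
    prefix = pos_prefix if snapped >= 0. else neg_prefix
    snapped = abs(snapped)
    return '%s%i%i' % (prefix, int(snapped), int(py2_round((snapped % 1) * 10.)))

feh_grid = np.linspace(-2.5, 0.5, 13)           # illustrative grid with 0.25 dex spacing
print(grid_token(-0.4, feh_grid, 'mp', 'mm'))   # -> 'mm05' (nearest grid point is -0.5)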
karaage0703/zero-deeplearning | [
"8b6e4550d14819c2b4f3114405af15e78c700a33"
] | [
"3_2_4_sigmoid.py"
] | [
"import numpy as np\nimport matplotlib.pylab as plt\n\ndef sigmoid(x):\n return 1/(1 + np.exp(-x))\n\nx = np.arange(-5.0, 5.0 , 0.1)\ny = sigmoid(x)\nplt.plot(x, y)\nplt.ylim(-0.1, 1.1)\nplt.show()\n"
] | [
[
"matplotlib.pylab.show",
"numpy.arange",
"matplotlib.pylab.plot",
"matplotlib.pylab.ylim",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
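The sigmoid file above evaluates 1/(1 + np.exp(-x)) directly, which overflows numpy.exp for large negative inputs. Sketched below, as an optional alternative rather than part of the original file, is a numerically stable variant built from the same numpy calls:

import numpy as np

def stable_sigmoid(x):
    # Only exponentiate non-positive arguments, so numpy.exp cannot overflow.
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out

print(stable_sigmoid(np.array([-500.0, 0.0, 500.0])))   # -> [0.  0.5 1. ], no overflow warning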
yongjun823/Open3D-PointNet | [
"defe5ff0e190d77723bb7f08287e172ff8f8ff34"
] | [
"datasets.py"
] | [
"from __future__ import print_function\nimport open3d\nimport torch.utils.data as data\nfrom PIL import Image\nimport os\nimport os.path\nimport errno\nimport torch\nimport json\nimport codecs\nimport numpy as np\nimport progressbar\nimport sys\nimport torchvision.transforms as transforms\nimport argparse\nimport json\nfrom provider import jitter_point_cloud, select_random_point\nfrom provider import loadTrainModel40, loadTestModel40\n\n\nclass PartDataset(data.Dataset):\n def __init__(self, root, npoints = 2500, classification = False, class_choice = None, train = True):\n self.npoints = npoints\n self.root = root\n self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')\n self.cat = {}\n\n self.classification = classification\n\n with open(self.catfile, 'r') as f:\n for line in f:\n ls = line.strip().split()\n self.cat[ls[0]] = ls[1]\n #print(self.cat)\n if not class_choice is None:\n self.cat = {k:v for k,v in self.cat.items() if k in class_choice}\n\n self.meta = {}\n for item in self.cat:\n #print('category', item)\n self.meta[item] = []\n dir_point = os.path.join(self.root, self.cat[item], 'points')\n dir_seg = os.path.join(self.root, self.cat[item], 'points_label')\n #print(dir_point, dir_seg)\n fns = sorted(os.listdir(dir_point))\n if train:\n fns = fns[:int(len(fns) * 0.9)]\n else:\n fns = fns[int(len(fns) * 0.9):]\n\n #print(os.path.basename(fns))\n for fn in fns:\n token = (os.path.splitext(os.path.basename(fn))[0])\n self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg')))\n\n self.datapath = []\n for item in self.cat:\n for fn in self.meta[item]:\n self.datapath.append((item, fn[0], fn[1]))\n\n\n self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))\n print(self.classes)\n self.num_seg_classes = 0\n if not self.classification:\n for i in range(len(self.datapath)//50):\n l = len(np.unique(np.loadtxt(self.datapath[i][-1]).astype(np.uint8)))\n if l > self.num_seg_classes:\n self.num_seg_classes = l\n #print(self.num_seg_classes)\n\n\n def __getitem__(self, index):\n fn = self.datapath[index]\n cls = self.classes[self.datapath[index][0]]\n point_set = np.asarray(\n open3d.read_point_cloud(fn[1], format='xyz').points,\n dtype=np.float32)\n seg = np.loadtxt(fn[2]).astype(np.int64)\n #print(point_set.shape, seg.shape)\n\n choice = np.random.choice(len(seg), self.npoints, replace=True)\n #resample\n point_set = point_set[choice, :]\n seg = seg[choice]\n point_set = torch.from_numpy(point_set)\n seg = torch.from_numpy(seg)\n cls = torch.from_numpy(np.array([cls]).astype(np.int64))\n if self.classification:\n return point_set, cls\n else:\n return point_set, seg\n\n def __len__(self):\n return len(self.datapath)\n\nclass ModelNetDataset(data.Dataset):\n def __init__(self,\n root,\n split='train',\n jitter=False,\n sample=False):\n self.root = root\n self.split = split\n \n if split == 'train':\n self.points = loadTrainModel40(root)\n else:\n self.points = loadTestModel40(root)\n \n if jitter:\n self.points = jitter_point_cloud(self.points)\n \n if sample:\n self.points = select_random_point(self.points)\n \n print('point load info: ', self.points.shape)\n\n def __getitem__(self, index):\n return self.points[index]\n\n def __len__(self):\n return len(self.points)\n \nif __name__ == '__main__':\n print('test')\n d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'])\n print(len(d))\n ps, seg = d[0]\n print(ps.size(), ps.type(), seg.size(),seg.type())\n\n d = PartDataset(root 
= 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True)\n print(len(d))\n ps, cls = d[0]\n print(ps.size(), ps.type(), cls.size(),cls.type())\n \n d = ModelNetDataset(root='./data')\n print(len(d))\n print(d[0])\n"
] | [
[
"numpy.array",
"torch.from_numpy",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
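In PartDataset.__getitem__ above, the listed calls (numpy.loadtxt, numpy.array, torch.from_numpy) implement "load labels, resample a fixed number of points, hand tensors to PyTorch". A self-contained sketch of that resampling step on synthetic data (the points and labels are generated here instead of being read from ShapeNet .pts/.seg files):

import numpy as np
import torch

npoints = 16                                               # target sample size (illustrative)
point_set = np.random.rand(100, 3).astype(np.float32)      # stand-in for the .pts point cloud
seg = np.random.randint(1, 5, size=100).astype(np.int64)   # stand-in for np.loadtxt(fn).astype(np.int64)

# Resample npoints indices with replacement, as the dataset class does.
choice = np.random.choice(len(seg), npoints, replace=True)
points = torch.from_numpy(point_set[choice, :])
labels = torch.from_numpy(seg[choice])
print(points.shape, points.dtype, labels.shape, labels.dtype)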
LucasHelal/data-science | [
"9b243be1dea23a521e6ebb49dc358708a9b17dbd",
"9b243be1dea23a521e6ebb49dc358708a9b17dbd",
"9b243be1dea23a521e6ebb49dc358708a9b17dbd"
] | [
"working-with-data/part2/binning.py",
"pandas/rank-and-sort.py",
"working-with-data/part2/merge-on-index.py"
] | [
"import pandas as pd\n\n# Now we'll learn about binning\n\nyears = [1990, 1991, 1992, 2008, 2012, 2015, 1987, 1969, 2013, 2008, 1999]\n\n# We can seperate these years by decade\ndecade_bins = [1960, 1970, 1980, 1990, 2000, 2010, 2020]\n\n# Now we'll use cut to get somethign called a Category object\ndecade_cat = pd.cut(years, decade_bins)\n# We can check the categories using .categories\ndecade_cat.categories\n\n# Then we can check the value counts in each category\npd.value_counts(decade_cat)\n\n# We can also pass data values to the cut.\n\n# For instance, if we just wanted to make two bins, evenly spaced based on\n# max and min year, with a 1 year precision\npd.cut(years, 2, precision=1)\n\n\n# Thats about it for binning basics\n# One last thing to note, jus tlike in standard math notation,\n# when setting up bins:\n# () means open, while [] means closed/inclusive\n",
"import numpy as np\nfrom numpy.random import randn\nfrom pandas import Series, DataFrame\nimport pandas as pd\n\n# Sorting by index\nser1 = Series(range(3), index=['C', 'A', 'B'])\n\n\n# Now sort_index\nser1.sort_index()\n\n# Can sort a Series by its values\nser1.order()\n\n# Lets see how ranking works\nser2 = Series(randn(10))\n\n# This will show you the rank used if you sort the series\nser2.rank()\n\n# Lets sort it now\nser2.sort()\n\n# After sorting let's check the rank and see iof it makes sense\nser2.rank()\n# On the left column we see th original index value and on the right we\n# see it's rank!\n",
"import pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\n# Now we'll learn how to merge on an index\n\n# Lets get two dframes\n\ndf_left = DataFrame({'key': ['X', 'Y', 'Z', 'X', 'Y'],\n 'data': range(5)})\ndf_right = DataFrame({'group_data': [10, 20]}, index=['X', 'Y'])\n\n# Now merge, we'll use the key for the left Dframe, and the index for the right\npd.merge(df_left, df_right, left_on='key', right_index=True)\n\n# We can also get a union by using outer\npd.merge(df_left, df_right, left_on='key', right_index=True, how='outer')\n\n# Now let's try something a little more complicated, remember hierarchal index?\ndf_left_hr = DataFrame({'key1': ['SF', 'SF', 'SF', 'LA', 'LA'],\n 'key2': [10, 20, 30, 20, 30],\n 'data_set': np.arange(5.)})\ndf_right_hr = DataFrame(np.arange(10).reshape((5, 2)),\n index=[['LA', 'LA', 'SF', 'SF', 'SF'],\n [20, 10, 10, 10, 20]],\n columns=['col_1', 'col_2'])\n\n# Now we can merge the left by using keys and the right by its index\npd.merge(df_left_hr, df_right_hr, left_on=['key1', 'key2'], right_index=True)\n\n# We can alo keep a union by choosing 'outer' method\npd.merge(df_left_hr, df_right_hr, left_on=[\n 'key1', 'key2'], right_index=True, how='outer')\n\n# WE can also you .join()\n\n# Shown on our first two DataFrames\ndf_left.join(df_right)\n"
] | [
[
"pandas.value_counts",
"pandas.cut"
],
[
"numpy.random.randn"
],
[
"numpy.arange",
"pandas.merge",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
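The binning file in this record calls the module-level pandas.value_counts, which recent pandas releases deprecate. A version-robust sketch of the same decade-binning example counts through a Series instead (the years and bins are copied from the record):

import pandas as pd

years = [1990, 1991, 1992, 2008, 2012, 2015, 1987, 1969, 2013, 2008, 1999]
decade_bins = [1960, 1970, 1980, 1990, 2000, 2010, 2020]

# pd.cut returns a Categorical whose categories are (open, closed] intervals.
decade_cat = pd.cut(years, decade_bins)
print(decade_cat.categories)

# Counting via a Series method avoids the deprecated module-level pd.value_counts.
print(pd.Series(decade_cat).value_counts(sort=False))

Similarly, the Series.order() and Series.sort() calls in the second file were removed around pandas 0.20, which is consistent with that file's empty possible_versions entry.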
penaguerrero/jwql | [
"0e6eb58e7a631c1d6356ce6c1b192c7dd52962bf"
] | [
"setup.py"
] | [
"import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.25.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
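The setup.py above passes numpy.get_include() via include_dirs straight to setup(), but the call defines no ext_modules, so that path has nothing to compile against; more commonly it feeds the include_dirs of a compiled Extension. A minimal sketch of that conventional pattern, with a hypothetical package and source file name not taken from the jwql record:

import numpy as np
from setuptools import Extension, setup

# Hypothetical C extension built against the NumPy C API; the module name and
# source path are placeholders.
fast_ext = Extension(
    'example._fastmath',
    sources=['example/_fastmath.c'],
    include_dirs=[np.get_include()],   # directory containing numpy/arrayobject.h
)

setup(
    name='example',
    version='0.1.0',
    ext_modules=[fast_ext],
)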
burntfalafel/torch-mlir-internal | [
"d3ef58450fc94e9337dc0434fa3af6dd7b54b37f",
"d3ef58450fc94e9337dc0434fa3af6dd7b54b37f",
"d3ef58450fc94e9337dc0434fa3af6dd7b54b37f"
] | [
"python/torch_mlir_e2e_test/test_suite/scalar.py",
"python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/upstream_shape_helpers.py",
"python/torch_mlir_e2e_test/test_suite/elementwise.py"
] | [
"# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.framework import TestUtils\nfrom torch_mlir_e2e_test.torchscript.registry import register_test_case\nfrom torch_mlir_e2e_test.torchscript.annotations import annotate_args, export\n\n# ==============================================================================\n\n\nclass AddIntModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ([], torch.int64, True),\n ])\n def forward(self, lhs, rhs):\n return int(lhs) + int(rhs)\n\n\n@register_test_case(module_factory=lambda: AddIntModule())\ndef AddIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(-100, 100, ()), torch.randint(-100, 100, ()))\n\n\n# ==============================================================================\n\n\nclass SubIntModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ([], torch.int64, True),\n ])\n def forward(self, lhs, rhs):\n return int(lhs) - int(rhs)\n\n\n@register_test_case(module_factory=lambda: SubIntModule())\ndef SubIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(-100, 100, ()), torch.randint(-100, 100, ()))\n\n\n# ==============================================================================\n\n\nclass SubFloatModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.float64, True),\n ([], torch.float64, True),\n ])\n def forward(self, lhs, rhs):\n return float(lhs) - float(rhs)\n\n\n@register_test_case(module_factory=lambda: SubFloatModule())\ndef SubFloatModule_basic(module, tu: TestUtils):\n module.forward(torch.rand(()).double(), torch.rand(()).double())\n\n\n# ==============================================================================\n\n\nclass MulIntModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ([], torch.int64, True),\n ])\n def forward(self, lhs, rhs):\n return int(lhs) * int(rhs)\n\n\n@register_test_case(module_factory=lambda: MulIntModule())\ndef MulIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(-100, 100, ()), torch.randint(-100, 100, ()))\n\n\n# ==============================================================================\n\n\nclass DivFloatModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.float64, True),\n ([], torch.float64, True),\n ])\n def forward(self, lhs, rhs):\n return float(lhs) / float(rhs)\n\n\n@register_test_case(module_factory=lambda: DivFloatModule())\ndef DivFloatModule_basic(module, tu: TestUtils):\n module.forward(torch.rand(()).double(), torch.rand(()).double())\n\n\n# ==============================================================================\n\n\nclass CeilFloatModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.float64, True),\n ([], torch.float64, True),\n ])\n def forward(self, lhs, rhs):\n sub = float(lhs) - float(rhs)\n # Cast the result to int to make e2e test baseline result to be an int.\n # Without the cast, baseline result 
is a Tensor which is unexpected see\n # https://github.com/llvm/torch-mlir/issues/842\n # TODO: Investigate the root cause of baseline returning a Tensor\n # without the int cast and remove the cast.\n return int(torch.ops.aten.ceil(float(sub)))\n\n\n@register_test_case(module_factory=lambda: CeilFloatModule())\ndef CeilFloatModule_basic(module, tu: TestUtils):\n module.forward(torch.rand(()).double(), torch.rand(()).double())\n\n\n# ==============================================================================\n\n\nclass SqrtIntModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ])\n def forward(self, a):\n return float(torch.ops.aten.sqrt(int(a)))\n\n\n@register_test_case(module_factory=lambda: SqrtIntModule())\ndef SqrtIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, ()))\n\n\nclass SqrtIntConstantModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ])\n def forward(self):\n return float(torch.ops.aten.sqrt(5))\n\n\n@register_test_case(module_factory=lambda: SqrtIntConstantModule())\ndef SqrtIntConstantModule_basic(module, tu: TestUtils):\n module.forward()\n\n\n# ==============================================================================\n\n\nclass BoolFloatFalseModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.float64, True),\n ])\n def forward(self, a):\n sub = float(a) - float(a)\n return bool(torch.ops.aten.Bool(float(sub)))\n\n\n@register_test_case(module_factory=lambda: BoolFloatFalseModule())\ndef BoolFloatFalseModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(low=0.5).double())\n\n\nclass BoolFloatTrueModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.float64, True),\n ])\n def forward(self, a):\n return bool(torch.ops.aten.Bool(float(a)))\n\n\n@register_test_case(module_factory=lambda: BoolFloatTrueModule())\ndef BoolFloatTrueModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(low=0.5).double())\n\n\nclass BoolFloatConstantModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ])\n def forward(self):\n return bool(torch.ops.aten.Bool(5.0))\n\n\n@register_test_case(module_factory=lambda: BoolFloatConstantModule())\ndef BoolFloatConstantModule_basic(module, tu: TestUtils):\n module.forward()\n\n\n# ==============================================================================\n\n\nclass BoolIntFalseModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ])\n def forward(self, a):\n sub = int(a) - int(a)\n return bool(torch.ops.aten.Bool(int(sub)))\n\n\n@register_test_case(module_factory=lambda: BoolIntFalseModule())\ndef BoolIntFalseModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 100, ()))\n\n\nclass BoolIntTrueModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([], torch.int64, True),\n ])\n def forward(self, a):\n return bool(torch.ops.aten.Bool(int(a)))\n\n\n@register_test_case(module_factory=lambda: BoolIntTrueModule())\ndef BoolIntTrueModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 100, ()))\n\n\nclass BoolIntConstantModule(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n @export\n 
@annotate_args([\n None,\n ])\n def forward(self):\n return bool(torch.ops.aten.Bool(5))\n\n\n@register_test_case(module_factory=lambda: BoolIntConstantModule())\ndef BoolIntConstantModule_basic(module, tu: TestUtils):\n module.forward()\n",
"# This source code is copied from PyTorch, and remains licensed under\n# the PyTorch BSD-style license available at\n# https://github.com/pytorch/pytorch/blob/master/LICENSE\n\n# DO NOT EDIT THIS FILE, except by copying new upstream contents into it.\n# Code taken from torch/csrc/jit/runtime/symbolic_shape_registry.cpp\n# Once https://github.com/pytorch/pytorch/pull/68564 lands, we can directly\n# use the code from upstream.\n\n# Minimal imports needed for the code to compile.\nfrom typing import List, Any, Optional\nimport torch\nnumber = float\n\n#### SHAPE COMPUTE FUNCTIONS ###\ndef broadcast(a: List[int], b: List[int]):\n dimsA = len(a)\n dimsB = len(b)\n ndim = max(dimsA, dimsB)\n expandedSizes : List[int] = []\n\n for i in range(ndim):\n offset = ndim - 1 - i\n dimA = dimsA - 1 - offset\n dimB = dimsB - 1 - offset\n sizeA = a[dimA] if (dimA >= 0) else 1\n sizeB = b[dimB] if (dimB >= 0) else 1\n\n if sizeA != sizeB and sizeA != 1 and sizeB != 1:\n # TODO: only assertion error is bound in C++ compilation right now\n raise AssertionError(\"The size of tensor a {} must match the size of tensor b (\"\n \"{}) at non-singleton dimension {}\".format(sizeA, sizeB, i))\n\n expandedSizes.append(sizeB if sizeA == 1 else sizeA)\n\n return expandedSizes\n\ndef adaptive_avg_pool2d(self: List[int], out: List[int]):\n assert len(out) == 2\n assert len(self) == 3 or len(self) == 4\n for i in range (1, len(self)):\n assert self[i] != 0\n\n shape: List[int] = []\n for i in range(0, len(self) -2):\n shape.append(self[i])\n for elem in out:\n shape.append(elem)\n return shape\n\ndef _copy(self: List[int]):\n out: List[int] = []\n for elem in self:\n out.append(elem)\n return out\n\ndef unary(self: List[int]):\n return _copy(self)\n\ndef broadcast_inplace(a: List[int], b: List[int]):\n dimsA = len(a)\n dimsB = len(b)\n if dimsB > dimsA:\n raise AssertionError(\"The dims of tensor b ({}) must be less than or equal to\"\n \"the dims of tensor a ({}) \".format(dimsB, dimsA))\n for dimA in range(dimsA):\n dimB = dimsB - dimsA + dimA\n sizeA = a[dimA]\n sizeB = b[dimB] if (dimB >= 0) else 1\n if sizeA != sizeB and sizeB != 1:\n # TODO: only assertion error is bound in C++ compilation right now\n raise AssertionError(\"The size of tensor a {} must match the size of tensor b (\"\n \"{}) at non-singleton dimension {}\".format(sizeA, sizeB, dimA))\n return _copy(a)\n\ndef expand(self: List[int], sizes: List[int]):\n assert len(sizes) >= len(self)\n ndim = len(sizes)\n tensor_dim = len(self)\n if ndim == 0:\n return _copy(sizes)\n out: List[int] = []\n for i in range(ndim):\n offset = ndim - 1 - i\n dim = tensor_dim - 1 - offset\n size = self[dim] if dim >=0 else 1\n targetSize = sizes[i]\n if targetSize == -1:\n assert dim >= 0\n targetSize = size\n if size != targetSize:\n assert size == 1\n size = targetSize\n out.append(size)\n return out\n\ndef expand_one_unused(self: List[int], sizes: List[int], inp0: Any):\n return expand(self, sizes)\n\ndef infer_size_impl(shape: List[int], numel: int) -> List[int]:\n newsize = 1\n infer_dim: Optional[int] = None\n for dim in range(len(shape)):\n if shape[dim] == -1:\n if infer_dim is not None:\n raise AssertionError(\"only one dimension can be inferred\")\n infer_dim = dim\n elif shape[dim] >= 0:\n newsize *= shape[dim]\n else:\n raise AssertionError(\"invalid shape dimensions\")\n if not (numel == newsize or (infer_dim is not None and newsize > 0 and numel % newsize == 0)):\n raise AssertionError(\"invalid shape\")\n out = _copy(shape)\n if infer_dim is not None:\n 
out[infer_dim] = numel // newsize\n return out\n\ndef numel(sizes: List[int]):\n numel = 1\n for elem in sizes:\n numel *= elem\n return numel\n\ndef view(self: List[int], sizes: List[int]):\n return infer_size_impl(sizes, numel(self))\n\ndef view_one_unused(self: List[int], sizes: List[int], *, implicit: bool=False):\n return view(self, sizes)\n\ndef mean_dim(self: List[int], dims: List[int], keep_dim: bool, dt : Any):\n out: List[int] = []\n for idx in range(len(self)):\n is_mean_dim : bool = False\n for reduce_dim in dims:\n if idx == maybe_wrap_dim(reduce_dim, len(self)):\n is_mean_dim = True\n if is_mean_dim:\n if keep_dim:\n out.append(1)\n else:\n out.append(self[idx])\n return out\n\n# note: python already rounds down towards negative infinity on integer division, special arithmetic not needed\ndef div_rtn(x: int, y: int):\n return x // y\n\ndef pooling_output_shape_pad_lr(inputSize: int, kernelSize: int, pad_l: int, pad_r: int, stride: int, dilation: int, ceil_mode: bool):\n outputSize = div_rtn(inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 + (stride - 1 if ceil_mode else 0), stride) + 1\n if ceil_mode:\n if (outputSize - 1) * stride >= inputSize + pad_l:\n outputSize = outputSize - 1\n return outputSize\n\ndef pooling_output_shape(inputSize: int, kernelSize: int, pad_l: int, stride: int, dilation: int, ceil_mode: bool):\n assert stride != 0, \"stride should not be zeero\"\n return pooling_output_shape_pad_lr(inputSize, kernelSize, pad_l, pad_l, stride, dilation, ceil_mode)\n\ndef pool2d_shape_check(input: List[int], kH: int, kW: int, dH: int, dW: int, padH: int, padW: int,\n dilationH: int, dilationW: int, nInputPlane: int, inputHeight: int, inputWidth: int, outputHeight: int, outputWidth: int):\n\n ndim = len(input)\n nOutputPlane = nInputPlane\n\n assert kW > 0 and kH > 0\n assert dW > 0 and dH > 0\n assert dilationH > 0 and dilationW > 0\n\n valid_dims = input[1] != 0 and input[2] != 0\n assert ndim == 3 and input[0] != 0 and valid_dims or (ndim == 4 and valid_dims and input[3] != 0)\n\n assert kW // 2 >= padW and kH // 2 >= padH\n assert outputWidth >= 1 and outputHeight >= 1\n\ndef max_pool2d(input: List[int], kernel_size: List[int], stride: List[int], padding: List[int], dilation: List[int], ceil_mode: bool):\n assert len(kernel_size) == 1 or len(kernel_size) == 2, \"max_pool2d: kernel_size must either be a single int, or a tuple of two ints\"\n kH = kernel_size[0]\n kW = kH if len(kernel_size) == 1 else kernel_size[1]\n\n assert len(stride) == 0 or len(stride) == 1 or len(stride) == 2, \"max_pool2d: stride must either be omitted, a single int, or a tuple of two ints\"\n dH = kH if len(stride) == 0 else stride[0]\n if len(stride) == 0:\n dW = kW\n elif len(stride) == 1:\n dW = dH\n else:\n dW = stride[1]\n\n assert len(padding) == 1 or len(padding) == 2, \"max_pool2d: padding must be either be a single int, or a tuple of two ints\"\n padH = padding[0]\n padW = padH if len(padding) == 1 else padding[1]\n\n assert len(dilation) == 1 or len(dilation) == 2, \"max_pool2d: dilation must be either a single int, or a tuple of two ints\"\n dilationH = dilation[0]\n dilationW = dilationH if len(dilation) == 1 else dilation[1]\n\n assert len(input) == 3 or len(input) == 4\n\n nbatch = input[-4] if len(input) == 4 else 1\n nInputPlane = input[-3]\n inputHeight = input[-2]\n inputWidth = input[-1]\n\n outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)\n outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)\n\n 
pool2d_shape_check(input, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane,\n inputHeight, inputWidth, outputHeight, outputWidth)\n\n if len(input) == 3:\n return [nInputPlane, outputHeight, outputWidth]\n else:\n return [nbatch, nInputPlane, outputHeight, outputWidth]\n\ndef avg_pool2d(input: List[int], kernel_size: List[int], stride: List[int], padding: List[int], ceil_mode: bool, count_include_pad: bool, divisor_override: Optional[int]):\n assert len(kernel_size) == 1 or len(kernel_size) == 2, \"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints\"\n kH = kernel_size[0]\n kW = kH if len(kernel_size) == 1 else kernel_size[1]\n\n assert len(stride) == 0 or len(stride) == 1 or len(stride) == 2, \"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints\"\n dH = kH if len(stride) == 0 else stride[0]\n if len(stride) == 0:\n dW = kW\n elif len(stride) == 1:\n dW = dH\n else:\n dW = stride[1]\n\n assert len(padding) == 1 or len(padding) == 2, \"avg_pool2d: padding must be either be a single int, or a tuple of two ints\"\n padH = padding[0]\n padW = padH if len(padding) == 1 else padding[1]\n\n dilationH = 1\n dilationW = 1\n\n assert len(input) == 3 or len(input) == 4\n\n nbatch = input[-4] if len(input) == 4 else 1\n nInputPlane = input[-3]\n inputHeight = input[-2]\n inputWidth = input[-1]\n\n outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)\n outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)\n\n pool2d_shape_check(input, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane,\n inputHeight, inputWidth, outputHeight, outputWidth)\n\n if len(input) == 3:\n return [nInputPlane, outputHeight, outputWidth]\n else:\n return [nbatch, nInputPlane, outputHeight, outputWidth]\n\ndef max_pool2d_with_indices(input: List[int], kernel_size: List[int], stride: List[int], padding: List[int], dilation: List[int], ceil_mode: bool):\n out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n return (out, out)\n\ndef upsample_nearest2d(input: List[int], output_size: Optional[List[int]], scale_factors: Optional[List[float]]):\n out: List[int] = []\n out.append(input[0])\n out.append(input[1])\n if output_size is not None:\n assert scale_factors is None, \"Must specify exactly one of output_size and scale_factors\"\n assert len(output_size) == 2\n out.append(output_size[0])\n out.append(output_size[1])\n return out\n\n if scale_factors is not None:\n assert output_size is None, \"Must specify exactly one of output_size and scale_factors\"\n assert len(scale_factors) == 2\n out.append(int(input[2] * scale_factors[0]))\n out.append(int(input[3] * scale_factors[1]))\n return out\n assert 0, \"Either output_size or scale_factors must be presented\"\n\ndef mm(self: List[int] , mat2: List[int]):\n assert len(self) == 2, \"self must be a matrix\"\n assert len(mat2) == 2, \"mat2 must be a matrix\"\n\n assert self[1] == mat2[0]\n return [self[0], mat2[1]]\n\ndef dot(self: List[int], tensor: List[int]):\n assert len(self) == 1 and len(tensor) == 1\n assert self[0] == tensor[0]\n out: List[int] = []\n return out\n\ndef mv(self: List[int], vec: List[int]):\n assert len(self) == 2 and len(vec) == 1\n assert self[1] == vec[0]\n # TODO: return self\n return [self[0]]\n\ndef unsqueeze(li: List[int], dim: int):\n dim = maybe_wrap_dim(dim, len(li) + 1)\n out = _copy(li)\n out.insert(dim, 1)\n return out\n\ndef squeeze_nodim(li: List[int]):\n out: List[int] = []\n for i in 
range(len(li)):\n if li[i] != 1:\n out.append(li[i])\n return out\n\ndef squeeze(li: List[int], dim: int):\n out: List[int] = []\n wrapped_dim = maybe_wrap_dim(dim, len(li))\n for i in range(len(li)):\n if i == wrapped_dim:\n if li[i] != 1:\n out.append(li[i])\n else:\n out.append(li[i])\n return out\n\ndef index_select(self: List[int], dim: int, index: List[int]):\n dim = maybe_wrap_dim(dim, len(self))\n numel = multiply_integers(index)\n assert len(index) <= 1\n assert dim == 0 or dim < len(self)\n result_size: List[int] = []\n for i in range(len(self)):\n if dim == i:\n result_size.append(numel)\n else:\n result_size.append(self[i])\n return result_size\n\ndef embedding(weight: List[int], indices: List[int], padding_idx:int = -1, scale_grad_by_freq:bool=False, sparse: bool=False):\n assert len(weight) == 2\n if len(indices) == 1:\n return index_select(weight, 0, indices)\n size = _copy(indices)\n size.append(weight[1])\n return size\n\ndef max_int():\n return 9223372036854775807\n\ndef slice(self: List[int], dim: int, start: Optional[int], end: Optional[int], step: int):\n ndim = len(self)\n assert ndim != 0\n dim = maybe_wrap_dim(dim, ndim)\n start_val = start if start is not None else 0\n end_val = end if end is not None else max_int()\n assert step > 0\n if (start_val == max_int()):\n start_val = 0\n if start_val < 0:\n start_val += self[dim]\n if end_val < 0:\n end_val += self[dim]\n if start_val < 0:\n start_val = 0\n # TODO: Remove this comment after https://github.com/pytorch/pytorch/pull/74980\n # is merged to incorporate our local edit here.\n elif start_val > self[dim]:\n start_val = self[dim]\n if end_val < start_val:\n end_val = start_val\n elif end_val >= self[dim]:\n end_val = self[dim]\n len = end_val - start_val\n out = _copy(self)\n out[dim] = (len + step - 1) // step\n return out\n\ndef check_cat_no_zero_dim(tensors: List[List[int]]):\n for tensor in tensors:\n assert(len(tensor) > 0)\n\ndef legacy_cat_wrap_dim(dim: int, tensor_sizes: List[List[int]]):\n out_dim : Optional[int] = None\n for size in tensor_sizes:\n if len(size) != 0 and size != [0] and out_dim is not None:\n out_dim = maybe_wrap_dim(dim, len(size))\n if out_dim is None:\n out_dim = dim\n return out_dim\n\ndef should_skip(tensor: List[int]):\n return numel(tensor) == 0 and len(tensor) == 1\n\ndef check_cat_shape_except_dim(first: List[int], second: List[int], dimension: int, index: int):\n first_dims = len(first)\n second_dims = len(second)\n assert first_dims == second_dims, \"Tensors must have same number of dimensions\"\n for dim in range(0, first_dims):\n if dim != dimension:\n assert first[dim] == second[dim], \"Sizes of tensors must match except in dimension\"\n\ndef cat(tensors: List[List[int]], dim: int):\n check_cat_no_zero_dim(tensors)\n dim = legacy_cat_wrap_dim(dim, tensors)\n assert len(tensors) > 0\n not_skipped_tensor: Optional[List[int]] = None\n for tensor in tensors:\n if not should_skip(tensor):\n not_skipped_tensor = tensor\n if not_skipped_tensor is None:\n return [0]\n\n cat_dim_size = 0\n\n for i in range(len(tensors)):\n tensor = tensors[i]\n if not should_skip(tensor):\n check_cat_shape_except_dim(not_skipped_tensor, tensor, dim, i)\n cat_dim_size = cat_dim_size + tensor[dim]\n\n result_size = _copy(not_skipped_tensor)\n result_size[dim] = cat_dim_size\n return result_size\n\ndef select(self: List[int], dim: int, index: int):\n ndim = len(self)\n assert ndim != 0\n dim = maybe_wrap_dim(dim, ndim)\n size = self[dim]\n assert not (index < -size or index >= size)\n if index < 0:\n 
index += size\n out: List[int] = []\n for i in range(ndim):\n if i != dim:\n out.append(self[i])\n return out\n\ndef matmul(tensor1: List[int] , tensor2: List[int]):\n dim_tensor1 = len(tensor1)\n dim_tensor2 = len(tensor2)\n if dim_tensor1 == 1 and dim_tensor2 == 1:\n return dot(tensor1, tensor2)\n elif dim_tensor1 == 2 and dim_tensor2 == 1:\n return mv(tensor1, tensor2)\n elif dim_tensor1 == 1 and dim_tensor2 == 2:\n return squeeze(mm(unsqueeze(tensor1, 0), tensor2), 0)\n elif dim_tensor1 == 2 and dim_tensor2 == 2:\n return mm(tensor1, tensor2)\n elif dim_tensor1 >= 1 and dim_tensor2 >=1:\n # We are multiplying b1 x n x m1 by x2 x m2 x p (where b1 can be a list);\n # we track m1 vs m2 separately even though they must match for nicer error messages\n n = tensor1[-2] if dim_tensor1 > 1 else 1\n m1 = tensor1[-1]\n batch_tensor1 : List[int] = []\n # TODO: handling of slice\n for i in range(dim_tensor1 - 2):\n batch_tensor1.append(tensor1[i])\n m2 = tensor2[-1] if dim_tensor2 > 1 else 1\n p = tensor2[-1]\n batch_tensor2 : List[int] = []\n # TODO: handling of slice\n for i in range(dim_tensor2 - 2):\n batch_tensor2.append(tensor2[i])\n\n # expand the batch portion (i.e. cut off matrix dimensions and expand rest)\n expand_batch_portion = broadcast(batch_tensor1, batch_tensor2)\n\n # todo: copy ?\n output_shape = expand_batch_portion\n if dim_tensor1 > 1:\n output_shape.append(n)\n\n if dim_tensor2 > 1:\n output_shape.append(p)\n\n return output_shape\n else:\n assert False, \"both arguments to matmul need to be at least 1D\"\n\ndef t(self: List[int]):\n assert len(self) <= 2\n self_len = len(self)\n if self_len == 0:\n out: List[int] = []\n return out\n elif self_len == 1:\n return [self[0]]\n else:\n return [self[1], self[0]]\n\ndef transpose(self: List[int], dim0: int, dim1: int):\n ndims = len(self)\n dim0 = maybe_wrap_dim(dim0, ndims)\n dim1 = maybe_wrap_dim(dim1, ndims)\n if (dim0 == dim1):\n return _copy(self)\n out: List[int] = []\n for i in range(ndims):\n if i == dim0:\n out.append(self[dim1])\n elif i == dim1:\n out.append(self[dim0])\n else:\n out.append(self[i])\n return out\n\n\ndef linear(input: List[int], weight: List[int], bias: Optional[List[int]]):\n out = matmul(input, t(weight))\n if bias is not None:\n assert broadcast(bias, out) == out\n return out\n\ndef addmm(self: List[int], mat1: List[int], mat2: List[int], beta: Any, alpha: Any):\n return broadcast(self, mm(mat1, mat2))\n\ndef check_non_negative(array: List[int]) -> bool:\n # TODO: look into rewriting with early return and getting loop unrolling to fire\n non_negative = False\n for val in array:\n if val < 0:\n non_negative = True\n return non_negative\n\ndef check_shape_forward(input: List[int], weight_sizes: List[int], bias: Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], groups: int):\n k = len(input)\n weight_dim = len(weight_sizes)\n\n # TODO: assertions could be expanded with the error messages\n assert not check_non_negative(padding)\n assert not check_non_negative(stride)\n\n assert weight_dim == k\n assert weight_sizes[0] >= groups\n assert (weight_sizes[0] % groups) == 0\n # only handling not transposed\n assert input[1] == weight_sizes[1] * groups\n assert bias is None or (len(bias) == 1 and bias[0] == weight_sizes[0])\n\n for i in range(2, k):\n assert (input[i] + 2 * padding[i - 2]) >= (dilation[i - 2] * (weight_sizes[i] - 1) + 1)\n\n# this is not handling transposed convolution yet\ndef conv_output_size(input_size: List[int], weight_size: List[int], bias: 
Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], groups: int):\n check_shape_forward(input_size, weight_size, bias, stride, padding, dilation, groups)\n\n has_dilation = len(dilation) > 0\n dim = len(input_size)\n output_size: List[int] = []\n input_batch_size_dim = 0\n weight_output_channels_dim = 0\n output_size.append(input_size[input_batch_size_dim])\n output_size.append(weight_size[weight_output_channels_dim])\n\n for d in range(2, dim):\n dilation_ = dilation[d - 2] if has_dilation else 1\n kernel = dilation_ * (weight_size[d] - 1) + 1\n output_size.append((input_size[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1)\n return output_size\n\ndef conv1d(input: List[int], weight: List[int], bias: Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], groups: int):\n assert len(weight) == 3\n assert len(input) == 3\n return conv_output_size(input, weight, bias, stride, padding, dilation, groups)\n\ndef conv2d(input: List[int], weight: List[int], bias: Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], groups: int):\n assert len(weight) == 4\n assert len(input) == 4\n return conv_output_size(input, weight, bias, stride, padding, dilation, groups)\n\ndef batch_norm(input: List[int], weight: List[int], bias: Optional[List[int]], running_mean: Optional[List[int]], running_var: Optional[List[int]], training: bool, momentum: float, eps: float, cudnn_enabled: bool):\n out: List[int] = []\n for elem in input:\n out.append(elem)\n return out\n\ndef conv3d(input: List[int], weight: List[int], bias: Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], groups: int):\n assert len(weight) == 5\n assert len(input) == 5\n return conv_output_size(input, weight, bias, stride, padding, dilation, groups)\n\ndef maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):\n if dim_post_expr <= 0:\n assert wrap_scalar\n dim_post_expr = 1\n min = -dim_post_expr\n max = dim_post_expr - 1\n assert not (dim < min or dim > max)\n if dim < 0:\n dim += dim_post_expr\n return dim\n\ndef zero_dim_tensor(input: Any):\n out: List[int] = []\n return out\n\ndef multiply_integers(li: List[int]):\n out = 1\n for elem in li:\n out = out * elem\n return out\n\ndef arange_end(end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):\n assert end >= 0\n return [int(torch.ceil(end))]\n\ndef arange_start(start: number, end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):\n assert end >= 0\n assert end >= start\n return [int(torch.ceil(end - start))]\n\ndef arange_start_step(start: number, end: number, step: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):\n assert step != 0\n if step < 0:\n assert start >= end\n else:\n assert end >= start\n return [int(torch.ceil((end - start) / step))]\n\ndef permute(input: List[int], dims: List[int]):\n assert len(input) == len(dims)\n ndim = len(dims)\n seen_dims: List[int] = []\n newSizes: List[int] = []\n for i in range(ndim):\n dim = maybe_wrap_dim(dims[i], ndim)\n seen_dims.append(dim)\n newSizes.append(input[dim])\n for i in range(1, ndim):\n for j in range(i):\n assert seen_dims[i] != seen_dims[j]\n return newSizes\n\ndef flatten(input: List[int], start_dim: int, end_dim: int):\n start_dim = maybe_wrap_dim(start_dim, len(input))\n end_dim = maybe_wrap_dim(end_dim, len(input))\n assert start_dim <= end_dim\n if len(input) == 0:\n return [1]\n if (start_dim == end_dim):\n # TODO: return self\n out: List[int] = []\n for elem in input:\n 
out.append(elem)\n return out\n slice_numel = 1\n for i in range(start_dim, end_dim + 1):\n slice_numel *= input[i]\n # TODO: use slicing when slice optimization has landed\n # slice_numel = multiply_integers(input[start_dim:end_dim - start_dim + 1])\n shape: List[int] = []\n for i in range(start_dim):\n shape.append(input[i])\n shape.append(slice_numel)\n for i in range(end_dim + 1, len(input)):\n shape.append(input[i])\n return shape\n\ndef quantized_prepacked_conv2d(input: List[int], conv2dOpContext: Any):\n assert isinstance(conv2dOpContext, __torch__.torch.classes.quantized.Conv2dPackedParamsBase)\n (weight, bias, stride, padding, dilation, groups) = unchecked_cast(Tuple[List[int], Optional[List[int]], List[int], List[int], List[int], int], ops.quantized.conv2d_unpack_sizes(conv2dOpContext))\n return conv2d(input, weight, bias, stride, padding, dilation, groups)\n\ndef pad(input: List[int], pad: List[int]):\n assert len(pad) % 2 == 0, \"Must have paired low-high pad amount values\"\n assert len(pad) // 2 <= len(input), \"Number of padded dimensions must be less than or equal to the input dimension\"\n # The `pad` list takes the form of Low-high pairs starting at the\n # *rightmost* dimension of `self`.\n for i in range(len(pad) // 2):\n input[-(i + 1)] += pad[2 * i] + pad[2 * i + 1]\n return input\n",
"# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.framework import TestUtils\nfrom torch_mlir_e2e_test.torchscript.registry import register_test_case\nfrom torch_mlir_e2e_test.torchscript.annotations import annotate_args, export\n\n# TODO: Support scalar !torch.int/!torch.float variants. Add support to\n# ReduceOpVariants to implement them in terms of the tensor-only variants +\n# torch.prim.NumToTensor.\n\n# TODO: This is pretty verbose. Can we have a helper to reduce\n# the boilerplate?\n\n# ==============================================================================\n\nclass ElementwiseUnaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.tanh(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseUnaryModule())\ndef ElementwiseUnaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseUnaryIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, a):\n return torch.tanh(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseUnaryIntModule())\ndef ElementwiseUnaryIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseBinaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b\n\n\n@register_test_case(module_factory=lambda: ElementwiseBinaryModule())\ndef ElementwiseBinaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4), tu.rand(4))\n\n# ==============================================================================\n\nclass ElementwiseBinaryStaticShapeModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([5, 4, 3, 3, 1], torch.float32, True),\n ([4, 3, 1, 2], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b\n\n\n@register_test_case(\n module_factory=lambda: ElementwiseBinaryStaticShapeModule())\ndef ElementwiseBinaryStaticShapeModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(5, 4, 3, 3, 1), tu.rand(4, 3, 1, 2))\n\n# ==============================================================================\n\nclass ElementwiseTernaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b, c):\n return torch.lerp(a, b, c)\n\n\n@register_test_case(module_factory=lambda: ElementwiseTernaryModule())\ndef ElementwiseTernaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5), tu.rand(4, 5), tu.rand(5))\n\n# ==============================================================================\n\nclass ElementwiseWhereSelfModule(torch.nn.Module):\n def 
__init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b, c):\n return torch.where(a > 0.5, b, c)\n\n\n@register_test_case(module_factory=lambda: ElementwiseWhereSelfModule())\ndef ElementwiseWhereSelfModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5), tu.rand(4, 5), tu.rand(5))\n\n# ==============================================================================\n\nclass ElementwiseWhereScalarModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.where(a > 0.5, 4.0, 8.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseWhereScalarModule())\ndef ElementwiseWhereScalarModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5))\n\n# ==============================================================================\n\nclass ElementwiseWhereScalarOtherModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float64, True),\n ([-1, -1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.where(a > 0.5, b, 8.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseWhereScalarOtherModule())\ndef ElementwiseWhereScalarOtherModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5).double(), tu.rand(4, 5).double())\n\n# ==============================================================================\n\nclass ElementwiseWhereScalarSelfModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float64, True),\n ([-1, -1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.where(a > 0.5, 4.0, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseWhereScalarSelfModule())\ndef ElementwiseWhereScalarSelfModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5).double(), tu.rand(4, 5).double())\n\n# ==============================================================================\n\n# Addition is an interesting special case of a binary op, because under the hood\n# it carries a third scalar \"alpha\" parameter, which needs special handling.\nclass ElementwiseAddModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a + b\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddModule())\ndef ElementwiseAddModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseUnsqueezeBroadcastModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b.unsqueeze(0)\n\n\n@register_test_case(\n module_factory=lambda: ElementwiseUnsqueezeBroadcastModule())\ndef ElementwiseUnsqueezeBroadcastModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseUnsqueezeNegDimsModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n 
@annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n # As mentioned in `unsqueeze` docstring,\n # valid dim values are [-input.dim()-1, input.dim()+1).\n # This tests the lower bound\n return torch.unsqueeze(a, -3)\n\n\n@register_test_case(module_factory=lambda: ElementwiseUnsqueezeNegDimsModule())\ndef ElementwiseUnsqueezeNegDimsModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 3))\n\n# ==============================================================================\n\nclass ElementwiseFlattenBroadcastModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b.flatten(-1, -1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseFlattenBroadcastModule())\ndef ElementwiseFlattenBroadcastModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(6), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseReluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.relu(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseReluModule())\ndef ElementwiseReluModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 2) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseLeakyReluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.ops.aten.leaky_relu(x, negative_slope=0.1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLeakyReluModule())\ndef ElementwiseLeakyReluModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 2) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseGeluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.gelu = torch.nn.GELU()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return self.gelu(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseGeluModule())\ndef ElementwiseGeluModule_basic(module, tu: TestUtils):\n module.forward(2 * tu.rand(5, 3) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseSigmoidModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.sigmoid(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSigmoidModule())\ndef ElementwiseSigmoidModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseSigmoidIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.sigmoid(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSigmoidIntModule())\ndef ElementwiseSigmoidIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 5), dtype=torch.int32))\n\n# 
==============================================================================\n\nclass ElementwiseMinimumModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.minimum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMinimumModule())\ndef ElementwiseMinimumModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5), tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseMinimumIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.minimum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMinimumIntModule())\ndef ElementwiseMinimumIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 5)), torch.randint(10, (3, 5)))\n\n# ==============================================================================\n\nclass ElementwiseMaximumModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.maximum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMaximumModule())\ndef ElementwiseMaximumModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5), tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseMaximumIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.maximum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMaximumIntModule())\ndef ElementwiseMaximumIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 5)), torch.randint(10, (3, 5)))\n\n# ==============================================================================\n\nclass ElementwiseClampModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n # TODO: It would be great to return all of these, so they get checked\n # individually, but RefBackend doesn't support multiple returns.\n # Instead, multiply them together, which has some chance of propagating\n # all the values.\n float_min = torch.clamp(x, min=-2.0)\n int_min = torch.clamp(x, min=-3)\n float_max = torch.clamp(x, max=2.0)\n int_max = torch.clamp(x, max=3)\n both = torch.clamp(x, min=-5, max=5)\n return float_min * int_min * float_max * int_max * both\n\n\n@register_test_case(module_factory=lambda: ElementwiseClampModule())\ndef ElementwiseClampModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5, low=-10, high=10))\n\n# ==============================================================================\n\nclass RsubModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.rsub(x, 3.0, alpha=1.0)\n\n\n@register_test_case(module_factory=lambda: RsubModule())\ndef RsubModule_basic(module, 
tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass RsubModule_noalpha(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.rsub(x, 2.0)\n\n\n@register_test_case(module_factory=lambda: RsubModule_noalpha())\ndef RsubModule_noalpha_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x):\n return torch.mul(x, 4)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarIntModule())\ndef ElementwiseMulScalarModule_int(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.mul(x, 100.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarFloatModule())\ndef ElementwiseMulScalarModule_float(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.mul(x, 8.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarModule())\ndef ElementwiseMulScalarModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseMulTensorFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([-1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.mul(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulTensorFloatModule())\ndef ElementwiseMulTensorFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand(4).type(torch.float64))\n\n# ==============================================================================\n\nclass ElementwiseMulTensorIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.int32, True),\n ([-1], torch.int64, True),\n ])\n def forward(self, a, b):\n return torch.mul(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulTensorIntModule())\ndef ElementwiseMulTensorIntModule_basic(module, tu: TestUtils):\n module.forward(\n torch.randint(10, [4]).type(torch.int32), torch.randint(10, [4]))\n\n# ==============================================================================\n\nclass ElementwiseLogModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.log(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLogModule())\ndef ElementwiseLogModule_basic(module, tu: 
TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseLogIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, a):\n return torch.log(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLogIntModule())\ndef ElementwiseLogIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseErfModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.ops.aten.erf(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseErfModule())\ndef ElementwiseErfModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseErfIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, a):\n return torch.ops.aten.erf(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseErfIntModule())\ndef ElementwiseErfIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\n\nclass ElementwiseSqrtModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.sqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSqrtModule())\ndef ElementwiseSqrtModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseSqrtIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.sqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSqrtIntModule())\ndef ElementwiseSqrtIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseFloorModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.floor(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseFloorModule())\ndef ElementwiseFloorModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCeilModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.ceil(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCeilModule())\ndef ElementwiseCeilModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass 
ElementwisePowModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.pow(a, 2.0)\n\n\n@register_test_case(module_factory=lambda: ElementwisePowModule())\ndef ElementwisePowModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseToDtypeF32ToI64Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True)\n ])\n def forward(self, x):\n return x.to(torch.int64)\n\n\n@register_test_case(module_factory=lambda: ElementwiseToDtypeF32ToI64Module())\ndef ElementwiseToDtypeF32ToI64Module_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseToDtypeIdentityModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True)\n ])\n def forward(self, x):\n return x.to(torch.float32, False, False)\n\n\n@register_test_case(module_factory=lambda: ElementwiseToDtypeIdentityModule())\ndef ElementwiseToDtypeIdentityModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseLog2Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.log2(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLog2Module())\ndef ElementwiseLog2Module_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseLog2IntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, a):\n return torch.log2(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLog2IntModule())\ndef ElementwiseLog2IntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseRsqrtModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.rsqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseRsqrtModule())\ndef ElementwiseRsqrtModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseRsqrtIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.rsqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseRsqrtIntModule())\ndef ElementwiseRsqrtIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseAbsModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n 
([-1, -1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.abs(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAbsModule())\ndef ElementwiseAbsModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5, low=-1.0, high=1.0))\n\n# ==============================================================================\n\nclass ElementwiseReciprocalModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.reciprocal(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseReciprocalModule())\ndef ElementwiseReciprocalModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4))\n\n# ==============================================================================\n\nclass ElementwiseReciprocalIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.reciprocal(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseReciprocalIntModule())\ndef ElementwiseReciprocalIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (4,), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseDivScalarModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.div(x, 10.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseDivScalarModule())\ndef ElementwiseDivScalarModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseDivTensorFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([-1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.div(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseDivTensorFloatModule())\ndef ElementwiseDivTensorFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand(4).type(torch.float64))\n\n# ==============================================================================\n\nclass ElementwiseAndIntegerModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.bitwise_and(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAndIntegerModule())\ndef ElementwiseAndIntegerModule_basic(module, tu: TestUtils):\n module.forward(\n torch.randint(-10, 10, (3, 4)).to(torch.int32),\n torch.randint(-10, 10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseSubScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.sub(x, 2.1, alpha=2)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSubScalarIntModule())\ndef ElementwiseSubScalarIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass 
ElementwiseSubScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.sub(x, 2.1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSubScalarFloatModule())\ndef ElementwiseSubScalarFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarInt64Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarInt64Module())\ndef ElementwiseAddScalarInt64Module_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarIntModule())\ndef ElementwiseAddScalarIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (2, 3), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0, alpha=2)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarFloatModule())\ndef ElementwiseAddScalarFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCloneModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.clone(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCloneModule())\ndef ElementwiseCloneModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCloneContiguousModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.clone(x, memory_format=torch.contiguous_format)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCloneContiguousModule())\ndef ElementwiseCloneContiguousModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 3, 4))\n\n# ==============================================================================\n\nclass ElementwiseExpModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.exp(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseExpModule())\ndef ElementwiseExpModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass 
ElementwiseExpIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.exp(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseExpIntModule())\ndef ElementwiseExpIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n\n# ==============================================================================\n\nclass ElementwiseSinModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.sin(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSinModule())\ndef ElementwiseSinModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseSinIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.sin(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSinIntModule())\ndef ElementwiseSinIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseCosModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.cos(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCosModule())\ndef ElementwiseCosModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCosIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n\n def forward(self, a):\n return torch.cos(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCosIntModule())\ndef ElementwiseCosIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(1, 10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseNegModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.neg(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseNegModule())\ndef ElementwiseNegModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.bool, True),\n ([-1], torch.bool, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpModule())\ndef ElementwiseAtenLogicalOrOpModule_basic(module, tu: TestUtils):\n module.forward(torch.tensor([False, True]), torch.tensor([False, False]))\n\nclass ElementwiseAtenLogicalOrOpDiffArgs1Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], 
torch.float64, True),\n ([-1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpDiffArgs1Module())\ndef ElementwiseAtenLogicalOrOpDiffArgs1Module_basic(module, tu: TestUtils):\n module.forward(torch.tensor([0.2, 0.1]), torch.tensor([0, 1]))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpDiffArgs2Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.bool, True),\n ([-1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpDiffArgs2Module())\ndef ElementwiseAtenLogicalOrOpDiffArgs2Module_basic(module, tu: TestUtils):\n module.forward(torch.tensor([True, False]), torch.tensor([0, 1]))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpDiffArgs3Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.int64, True),\n ([-1], torch.bool, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpDiffArgs3Module())\ndef ElementwiseAtenLogicalOrOpDiffArgs3Module_basic(module, tu: TestUtils):\n module.forward(torch.tensor([1, 2]), torch.tensor([False, True]))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpRandomModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1, -1], torch.int64, True),\n ([-1, -1, -1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpRandomModule())\ndef ElementwiseAtenLogicalOrOpRandomModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(3, 10, (2, 3, 4, 5)), torch.randint(10, 100, (2, 3, 4, 5)))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpRandomFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1, -1], torch.float32, True),\n ([-1, -1, -1, -1], torch.float32, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpRandomFloatModule())\ndef ElementwiseAtenLogicalOrOpRandomFloatModule_basic(module, tu: TestUtils):\n module.forward(torch.rand(2, 3, 3, 5), torch.rand(2, 3, 3, 5))\n\n# ==============================================================================\n\nclass ElementwiseAtenLogicalOrOpNegativeModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n \n @export\n @annotate_args([\n None,\n ([-1, -1, -1, -1], torch.int64, True),\n ([-1, -1, -1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpNegativeModule())\ndef ElementwiseAtenLogicalOrOpNegativeModule_basic(module, tu: TestUtils):\n module.forward(torch.neg(torch.randint(3, 10, (2, 3, 4, 5))), torch.neg(torch.randint(10, 100, (2, 3, 4, 5))))\n\n# 
==============================================================================\n\nclass ElementwiseAtenLogicalOrOpBrodcastModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n \n @export\n @annotate_args([\n None,\n ([-1], torch.int64, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.logical_or(x, y)\n\n@register_test_case(module_factory=lambda: ElementwiseAtenLogicalOrOpBrodcastModule())\ndef ElementwiseAtenLogicalOrOpBrodcastModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(3, (3,)), torch.randint(3, (4, 3)))\n\n\n\n"
] | [
[
"torch.ops.aten.sqrt",
"torch.randint",
"torch.rand",
"torch.ops.aten.Bool"
],
[
"torch.ceil"
],
[
"torch.abs",
"torch.randint",
"torch.ops.aten.erf",
"torch.ops.aten.maximum",
"torch.sin",
"torch.neg",
"torch.tanh",
"torch.rsqrt",
"torch.where",
"torch.pow",
"torch.add",
"torch.sqrt",
"torch.clone",
"torch.tensor",
"torch.log2",
"torch.relu",
"torch.mul",
"torch.reciprocal",
"torch.rsub",
"torch.rand",
"torch.ops.aten.leaky_relu",
"torch.cos",
"torch.div",
"torch.lerp",
"torch.sigmoid",
"torch.ops.aten.minimum",
"torch.floor",
"torch.unsqueeze",
"torch.exp",
"torch.log",
"torch.ops.aten.logical_or",
"torch.bitwise_and",
"torch.nn.GELU",
"torch.ceil",
"torch.sub",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tfiers/parachute | [
"7a88c0708274670d8b381282cae829c928a7c344"
] | [
"tests/validators/ndarray/test_array.py"
] | [
"import numpy as np\n\nfrom parachute import array, shape, Arbitrary\n\n\ndef test_type_arbitrary():\n Array = array()\n assert not Array(\"No good type\").is_valid()\n assert not Array([\"a\", \"b\"]).is_valid()\n assert Array(3.2).is_valid()\n assert Array([1, 2]).is_valid()\n assert Array((5, 6)).is_valid()\n assert Array([[1, 2], [4, 4]]).is_valid()\n assert Array([[True]]).is_valid()\n assert Array([(False)]).is_valid()\n # fmt: off\n assert Array(np.array([\n [[1], [2]],\n [[3], [4]],\n ])).is_valid()\n # fmt: on\n\n\ndef test_type_arbitrary_higher_order():\n Array = array()\n assert Array(((((4))))).is_valid()\n # fmt: off\n assert Array([\n [[1], [2]],\n [[3], [4]],\n ]).is_valid()\n # fmt: on\n\n\n# See test_vector.py for tests of other dtypes.\n\n\ndef test_shape():\n Array = array(shape_spec=(2,))\n assert Array([1, 2]).is_valid()\n assert Array((0.41, -4)).is_valid()\n assert Array(np.array([1, 2])).is_valid()\n assert not Array([1]).is_valid()\n assert not Array([1, 2, 3]).is_valid()\n assert not Array([[1, 2]]).is_valid()\n\n\ndef test_ndim_0():\n Array = array(ndim=0)\n assert Array(1).is_valid()\n assert not Array([1, 2]).is_valid()\n assert not Array([[1, 2], [3, 4]]).is_valid()\n assert not Array([[[1, 2], [3, 4]]]).is_valid()\n\n\ndef test_ndim_1():\n Array = array(ndim=1)\n assert not Array(1).is_valid()\n assert Array([1, 2]).is_valid()\n assert not Array([[1, 2], [3, 4]]).is_valid()\n assert not Array([[[1, 2], [3, 4]]]).is_valid()\n\n\ndef test_ndim_2():\n Array = array(ndim=2)\n assert not Array(1).is_valid()\n assert not Array([1, 2]).is_valid()\n assert Array([[1, 2], [3, 4]]).is_valid()\n assert not Array([[[1, 2], [3, 4]]]).is_valid()\n\n\ndef test_repr():\n Array = array(complex, ndim=2)\n shape_str = shape((Arbitrary, Arbitrary)).get_short_str()\n expected_str = (\n \"NumPy ndarray-like, with numeric type \"\n \"compatible to `complex`, \"\n f\"and shape `{shape_str}`.\"\n )\n assert Array.get_annotation_str() == expected_str\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mathxyz/stock2 | [
"1e07156dea37f987efbc03025693b9ca2acf3f96",
"1e07156dea37f987efbc03025693b9ca2acf3f96"
] | [
"hockey_dat/hockey_front_to_back.py",
"comments_toxicity/comments_xgb_final.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.pipeline import FeatureUnion\nfrom datetime import datetime\nimport gc\n\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nimport xgboost as xgb\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\ntrain = pd.read_csv('train.csv', encoding = \"ISO-8859-1\")\ntest_x = pd.read_csv('test.csv', encoding = \"ISO-8859-1\")\ntest_y = pd.read_csv('test_salaries.csv') \n\ntest_y=list(test_y['Salary'].values)\n\ntrain_x = train.drop('Salary',axis=1)\ntrain_y = list(train['Salary'])\n\ntrain_x.head()\ntrain=[]\ngc.collect()\n\ntest_x.head()\ntest_y[:10]\n\ntrain_x.head()\ntrain_y[:10]\n\n\n\n#Born - datetime needs to be changed to days since a set date\n#days form birth to season start\n\ndef elapsed_days(start, end=datetime(2016,10,12)):\n\t\"\"\" calcualte the number of days start and end dates\"\"\"\n\tx = (end - start)\n\treturn x.days\n\n#\ntrain_x['age_season_start'] = train_x.apply(lambda x: \n\telapsed_days(datetime.strptime(x['Born'], '%y-%m-%d')) ,axis=1)\n\ntest_x['age_season_start'] = test_x.apply(lambda x: \n\telapsed_days(datetime.strptime(x['Born'], '%y-%m-%d')) ,axis=1)\n\n\n\n# Drop the city, province and Cntry cols, will include nationality but all these\n# seemed redundant on the initial rf and XGBoost models\n\ndrop_cols = ['City', 'Pr/St', 'Cntry', 'Last Name', 'First Name', 'Team', 'Born']\n\ntest_x.drop(drop_cols, axis = 1, inplace = True)\n\ntrain_x.drop(drop_cols, axis = 1, inplace = True)\n\n\n\n#check the data types of the remaining columns\ntrain_x.dtypes\nfor i in train_x.dtypes:\n\tprint(i)\n\n\n#Categoricals:\ncat_attribs = ['Nat', 'Hand', 'Position']\n\nnum_attribs = list(train_x.drop(cat_attribs,axis=1).columns)\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n\t\"\"\" this class will select a subset of columns,\n\t\tpass in the numerical or categorical columns as \n\t\tattribute names to get just those columns for processing\"\"\"\n\tdef __init__(self, attribute_names):\n\t\tself.attribute_names = attribute_names\n\tdef fit(self, X, y=None):\n\t\treturn self\n\tdef transform(self, X):\n\t\treturn X[self.attribute_names]\n\n\n\n#build my own to binarize multiple labels at once, \n#then implement it in the cat_pipeline\n\"\"\"\nx = MultiLabelBinarizer(train_x[cat_attribs])\n\ntempdf = pd.get_dummies(train_x, columns=cat_attribs)\n\nencoder = LabelBinarizer()\nx =encoder.fit_transform(train_x['Nat'])\n\n\nclass_test = MultiColBinarize()\nclass_test.fit_transform(train_x[cat_attribs])\n\nclass_test.transform()\n\"\"\"\n\n\nclass MultiColBinarize(BaseEstimator, TransformerMixin):\n\t\"\"\" take a df with multiple categoricals\n\t\tone hot encode them all and return the numpy array\"\"\"\n\tdef __init__(self, alter_df= True):\n\t\tself.alter_df = alter_df\n\tdef fit(self, X, y=None):\n\t\t\"\"\"load the data in, initiate the binarizer for each column\"\"\"\n\t\tself.X = X\n\t\tself.cols_list = list(self.X.columns)\n\t\tself.binarizers = []\n\t\tfor i in self.cols_list:\n\t\t\tencoder = LabelBinarizer()\n\t\t\tencoder.fit(self.X[i])\n\t\t\tself.binarizers.append(encoder)\n\t\treturn self\n\tdef transform(self, X):\n\t\t\"\"\" for 
each of the columns, use the existing binarizer to make new cols \"\"\"\t\t\n\t\tself.X = X\n\t\tself.binarized_cols = self.binarizers[0].transform(self.X[self.cols_list[0]])\n\t\tself.classes_ = list(self.binarizers[0].classes_)\n\t\tfor i in range(1,len(self.cols_list)):\n\t\t\tbinarized_col = self.binarizers[i].transform(self.X[self.cols_list[i]])\n\t\t\tself.binarized_cols = np.concatenate((self.binarized_cols , binarized_col), axis = 1)\n\t\t\tself.classes_.extend(list(self.binarizers[i].classes_))\n\t\treturn self.binarized_cols\n\n\n\nnum_pipeline = Pipeline([\n\t\t('selector', DataFrameSelector(num_attribs)),\n\t\t('imputer', Imputer(strategy=\"median\")),\n\t\t('std_scaler', StandardScaler()),\n\t])\n\n# select the categorical columns, binarize them \ncat_pipeline = Pipeline([\n\t\t('selector', DataFrameSelector(cat_attribs)),\n\t\t('label_binarizer', MultiColBinarize()),\n\t])\n\n\n\n#####\n# impute missing values and prepare the categoricals for ml algorithms\n#####\n\n\ntrain_num_processed = num_pipeline.fit_transform(train_x)\ntrain_cat_processed = cat_pipeline.fit_transform(train_x)\n\ntrain_x_clean = np.concatenate((train_num_processed,train_cat_processed),axis=1)\n\n\n#need to just transform the test, we impute based on the training data!\n\ntest_num_processed = num_pipeline.transform(test_x)\ntest_cat_processed = cat_pipeline.transform(test_x)\n\ntest_x_clean = np.concatenate((test_num_processed,test_cat_processed),axis=1)\n\n\n#check that the number of columns are the same for both\ntrain_x_clean.shape\ntest_x_clean.shape\n\n\n\"\"\" imputation is successfully completed, on to the modelling \"\"\"\n\n##########\n# support vector machine\n##########\n\n\nsvm_reg = SVR(kernel=\"linear\")\n\n\nsvr_param_grid = [\n\t\t{'kernel': ['rbf','linear'], 'C': [1.0, 10., 100., 1000.0],\n\t\t'gamma': [0.01, 0.1,1.0]}\n\t]\n\n\nsvm_grid_search = GridSearchCV(svm_reg, svr_param_grid, cv=5,\n\t\t\t\t\t\tscoring='neg_mean_squared_error')\n\nsvm_grid_search.fit(train_x_clean, train_y)\n\nsvm_grid_search.best_params_\n\nsvm_grid_search.best_estimator_\n\ncvres = svm_grid_search.cv_results_\nfor mean_score, params in zip(cvres[\"mean_test_score\"], cvres[\"params\"]):\n\tprint(np.sqrt(-mean_score), params)\n\n\n\n##########\n# Random forest regression\n##########\n\n\nforest_reg = RandomForestRegressor(random_state=42)\n\nrf_param_grid = [\n\t{'n_estimators': [3, 10, 30,100,300,1000], 'max_features': [2, 4, 6, 8]},\n\t{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},\n ]\n\nforest_reg = RandomForestRegressor(random_state=42)\n# train across 5 folds, that's a total of (12+6)*5=90 rounds of training \nrf_grid_search = GridSearchCV(forest_reg, rf_param_grid, cv=5,\n\t\t\t\t\t\t scoring='neg_mean_squared_error')\nrf_grid_search.fit(train_x_clean, train_y)\n\nrf_grid_search.best_params_\n\nrf_grid_search.best_estimator_\n\ncvres = rf_grid_search.cv_results_\nfor mean_score, params in zip(cvres[\"mean_test_score\"], cvres[\"params\"]):\n\tprint(np.sqrt(-mean_score), params)\n\n\n##########\n# XGBoost model\n##########\n\n\nXGBoost_reg = xgb.XGBRegressor()\n\n#note all the params below must be wrapped in lists\nxgb_param_grid = [{'min_child_weight': [20,25,30], \n\t\t\t\t\t'learning_rate': [0.1, 0.2, 0.3], \n\t\t\t\t\t'colsample_bytree': [0.9], \n\t\t\t\t\t'max_depth': [5,6,7,8], \n\t\t\t\t\t'reg_lambda': [1.], \n\t\t\t\t\t'nthread': [-1], \n\t\t\t\t\t'n_estimators': [100,1000,2000],\n\t\t\t\t\t'early_stopping_rounds':50,\n\t\t\t\t\t'objective': 
['reg:linear']}]\n\n\nxgb_grid_search = GridSearchCV(XGBoost_reg, xgb_param_grid, cv=5,\n\t\t\t\t\tscoring='neg_mean_squared_error', n_jobs=1)\n\nxgb_grid_search.fit(train_x_clean, train_y)\n\n\nxgb_grid_search.best_params_\n\nxgb_grid_search.best_estimator_\n\ncvres = xgb_grid_search.cv_results_\nfor mean_score, params in zip(cvres[\"mean_test_score\"], cvres[\"params\"]):\n\tprint(np.sqrt(-mean_score), params)\n\n\n\n\n# test the above 3 models, retrain on the top set of paramaters\n\n\n#SVM\nopt_svm_params = {'C': 1000.0, \n\t\t\t\t'gamma': 0.01, \n\t\t\t\t'kernel': 'linear'}\n\n#need the ** to unpack the dictonary so all the params don't get assigned to one\nopt_svm_reg = SVR(**opt_svm_params)\n\nopt_svm_reg.fit(train_x_clean, train_y)\n\n\n#RF\nopt_rf_params= {'max_features': 8, 'n_estimators': 100}\n\nopt_forest_reg = RandomForestRegressor(**opt_rf_params, random_state=42)\n\nopt_forest_reg.fit(train_x_clean, train_y)\n\n\n#XGB\nopt_xgb_params = {'colsample_bytree': 0.9,\n\t\t\t\t'learning_rate': 0.1,\n\t\t\t\t'max_depth': 7,\n\t\t\t\t'min_child_weight': 30,\n\t\t\t\t'n_estimators': 1000,\n\t\t\t\t'nthread': -1,\n\t\t\t\t'objective': 'reg:linear',\n\t\t\t\t'reg_lambda': 1.0}\n\n\nopt_XGBoost_reg = xgb.XGBRegressor(**opt_xgb_params)\n\nopt_XGBoost_reg.fit(train_x_clean, train_y)\n\n\n\ny1 = opt_XGBoost_reg.predict(test_x_clean)\ny2 = opt_svm_reg.predict(test_x_clean)\ny3 = opt_forest_reg.predict(test_x_clean)\n\n\n\"\"\"\ndo this for each:\n\nmedian_mse= mean_squared_error(test_y,meadian_guess)\n\nmedian_rmse = np.sqrt(median_mse)\nmedian_rmse\n\"\"\"\n\n# then find a way to optimize their combination into a single model\n#combine the three optimal predictors into a single sklearn class that spits\n#out predicted values, use this with a tuning param that changes the weights of\n#the models and use cross validation function to get the scores.\n\n\nclass ensemble_predictor(BaseEstimator, TransformerMixin):\n\t\"\"\" take in a dataset and train it with three models,\n\t\tcombining the outputs to make predictions\"\"\"\n\tdef __init__(self, weights= { 'xgb': 0.33, 'rf': 0.33, 'svm' : 0.34}):\n\t\tself.weights = weights\n\t\tself.opt_xgb_params = {'colsample_bytree': 0.9,\n\t\t\t\t\t'learning_rate': 0.1,\n\t\t\t\t\t'max_depth': 7,\n\t\t\t\t\t'min_child_weight': 30,\n\t\t\t\t\t'nthread': -1,\n\t\t\t\t\t'objective': 'reg:linear',\n\t\t\t\t\t'reg_lambda': 1.0}\n\t\tself.opt_svm_params = {'C': 1000.0, \n\t\t\t\t'gamma': 0.01, \n\t\t\t\t'kernel': 'linear'}\n\t\tself.opt_rf_params= {'max_features': 8, 'n_estimators': 100}\n\n\tdef fit(self, X, y):\n\t\t\"\"\"load the data in, initiate the models\"\"\"\n\t\tself.X = X\n\t\tself.y = y\n\t\tself.opt_XGBoost_reg = xgb.XGBRegressor(**self.opt_xgb_params)\n\t\tself.opt_forest_reg = RandomForestRegressor(**self.opt_rf_params)\n\t\tself.opt_svm_reg = SVR(**self.opt_svm_params)\n\t\t\"\"\" fit the models \"\"\"\n\t\tself.opt_XGBoost_reg.fit(self.X ,self.y)\n\t\tself.opt_forest_reg.fit(self.X ,self.y)\n\t\tself.opt_svm_reg.fit(self.X ,self.y)\n\tdef predict(self, X2):\n\t\t\"\"\" make the predictions for the models, combine based on weights \"\"\"\n\t\tself.y_xgb = self.opt_XGBoost_reg.predict(X2)\n\t\tself.y_rf = self.opt_forest_reg.predict(X2)\n\t\tself.y_svm = self.opt_svm_reg.predict(X2)\n\t\t\"\"\" multiply the predictions by their weights, return optimal \"\"\"\n\t\tself.prediction = self.y_xgb * self.weights['xgb'] \\\n\t\t\t\t\t\t+ self.y_rf * self.weights['rf'] \\\n\t\t\t\t\t\t+ self.y_svm * self.weights['svm']\n\t\treturn 
self.prediction\n\nweight_variants = [\n{ 'xgb': 0.33, 'rf': 0.33, 'svm' : 0.34},\n{ 'xgb': 0.9, 'rf': 0.05, 'svm' : 0.05},\n{ 'xgb': 0.8, 'rf': 0.1, 'svm' : 0.1},\n{ 'xgb': 0.5, 'rf': 0.3, 'svm' : 0.2},\n{ 'xgb': 0.3, 'rf': 0.2, 'svm' : 0.5},\n{ 'xgb': 0.3, 'rf': 0.5, 'svm' : 0.2}\n]\n\n\n\n#determine the optimal weights for the different models via cross validation\nfor params in weight_variants:\n\tmodel = ensemble_predictor(weights = params)\n\tensemble_score = cross_val_score(model, train_x_clean, train_y,\n\t\t\t\t\t\t\tscoring=\"neg_mean_squared_error\", cv=5)\n\tensemble_rmse = np.sqrt(-ensemble_score)\n\tprint('%s\\t %s'% (params, ensemble_rmse.mean()))\n\n#winner\n# {'xgb': 0.8, 'rf': 0.1, 'svm': 0.1}\t 1322950.1668\n\n#try again with the new weight variants, tuned in towards the optimal numbers\nweight_variants = [\n{ 'xgb': 0.8, 'rf': 0.15, 'svm' : 0.05},\n{ 'xgb': 0.8, 'rf': 0.05, 'svm' : 0.15},\n{ 'xgb': 0.82, 'rf': 0.09, 'svm' : 0.09},\n{ 'xgb': 0.79, 'rf': 0.105, 'svm' : 0.105},\n{ 'xgb': 0.79, 'rf': 0.11, 'svm' : 0.1},\n{ 'xgb': 0.79, 'rf': 0.1, 'svm' : 0.11}\n]\n\n\n#{'xgb': 0.8, 'rf': 0.15, 'svm': 0.05}\t 1322424.6932\n#\nweights = {'xgb': 0.8, 'rf': 0.15, 'svm': 0.05}\n\nopt_model = ensemble_predictor(weights)\nopt_model.fit(train_x_clean, train_y)\nfinal_predictions = opt_model.predict(test_x_clean)\n\n\nopt_mean_squared_error = mean_squared_error(test_y,final_predictions)\n\nopt_rmse = np.sqrt(opt_mean_squared_error)\nopt_rmse\n\n#1,546,809\n\nmeadian_guess = [np.median(test_y) for x in test_y]\n#925000\n\nmedian_mse= mean_squared_error(test_y,meadian_guess)\n\nmedian_rmse = np.sqrt(median_mse)\nmedian_rmse\n#2878624\n\"\"\"\n#therefore our model is about 1.3 million dollars closer on average than \n#guessing by just the median alone\n\n#the cross validation was off by only 472073, which is suggestive of over fit as this is\n#under a third of our final rmse on the test data.\n\nthis mixed model was better then previous iterations, when we ran\nRandom Forest regression the model was off by an average of $1,578,497\nand with XGBoost alone the model was only slightly improved at $1,574,073\nWhen we combined models here, we see that we are about $25,000 closer on average,\nwhich is a slight improvement, but an improvement nonetheless!\n\n\"\"\"\n\n",
"import pandas as pd\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nfrom sklearn.model_selection import GridSearchCV\n\n\ndef optimal_n_rounds(xgb_model, xgb_matrix, max_n_estimators):\n\t\"\"\" take the input model and xgb matrix (x and y values) \n\t\tand determine the optimal number of trees via cross validation.\n\t\t returns the number of trees \"\"\"\n\tcvresult = xgb.cv(xgb_model, x_values, \n\t\t\t\t\t\tnum_boost_round = max_n_estimators, \n\t\t\t\t\t\tnfold = 5,\n\t\t\t\t\t\tmetrics\t= 'auc', \n\t\t\t\t\t\tearly_stopping_rounds = 50)\t\n\treturn cvresult.shape[0]\n\ndef optimal_params(xgb_model, x_vals, y_vals, xgb_param_grid):\n\t\"\"\" take a model, predictor matrix and paramater grid and\n\t\treturn the optimal paramater set \"\"\"\n\t_gsearch = GridSearchCV(xgb_model, xgb_param_grid, \n\t\t\t\t\t\t\t\tscoring='roc_auc', \n\t\t\t\t\t\t\t\tn_jobs=4, \n\t\t\t\t\t\t\t\tiid=False, \n\t\t\t\t\t\t\t\tcv=3)\n\t_gsearch.fit(x_vals, y_vals)\n\n\treturn _gsearch.best_params_\n\n\nif __name__ == '__main__':\n\n\t#load in the processed data from train_and_test_to_matrix.py\n\ttrain_sparse = sparse.load_npz('sparse_train_punc.npz')\n\ttest_sparse = sparse.load_npz('sparse_test_punc.npz')\n\n\ttrain = pd.read_csv('train.csv')\n\ttest = pd.read_csv('test.csv')\n\tsub_file = pd.read_csv('sample_submission.csv')\n\n\n\tto_predict = list(train.columns[2:])\n\n\tfor col in to_predict:\n\n\t\txgtrain_input = xgb.DMatrix(train_sparse, label=train[col].values)\n\n\t\txgb_initial = xgb.XGBClassifier(learning_rate =0.1,\n\t\t\t\t\t\t\t\t\tn_estimators=1000,\n\t\t\t\t\t\t\t\t\tmax_depth=5,\n\t\t\t\t\t\t\t\t\tmin_child_weight=1,\n\t\t\t\t\t\t\t\t\tgamma=0,\n\t\t\t\t\t\t\t\t\tsubsample=0.8,\n\t\t\t\t\t\t\t\t\tcolsample_bytree=0.8,\n\t\t\t\t\t\t\t\t\tobjective= 'binary:logistic',\n\t\t\t\t\t\t\t\t\tscale_pos_weight=1)\n\n\t\topt_rounds = optimal_n_rounds(xgb_initial, xgtrain_input, 1000)\n\n\t\txgb_class_grid = xgb.XGBClassifier(n_estimators=opt_rounds,\n\t\t\t\t\t\t\t\t\t\t\tgamma=0,\n\t\t\t\t\t\t\t\t\t\t\tsubsample=0.8,\n\t\t\t\t\t\t\t\t\t\t\tobjective= 'binary:logistic',\n\t\t\t\t\t\t\t\t\t\t\tscale_pos_weight=1)\n\t\t\n\t\txgb_params = {'max_depth':[4, 6, 8],\n\t\t\t\t\t\t'min_child_weight':[1, 4, 8],\n\t\t\t\t\t\t'colsample_bytree': [0.8, 0.9], }\n\n\n\t\txgb_best = optimal_params(xgb_class_grid, \n\t\t\t\t\t\t\t\t\ttrain_sparse, \n\t\t\t\t\t\t\t\t\ttrain[col].values, \n\t\t\t\t\t\t\t\t\txgb_params)\n\n\t\txgb_final = xgb.XGBClassifier(gsearch_toxic.best_params_,\n\t\t\t\t\t\t\t\teta = 0.001,\n\t\t\t\t\t\t\t\tn_estimators=5000,\n\t\t\t\t\t\t\t\tgamma=0,\n\t\t\t\t\t\t\t\tsubsample=0.8,\n\t\t\t\t\t\t\t\tobjective= 'binary:logistic',\n\t\t\t\t\t\t\t\tscale_pos_weight=1,\n\t\t\t\t\t\t\t\tearly_stopping_rounds = 50)\n\n\t\txgb_final.fit(train_sparse, train[col].values, eval_metric='auc')\n\t\t\n\t\toptimal_predictions = xgb_final.predict(test_sparse)\n\n\t\tsub_file[col] = optimal_predictions\n\n\n\tsub_file.to_csv('cam_xgb_predictions.csv', index = False)\n\n\n\n\n\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"numpy.sqrt",
"numpy.median",
"sklearn.preprocessing.Imputer",
"sklearn.metrics.mean_squared_error",
"numpy.concatenate",
"sklearn.svm.SVR",
"sklearn.preprocessing.LabelBinarizer",
"sklearn.preprocessing.StandardScaler"
],
[
"sklearn.model_selection.GridSearchCV",
"scipy.sparse.load_npz",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
dannyb2018/gs-quant | [
"e963c4af1c7c65b2ee8f7995815542f6fb7b4957"
] | [
"gs_quant/timeseries/measures_reports.py"
] | [
"\"\"\"\nCopyright 2020 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\nfrom typing import Optional\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nfrom pydash import decapitalize\n\nfrom gs_quant.api.gs.data import QueryType\nfrom gs_quant.data.core import DataContext\nfrom gs_quant.entities.entity import EntityType\nfrom gs_quant.errors import MqValueError\nfrom gs_quant.markets.report import FactorRiskReport, PerformanceReport, ThematicReport, ReturnFormat\n\nfrom gs_quant.models.risk_model import FactorRiskModel\nfrom gs_quant.timeseries import plot_measure_entity\nfrom gs_quant.timeseries.measures import _extract_series_from_df, SecurityMaster, AssetIdentifier\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_EXPOSURE])\ndef factor_exposure(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor exposure data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor exposure for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_EXPOSURE)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_PNL])\ndef factor_pnl(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor PnL data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor pnl for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_PNL)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.FACTOR_PROPORTION_OF_RISK])\ndef factor_proportion_of_risk(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Factor proportion of risk data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of factor proportion of risk for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_PROPORTION_OF_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.DAILY_RISK])\ndef daily_risk(report_id: str, factor_name: str = 'Total', *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Daily risk data associated with a factor 
in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name (must be \"Factor\", \"Specific\", or \"Total\")\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of daily risk for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.DAILY_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.ANNUAL_RISK])\ndef annual_risk(report_id: str, factor_name: str = 'Total', *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Annual risk data associated with a factor in a factor risk report\n\n :param report_id: factor risk report id\n :param factor_name: factor name (must be \"Factor\", \"Specific\", or \"Total\")\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of daily risk for requested factor\n \"\"\"\n return _get_factor_data(report_id, factor_name, QueryType.ANNUAL_RISK)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.PNL])\ndef normalized_performance(report_id: str, leg: str = None, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Returns the Normalized Performance of a performance report based on AUM source\n :param report_id: id of performance report\n :param leg: short or long\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: portfolio normalized performance\n\n **Usage**\n\n Returns the normalized performance of the portfolio.\n\n :math:`NP(L/S)_{t} = SUM( PNL(L/S)_{t}/ ( EXP(L/S)_{t} ) - cPNL(L/S)_{t-1) )\n if ( EXP(L/S)_{t} ) > 0\n else:\n 1/ SUM( PNL(L/S)_{t}/ ( EXP(L/S)_{t} ) - cPNL(L/S)_{t-1) )`\n For each leg, short and long, then:\n :math:`NP_{t} = NP(L)_{t} * SUM(EXP(L)) / SUM(GROSS_EXP) + NP(S)_{t} * SUM(EXP(S)) / SUM(GROSS_EXP) + 1`\n\n If leg is short, set SUM(EXP(L)) to 0, if leg is long, set SUM(EXP(S)) to 0\n\n where :math:`cPNL(L/S)_{t-1}` is your performance reports cumulative long or short PNL at date t-1\n where :math:`PNL(L/S)_{t}` is your performance reports long or short pnl at date t\n where :math:`GROSS_EXP_{t}` is portfolio gross exposure on date t\n where :math:`EXP(L/S)_{t}` is the long or short exposure on date t\n\n \"\"\"\n start_date = DataContext.current.start_time\n end_date = DataContext.current.end_time\n\n start_date = (start_date - BDay(1)).date()\n end_date = end_date.date()\n\n performance_report = PerformanceReport.get(report_id)\n\n constituent_data = performance_report.get_portfolio_constituents(\n fields=['assetId', 'pnl', 'quantity', 'netExposure'], start_date=start_date, end_date=end_date).set_index(\n 'date')\n\n if leg:\n if leg.lower() == \"long\":\n constituent_data = constituent_data[constituent_data['quantity'] > 0]\n if leg.lower() == \"short\":\n constituent_data = constituent_data[constituent_data['quantity'] < 0]\n\n # Split into long and short and aggregate across dates\n long_side = _return_metrics(constituent_data[constituent_data['quantity'] > 0],\n list(constituent_data.index.unique()), \"long\")\n short_side = _return_metrics(constituent_data[constituent_data['quantity'] < 0],\n list(constituent_data.index.unique()), \"short\")\n\n short_exposure = 
sum(abs(short_side['exposure']))\n long_exposure = sum(long_side['exposure'])\n gross_exposure = short_exposure + long_exposure\n\n long_side['longRetWeighted'] = (long_side['longMetrics'] - 1) * (long_exposure / gross_exposure)\n short_side['shortRetWeighted'] = (short_side['shortMetrics'] - 1) * (short_exposure / gross_exposure)\n\n combined = long_side[['longRetWeighted']].join(short_side[['shortRetWeighted']], how='inner')\n combined['normalizedPerformance'] = combined['longRetWeighted'] + combined['shortRetWeighted'] + 1\n return pd.Series(combined['normalizedPerformance'], name=\"normalizedPerformance\").dropna()\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.PNL])\ndef long_pnl(report_id: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n PNL from long holdings\n\n :param report_id: id of performance report\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: portfolio long pnl\n \"\"\"\n start_date = DataContext.current.start_time.date()\n end_date = DataContext.current.end_time.date()\n performance_report = PerformanceReport.get(report_id)\n\n constituent_data = performance_report.get_portfolio_constituents(\n fields=['pnl', 'quantity'], start_date=start_date, end_date=end_date).set_index('date')\n long_leg = constituent_data[constituent_data['quantity'] > 0]['pnl']\n long_leg = long_leg.groupby(level=0).sum()\n return pd.Series(long_leg, name=\"longPnl\")\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.PNL])\ndef short_pnl(report_id: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n\n PNL from short holdings\n :param report_id: id of performance report\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: portfolio short pnl\n \"\"\"\n start_date = DataContext.current.start_time.date()\n end_date = DataContext.current.end_time.date()\n performance_report = PerformanceReport.get(report_id)\n\n constituent_data = performance_report.get_portfolio_constituents(\n fields=['pnl', 'quantity'], start_date=start_date, end_date=end_date).set_index('date')\n short_leg = constituent_data[constituent_data['quantity'] < 0]['pnl']\n short_leg = short_leg.groupby(level=0).sum()\n return pd.Series(short_leg, name=\"shortPnl\")\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.THEMATIC_EXPOSURE])\ndef thematic_exposure(report_id: str, basket_ticker: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Thematic exposure of a portfolio to a requested GS thematic flagship basket\n\n :param report_id: portfolio thematic analytics report id\n :param basket_ticker: ticker for thematic basket\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of daily thematic beta of portfolio to requested flagship basket\n \"\"\"\n thematic_report = ThematicReport.get(report_id)\n asset = SecurityMaster.get_asset(basket_ticker, AssetIdentifier.TICKER)\n df = thematic_report.get_thematic_exposure(start_date=DataContext.current.start_date,\n end_date=DataContext.current.end_date,\n basket_ids=[asset.get_marquee_id()])\n if not df.empty:\n df.set_index('date', inplace=True)\n df.index = 
pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.THEMATIC_EXPOSURE)\n\n\n@plot_measure_entity(EntityType.REPORT, [QueryType.THEMATIC_EXPOSURE])\ndef thematic_beta(report_id: str, basket_ticker: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n \"\"\"\n Thematic beta values of a portfolio to a requested GS thematic flagship basket\n\n :param report_id: portfolio thematic analytics report id\n :param basket_ticker: ticker for thematic basket\n :param source: name of function caller\n :param real_time: whether to retrieve intraday data instead of EOD\n :param request_id: server request id\n :return: Timeseries of daily thematic beta of portfolio to requested flagship basket\n \"\"\"\n thematic_report = ThematicReport.get(report_id)\n asset = SecurityMaster.get_asset(basket_ticker, AssetIdentifier.TICKER)\n df = thematic_report.get_thematic_betas(start_date=DataContext.current.start_date,\n end_date=DataContext.current.end_date,\n basket_ids=[asset.get_marquee_id()])\n if not df.empty:\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.THEMATIC_BETA)\n\n\ndef _get_factor_data(report_id: str, factor_name: str, query_type: QueryType) -> pd.Series:\n # Check params\n report = FactorRiskReport.get(report_id)\n if factor_name not in ['Factor', 'Specific', 'Total']:\n if query_type in [QueryType.DAILY_RISK, QueryType.ANNUAL_RISK]:\n raise MqValueError('Please pick a factor name from the following: [\"Total\", \"Factor\", \"Specific\"]')\n model = FactorRiskModel.get(report.get_risk_model_id())\n factor = model.get_factor(factor_name)\n factor_name = factor.name\n\n # Extract relevant data for each date\n col_name = query_type.value.replace(' ', '')\n col_name = decapitalize(col_name)\n data_type = decapitalize(col_name[6:]) if col_name.startswith('factor') else col_name\n\n factor_data = report.get_results(\n factors=[factor_name],\n start_date=DataContext.current.start_date,\n end_date=DataContext.current.end_date,\n return_format=ReturnFormat.JSON\n )\n factor_exposures = [{'date': d['date'], col_name: d[data_type]} for d in factor_data if d.get(data_type)]\n\n # Create and return timeseries\n df = pd.DataFrame(factor_exposures)\n if not df.empty:\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, query_type)\n\n\ndef _return_metrics(one_leg: pd.DataFrame, dates: list, name: str):\n if one_leg.empty:\n return pd.DataFrame(index=dates, data={f'{name}Metrics': [0 for d in dates], \"exposure\": [0 for d in dates]})\n one_leg = one_leg.groupby(one_leg.index).agg(pnl=('pnl', 'sum'), exposure=('netExposure', 'sum'))\n\n one_leg['cumulativePnl'] = one_leg['pnl'].cumsum(axis=0)\n\n one_leg['normalizedExposure'] = (one_leg['exposure'] - one_leg['cumulativePnl'])\n one_leg['cumulativePnl'].iloc[0] = 0\n one_leg[f'{name}Metrics'] = one_leg['cumulativePnl'] / one_leg['normalizedExposure'] + 1\n\n one_leg[f'{name}Metrics'] = 1 / one_leg[f'{name}Metrics'] if one_leg['exposure'].iloc[-1] < 0 else one_leg[\n f'{name}Metrics']\n return one_leg\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame",
"pandas.tseries.offsets.BDay"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
ZixuanJiang/dnnweaver2 | [
"984ffaf670378f7d83f61064d7984008461e6990"
] | [
"dnnweaver2/tf_utils/dataset.py"
] | [
"'''Utility functions and classes for handling image datasets.'''\n\nimport os.path as osp\nimport numpy as np\nimport tensorflow as tf\n\n\ndef process_image(img, scale, isotropic, crop, mean):\n '''Crops, scales, and normalizes the given image.\n scale : The image wil be first scaled to this size.\n If isotropic is true, the smaller side is rescaled to this,\n preserving the aspect ratio.\n crop : After scaling, a central crop of this size is taken.\n mean : Subtracted from the image\n '''\n # Rescale\n if isotropic:\n img_shape = tf.to_float(tf.shape(img)[:2])\n min_length = tf.minimum(img_shape[0], img_shape[1])\n new_shape = tf.to_int32((scale / min_length) * img_shape)\n else:\n new_shape = tf.stack([scale, scale])\n #img = tf.image.resize_images(img, (new_shape[0], new_shape[1]))\n img = tf.image.resize_images(img, new_shape)\n # Center crop\n # Use the slice workaround until crop_to_bounding_box supports deferred tensor shapes\n # See: https://github.com/tensorflow/tensorflow/issues/521\n offset = (new_shape - crop) / 2\n img = tf.slice(img, begin=tf.stack([offset[0], offset[1], 0]), size=tf.stack([crop, crop, -1]))\n # Mean subtraction\n return tf.to_float(img) - mean\n\n\nclass ImageProducer(object):\n '''\n Loads and processes batches of images in parallel.\n '''\n\n def __init__(self, image_paths, data_spec, num_concurrent=4, batch_size=None, labels=None):\n # The data specifications describe how to process the image\n self.data_spec = data_spec\n # A list of full image paths\n self.image_paths = image_paths\n # An optional list of labels corresponding to each image path\n self.labels = labels\n # A boolean flag per image indicating whether its a JPEG or PNG\n self.extension_mask = self.create_extension_mask(self.image_paths)\n # Create the loading and processing operations\n self.setup(batch_size=batch_size, num_concurrent=num_concurrent)\n\n def setup(self, batch_size, num_concurrent):\n # Validate the batch size\n num_images = len(self.image_paths)\n batch_size = min(num_images, batch_size or self.data_spec.batch_size)\n # if num_images % batch_size != 0:\n # raise ValueError(\n # 'The total number of images ({}) must be divisible by the batch size ({}).'.format(\n # num_images, batch_size))\n self.num_batches = num_images // batch_size\n # Create a queue that will contain image paths (and their indices and extension indicator)\n self.path_queue = tf.FIFOQueue(capacity=num_images,\n dtypes=[tf.int32, tf.bool, tf.string],\n name='path_queue')\n # Enqueue all image paths, along with their indices\n indices = tf.range(num_images)\n self.enqueue_paths_op = self.path_queue.enqueue_many([indices, self.extension_mask,\n self.image_paths])\n # Close the path queue (no more additions)\n self.close_path_queue_op = self.path_queue.close()\n\n # Create an operation that dequeues a single path and returns a processed image\n (idx, processed_image) = self.process()\n\n # Create a queue that will contain the processed images (and their indices)\n image_shape = (self.data_spec.crop_size, self.data_spec.crop_size, self.data_spec.channels)\n processed_queue = tf.FIFOQueue(capacity=int(np.ceil(num_images / float(num_concurrent))),\n dtypes=[tf.int32, tf.float32],\n shapes=[(), image_shape],\n name='processed_queue')\n\n # Enqueue the processed image and path\n enqueue_processed_op = processed_queue.enqueue([idx, processed_image])\n\n # Create a dequeue op that fetches a batch of processed images off the queue\n self.dequeue_op = processed_queue.dequeue_many(batch_size)\n # Create a queue 
runner to perform the processing operations in parallel\n num_concurrent = min(num_concurrent, num_images)\n\n self.queue_runner = tf.train.QueueRunner(processed_queue,\n [enqueue_processed_op] * num_concurrent)\n\n def start(self, session, coordinator, num_concurrent=4):\n '''Start the processing worker threads.'''\n # Queue all paths\n session.run(self.enqueue_paths_op)\n # Close the path queue\n session.run(self.close_path_queue_op)\n # Start the queue runner and return the created threads\n return self.queue_runner.create_threads(session, coord=coordinator, start=True)\n\n def get(self, session):\n '''\n Get a single batch of images along with their indices. If a set of labels were provided,\n the corresponding labels are returned instead of the indices.\n '''\n (indices, images) = session.run(self.dequeue_op)\n if self.labels is not None:\n labels = [self.labels[idx] for idx in indices]\n return (labels, images)\n return (indices, images)\n\n def batches(self, session):\n '''Yield a batch until no more images are left.'''\n for _ in xrange(self.num_batches):\n yield self.get(session=session)\n\n def load_image(self, image_path, is_jpeg):\n # Read the file\n file_data = tf.read_file(image_path)\n # Decode the image data\n img = tf.cond(\n is_jpeg,\n lambda: tf.image.decode_jpeg(file_data, channels=self.data_spec.channels),\n lambda: tf.image.decode_png(file_data, channels=self.data_spec.channels))\n if self.data_spec.expects_bgr:\n # Convert from RGB channel ordering to BGR\n # This matches, for instance, how OpenCV orders the channels.\n #img = tf.reverse(img, [False, False, True])\n img = tf.reverse(img, [2])\n return img\n\n def process(self):\n # Dequeue a single image path\n idx, is_jpeg, image_path = self.path_queue.dequeue()\n # Load the image\n img = self.load_image(image_path, is_jpeg)\n # Process the image\n processed_img = process_image(img=img,\n scale=self.data_spec.scale_size,\n isotropic=self.data_spec.isotropic,\n crop=self.data_spec.crop_size,\n mean=self.data_spec.mean)\n # Return the processed image, along with its index\n return (idx, processed_img)\n\n @staticmethod\n def create_extension_mask(paths):\n\n def is_jpeg(path):\n extension = osp.splitext(path)[-1].lower()\n if extension in ('.jpg', '.jpeg', '.JPEG'):\n return True\n if extension != '.png':\n raise ValueError('Unsupported image format: {}'.format(extension))\n return False\n\n return [is_jpeg(p) for p in paths]\n\n def __len__(self):\n return len(self.image_paths)\n\n\nclass ImageNetProducer(ImageProducer):\n\n def __init__(self, val_path, data_path, data_spec):\n # Read in the ground truth labels for the validation set\n # The get_ilsvrc_aux.sh in Caffe's data/ilsvrc12 folder can fetch a copy of val.txt\n gt_lines = open(val_path).readlines()\n gt_pairs = [line.split() for line in gt_lines]\n # Get the full image paths\n # You will need a copy of the ImageNet validation set for this.\n image_paths = [osp.join(data_path, p[0]) for p in gt_pairs]\n # The corresponding ground truth labels\n labels = np.array([int(p[1]) for p in gt_pairs])\n # Initialize base\n super(ImageNetProducer, self).__init__(image_paths=image_paths,\n data_spec=data_spec,\n labels=labels)\n"
] | [
[
"tensorflow.reverse",
"tensorflow.FIFOQueue",
"tensorflow.range",
"tensorflow.read_file",
"tensorflow.shape",
"tensorflow.image.resize_images",
"tensorflow.stack",
"tensorflow.minimum",
"tensorflow.image.decode_png",
"tensorflow.train.QueueRunner",
"tensorflow.to_float",
"tensorflow.to_int32",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
TLYu0419/pyquantanalyst | [
"b5be77726b2a7438d4c4d1d768f3c3340f0a286b"
] | [
"predict_stockprice.py"
] | [
"from prophet import Prophet\nimport datetime\nimport pandas as pd\n\n# predict stickprice\ndef predict_stockprice(data, date_col, target_col, periods=100, future_data=None):\n '''\n data: Your current data to training model\n date_col: Date column name\n target_col: Target Variable columns name.\n periods: How many days you want to predict. Default is 100 Days.\n future_data: Future data you used for test the model accuracy.\n '''\n \n ndata = data.loc[:,[date_col, target_col]].rename(columns={date_col:'ds',\n target_col:'y'})\n m = Prophet(yearly_seasonality=True,\n weekly_seasonality=True,\n daily_seasonality=True)\n m.fit(ndata)\n\n \n # Predict next 90D\n future = m.make_future_dataframe(periods=periods)\n forecast = m.predict(future)\n \n fig1 = m.plot(forecast)\n fig2 = m.plot_components(forecast)\n forecast['DATE'] = forecast['ds'].apply(lambda x: datetime.datetime.strftime(x, '%Y-%m-%d'))\n \n # if exist\n if 'future_data' in locals():\n data['TYPE'] = 'CURRENT_DATA'\n future_data['TYPE'] = 'FUTURE_DATA'\n ndata = pd.concat([data, future_data], ignore_index=True)\n ndata = pd.merge(left=ndata, right=forecast, how = 'left', on='DATE')\n ndata['DATE'] = ndata['DATE'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))\n print('Return A')\n return ndata\n else:\n print('Return B')\n return forecast\n \n# Visualize prediction result\n# import pandas as pd\n# import seaborn as sns\n# from sklearn.metrics import r2_score\n# from sklearn.metrics import mean_absolute_error\n\n# # Training data\n# train_r2 = r2_score(y_true = result.loc[result['TYPE']=='CURRENT_DATA']['CLOSINGPRICE'],\n# y_pred = result.loc[result['TYPE']=='CURRENT_DATA']['yhat'])\n# train_mae = mean_absolute_error(y_true = result.loc[result['TYPE']=='CURRENT_DATA']['CLOSINGPRICE'],\n# y_pred = result.loc[result['TYPE']=='CURRENT_DATA']['yhat'])\n\n# # Testing\n# test_r2 = r2_score(y_true = result.loc[result['TYPE']=='FUTURE_DATA']['CLOSINGPRICE'],\n# y_pred = result.loc[result['TYPE']=='FUTURE_DATA']['yhat'])\n\n# test_mae = mean_absolute_error(y_true = result.loc[result['TYPE']=='FUTURE_DATA']['CLOSINGPRICE'],\n# y_pred = result.loc[result['TYPE']=='FUTURE_DATA']['yhat'])\n\n# print('R-Square on training data:', train_r2)\n# print('MAE on training data:', train_mae)\n# print('R-Square on test data:', test_r2)\n# print('MAE on test data:', test_mae)\n\n# dt = result.loc[:,['STOCKID', 'STOCKNAME', 'DATE', 'CLOSINGPRICE', 'yhat']]\n# dt = pd.melt(dt, id_vars=['STOCKID', 'STOCKNAME', 'DATE'],var_name='TYPE', value_name='VALUE')\n# sns.set(font_scale=1.5, style='whitegrid', rc={'figure.figsize':(20,6)})\n# ax = sns.lineplot(x='DATE', y='VALUE', data=dt, hue='TYPE')\n# ax = sns.scatterplot(x='DATE', y='CLOSINGPRICE', data=result, hue='TYPE')\n# ax.fill_between(x='DATE', y1 = 'yhat_lower', y2='yhat_upper', data=result, alpha=0.2); "
] | [
[
"pandas.concat",
"pandas.merge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
feiranwang/deepdive | [
"53c03edba643d53fbb6d9d382870fe5dfb2e47a1"
] | [
"util/calibration.py"
] | [
"#! /usr/bin/env python\n\n# Usage: calibration.py [target/calibration_data_file.csv] [output_file.png]\n\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nCALIBRATION_FILE = sys.argv[1]\nOUT_IMG_FILE = sys.argv[2]\n\nlabels = []\ncounts = []\nprec = []\ncounts_train = []\nfor l in open(CALIBRATION_FILE):\n\t(a,b,c,d,e) = l.rstrip().split('\\t')\n\tlabels.append((float(a) + float(b))/2)\n\tcounts.append(int(c))\n\tif float(d) + float(e) == 0: \n\t\tprec.append(0.0)\n\telse:\n\t\tprec.append(float(d)/(float(d) + float(e)))\n\tcounts_train.append(float(d)+float(e))\n\nfig, ax = plt.subplots(figsize=(12,3))\n\nMARGIN = 1\nfig.subplots_adjust(right=0.99, left=0.05, top=0.9, bottom=0.25)\n\ngs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])\n\nplt.subplot(gs[0])\nwidth = 0.1\nlabels_nz = []\nprec_nz = []\nfor i in range(0, len(labels)):\n\tif counts_train[i] != 0:\n\t\tlabels_nz.append(labels[i])\n\t\tprec_nz.append(prec[i])\nplt.plot(labels_nz, prec_nz, 'ro-')\nplt.plot([0,1],[0,1],'b--')\nplt.title(\"(a) Accuracy (Testing Set)\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Probability\")\nplt.ylim(0,1)\nplt.xlim(0,1.1)\n\nplt.text(0, -0.35 , \"* (a) and (b) are produced using 50% held-out on evidence variables; (c) also includes all non-evidence variables of the same relation.\", fontsize=10, style='italic')\n\nplt.subplot(gs[1])\nwidth = 0.1\nplt.bar(labels, counts_train, width, color='b')\nplt.title(\"(b) # Predictions (Testing Set)\")\nplt.ylabel(\"# Predictions\")\nplt.xlabel(\"Probability\")\nplt.xlim(0,1.1)\n\nplt.subplot(gs[2])\nwidth = 0.1\nplt.bar(labels, counts, width, color='b')\nplt.title(\"(c) # Predictions (Whole Set)\")\nplt.ylabel(\"# Predictions\")\nplt.xlabel(\"Probability\")\nplt.xlim(0,1.1)\n\nplt.savefig(OUT_IMG_FILE)\n\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nivedwho/gan | [
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4",
"723ce1e3627778b979f048d817f834f253611ff4"
] | [
"tensorflow_gan/examples/mnist/train_test.py",
"tensorflow_gan/examples/progressive_gan/networks.py",
"tensorflow_gan/python/eval/inception_metrics.py",
"tensorflow_gan/examples/stargan/train_lib.py",
"tensorflow_gan/examples/mnist_estimator/train_lib.py",
"tensorflow_gan/examples/esrgan/train_test.py",
"tensorflow_gan/examples/self_attention_estimator/estimator_lib.py",
"tensorflow_gan/python/features/normalization.py",
"tensorflow_gan/examples/stargan/data_provider.py"
] | [
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for mnist.train.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_gan.examples.mnist import train_lib\n\nmock = tf.test.mock\n\n\nBATCH_SIZE = 5\n\n\ndef _new_data(*args, **kwargs):\n del args, kwargs\n # Tensors need to be created in the same graph, so generate them at the call\n # site.\n # Note: Make sure batch size matches hparams.\n imgs = tf.zeros([BATCH_SIZE, 28, 28, 1], dtype=tf.float32)\n labels = tf.one_hot([0] * BATCH_SIZE, depth=10)\n return (imgs, labels)\n\n\nclass TrainTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(TrainTest, self).setUp()\n self.hparams = train_lib.HParams(\n batch_size=BATCH_SIZE,\n train_log_dir=self.get_temp_dir(),\n max_number_of_steps=1,\n gan_type='unconditional',\n grid_size=1,\n noise_dims=64)\n\n @mock.patch.object(train_lib.data_provider, 'provide_data', new=_new_data)\n def test_run_one_train_step(self):\n if tf.executing_eagerly():\n # `tfgan.gan_model` doesn't work when executing eagerly.\n return\n train_lib.train(self.hparams)\n\n @parameterized.parameters(\n {'gan_type': 'unconditional'},\n {'gan_type': 'conditional'},\n {'gan_type': 'infogan'},\n )\n @mock.patch.object(train_lib.data_provider, 'provide_data', new=_new_data)\n def test_build_graph(self, gan_type):\n if tf.executing_eagerly():\n # `tfgan.gan_model` doesn't work when executing eagerly.\n return\n hparams = self.hparams._replace(max_number_of_steps=0, gan_type=gan_type)\n train_lib.train(hparams)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python2 python3\n\"\"\"Generator and discriminator for a progressive GAN model.\n\nSee https://arxiv.org/abs/1710.10196 for details about the model.\n\nSee https://github.com/tkarras/progressive_growing_of_gans for the original\ntheano implementation.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_gan.examples.progressive_gan import layers\n\n\nclass ResolutionSchedule(object):\n \"\"\"Image resolution upscaling schedule.\"\"\"\n\n def __init__(self, start_resolutions=(4, 4), scale_base=2, num_resolutions=4):\n \"\"\"Initializer.\n\n Args:\n start_resolutions: An tuple of integers of HxW format for start image\n resolutions. Defaults to (4, 4).\n scale_base: An integer of resolution base multiplier. Defaults to 2.\n num_resolutions: An integer of how many progressive resolutions (including\n `start_resolutions`). Defaults to 4.\n \"\"\"\n self._start_resolutions = start_resolutions\n self._scale_base = scale_base\n self._num_resolutions = num_resolutions\n\n @property\n def start_resolutions(self):\n return tuple(self._start_resolutions)\n\n @property\n def scale_base(self):\n return self._scale_base\n\n @property\n def num_resolutions(self):\n return self._num_resolutions\n\n @property\n def final_resolutions(self):\n \"\"\"Returns the final resolutions.\"\"\"\n return tuple([\n r * self._scale_base**(self._num_resolutions - 1)\n for r in self._start_resolutions\n ])\n\n def scale_factor(self, block_id):\n \"\"\"Returns the scale factor for network block `block_id`.\"\"\"\n if block_id < 1 or block_id > self._num_resolutions:\n raise ValueError('`block_id` must be in [1, {}]'.format(\n self._num_resolutions))\n return self._scale_base**(self._num_resolutions - block_id)\n\n\ndef block_name(block_id):\n \"\"\"Returns the scope name for the network block `block_id`.\"\"\"\n return 'progressive_gan_block_{}'.format(block_id)\n\n\ndef min_total_num_images(stable_stage_num_images, transition_stage_num_images,\n num_blocks):\n \"\"\"Returns the minimum total number of images.\n\n Computes the minimum total number of images required to reach the desired\n `resolution`.\n\n Args:\n stable_stage_num_images: Number of images in the stable stage.\n transition_stage_num_images: Number of images in the transition stage.\n num_blocks: Number of network blocks.\n\n Returns:\n An integer of the minimum total number of images.\n \"\"\"\n return (num_blocks * stable_stage_num_images +\n (num_blocks - 1) * transition_stage_num_images)\n\n\ndef compute_progress(current_image_id, stable_stage_num_images,\n transition_stage_num_images, num_blocks):\n \"\"\"Computes the training progress.\n\n The training alternates between stable phase and transition phase.\n The `progress` indicates the training progress, i.e. 
the training is at\n - a stable phase p if progress = p\n - a transition stage between p and p + 1 if progress = p + fraction\n where p = 0,1,2.,...\n\n Note the max value of progress is `num_blocks` - 1.\n\n In terms of LOD (of the original implementation):\n progress = `num_blocks` - 1 - LOD\n\n Args:\n current_image_id: An scalar integer `Tensor` of the current image id, count\n from 0.\n stable_stage_num_images: An integer representing the number of images in\n each stable stage.\n transition_stage_num_images: An integer representing the number of images in\n each transition stage.\n num_blocks: Number of network blocks.\n\n Returns:\n A scalar float `Tensor` of the training progress.\n \"\"\"\n # Note when current_image_id >= min_total_num_images - 1 (which means we\n # are already at the highest resolution), we want to keep progress constant.\n # Therefore, cap current_image_id here.\n capped_current_image_id = tf.minimum(\n current_image_id,\n min_total_num_images(stable_stage_num_images, transition_stage_num_images,\n num_blocks) - 1)\n\n stage_num_images = stable_stage_num_images + transition_stage_num_images\n progress_integer = tf.math.floordiv(capped_current_image_id, stage_num_images)\n progress_fraction = tf.maximum(\n 0.0,\n tf.cast(\n tf.math.mod(capped_current_image_id, stage_num_images) -\n stable_stage_num_images,\n dtype=tf.float32) /\n tf.cast(transition_stage_num_images, dtype=tf.float32))\n return tf.cast(progress_integer, dtype=tf.float32) + progress_fraction\n\n\ndef _generator_alpha(block_id, progress):\n \"\"\"Returns the block output parameter for the generator network.\n\n The generator has N blocks with `block_id` = 1,2,...,N. Each block\n block_id outputs a fake data output(block_id). The generator output is a\n linear combination of all block outputs, i.e.\n SUM_block_id(output(block_id) * alpha(block_id, progress)) where\n alpha(block_id, progress) = _generator_alpha(block_id, progress). Note it\n garantees that SUM_block_id(alpha(block_id, progress)) = 1 for any progress.\n\n With a fixed block_id, the plot of alpha(block_id, progress) against progress\n is a 'triangle' with its peak at (block_id - 1, 1).\n\n Args:\n block_id: An integer of generator block id.\n progress: A scalar float `Tensor` of training progress.\n\n Returns:\n A scalar float `Tensor` of block output parameter.\n \"\"\"\n return tf.maximum(0.0,\n tf.minimum(progress - (block_id - 2), block_id - progress))\n\n\ndef _discriminator_alpha(block_id, progress):\n \"\"\"Returns the block input parameter for discriminator network.\n\n The discriminator has N blocks with `block_id` = 1,2,...,N. Each block\n block_id accepts an\n - input(block_id) transformed from the real data and\n - the output of block block_id + 1, i.e. output(block_id + 1)\n The final input is a linear combination of them,\n i.e. 
alpha * input(block_id) + (1 - alpha) * output(block_id + 1)\n where alpha = _discriminator_alpha(block_id, progress).\n\n With a fixed block_id, alpha(block_id, progress) stays to be 1\n when progress <= block_id - 1, then linear decays to 0 when\n block_id - 1 < progress <= block_id, and finally stays at 0\n when progress > block_id.\n\n Args:\n block_id: An integer of generator block id.\n progress: A scalar float `Tensor` of training progress.\n\n Returns:\n A scalar float `Tensor` of block input parameter.\n \"\"\"\n return tf.clip_by_value(block_id - progress, 0.0, 1.0)\n\n\ndef blend_images(x, progress, resolution_schedule, num_blocks):\n \"\"\"Blends images of different resolutions according to `progress`.\n\n When training `progress` is at a stable stage for resolution r, returns\n image `x` downscaled to resolution r and then upscaled to `final_resolutions`,\n call it x'(r).\n\n Otherwise when training `progress` is at a transition stage from resolution\n r to 2r, returns a linear combination of x'(r) and x'(2r).\n\n Args:\n x: An image `Tensor` of NHWC format with resolution `final_resolutions`.\n progress: A scalar float `Tensor` of training progress.\n resolution_schedule: An object of `ResolutionSchedule`.\n num_blocks: An integer of number of blocks.\n\n Returns:\n An image `Tensor` which is a blend of images of different resolutions.\n \"\"\"\n x_blend = []\n for block_id in range(1, num_blocks + 1):\n alpha = _generator_alpha(block_id, progress)\n scale = resolution_schedule.scale_factor(block_id)\n x_blend.append(alpha * layers.upscale(layers.downscale(x, scale), scale))\n return tf.add_n(x_blend)\n\n\ndef num_filters(block_id, fmap_base=4096, fmap_decay=1.0, fmap_max=256):\n \"\"\"Computes number of filters of block `block_id`.\"\"\"\n return int(min(fmap_base / math.pow(2.0, block_id * fmap_decay), fmap_max))\n\n\ndef generator(z,\n progress,\n num_filters_fn,\n resolution_schedule,\n num_blocks=None,\n kernel_size=3,\n colors=3,\n to_rgb_activation=None,\n scope='progressive_gan_generator',\n reuse=None):\n \"\"\"Generator network for the progressive GAN model.\n\n Args:\n z: A `Tensor` of latent vector. The first dimension must be batch size.\n progress: A scalar float `Tensor` of training progress.\n num_filters_fn: A function that maps `block_id` to # of filters for the\n block.\n resolution_schedule: An object of `ResolutionSchedule`.\n num_blocks: An integer of number of blocks. None means maximum number of\n blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.\n kernel_size: An integer of convolution kernel size.\n colors: Number of output color channels. Defaults to 3.\n to_rgb_activation: Activation function applied when output rgb.\n scope: A string or variable scope.\n reuse: Whether to reuse `scope`. 
Defaults to None which means to inherit the\n reuse option of the parent scope.\n\n Returns:\n A `Tensor` of model output and a dictionary of model end points.\n \"\"\"\n if num_blocks is None:\n num_blocks = resolution_schedule.num_resolutions\n\n start_h, start_w = resolution_schedule.start_resolutions\n final_h, final_w = resolution_schedule.final_resolutions\n\n def _conv2d(scope, x, kernel_size, filters, padding='SAME'):\n return layers.custom_conv2d(\n x=x,\n filters=filters,\n kernel_size=kernel_size,\n padding=padding,\n activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),\n he_initializer_slope=0.0,\n scope=scope)\n\n def _to_rgb(x):\n return layers.custom_conv2d(\n x=x,\n filters=colors,\n kernel_size=1,\n padding='SAME',\n activation=to_rgb_activation,\n scope='to_rgb')\n\n end_points = {}\n\n with tf.variable_scope(scope, reuse=reuse):\n with tf.name_scope('input'):\n x = tf.layers.flatten(z)\n end_points['latent_vector'] = x\n\n with tf.variable_scope(block_name(1)):\n x = tf.expand_dims(tf.expand_dims(x, 1), 1)\n x = layers.pixel_norm(x)\n # Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)\n # with zeros for the next conv.\n x = tf.pad(\n tensor=x,\n paddings=[[0] * 2, [start_h - 1] * 2, [start_w - 1] * 2, [0] * 2])\n # The output is start_h x start_w x num_filters_fn(1).\n x = _conv2d('conv0', x, (start_h, start_w), num_filters_fn(1), 'VALID')\n x = _conv2d('conv1', x, kernel_size, num_filters_fn(1))\n lods = [x]\n\n for block_id in range(2, num_blocks + 1):\n with tf.variable_scope(block_name(block_id)):\n x = layers.upscale(x, resolution_schedule.scale_base)\n x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))\n x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id))\n lods.append(x)\n\n outputs = []\n for block_id in range(1, num_blocks + 1):\n with tf.variable_scope(block_name(block_id)):\n lod = _to_rgb(lods[block_id - 1])\n scale = resolution_schedule.scale_factor(block_id)\n lod = layers.upscale(lod, scale)\n end_points['upscaled_rgb_{}'.format(block_id)] = lod\n\n # alpha_i is used to replace lod_select. Note sum(alpha_i) is\n # garanteed to be 1.\n alpha = _generator_alpha(block_id, progress)\n end_points['alpha_{}'.format(block_id)] = alpha\n\n outputs.append(lod * alpha)\n\n predictions = tf.add_n(outputs)\n batch_size = tf.compat.dimension_value(z.shape[0])\n predictions.set_shape([batch_size, final_h, final_w, colors])\n end_points['predictions'] = predictions\n\n return predictions, end_points\n\n\ndef discriminator(x,\n progress,\n num_filters_fn,\n resolution_schedule,\n num_blocks=None,\n kernel_size=3,\n scope='progressive_gan_discriminator',\n reuse=None):\n \"\"\"Discriminator network for the progressive GAN model.\n\n Args:\n x: A `Tensor`of NHWC format representing images of size `resolution`.\n progress: A scalar float `Tensor` of training progress.\n num_filters_fn: A function that maps `block_id` to # of filters for the\n block.\n resolution_schedule: An object of `ResolutionSchedule`.\n num_blocks: An integer of number of blocks. None means maximum number of\n blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.\n kernel_size: An integer of convolution kernel size.\n scope: A string or variable scope.\n reuse: Whether to reuse `scope`. 
Defaults to None which means to inherit the\n reuse option of the parent scope.\n\n Returns:\n A `Tensor` of model output and a dictionary of model end points.\n \"\"\"\n if num_blocks is None:\n num_blocks = resolution_schedule.num_resolutions\n\n def _conv2d(scope, x, kernel_size, filters, padding='SAME'):\n return layers.custom_conv2d(\n x=x,\n filters=filters,\n kernel_size=kernel_size,\n padding=padding,\n activation=tf.nn.leaky_relu,\n he_initializer_slope=0.0,\n scope=scope)\n\n def _from_rgb(x, block_id):\n return _conv2d('from_rgb', x, 1, num_filters_fn(block_id))\n\n end_points = {}\n\n with tf.variable_scope(scope, reuse=reuse):\n x0 = x\n end_points['rgb'] = x0\n\n lods = []\n for block_id in range(num_blocks, 0, -1):\n with tf.variable_scope(block_name(block_id)):\n scale = resolution_schedule.scale_factor(block_id)\n lod = layers.downscale(x0, scale)\n end_points['downscaled_rgb_{}'.format(block_id)] = lod\n lod = _from_rgb(lod, block_id)\n # alpha_i is used to replace lod_select.\n alpha = _discriminator_alpha(block_id, progress)\n end_points['alpha_{}'.format(block_id)] = alpha\n lods.append((lod, alpha))\n\n lods_iter = iter(lods)\n x, _ = next(lods_iter)\n for block_id in range(num_blocks, 1, -1):\n with tf.variable_scope(block_name(block_id)):\n x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))\n x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id - 1))\n x = layers.downscale(x, resolution_schedule.scale_base)\n lod, alpha = next(lods_iter)\n x = alpha * lod + (1.0 - alpha) * x\n\n with tf.variable_scope(block_name(1)):\n x = layers.scalar_concat(x, layers.minibatch_mean_stddev(x))\n x = _conv2d('conv0', x, kernel_size, num_filters_fn(1))\n x = _conv2d('conv1', x, resolution_schedule.start_resolutions,\n num_filters_fn(0), 'VALID')\n end_points['last_conv'] = x\n logits = layers.custom_dense(x=x, units=1, scope='logits')\n end_points['logits'] = logits\n\n return logits, end_points\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Model evaluation tools for TF-GAN.\n\nThese methods come from https://arxiv.org/abs/1606.03498 and\nhttps://arxiv.org/abs/1706.08500.\n\nNOTE: This implementation uses the same weights as in\nhttps://github.com/openai/improved-gan/blob/master/inception_score/model.py,\n.\n\n\nNote that the default checkpoint is the same as in the OpenAI implementation\n(https://github.com/openai/improved-gan/tree/master/inception_score), but is\nmore numerically stable and is an unbiased estimator of the true Inception score\neven when splitting the inputs into batches. Also, the graph modified so that it\nworks with arbitrary batch size and the preprocessing moved to the `preprocess`\nfunction. Note that the modifications in the GitHub implementation are *not*\nsufficient to run with arbitrary batch size, due to the hardcoded resize value.\n\nThe graph runs on TPU.\n\nFinally, I manually removed the placeholder input, which was unnecessary and is\nnot supported on TPU.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport six\n\nimport tensorflow as tf\nfrom tensorflow_gan.python.eval import classifier_metrics\nimport tensorflow_hub as tfhub\n\n\n__all__ = [\n 'classifier_fn_from_tfhub',\n 'run_inception',\n 'sample_and_run_inception',\n 'inception_score',\n 'inception_score_streaming',\n 'frechet_inception_distance',\n 'frechet_inception_distance_streaming',\n 'kernel_inception_distance',\n 'kernel_inception_distance_and_std',\n 'INCEPTION_TFHUB',\n 'INCEPTION_OUTPUT',\n 'INCEPTION_FINAL_POOL',\n 'INCEPTION_DEFAULT_IMAGE_SIZE',\n]\n\nINCEPTION_TFHUB = 'https://tfhub.dev/tensorflow/tfgan/eval/inception/1'\nINCEPTION_OUTPUT = 'logits'\nINCEPTION_FINAL_POOL = 'pool_3'\n_DEFAULT_DTYPES = {INCEPTION_OUTPUT: tf.float32,\n INCEPTION_FINAL_POOL: tf.float32}\nINCEPTION_DEFAULT_IMAGE_SIZE = 299\n\n\ndef classifier_fn_from_tfhub(tfhub_module, output_fields, return_tensor=False):\n \"\"\"Returns a function that can be as a classifier function.\n\n Wrapping the TF-Hub module in another function defers loading the module until\n use, which is useful for mocking and not computing heavy default arguments.\n\n Args:\n tfhub_module: A string handle for a TF-Hub module.\n output_fields: A string, list, or `None`. 
If present, assume the module\n outputs a dictionary, and select this field.\n return_tensor: If `True`, return a single tensor instead of a dictionary.\n\n Returns:\n A one-argument function that takes an image Tensor and returns outputs.\n \"\"\"\n if isinstance(output_fields, six.string_types):\n output_fields = [output_fields]\n def _classifier_fn(images):\n output = tfhub.load(tfhub_module)(images)\n if output_fields is not None:\n output = {x: output[x] for x in output_fields}\n if return_tensor:\n assert len(output) == 1\n output = list(output.values())[0]\n return tf.nest.map_structure(tf.compat.v1.layers.flatten, output)\n return _classifier_fn\n\n\nrun_inception = functools.partial(\n classifier_metrics.run_classifier_fn,\n classifier_fn=classifier_fn_from_tfhub(INCEPTION_TFHUB, None),\n dtypes=_DEFAULT_DTYPES)\n\n\nsample_and_run_inception = functools.partial(\n classifier_metrics.sample_and_run_classifier_fn,\n classifier_fn=classifier_fn_from_tfhub(INCEPTION_TFHUB, None),\n dtypes=_DEFAULT_DTYPES)\n\ninception_score = functools.partial(\n classifier_metrics.classifier_score,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_OUTPUT, True))\n\ninception_score_streaming = functools.partial(\n classifier_metrics.classifier_score_streaming,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_OUTPUT, True))\n\nfrechet_inception_distance = functools.partial(\n classifier_metrics.frechet_classifier_distance,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_FINAL_POOL, True))\n\nfrechet_inception_distance_streaming = functools.partial(\n classifier_metrics.frechet_classifier_distance_streaming,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_FINAL_POOL, True))\n\nkernel_inception_distance = functools.partial(\n classifier_metrics.kernel_classifier_distance,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_FINAL_POOL, True))\n\nkernel_inception_distance_and_std = functools.partial(\n classifier_metrics.kernel_classifier_distance_and_std,\n classifier_fn=classifier_fn_from_tfhub(\n INCEPTION_TFHUB, INCEPTION_FINAL_POOL, True))\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trains a StarGAN model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow_gan as tfgan\n\nfrom tensorflow_gan.examples.stargan import data_provider\nfrom tensorflow_gan.examples.stargan import network\n\nHParams = collections.namedtuple('HParams', [\n 'batch_size', 'patch_size', 'train_log_dir', 'generator_lr',\n 'discriminator_lr', 'max_number_of_steps', 'adam_beta1', 'adam_beta2',\n 'gen_disc_step_ratio', 'tf_master', 'ps_replicas', 'task'\n])\n\n\ndef _define_model(images, labels):\n \"\"\"Create the StarGAN Model.\n\n Args:\n images: `Tensor` or list of `Tensor` of shape (N, H, W, C).\n labels: `Tensor` or list of `Tensor` of shape (N, num_domains).\n\n Returns:\n `StarGANModel` namedtuple.\n \"\"\"\n\n return tfgan.stargan_model(\n generator_fn=network.generator,\n discriminator_fn=network.discriminator,\n input_data=images,\n input_data_domain_label=labels)\n\n\ndef _get_lr(base_lr, max_number_of_steps):\n \"\"\"Returns a learning rate `Tensor`.\n\n Args:\n base_lr: A scalar float `Tensor` or a Python number. The base learning\n rate.\n max_number_of_steps: A Python number. The total number of steps to train.\n\n Returns:\n A scalar float `Tensor` of learning rate which equals `base_lr` when the\n global training step is less than Fmax_number_of_steps / 2, afterwards\n it linearly decays to zero.\n \"\"\"\n global_step = tf.train.get_or_create_global_step()\n lr_constant_steps = max_number_of_steps // 2\n\n def _lr_decay():\n return tf.train.polynomial_decay(\n learning_rate=base_lr,\n global_step=(global_step - lr_constant_steps),\n decay_steps=(max_number_of_steps - lr_constant_steps),\n end_learning_rate=0.0)\n\n return tf.cond(\n pred=global_step < lr_constant_steps,\n true_fn=lambda: base_lr,\n false_fn=_lr_decay)\n\n\ndef _get_optimizer(gen_lr, dis_lr, beta1, beta2):\n \"\"\"Returns generator optimizer and discriminator optimizer.\n\n Args:\n gen_lr: A scalar float `Tensor` or a Python number. The Generator learning\n rate.\n dis_lr: A scalar float `Tensor` or a Python number. The Discriminator\n learning rate.\n beta1: A scalar float `Tensor` or a Python number. The beta1 parameter to\n the `AdamOptimizer`.\n beta2: A scalar float `Tensor` or a Python number. 
The beta2 parameter to\n the `AdamOptimizer`.\n\n Returns:\n A tuple of generator optimizer and discriminator optimizer.\n \"\"\"\n gen_opt = tf.train.AdamOptimizer(\n gen_lr, beta1=beta1, beta2=beta2, use_locking=True)\n dis_opt = tf.train.AdamOptimizer(\n dis_lr, beta1=beta1, beta2=beta2, use_locking=True)\n return gen_opt, dis_opt\n\n\ndef _define_train_ops(model, loss, gen_lr, dis_lr, beta1, beta2,\n max_number_of_steps):\n \"\"\"Defines train ops that trains `stargan_model` with `stargan_loss`.\n\n Args:\n model: A `StarGANModel` namedtuple.\n loss: A `StarGANLoss` namedtuple containing all losses for `stargan_model`.\n gen_lr: A scalar float `Tensor` or a Python number. The Generator base\n learning rate.\n dis_lr: A scalar float `Tensor` or a Python number. The Discriminator base\n learning rate.\n beta1: A scalar float `Tensor` or a Python number. The beta1 parameter to\n the `AdamOptimizer`.\n beta2: A scalar float `Tensor` or a Python number. The beta2 parameter to\n the `AdamOptimizer`.\n max_number_of_steps: A Python number. The total number of steps to train.\n\n Returns:\n A `GANTrainOps` namedtuple.\n \"\"\"\n\n gen_lr = _get_lr(gen_lr, max_number_of_steps)\n dis_lr = _get_lr(dis_lr, max_number_of_steps)\n gen_opt, dis_opt = _get_optimizer(gen_lr, dis_lr, beta1, beta2)\n train_ops = tfgan.gan_train_ops(\n model,\n loss,\n generator_optimizer=gen_opt,\n discriminator_optimizer=dis_opt,\n summarize_gradients=True,\n colocate_gradients_with_ops=True,\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n\n tf.summary.scalar('generator_lr', gen_lr)\n tf.summary.scalar('discriminator_lr', dis_lr)\n\n return train_ops\n\n\ndef _define_train_step(gen_disc_step_ratio):\n \"\"\"Get the training step for generator and discriminator for each GAN step.\n\n Args:\n gen_disc_step_ratio: A python number. 
The ratio of generator to\n discriminator training steps.\n\n Returns:\n GANTrainSteps namedtuple representing the training step configuration.\n \"\"\"\n\n if gen_disc_step_ratio <= 1:\n discriminator_step = int(1 / gen_disc_step_ratio)\n return tfgan.GANTrainSteps(1, discriminator_step)\n else:\n generator_step = int(gen_disc_step_ratio)\n return tfgan.GANTrainSteps(generator_step, 1)\n\n\ndef train(hparams):\n \"\"\"Trains a StarGAN.\n\n Args:\n hparams: An HParams instance containing the hyperparameters for training.\n \"\"\"\n\n # Create the log_dir if not exist.\n if not tf.io.gfile.exists(hparams.train_log_dir):\n tf.io.gfile.makedirs(hparams.train_log_dir)\n\n # Shard the model to different parameter servers.\n with tf.device(tf.train.replica_device_setter(hparams.ps_replicas)):\n\n # Create the input dataset.\n with tf.name_scope('inputs'), tf.device('/cpu:0'):\n images, labels = data_provider.provide_data('train', hparams.batch_size,\n hparams.patch_size)\n\n # Define the model.\n with tf.name_scope('model'):\n model = _define_model(images, labels)\n\n # Add image summary.\n tfgan.eval.add_stargan_image_summaries(\n model, num_images=3 * hparams.batch_size, display_diffs=True)\n\n # Define the model loss.\n loss = tfgan.stargan_loss(model)\n\n # Define the train ops.\n with tf.name_scope('train_ops'):\n train_ops = _define_train_ops(model, loss, hparams.generator_lr,\n hparams.discriminator_lr,\n hparams.adam_beta1, hparams.adam_beta2,\n hparams.max_number_of_steps)\n\n # Define the train steps.\n train_steps = _define_train_step(hparams.gen_disc_step_ratio)\n\n # Define a status message.\n status_message = tf.strings.join([\n 'Starting train step: ',\n tf.as_string(tf.train.get_or_create_global_step())\n ],\n name='status_message')\n\n # Train the model.\n tfgan.gan_train(\n train_ops,\n hparams.train_log_dir,\n get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),\n hooks=[\n tf.estimator.StopAtStepHook(num_steps=hparams.max_number_of_steps),\n tf.estimator.LoggingTensorHook([status_message], every_n_iter=10)\n ],\n master=hparams.tf_master,\n is_chief=hparams.task == 0)\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trains a GANEstimator on MNIST data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport numpy as np\nfrom PIL import Image as image_lib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow_gan as tfgan\n\nfrom tensorflow_gan.examples.mnist import data_provider\nfrom tensorflow_gan.examples.mnist import networks\n\nHParams = collections.namedtuple('HParams', [\n 'batch_size',\n 'max_number_of_steps',\n 'noise_dims',\n 'output_dir',\n])\n\n\ndef _get_train_input_fn(batch_size, noise_dims, num_parallel_calls=4):\n def train_input_fn():\n images, _ = data_provider.provide_data(\n 'train', batch_size, num_parallel_calls=num_parallel_calls)\n noise = tf.random.normal([batch_size, noise_dims])\n return noise, images\n return train_input_fn\n\n\ndef _get_predict_input_fn(batch_size, noise_dims):\n def predict_input_fn():\n noise = tf.random.normal([batch_size, noise_dims])\n return noise\n return predict_input_fn\n\n\ndef _unconditional_generator(noise, mode):\n \"\"\"MNIST generator with extra argument for tf.Estimator's `mode`.\"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n return networks.unconditional_generator(noise, is_training=is_training)\n\n\ndef train(hparams):\n \"\"\"Trains an MNIST GAN.\n\n Args:\n hparams: An HParams instance containing the hyperparameters for training.\n \"\"\"\n # Initialize GANEstimator with options and hyperparameters.\n gan_estimator = tfgan.estimator.GANEstimator(\n generator_fn=_unconditional_generator,\n discriminator_fn=networks.unconditional_discriminator,\n generator_loss_fn=tfgan.losses.wasserstein_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,\n generator_optimizer=tf.train.AdamOptimizer(0.001, 0.5),\n discriminator_optimizer=tf.train.AdamOptimizer(0.0001, 0.5),\n add_summaries=tfgan.estimator.SummaryType.IMAGES)\n\n # Train estimator.\n train_input_fn = _get_train_input_fn(hparams.batch_size, hparams.noise_dims)\n gan_estimator.train(train_input_fn, max_steps=hparams.max_number_of_steps)\n\n # Run inference.\n predict_input_fn = _get_predict_input_fn(36, hparams.noise_dims)\n prediction_iterable = gan_estimator.predict(predict_input_fn)\n predictions = np.array([next(prediction_iterable) for _ in xrange(36)])\n\n # Nicely tile.\n tiled_image = tfgan.eval.python_image_grid(predictions, grid_shape=(6, 6))\n\n # Write to disk.\n if not tf.io.gfile.exists(hparams.output_dir):\n tf.io.gfile.makedirs(hparams.output_dir)\n with tf.io.gfile.GFile(\n os.path.join(hparams.output_dir, 'unconditional_gan.png'), 'w') as f:\n # Convert tiled_image from float32 in [-1, 1] to unit8 [0, 255].\n pil_image = image_lib.fromarray(\n np.squeeze((255 / 2.0) * (tiled_image + 1.0), axis=2).astype(np.uint8))\n 
pil_image.convert('RGB').save(f, 'PNG')\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tfgan.examples.esrgan.train.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_gan.examples.esrgan import train_lib\n\nmock = tf.compat.v1.test.mock\n\n\nclass TrainTest(tf.test.TestCase):\n\n def setUp(self):\n # Parameters for a single step of training.\n super(TrainTest, self).setUp()\n self.hparams = train_lib.HParams(32, 4, '/content/', False, False, 256,\n '/content/', 1, 11, 1, 1, 0.5, 0.0001, 0.5,\n 0.001, 0.00005, 'L1', 0.001, 0.5,\n '/content/')\n d = tf.data.Dataset.from_tensor_slices(tf.random.normal([32, 256, 256, 3]))\n\n def lr(hr):\n lr = tf.image.resize(hr, [64, 64], method='bicubic')\n return lr, hr\n\n d = d.map(lr)\n d = d.batch(2)\n self.mock_dataset = d\n\n def test_pretrain_generator(self):\n \"\"\"Executes all the processes inside the phase-1 training step, once.\"\"\"\n self.assertIsNone(\n train_lib.pretrain_generator(self.hparams, self.mock_dataset))\n\n def test_train_generator(self):\n \"\"\"Executes the phase-2 training step for a single step, once.\"\"\"\n self.assertIsNone(train_lib.train_esrgan(self.hparams, self.mock_dataset))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some utilities for self-attention estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_gan.examples.self_attention_estimator import eval_lib\nimport tensorflow_gan as tfgan # tf\n\n\ndef get_tpu_run_config_from_hparams(hparams):\n \"\"\"Create a TPU-suitable RunConfig from HParams.\"\"\"\n tf.logging.info('tpu_location: %s', hparams.tpu_params.tpu_location)\n tf.logging.info('gcp_project: %s', hparams.tpu_params.gcp_project)\n tf.logging.info('tpu_zone: %s', hparams.tpu_params.tpu_zone)\n cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n tpu=hparams.tpu_params.tpu_location,\n project=hparams.tpu_params.gcp_project,\n zone=hparams.tpu_params.tpu_zone)\n if hparams.debug_params.eval_on_tpu:\n eval_training_input_configuration = tf.estimator.tpu.InputPipelineConfig.SLICED\n else:\n # InputPipelineConfig.SLICED is not supported when running on CPU.\n eval_training_input_configuration = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V1\n return tf.estimator.tpu.RunConfig(\n model_dir=hparams.model_dir,\n cluster=cluster_resolver,\n save_checkpoints_steps=hparams.train_steps_per_eval,\n tpu_config=tf.estimator.tpu.TPUConfig(\n iterations_per_loop=hparams.tpu_params.tpu_iterations_per_loop,\n eval_training_input_configuration=eval_training_input_configuration))\n\n\ndef get_run_config_from_hparams(hparams):\n mirrored_strategy = tf.distribute.MirroredStrategy()\n return tf.estimator.RunConfig(\n model_dir=hparams.model_dir,\n save_checkpoints_steps=hparams.train_steps_per_eval,\n train_distribute=mirrored_strategy)\n\n\ndef get_tpu_estimator(generator, discriminator, hparams, config):\n return tfgan.estimator.TPUGANEstimator(\n generator_fn=generator,\n discriminator_fn=discriminator,\n generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,\n generator_optimizer=tf.train.AdamOptimizer(hparams.generator_lr,\n hparams.beta1),\n discriminator_optimizer=tf.train.AdamOptimizer(hparams.discriminator_lr,\n hparams.beta1),\n prepare_arguments_for_eval_metric_fn=prepare_metric_arguments,\n get_eval_metric_ops_fn=functools.partial(get_metrics, hparams=hparams),\n eval_on_tpu=hparams.debug_params.eval_on_tpu,\n train_batch_size=hparams.train_batch_size,\n eval_batch_size=hparams.eval_batch_size,\n predict_batch_size=hparams.predict_batch_size,\n use_tpu=hparams.debug_params.use_tpu,\n config=config,\n params=hparams._asdict())\n\n\ndef make_gpu_get_metric_fn(hparams):\n \"\"\"Return a function compatible with GANEstimator's get_eval_metric_ops_fn.\"\"\"\n\n def gpu_get_metric(gan_model):\n \"\"\"A function compatible with GANEstimator's get_eval_metric_ops_fn arg.\"\"\"\n metrics_arguments = prepare_metric_arguments(\n 
gan_model.generator_inputs, gan_model.generated_data,\n gan_model.real_data, gan_model.discriminator_real_outputs,\n gan_model.discriminator_gen_outputs)\n metrics = get_metrics(hparams=hparams, **metrics_arguments)\n # Generate image summaries.\n real_data = gan_model.real_data\n generated_data = gan_model.generated_data\n real_images = (\n real_data['images'] if isinstance(real_data, dict) else real_data)\n gen_images = (\n generated_data['images']\n if isinstance(generated_data, dict) else generated_data)\n metrics.update(_generator_summary_ops(gen_images, real_images))\n return metrics\n\n return gpu_get_metric\n\n\ndef get_gpu_estimator(generator, discriminator, hparams, config):\n \"\"\"Returns an Estimator object to be used for training with GPUs.\"\"\"\n\n return tfgan.estimator.GANEstimator(\n generator_fn=generator,\n discriminator_fn=discriminator,\n generator_loss_fn=tfgan.losses.wasserstein_hinge_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_hinge_discriminator_loss,\n generator_optimizer=tf.train.AdamOptimizer(hparams.generator_lr,\n hparams.beta1),\n discriminator_optimizer=tf.train.AdamOptimizer(hparams.discriminator_lr,\n hparams.beta1),\n get_eval_metric_ops_fn=make_gpu_get_metric_fn(hparams),\n config=config,\n params=hparams._asdict())\n\n\ndef prepare_metric_arguments(generator_inputs, generated_data, real_data,\n discriminator_real_outputs,\n discriminator_gen_outputs):\n \"\"\"Prepares the arguments needed for get_metrics.\n\n When training on TPUs, this function should be executed on TPU.\n\n Args:\n generator_inputs: Inputs to the generator fn.\n generated_data: Output from the generator.\n real_data: A sample of real data.\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data.\n\n Returns:\n A metric dictionary.\n \"\"\"\n del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs\n\n real_images = (real_data['images'] if isinstance(real_data, dict) else\n real_data)\n gen_images = (generated_data['images'] if isinstance(generated_data, dict)\n else generated_data)\n # Get logits and pools for real and generated images.\n real_logits, real_pools = eval_lib.get_activations(\n lambda: real_images, num_batches=1, get_logits=True)\n fake_logits, fake_pools = eval_lib.get_activations(\n lambda: gen_images, num_batches=1, get_logits=True)\n\n return {\n 'real_logits': real_logits,\n 'real_pools': real_pools,\n 'fake_logits': fake_logits,\n 'fake_pools': fake_pools\n }\n\n\ndef get_metrics(real_logits, real_pools, fake_logits, fake_pools, hparams):\n \"\"\"Return metrics for SAGAN experiment on TPU, CPU, or GPU.\n\n When training on TPUs, this function should be executed on the CPU.\n\n Args:\n real_logits: The real_logits object retured by prepare_metric_arguments.\n real_pools: The real_pools object retured by prepare_metric_arguments.\n fake_logits: The fake_logits object retured by prepare_metric_arguments.\n fake_pools: The fake_pools object retured by prepare_metric_arguments.\n hparams: An hparams object.\n\n Returns:\n A metric dictionary.\n \"\"\"\n del hparams\n metric_dict = {\n 'eval/real_incscore':\n tfgan.eval.classifier_score_from_logits_streaming(real_logits),\n 'eval/incscore':\n tfgan.eval.classifier_score_from_logits_streaming(fake_logits),\n 'eval/fid':\n tfgan.eval.frechet_classifier_distance_from_activations_streaming(\n real_pools, fake_pools),\n }\n return metric_dict\n\n\ndef _generator_summary_ops(generated_images, real_images):\n 
\"\"\"Creates a dictionary of image summaries.\"\"\"\n real_img_summ = tf.summary.image('real_images', real_images)\n gen_img_summ = tf.summary.image('gen_images', generated_images)\n real_img_grid = tf.summary.image(\n 'real_images_grid',\n tfgan.eval.image_grid(\n real_images[:16],\n grid_shape=(4, 4),\n image_shape=(128, 128),\n num_channels=3))\n gen_img_grid = tf.summary.image(\n 'generated_images_grid',\n tfgan.eval.image_grid(\n generated_images[:16],\n grid_shape=(4, 4),\n image_shape=(128, 128),\n num_channels=3))\n return {\n 'images/real': (real_img_summ, tf.no_op()),\n 'images/gen': (gen_img_summ, tf.no_op()),\n 'image_grid/real': (real_img_grid, tf.no_op()),\n 'image_grid/gen': (gen_img_grid, tf.no_op()),\n }\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Normalization functions.\n\nCopied from tensorflow/contrib/layers/python/layers/normalization.py.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\n__all__ = [\n 'group_norm',\n 'instance_norm',\n]\n\nDATA_FORMAT_NCHW = 'NCHW'\nDATA_FORMAT_NHWC = 'NHWC'\n\n\ndef instance_norm(inputs,\n center=True,\n scale=True,\n epsilon=1e-6,\n activation_fn=None,\n param_initializers=None,\n reuse=None,\n outputs_collections=None,\n trainable=True,\n data_format=DATA_FORMAT_NHWC,\n scope=None):\n \"\"\"Functional interface for the instance normalization layer.\n\n Reference: https://arxiv.org/abs/1607.08022.\n\n \"Instance Normalization: The Missing Ingredient for Fast Stylization\"\n Dmitry Ulyanov, Andrea Vedaldi, Victor Lempitsky\n\n Args:\n inputs: A tensor with 2 or more dimensions, where the first dimension has\n `batch_size`. The normalization is over all but the last dimension if\n `data_format` is `NHWC` and the second dimension if `data_format` is\n `NCHW`.\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `tf.nn.relu`), this can\n be disabled since the scaling can be done by the next layer.\n epsilon: Small float added to variance to avoid dividing by zero.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n param_initializers: Optional initializers for beta, gamma, moving mean and\n moving variance.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If the rank of `inputs` is undefined.\n ValueError: If rank or channels dimension of `inputs` is undefined.\n \"\"\"\n inputs = tf.convert_to_tensor(value=inputs)\n inputs_shape = inputs.shape\n inputs_rank = inputs.shape.ndims\n\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' 
% inputs.name)\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n\n with tf.compat.v1.variable_scope(\n scope, 'InstanceNorm', [inputs], reuse=reuse):\n if data_format == DATA_FORMAT_NCHW:\n reduction_axis = 1\n # For NCHW format, rather than relying on implicit broadcasting, we\n # explicitly reshape the params to params_shape_broadcast when computing\n # the moments and the batch normalization.\n params_shape_broadcast = list(\n [1, tf.compat.dimension_value(inputs_shape[1])] +\n [1 for _ in range(2, inputs_rank)])\n else:\n reduction_axis = inputs_rank - 1\n params_shape_broadcast = None\n moments_axes = list(range(inputs_rank))\n del moments_axes[reduction_axis]\n del moments_axes[0]\n params_shape = inputs_shape[reduction_axis:reduction_axis + 1]\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined channels dimension %s.' % (\n inputs.name, params_shape))\n\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n dtype = inputs.dtype.base_dtype\n if param_initializers is None:\n param_initializers = {}\n if center:\n beta_initializer = param_initializers.get(\n 'beta', tf.compat.v1.initializers.zeros())\n beta = tf.compat.v1.get_variable(\n name='beta',\n shape=params_shape,\n dtype=dtype,\n initializer=beta_initializer,\n trainable=trainable)\n if params_shape_broadcast:\n beta = tf.reshape(beta, params_shape_broadcast)\n if scale:\n gamma_initializer = param_initializers.get(\n 'gamma', tf.compat.v1.initializers.ones())\n gamma = tf.compat.v1.get_variable(\n name='gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=gamma_initializer,\n trainable=trainable)\n if params_shape_broadcast:\n gamma = tf.reshape(gamma, params_shape_broadcast)\n\n # Calculate the moments (instance activations).\n mean, variance = tf.nn.moments(x=inputs, axes=moments_axes, keepdims=True)\n\n # Compute instance normalization.\n outputs = tf.nn.batch_normalization(\n inputs, mean, variance, beta, gamma, epsilon, name='instancenorm')\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n if outputs_collections:\n tf.compat.v1.add_to_collections(outputs_collections, outputs)\n\n return outputs\n\n\ndef group_norm(inputs,\n groups=32,\n channels_axis=-1,\n reduction_axes=(-3, -2),\n center=True,\n scale=True,\n epsilon=1e-6,\n activation_fn=None,\n param_initializers=None,\n reuse=None,\n outputs_collections=None,\n trainable=True,\n scope=None,\n mean_close_to_zero=False):\n \"\"\"Functional interface for the group normalization layer.\n\n Reference: https://arxiv.org/abs/1803.08494.\n\n \"Group Normalization\", Yuxin Wu, Kaiming He\n\n Args:\n inputs: A Tensor with at least 2 dimensions one which is channels. All\n shape dimensions except for batch must be fully defined.\n groups: Integer. Divide the channels into this number of groups over which\n normalization statistics are computed. This number must be commensurate\n with the number of channels in `inputs`.\n channels_axis: An integer. Specifies index of channels axis which will be\n broken into `groups`, each of which whose statistics will be computed\n across. Must be mutually exclusive with `reduction_axes`. Preferred usage\n is to specify negative integers to be agnostic as to whether a batch\n dimension is included.\n reduction_axes: Tuple of integers. Specifies dimensions over which\n statistics will be accumulated. Must be mutually exclusive with\n `channels_axis`. 
Statistics will not be accumulated across axes not\n specified in `reduction_axes` nor `channel_axis`. Preferred usage is to\n specify negative integers to be agnostic to whether a batch dimension is\n included.\n\n Some sample usage cases:\n NHWC format: channels_axis=-1, reduction_axes=[-3, -2]\n NCHW format: channels_axis=-3, reduction_axes=[-2, -1]\n\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `tf.nn.relu`), this can\n be disabled since the scaling can be done by the next layer.\n epsilon: Small float added to variance to avoid dividing by zero.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n param_initializers: Optional initializers for beta, gamma, moving mean and\n moving variance.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n scope: Optional scope for `variable_scope`.\n mean_close_to_zero: The mean of `input` before ReLU will be close to zero\n when batch size >= 4k for Resnet-50 on TPU. If `True`, use\n `tf.nn.sufficient_statistics` and `tf.nn.normalize_moments` to calculate\n the variance. This is the same behavior as `fused` equals `True` in batch\n normalization. If `False`, use `tf.nn.moments` to calculate the variance.\n When `mean` is close to zero, like 1e-4, use `mean` to calculate the\n variance may have poor result due to repeated roundoff error and\n denormalization in `mean`. When `mean` is large, like 1e2,\n sum(`input`^2) is so large that only the high-order digits of the elements\n are being accumulated. Thus, use sum(`input` - `mean`)^2/n to calculate\n the variance has better accuracy compared to (sum(`input`^2)/n - `mean`^2)\n when `mean` is large.\n\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: If the rank of `inputs` is undefined.\n ValueError: If rank or channels dimension of `inputs` is undefined.\n ValueError: If number of groups is not commensurate with number of channels.\n ValueError: If reduction_axes or channels_axis are out of bounds.\n ValueError: If reduction_axes are not mutually exclusive with channels_axis.\n \"\"\"\n # TODO(shlens): Support partially defined shapes for the inputs.\n inputs = tf.convert_to_tensor(value=inputs)\n\n if inputs.shape.ndims is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n if channels_axis > (inputs.shape.ndims - 1):\n raise ValueError('Axis is out of bounds.')\n\n # Use dynamic shape for not fully defined dimensions in the inputs.\n dyanmic_shape = tf.shape(input=inputs)\n input_shape_list = []\n for i, dim in enumerate(inputs.shape):\n if tf.compat.dimension_value(dim) is None:\n input_shape_list.append(dyanmic_shape[i])\n else:\n input_shape_list.append(dim)\n\n # Standardize the channels_axis to be positive and identify # of channels.\n if channels_axis < 0:\n channels_axis = inputs.shape.ndims + channels_axis\n channels = tf.compat.dimension_value(inputs.shape[channels_axis])\n\n if channels is None:\n raise ValueError('Inputs %s has undefined channel dimension: %d.' 
% (\n inputs.name, channels_axis))\n\n # Standardize the reduction_axes to be positive.\n reduction_axes = list(reduction_axes)\n for i in range(len(reduction_axes)):\n if reduction_axes[i] < 0:\n reduction_axes[i] += inputs.shape.ndims\n\n for a in reduction_axes:\n if a > inputs.shape.ndims:\n raise ValueError('Axis is out of bounds.')\n if tf.compat.dimension_value(inputs.shape[a]) is None:\n raise ValueError('Inputs %s has undefined dimensions %d.' % (\n inputs.name, a))\n if channels_axis == a:\n raise ValueError('reduction_axis must be mutually exclusive '\n 'with channels_axis')\n if groups > channels:\n raise ValueError('Invalid groups %d for %d channels.' % (groups, channels))\n if channels % groups != 0:\n raise ValueError('%d channels is not commensurate with %d groups.' %\n (channels, groups))\n\n # Determine axes before channels. Some examples of common image formats:\n # 'NCHW': before = [N], after = [HW]\n # 'NHWC': before = [NHW], after = []\n axes_before_channels = input_shape_list[:channels_axis]\n axes_after_channels = input_shape_list[channels_axis+1:]\n\n # Manually broadcast the parameters to conform to the number of groups.\n params_shape_broadcast = ([1] * len(axes_before_channels) +\n [groups, channels // groups] +\n [1] * len(axes_after_channels))\n\n # Reshape the input by the group within the channel dimension.\n inputs_shape = (axes_before_channels + [groups, channels // groups] +\n axes_after_channels)\n inputs = tf.reshape(inputs, inputs_shape)\n\n # Determine the dimensions across which moments are calculated.\n moments_axes = [channels_axis + 1]\n for a in reduction_axes:\n if a > channels_axis:\n moments_axes.append(a + 1)\n else:\n moments_axes.append(a)\n\n with tf.compat.v1.variable_scope(scope, 'GroupNorm', [inputs], reuse=reuse):\n # Note that the params_shape is the number of channels always.\n params_shape = [channels]\n\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n dtype = inputs.dtype.base_dtype\n if param_initializers is None:\n param_initializers = {}\n if center:\n beta_initializer = param_initializers.get(\n 'beta', tf.compat.v1.initializers.zeros())\n beta = tf.compat.v1.get_variable(\n name='beta',\n shape=params_shape,\n dtype=dtype,\n initializer=beta_initializer,\n trainable=trainable)\n beta = tf.reshape(beta, params_shape_broadcast)\n\n if scale:\n gamma_initializer = param_initializers.get(\n 'gamma', tf.compat.v1.initializers.ones())\n gamma = tf.compat.v1.get_variable(\n name='gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=gamma_initializer,\n trainable=trainable)\n gamma = tf.reshape(gamma, params_shape_broadcast)\n\n # Calculate the moments.\n if mean_close_to_zero:\n # One pass algorithm returns better result when mean is close to zero.\n counts, means_ss, variance_ss, _ = tf.nn.sufficient_statistics(\n inputs, moments_axes, keepdims=True)\n mean, variance = tf.nn.normalize_moments(\n counts, means_ss, variance_ss, shift=None)\n else:\n mean, variance = tf.nn.moments(\n x=inputs, axes=moments_axes, keepdims=True)\n\n # Compute normalization.\n # TODO(shlens): Fix tf.nn.batch_normalization to handle the 5-D Tensor\n # appropriately so that this operation may be faster.\n gain = tf.math.rsqrt(variance + epsilon)\n offset = -mean * gain\n if gamma is not None:\n gain *= gamma\n offset *= gamma\n if beta is not None:\n offset += beta\n outputs = inputs * gain + offset\n\n # Collapse the groups into the channel dimension.\n outputs = tf.reshape(outputs, 
input_shape_list)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n if outputs_collections:\n tf.compat.v1.add_to_collections(outputs_collections, outputs)\n\n return outputs\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"StarGAN data provider.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow_gan.examples.cyclegan import data_provider\n\n\ndef provide_dataset(split, batch_size, patch_size, num_parallel_calls=None,\n shuffle=True,\n domains=('Black_Hair', 'Blond_Hair', 'Brown_Hair')):\n \"\"\"Provides batches of CelebA image patches.\n\n Args:\n split: Either 'train' or 'test'.\n batch_size: The number of images in each batch.\n patch_size: Python int. The patch size to extract.\n num_parallel_calls: Number of threads dedicated to parsing.\n shuffle: Whether to shuffle.\n domains: Name of domains to transform between. Must be in Celeb A dataset.\n\n Returns:\n A tf.data.Dataset with:\n * images: `Tensor` of size [batch_size, 32, 32, 3] and type tf.float32.\n Output pixel values are in [-1, 1].\n * labels: A `Tensor` of size [batch_size, 10] of one-hot label\n encodings with type tf.int32, or a `Tensor` of size [batch_size],\n depending on the value of `one_hot`.\n\n Raises:\n ValueError: If `split` isn't `train` or `test`.\n \"\"\"\n ds = tfds.load('celeb_a:2.*.*', split=split, shuffle_files=shuffle)\n\n def _filter_pred(attribute):\n def _filter(element):\n return element['attributes'][attribute]\n return _filter\n dss = tuple([ds.filter(_filter_pred(attribute)) for attribute in domains])\n ds = tf.data.Dataset.zip(dss)\n\n def _preprocess(*elements):\n \"\"\"Map elements to the example dicts expected by the model.\"\"\"\n output_dict = {}\n num_domains = len(elements)\n for idx, (domain, elem) in enumerate(zip(domains, elements)):\n uint8_img = elem['image']\n patch = data_provider.full_image_to_patch(uint8_img, patch_size)\n label = tf.one_hot(idx, num_domains)\n output_dict[domain] = {'images': patch, 'labels': label}\n return output_dict\n\n ds = (ds\n .map(_preprocess, num_parallel_calls=num_parallel_calls)\n .cache()\n .repeat())\n if shuffle:\n ds = ds.shuffle(buffer_size=10000, reshuffle_each_iteration=True)\n ds = (ds\n .batch(batch_size, drop_remainder=True)\n .prefetch(tf.data.experimental.AUTOTUNE))\n\n return ds\n\n\ndef provide_data(split, batch_size, patch_size, num_parallel_calls=None,\n shuffle=True,\n domains=('Black_Hair', 'Blond_Hair', 'Brown_Hair')):\n \"\"\"Provides batches of CelebA image patches.\n\n Args:\n split: Either 'train' or 'test'.\n batch_size: The number of images in each batch.\n patch_size: Python int. The patch size to extract.\n num_parallel_calls: Number of threads dedicated to parsing.\n shuffle: Whether to shuffle.\n domains: Name of domains to transform between. Must be in Celeb A dataset.\n\n Returns:\n A tf.data.Dataset with:\n * images: `Tensor` of size [batch_size, patch_size, patch_size, 3] and\n type tf.float32. 
Output pixel values are in [-1, 1].\n * labels: A `Tensor` of size [batch_size, 10] of one-hot label\n encodings with type tf.int32, or a `Tensor` of size [batch_size],\n depending on the value of `one_hot`.\n\n Raises:\n ValueError: If `split` isn't `train` or `test`.\n \"\"\"\n ds = provide_dataset(split, batch_size, patch_size, num_parallel_calls,\n shuffle, domains)\n\n next_batch = tf.data.make_one_shot_iterator(ds).get_next()\n domains = next_batch.keys()\n images = [next_batch[domain]['images'] for domain in domains]\n labels = [next_batch[domain]['labels'] for domain in domains]\n\n return images, labels\n"
] | [
[
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.one_hot"
],
[
"tensorflow.compat.v1.math.mod",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.math.floordiv",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.nn.leaky_relu",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.layers.flatten",
"tensorflow.compat.v1.compat.dimension_value",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.nest.map_structure"
],
[
"tensorflow.compat.v1.train.polynomial_decay",
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.estimator.LoggingTensorHook",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.io.gfile.makedirs",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.estimator.StopAtStepHook",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.train.replica_device_setter",
"tensorflow.compat.v1.cond",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.io.gfile.makedirs",
"numpy.squeeze",
"tensorflow.compat.v1.random.normal"
],
[
"tensorflow.image.resize",
"tensorflow.random.normal",
"tensorflow.test.main"
],
[
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.no_op",
"tensorflow.compat.v1.summary.image",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.estimator.tpu.TPUConfig",
"tensorflow.compat.v1.estimator.RunConfig",
"tensorflow.compat.v1.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.compat.v1.distribute.MirroredStrategy"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.compat.v1.add_to_collections",
"tensorflow.nn.batch_normalization",
"tensorflow.compat.v1.initializers.ones",
"tensorflow.shape",
"tensorflow.compat.v1.initializers.zeros",
"tensorflow.nn.moments",
"tensorflow.reshape",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.dimension_value",
"tensorflow.math.rsqrt",
"tensorflow.nn.sufficient_statistics",
"tensorflow.nn.normalize_moments",
"tensorflow.compat.v1.variable_scope"
],
[
"tensorflow.compat.v1.data.Dataset.zip",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.compat.v1.one_hot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
geo7/scientific-visualization-book | [
"389766215aa6b234ed1cf560a3768437d41d1d37",
"389766215aa6b234ed1cf560a3768437d41d1d37",
"389766215aa6b234ed1cf560a3768437d41d1d37",
"389766215aa6b234ed1cf560a3768437d41d1d37"
] | [
"code/layout/layout-classical.py",
"code/optimization/line-benchmark.py",
"code/ornaments/legend-regular.py",
"code/rules/graphics.py"
] | [
"# ----------------------------------------------------------------------------\n# Title: Scientific Visualisation - Python & Matplotlib\n# Author: Nicolas P. Rougier\n# License: BSD\n# ----------------------------------------------------------------------------\n#\n# ----------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n\np = plt.rcParams\np[\"figure.figsize\"] = 7, 7\np[\"font.sans-serif\"] = [\"Roboto Condensed\"]\np[\"font.weight\"] = \"light\"\np[\"ytick.minor.visible\"] = True\np[\"xtick.minor.visible\"] = True\np[\"axes.grid\"] = True\np[\"grid.color\"] = \"0.5\"\np[\"grid.linewidth\"] = 0.5\n\n\nX = np.linspace(-np.pi, np.pi, 257, endpoint=True)\nC, S = np.cos(X), np.sin(X)\n\nfig = plt.figure()\nnrows, ncols = 3, 3\n\n\ndef plot(ax, text):\n ax.set_xlim(0, 1)\n ax.set_xticks(np.linspace(0, 1, 5))\n ax.set_xlabel(\"X Label\")\n ax.set_ylim(0, 1)\n ax.set_yticks(np.linspace(0, 1, 5))\n ax.set_ylabel(\"Y Label\")\n ax.text(\n 0.5, 0.5, text, alpha=0.75, ha=\"center\", va=\"center\", weight=\"bold\", size=12\n )\n ax.set_title(\"Title\", family=\"Roboto\", weight=500)\n\n\nfor i in range(1, nrows * ncols + 1):\n plot(\n plt.subplot(nrows, ncols, i, aspect=1), \"subplot(%d,%d,%d)\" % (nrows, ncols, i)\n )\n\nplt.tight_layout()\nplt.savefig(\"../../figures/layout/layout-classical.pdf\")\nplt.show()\n",
"# ----------------------------------------------------------------------------\n# Title: Scientific Visualisation - Python & Matplotlib\n# Author: Nicolas P. Rougier\n# License: BSD\n# ----------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\n\nn_lines, n_points = 1_000, 2\nX = [np.random.uniform(0, 1, n_points) for i in range(n_lines)]\nY = [np.random.uniform(0, 1, n_points) for i in range(n_lines)]\n\nfig = plt.figure(figsize=(9, 3.5))\n\n# -----------------------------\nax = fig.add_subplot(1, 3, 1, aspect=1, xlim=[0, 1], xticks=[], ylim=[0, 1], yticks=[])\nstart = timer()\nfor x, y in zip(X, Y):\n ax.plot(x, y, color=\"blue\", alpha=0.1, linewidth=0.5)\nend = timer()\nax.set_title(\"Individual plots: %.4fs\" % (end - start))\n\n\n# -----------------------------\nax = fig.add_subplot(1, 3, 2, aspect=1, xlim=[0, 1], xticks=[], ylim=[0, 1], yticks=[])\nstart = timer()\nX_, Y_ = [], []\nfor x, y in zip(X, Y):\n X_.extend(x), X_.extend([None])\n Y_.extend(y), Y_.extend([None])\nax.plot(X_, Y_, color=\"blue\", alpha=0.1, linewidth=0.5)\nend = timer()\nax.set_title(\"Unified plot: %.4fs\" % (end - start))\n\n# -----------------------------\nfrom matplotlib.collections import LineCollection\n\nax = fig.add_subplot(1, 3, 3, aspect=1, xlim=[0, 1], xticks=[], ylim=[0, 1], yticks=[])\nstart = timer()\nV = [np.stack([x, y], axis=1) for x, y in zip(X, Y)]\nlines = LineCollection(V, color=\"blue\", alpha=0.1, linewidth=0.5)\nax.add_collection(lines)\nend = timer()\nax.set_title(\"Line collection: %.4fs\" % (end - start))\n\nplt.tight_layout()\nplt.savefig(\"../../figures/optimization/line-benchmark.png\", dpi=600)\nplt.show()\n",
"# ----------------------------------------------------------------------------\n# Title: Scientific Visualisation - Python & Matplotlib\n# Author: Nicolas P. Rougier\n# License: BSD\n# ----------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(6, 2.5))\nax = plt.subplot(\n xlim=[-np.pi, np.pi],\n xticks=[-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi],\n xticklabels=[\"-π\", \"-π/2\", \"0\", \"+π/2\", \"+π\"],\n ylim=[-1, 1],\n yticks=[-1, 0, 1],\n yticklabels=[\"-1\", \"0\", \"+1\"],\n)\n\nX = np.linspace(-np.pi, np.pi, 256, endpoint=True)\nC, S = np.cos(X), np.sin(X)\n\nax.plot(X, C, label=\"cosine\", clip_on=False)\nax.plot(X, S, label=\"sine\", clip_on=False)\n\nax.spines[\"right\"].set_visible(False)\nax.spines[\"top\"].set_visible(False)\nax.spines[\"left\"].set_position((\"data\", -3.25))\nax.spines[\"bottom\"].set_position((\"data\", -1.25))\nax.legend(edgecolor=\"None\")\n\nplt.tight_layout()\nplt.savefig(\"../../figures/ornaments/legend-regular.pdf\")\nplt.show()\n",
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright INRIA\n# Contributors: Wahiba Taouali ([email protected])\n# Nicolas P. Rougier ([email protected])\n#\n# This software is governed by the CeCILL license under French law and abiding\n# by the rules of distribution of free software. You can use, modify and/ or\n# redistribute the software under the terms of the CeCILL license as circulated\n# by CEA, CNRS and INRIA at the following URL\n# http://www.cecill.info/index.en.html.\n#\n# As a counterpart to the access to the source code and rights to copy, modify\n# and redistribute granted by the license, users are provided only with a\n# limited warranty and the software's author, the holder of the economic\n# rights, and the successive licensors have only limited liability.\n#\n# In this respect, the user's attention is drawn to the risks associated with\n# loading, using, modifying and/or developing or reproducing the software by\n# the user in light of its specific status of free software, that may mean that\n# it is complicated to manipulate, and that also therefore means that it is\n# reserved for developers and experienced professionals having in-depth\n# computer knowledge. Users are therefore encouraged to load and test the\n# software's suitability as regards their requirements in conditions enabling\n# the security of their systems and/or data to be ensured and, more generally,\n# to use and operate it in the same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license and that you accept its terms.\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\n\nfrom projections import *\n\n# -----------------------------------------------------------------------------\ndef polar_frame(ax, title=None, legend=False, zoom=False, labels=True):\n \"\"\" Draw a polar frame \"\"\"\n\n for rho in [0, 2, 5, 10, 20, 40, 60, 80, 90]:\n lw, color, alpha = 1, \"0.00\", 0.25\n if rho == 90 and not zoom:\n color, lw, alpha = \"0.00\", 2, 1\n\n n = 500\n R = np.ones(n) * rho / 90.0\n T = np.linspace(-np.pi / 2, np.pi / 2, n)\n X, Y = polar_to_cartesian(R, T)\n ax.plot(X, Y, color=color, lw=lw, alpha=alpha)\n\n if not zoom and rho in [0, 10, 20, 40, 80] and labels:\n ax.text(\n X[-1] * 1.0 - 0.075,\n Y[-1],\n u\"%d°\" % rho,\n color=\"k\", # size=15,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n\n for theta in [-90, -60, -30, 0, +30, +60, +90]:\n lw, color, alpha = 1, \"0.00\", 0.25\n if theta in [-90, +90] and not zoom:\n color, lw, alpha = \"0.00\", 2, 1\n angle = theta / 90.0 * np.pi / 2\n\n n = 500\n R = np.linspace(0, 1, n)\n T = np.ones(n) * angle\n X, Y = polar_to_cartesian(R, T)\n ax.plot(X, Y, color=color, lw=lw, alpha=alpha)\n\n if not zoom and theta in [-90, -60, -30, +30, +60, +90] and labels:\n ax.text(\n X[-1] * 1.05,\n Y[-1] * 1.05,\n u\"%d°\" % theta,\n color=\"k\", # size=15,\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n )\n d = 0.01\n ax.set_xlim(0.0 - d, 1.0 + d)\n ax.set_ylim(-1.0 - d, 1.0 + d)\n ax.set_xticks([])\n ax.set_yticks([])\n\n if legend:\n ax.set_frame_on(True)\n ax.spines[\"left\"].set_color(\"none\")\n 
ax.spines[\"right\"].set_color(\"none\")\n ax.spines[\"top\"].set_color(\"none\")\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.spines[\"bottom\"].set_position((\"data\", -1.2))\n ax.set_xticks([])\n ax.text(\n 0.0,\n -1.1,\n \"$\\longleftarrow$ Foveal\",\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n size=12,\n )\n ax.text(\n 1.0,\n -1.1,\n \"Peripheral $\\longrightarrow$\",\n verticalalignment=\"top\",\n horizontalalignment=\"right\",\n size=12,\n )\n else:\n ax.set_frame_on(False)\n if title:\n ax.title(title)\n\n\n# -----------------------------------------------------------------------------\ndef logpolar_frame(ax, title=None, legend=False, labels=True):\n \"\"\" Draw a log polar frame \"\"\"\n\n for rho in [2, 5, 10, 20, 40, 60, 80, 90]:\n lw, color, alpha = 1, \"0.00\", 0.25\n if rho == 90:\n color, lw, alpha = \"0.00\", 2, 1\n\n n = 500\n R = np.ones(n) * rho / 90.0\n T = np.linspace(-np.pi / 2, np.pi / 2, n)\n X, Y = polar_to_logpolar(R, T)\n X, Y = X * 2, 2 * Y - 1\n ax.plot(X, Y, color=color, lw=lw, alpha=alpha)\n if labels and rho in [2, 5, 10, 20, 40, 80]:\n ax.text(\n X[-1],\n Y[-1] + 0.05,\n u\"%d°\" % rho,\n color=\"k\", # size=15,\n horizontalalignment=\"right\",\n verticalalignment=\"bottom\",\n )\n\n for theta in [-90, -60, -30, 0, +30, +60, +90]:\n lw, color, alpha = 1, \"0.00\", 0.25\n if theta in [-90, +90]:\n color, lw, alpha = \"0.00\", 2, 1\n angle = theta / 90.0 * np.pi / 2\n\n n = 500\n R = np.linspace(0, 1, n)\n T = np.ones(n) * angle\n X, Y = polar_to_logpolar(R, T)\n X, Y = X * 2, 2 * Y - 1\n ax.plot(X, Y, color=color, lw=lw, alpha=alpha)\n if labels:\n ax.text(\n X[-1] * 1.0 + 0.05,\n Y[-1] * 1.0,\n u\"%d°\" % theta,\n color=\"k\", # size=15,\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n )\n\n d = 0.01\n ax.set_xlim(0.0 - d, 2.0 + d)\n ax.set_ylim(-1.0 - d, 1.0 + d)\n ax.set_xticks([])\n ax.set_yticks([])\n if legend:\n ax.set_frame_on(True)\n ax.spines[\"left\"].set_color(\"none\")\n ax.spines[\"right\"].set_color(\"none\")\n ax.spines[\"top\"].set_color(\"none\")\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.spines[\"bottom\"].set_position((\"data\", -1.2))\n ax.set_xticks([0, 2])\n ax.set_xticklabels([\"0\", \"4.8 (mm)\"])\n ax.text(\n 0.0,\n -1.1,\n \"$\\longleftarrow$ Rostral\",\n verticalalignment=\"top\",\n horizontalalignment=\"left\",\n size=12,\n )\n ax.text(\n 2,\n -1.1,\n \"Caudal $\\longrightarrow$\",\n verticalalignment=\"top\",\n horizontalalignment=\"right\",\n size=12,\n )\n else:\n ax.set_frame_on(False)\n if title:\n ax.title(title)\n\n\n# -----------------------------------------------------------------------------\ndef polar_imshow(axis, Z, *args, **kwargs):\n kwargs[\"interpolation\"] = kwargs.get(\"interpolation\", \"nearest\")\n kwargs[\"cmap\"] = kwargs.get(\"cmap\", plt.cm.gray_r)\n # kwargs['vmin'] = kwargs.get('vmin', Z.min())\n # kwargs['vmax'] = kwargs.get('vmax', Z.max())\n kwargs[\"vmin\"] = kwargs.get(\"vmin\", 0)\n kwargs[\"vmax\"] = kwargs.get(\"vmax\", 1)\n kwargs[\"origin\"] = kwargs.get(\"origin\", \"lower\")\n axis.imshow(Z, extent=[0, 1, -1, 1], *args, **kwargs)\n\n\n# -----------------------------------------------------------------------------\ndef logpolar_imshow(axis, Z, *args, **kwargs):\n kwargs[\"interpolation\"] = kwargs.get(\"interpolation\", \"nearest\")\n kwargs[\"cmap\"] = kwargs.get(\"cmap\", plt.cm.gray_r)\n # kwargs['vmin'] = kwargs.get('vmin', Z.min())\n # kwargs['vmax'] = kwargs.get('vmax', Z.max())\n kwargs[\"vmin\"] = kwargs.get(\"vmin\", 0)\n kwargs[\"vmax\"] = 
kwargs.get(\"vmax\", 1)\n kwargs[\"origin\"] = kwargs.get(\"origin\", \"lower\")\n im = axis.imshow(Z, extent=[0, 2, -1, 1], *args, **kwargs)\n # axins = inset_axes(axis, width='25%', height='5%', loc=3)\n # vmin, vmax = Z.min(), Z.max()\n # plt.colorbar(im, cax=axins, orientation='horizontal', ticks=[vmin,vmax], format = '%.2f')\n # axins.xaxis.set_ticks_position('bottom')\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.savefig",
"numpy.stack",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.linspace",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RabbitWhite1/torchrec | [
"031bcca5300d52099eb7490ff06fe0301c1c02f2",
"031bcca5300d52099eb7490ff06fe0301c1c02f2",
"031bcca5300d52099eb7490ff06fe0301c1c02f2"
] | [
"torchrec/modules/deepfm.py",
"torchrec/distributed/tests/test_train_pipeline.py",
"torchrec/distributed/sharding/cw_sharding.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\n.. fb:display_title::\n Deep Factorization-Machine Modules\n\n=====\n\nThe following modules are based off the `Deep Factorization-Machine (DeepFM) paper\n<https://arxiv.org/pdf/1703.04247.pdf>`_\n\n* Class DeepFM implents the DeepFM Framework\n* Class FactorizationMachine implements FM as noted in the above paper.\n\n\"\"\"\n\nfrom typing import List\n\nimport torch\nfrom torch import nn\nfrom torch.fx import wrap\n\n\n# pyre-ignore[56]: Pyre was not able to infer the type of the decorator `torch.fx.wrap`.\n@wrap\ndef _get_flatten_input(inputs: List[torch.Tensor]) -> torch.Tensor:\n return torch.cat(\n [input.flatten(1) for input in inputs],\n dim=1,\n )\n\n\nclass DeepFM(nn.Module):\n r\"\"\"\n This is the `DeepFM module <https://arxiv.org/pdf/1703.04247.pdf>`_\n\n This module does not cover the end-end functionality of the published paper.\n Instead, it covers only the deep component of the publication. It is used to learn\n high-order feature interactions. If low-order feature interactions should\n be learnt, please use `FactorizationMachine` module instead, which will share\n the same embedding input of this module.\n\n To support modeling flexibility, we customize the key components as:\n\n * Different from the public paper, we change the input from raw sparse\n features to embeddings of the features. It allows flexibility in embedding\n dimensions and the number of embeddings, as long as all embedding tensors\n have the same batch size.\n * On top of the public paper, we allow users to customize the hidden layer\n to be any module, not limited to just MLP.\n\n The general architecture of the module is like::\n\n # 1 x 1 output\n # ^\n # pass into `dense_module`\n # ^\n # 1 x 90\n # ^\n # concat\n # ^\n # 1 x 20, 1 x 30, 1 x 40 list of embeddings\n\n Args:\n dense_module (nn.Module):\n any customerized module that can be used (such as MLP) in DeepFM. The\n `in_features` of this module must be equal to the elements counts. For\n example, the input embeddings is [randn(3, 2, 3), randn(3, 4, 5)], the\n `in_features` should be: 2*3+4*5.\n\n Example::\n\n import torch\n from torchrec.fb.modules.deepfm import DeepFM\n from torchrec.fb.modules.mlp import LazyMLP\n batch_size = 3\n output_dim = 30\n # the input embedding are a torch.Tensor of [batch_size, num_embeddings, embedding_dim]\n input_embeddings = [\n torch.randn(batch_size, 2, 64),\n torch.randn(batch_size, 2, 32),\n ]\n dense_module = nn.Linear(192, output_dim)\n deepfm = DeepFM(dense_module=dense_module)\n deep_fm_output = deepfm(embeddings=input_embeddings)\n \"\"\"\n\n def __init__(\n self,\n dense_module: nn.Module,\n ) -> None:\n super().__init__()\n self.dense_module = dense_module\n\n def forward(\n self,\n embeddings: List[torch.Tensor],\n ) -> torch.Tensor:\n \"\"\"\n Args:\n embeddings (List[torch.Tensor]):\n The list of all embeddings (e.g. dense, common_sparse,\n specialized_sparse,\n embedding_features, raw_embedding_features) in the shape of::\n\n (batch_size, num_embeddings, embedding_dim)\n\n For the ease of operation, embeddings that have the same embedding\n dimension have the option to be stacked into a single tensor. 
For\n example, when we have 1 trained embedding with dimension=32, 5 native\n embeddings with dimension=64, and 3 dense features with dimension=16, we\n can prepare the embeddings list to be the list of::\n\n tensor(B, 1, 32) (trained_embedding with num_embeddings=1, embedding_dim=32)\n tensor(B, 5, 64) (native_embedding with num_embeddings=5, embedding_dim=64)\n tensor(B, 3, 16) (dense_features with num_embeddings=3, embedding_dim=32)\n\n .. note::\n `batch_size` of all input tensors need to be identical.\n\n Returns:\n torch.Tensor: output of `dense_module` with flattened and concatenated `embeddings` as input.\n \"\"\"\n\n # flatten each embedding to be [B, N, D] -> [B, N*D], then cat them all on dim=1\n deepfm_input = _get_flatten_input(embeddings)\n deepfm_output = self.dense_module(deepfm_input)\n return deepfm_output\n\n\nclass FactorizationMachine(nn.Module):\n r\"\"\"\n This is the Factorization Machine module, mentioned in the `DeepFM paper\n <https://arxiv.org/pdf/1703.04247.pdf>`_:\n\n This module does not cover the end-end functionality of the published paper.\n Instead, it covers only the FM part of the publication, and is used to learn\n 2nd-order feature interactions.\n\n To support modeling flexibility, we customize the key components as:\n\n * Different from the public paper, we change the input from raw sparse\n features to embeddings of the features. It allows flexibility in embedding\n dimensions and the number of embeddings, as long as all embedding tensors\n have the same batch size.\n\n The general architecture of the module is like::\n\n # 1 x 1 output\n # ^\n # pass into `dense_module`\n # ^\n # 1 x 90\n # ^\n # concat\n # ^\n # 1 x 20, 1 x 30, 1 x 40 list of embeddings\n\n Example::\n\n batch_size = 3\n # the input embedding are in torch.Tensor of [batch_size, num_embeddings, embedding_dim]\n input_embeddings = [\n torch.randn(batch_size, 2, 64),\n torch.randn(batch_size, 2, 32),\n ]\n fm = FactorizationMachine()\n output = fm(embeddings=input_embeddings)\n \"\"\"\n\n def __init__(\n self,\n ) -> None:\n super().__init__()\n\n def forward(\n self,\n embeddings: List[torch.Tensor],\n ) -> torch.Tensor:\n \"\"\"\n Args:\n embeddings: List[torch.Tensor]:\n The list of all embeddings (e.g. dense, common_sparse,\n specialized_sparse, embedding_features, raw_embedding_features) in the\n shape of::\n\n (batch_size, num_embeddings, embedding_dim)\n\n For the ease of operation, embeddings that have the same embedding\n dimension have the option to be stacked into a single tensor. For\n example, when we have 1 trained embedding with dimension=32, 5 native\n embeddings with dimension=64, and 3 dense features with dimension=16, we\n can prepare the embeddings list to be the list of::\n\n tensor(B, 1, 32) (trained_embedding with num_embeddings=1, embedding_dim=32)\n tensor(B, 5, 64) (native_embedding with num_embeddings=5, embedding_dim=64)\n tensor(B, 3, 16) (dense_features with num_embeddings=3, embedding_dim=32)\n\n NOTE:\n batch_size of all input tensors need to be identical.\n\n Returns:\n torch.Tensor: output of fm with flattened and concatenated `embeddings` as input. 
Expected to be [B, 1].\n \"\"\"\n\n # flatten each embedding to be [B, N, D] -> [B, N*D], then cat them all on dim=1\n fm_input = _get_flatten_input(embeddings)\n sum_of_input = torch.sum(fm_input, dim=1, keepdim=True)\n sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)\n square_of_sum = sum_of_input * sum_of_input\n cross_term = square_of_sum - sum_of_square\n cross_term = torch.sum(cross_term, dim=1, keepdim=True) * 0.5 # [B, 1]\n return cross_term\n",
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport unittest\nfrom dataclasses import dataclass\nfrom typing import Tuple, List, Optional, Dict\n\nimport torch\nimport torch.distributed as dist\nfrom torch import nn, optim\nfrom torchrec.distributed import DistributedModelParallel\nfrom torchrec.distributed.embedding_types import EmbeddingComputeKernel\nfrom torchrec.distributed.embedding_types import (\n SparseFeaturesList,\n)\nfrom torchrec.distributed.embeddingbag import (\n ShardedEmbeddingBagCollection,\n EmbeddingBagCollectionSharder,\n)\nfrom torchrec.distributed.test_utils.test_model import (\n TestSparseNN,\n ModelInput,\n TestEBCSharder,\n)\nfrom torchrec.distributed.train_pipeline import (\n TrainPipelineBase,\n TrainPipelineSparseDist,\n)\nfrom torchrec.distributed.types import (\n Awaitable,\n ParameterSharding,\n ShardedModuleContext,\n ShardingEnv,\n)\nfrom torchrec.distributed.types import (\n ShardingType,\n)\nfrom torchrec.modules.embedding_configs import EmbeddingBagConfig\nfrom torchrec.modules.embedding_modules import EmbeddingBagCollection\nfrom torchrec.optim.keyed import KeyedOptimizerWrapper\nfrom torchrec.sparse.jagged_tensor import KeyedJaggedTensor\nfrom torchrec.streamable import Pipelineable\nfrom torchrec.test_utils import get_free_port, init_distributed_single_host\n\n\nclass TestShardedEmbeddingBagCollection(ShardedEmbeddingBagCollection):\n def input_dist(\n self,\n ctx: ShardedModuleContext,\n features: KeyedJaggedTensor,\n ) -> Awaitable[SparseFeaturesList]:\n return super().input_dist(ctx, features)\n\n\nclass TestCustomEBCSharder(EmbeddingBagCollectionSharder[EmbeddingBagCollection]):\n def shard(\n self,\n module: EmbeddingBagCollection,\n params: Dict[str, ParameterSharding],\n env: ShardingEnv,\n device: Optional[torch.device] = None,\n ) -> TestShardedEmbeddingBagCollection:\n return TestShardedEmbeddingBagCollection(\n module, params, env, self.fused_params, device\n )\n\n def sharding_types(self, compute_device_type: str) -> List[str]:\n return [\n ShardingType.TABLE_WISE.value,\n ]\n\n def compute_kernels(\n self, sharding_type: str, compute_device_type: str\n ) -> List[str]:\n return [EmbeddingComputeKernel.DENSE.value]\n\n\n@dataclass\nclass ModelInputSimple(Pipelineable):\n float_features: torch.Tensor\n label: torch.Tensor\n\n def to(self, device: torch.device, non_blocking: bool) -> \"ModelInputSimple\":\n return ModelInputSimple(\n float_features=self.float_features.to(\n device=device, non_blocking=non_blocking\n ),\n label=self.label.to(device=device, non_blocking=non_blocking),\n )\n\n def record_stream(self, stream: torch.cuda.streams.Stream) -> None:\n self.float_features.record_stream(stream)\n self.label.record_stream(stream)\n\n\nclass TestModule(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.model = nn.Linear(10, 1)\n self.loss_fn = nn.BCEWithLogitsLoss()\n\n def forward(\n self, model_input: ModelInputSimple\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n pred = self.model(model_input.float_features)\n loss = self.loss_fn(pred, model_input.label)\n return (loss, pred)\n\n\nclass TrainPipelineBaseTest(unittest.TestCase):\n def setUp(self) -> None:\n self.device = torch.device(\"cuda:0\")\n torch.backends.cudnn.allow_tf32 = False\n torch.backends.cuda.matmul.allow_tf32 = False\n\n # pyre-fixme[56]: Pyre was not 
able to infer the type of argument\n @unittest.skipIf(\n torch.cuda.device_count() <= 1,\n \"Not enough GPUs, this test requires at least two GPUs\",\n )\n def test_equal_to_non_pipelined(self) -> None:\n model_cpu = TestModule()\n model_gpu = TestModule().to(self.device)\n model_gpu.load_state_dict(model_cpu.state_dict())\n optimizer_cpu = optim.SGD(model_cpu.model.parameters(), lr=0.01)\n optimizer_gpu = optim.SGD(model_gpu.model.parameters(), lr=0.01)\n data = [\n ModelInputSimple(\n float_features=torch.rand((10,)),\n label=torch.randint(2, (1,), dtype=torch.float32),\n )\n for b in range(5)\n ]\n dataloader = iter(data)\n pipeline = TrainPipelineBase(model_gpu, optimizer_gpu, self.device)\n\n for example in data[:-1]:\n optimizer_cpu.zero_grad()\n loss, pred = model_cpu(example)\n loss.backward()\n optimizer_cpu.step()\n\n pred_gpu = pipeline.progress(dataloader)\n\n self.assertEqual(pred_gpu.device, self.device)\n self.assertTrue(torch.isclose(pred_gpu.cpu(), pred))\n\n\nclass TrainPipelineSparseDistTest(unittest.TestCase):\n def setUp(self) -> None:\n os.environ[\"MASTER_ADDR\"] = str(\"localhost\")\n os.environ[\"MASTER_PORT\"] = str(get_free_port())\n self.pg = init_distributed_single_host(backend=\"gloo\", rank=0, world_size=1)\n\n num_features = 4\n num_weighted_features = 2\n\n self.tables = [\n EmbeddingBagConfig(\n num_embeddings=(i + 1) * 100,\n embedding_dim=(i + 1) * 4,\n name=\"table_\" + str(i),\n feature_names=[\"feature_\" + str(i)],\n )\n for i in range(num_features)\n ]\n self.weighted_tables = [\n EmbeddingBagConfig(\n num_embeddings=(i + 1) * 100,\n embedding_dim=(i + 1) * 4,\n name=\"weighted_table_\" + str(i),\n feature_names=[\"weighted_feature_\" + str(i)],\n )\n for i in range(num_weighted_features)\n ]\n\n self.device = torch.device(\"cuda:0\")\n\n def tearDown(self) -> None:\n super().tearDown()\n dist.destroy_process_group(self.pg)\n\n def _test_move_cpu_gpu_helper(\n self, distributed_model: DistributedModelParallel\n ) -> None:\n model_cpu = TestSparseNN(\n tables=self.tables, weighted_tables=self.weighted_tables\n )\n optimizer_cpu = optim.SGD(model_cpu.parameters(), lr=0.1)\n optimizer_distributed = KeyedOptimizerWrapper(\n dict(distributed_model.named_parameters()),\n lambda params: optim.SGD(params, lr=0.1),\n )\n pipeline = TrainPipelineSparseDist(\n distributed_model, optimizer_distributed, self.device\n )\n\n data = [\n ModelInput.generate(\n tables=self.tables,\n weighted_tables=self.weighted_tables,\n batch_size=1,\n world_size=1,\n num_float_features=10,\n )[0]\n for i in range(5)\n ]\n dataloader = iter(data)\n\n for example in data[:-2]:\n optimizer_cpu.zero_grad()\n loss, pred = model_cpu(example)\n loss.backward()\n optimizer_cpu.step()\n\n pred_gpu = pipeline.progress(dataloader)\n\n self.assertEqual(pred_gpu.device, self.device)\n self.assertEqual(pred_gpu.cpu().size(), pred.size())\n self.assertEqual(len(pipeline._pipelined_modules), 2)\n\n # pyre-fixme[56]: Pyre was not able to infer the type of argument\n @unittest.skipIf(\n torch.cuda.device_count() <= 1,\n \"Not enough GPUs, this test requires at least two GPUs\",\n )\n def test_move_cpu_gpu(self) -> None:\n unsharded_model = TestSparseNN(\n tables=self.tables,\n weighted_tables=self.weighted_tables,\n dense_device=self.device,\n sparse_device=torch.device(\"meta\"),\n )\n distributed_model = DistributedModelParallel(\n unsharded_model,\n env=ShardingEnv.from_process_group(self.pg),\n init_data_parallel=False,\n device=self.device,\n # pyre-ignore [6]\n sharders=[\n TestEBCSharder(\n 
sharding_type=ShardingType.TABLE_WISE.value,\n kernel_type=EmbeddingComputeKernel.DENSE.value,\n )\n ],\n )\n self._test_move_cpu_gpu_helper(distributed_model)\n\n # pyre-fixme[56]: Pyre was not able to infer the type of argument\n @unittest.skipIf(\n torch.cuda.device_count() <= 1,\n \"Not enough GPUs, this test requires at least two GPUs\",\n )\n def test_pipelining(self) -> None:\n unsharded_model = TestSparseNN(\n tables=self.tables,\n weighted_tables=self.weighted_tables,\n dense_device=self.device,\n sparse_device=torch.device(\"meta\"),\n )\n distributed_model = DistributedModelParallel(\n unsharded_model,\n env=ShardingEnv.from_process_group(self.pg),\n init_data_parallel=False,\n device=self.device,\n # pyre-fixme [6]\n sharders=[TestCustomEBCSharder()],\n )\n self._test_move_cpu_gpu_helper(distributed_model)\n",
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Set, Callable, Dict, List, Optional, Tuple, TypeVar, Any\n\nimport torch\nimport torch.distributed as dist # noqa\nfrom fbgemm_gpu.permute_pooled_embedding_modules import PermutePooledEmbeddings\nfrom torchrec.distributed.embedding_lookup import GroupedPooledEmbeddingsLookup\nfrom torchrec.distributed.embedding_sharding import (\n BaseEmbeddingDist,\n BaseSparseFeaturesDist,\n BaseEmbeddingLookup,\n)\nfrom torchrec.distributed.embedding_types import (\n ShardedEmbeddingTable,\n EmbeddingComputeKernel,\n SparseFeatures,\n BaseGroupedFeatureProcessor,\n)\nfrom torchrec.distributed.sharding.tw_sharding import (\n BaseTwEmbeddingSharding,\n TwPooledEmbeddingDist,\n TwSparseFeaturesDist,\n)\nfrom torchrec.distributed.types import (\n ShardingEnv,\n ShardedTensorMetadata,\n ShardMetadata,\n ParameterSharding,\n)\nfrom torchrec.modules.embedding_configs import EmbeddingTableConfig\nfrom torchrec.streamable import Multistreamable\n\nF = TypeVar(\"F\", bound=Multistreamable)\nT = TypeVar(\"T\")\n\n\nclass BaseCwEmbeddingSharding(BaseTwEmbeddingSharding[F, T]):\n \"\"\"\n base class for column-wise sharding\n \"\"\"\n\n def __init__(\n self,\n embedding_configs: List[\n Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]\n ],\n env: ShardingEnv,\n device: Optional[torch.device] = None,\n permute_embeddings: bool = False,\n ) -> None:\n super().__init__(\n embedding_configs,\n env,\n device,\n )\n self._permute_embeddings = permute_embeddings\n if self._permute_embeddings:\n self._init_combined_embeddings()\n\n def _init_combined_embeddings(self) -> None:\n \"\"\"\n Grabs the embedding names and dims from TwEmbeddingSharder.\n\n NOTE:\n This could have duplications if there are multiple shards from the same\n table on a rank. 
Later on we process these to combine shards together.\n \"\"\"\n\n embedding_names: List[str] = super().embedding_names()\n embedding_dims: List[int] = super().embedding_dims()\n\n embedding_shard_metadata: List[\n Optional[ShardMetadata]\n ] = super().embedding_shard_metadata()\n\n embedding_name_to_index_offset_tuples: Dict[str, List[Tuple[int, int]]] = {}\n for i, (name, metadata) in enumerate(\n zip(embedding_names, embedding_shard_metadata)\n ):\n if name not in embedding_name_to_index_offset_tuples:\n embedding_name_to_index_offset_tuples[name] = []\n embedding_name_to_index_offset_tuples[name].append(\n (i, metadata.shard_offsets[1] if metadata is not None else 0)\n )\n\n embedding_name_to_index: Dict[str, List[int]] = {}\n for name, index_offset_tuples in embedding_name_to_index_offset_tuples.items():\n embedding_name_to_index[name] = [\n idx_off_tuple[0]\n for idx_off_tuple in sorted(\n index_offset_tuples,\n key=lambda idx_off_tuple: idx_off_tuple[1],\n )\n ]\n\n combined_embedding_names: List[str] = []\n seen_embedding_names: Set[str] = set()\n\n for name in embedding_names:\n if name not in seen_embedding_names:\n combined_embedding_names.append(name)\n seen_embedding_names.add(name)\n\n combined_embedding_dims: List[int] = []\n\n embedding_order: List[int] = []\n for name in combined_embedding_names:\n combined_embedding_dims.append(\n sum([embedding_dims[idx] for idx in embedding_name_to_index[name]])\n )\n embedding_order.extend(embedding_name_to_index[name])\n\n self._embedding_names: List[str] = embedding_names\n self._embedding_dims: List[int] = embedding_dims\n self._embedding_order: List[int] = embedding_order\n\n self._combined_embedding_names: List[str] = combined_embedding_names\n self._combined_embedding_dims: List[int] = combined_embedding_dims\n\n def _shard(\n self,\n embedding_configs: List[\n Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]\n ],\n ) -> List[List[ShardedEmbeddingTable]]:\n world_size = self._pg.size()\n tables_per_rank: List[List[ShardedEmbeddingTable]] = [\n [] for i in range(world_size)\n ]\n for config in embedding_configs:\n # pyre-fixme [16]\n shards: List[ShardMetadata] = config[1].sharding_spec.shards\n\n # construct the global sharded_tensor_metadata\n global_metadata = ShardedTensorMetadata(\n shards_metadata=shards,\n size=torch.Size([config[0].num_embeddings, config[0].embedding_dim]),\n )\n\n # pyre-fixme [6]\n for i, rank in enumerate(config[1].ranks):\n tables_per_rank[rank].append(\n ShardedEmbeddingTable(\n num_embeddings=config[0].num_embeddings,\n embedding_dim=config[0].embedding_dim,\n name=config[0].name,\n embedding_names=config[0].embedding_names,\n data_type=config[0].data_type,\n feature_names=config[0].feature_names,\n pooling=config[0].pooling,\n is_weighted=config[0].is_weighted,\n has_feature_processor=config[0].has_feature_processor,\n local_rows=config[0].num_embeddings,\n local_cols=shards[i].shard_sizes[1],\n compute_kernel=EmbeddingComputeKernel(config[1].compute_kernel),\n local_metadata=shards[i],\n global_metadata=global_metadata,\n )\n )\n\n return tables_per_rank\n\n def embedding_dims(self) -> List[int]:\n return (\n self._combined_embedding_dims\n if self._permute_embeddings\n else super().embedding_dims()\n )\n\n def embedding_names(self) -> List[str]:\n return (\n self._combined_embedding_names\n if self._permute_embeddings\n else super().embedding_names()\n )\n\n\nclass CwPooledEmbeddingSharding(BaseCwEmbeddingSharding[SparseFeatures, torch.Tensor]):\n \"\"\"\n Shards embedding bags 
column-wise, i.e.. a given embedding table is entirely placed\n on a selected rank.\n \"\"\"\n\n def create_input_dist(\n self,\n device: Optional[torch.device] = None,\n ) -> BaseSparseFeaturesDist[SparseFeatures]:\n return TwSparseFeaturesDist(\n self._pg,\n self._id_list_features_per_rank(),\n self._id_score_list_features_per_rank(),\n device if device is not None else self._device,\n )\n\n def create_lookup(\n self,\n device: Optional[torch.device] = None,\n fused_params: Optional[Dict[str, Any]] = None,\n feature_processor: Optional[BaseGroupedFeatureProcessor] = None,\n ) -> BaseEmbeddingLookup:\n return GroupedPooledEmbeddingsLookup(\n grouped_configs=self._grouped_embedding_configs,\n grouped_score_configs=self._score_grouped_embedding_configs,\n fused_params=fused_params,\n pg=self._pg,\n device=device if device is not None else self._device,\n feature_processor=feature_processor,\n )\n\n def create_output_dist(\n self,\n device: Optional[torch.device] = None,\n ) -> BaseEmbeddingDist[torch.Tensor]:\n device = device if device is not None else self._device\n embedding_permute_op: Optional[PermutePooledEmbeddings] = None\n callbacks: Optional[List[Callable[[torch.Tensor], torch.Tensor]]] = None\n if self._permute_embeddings and self._embedding_order != list(\n range(len(self._embedding_order))\n ):\n assert len(self._embedding_order) == len(self._embedding_dims)\n embedding_permute_op = PermutePooledEmbeddings(\n self._embedding_dims,\n self._embedding_order,\n ).to(device=device)\n callbacks = [embedding_permute_op]\n return TwPooledEmbeddingDist(\n self._pg, self._dim_sum_per_rank(), device, callbacks\n )\n"
] | [
[
"torch.sum"
],
[
"torch.randint",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"torch.rand",
"torch.distributed.destroy_process_group",
"torch.device",
"torch.optim.SGD",
"torch.cuda.device_count"
],
[
"torch.Size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
richardsun-voyager/capsule-network | [
"349cec1caa9ab95ff4b3333c33d04b1bdb442f67"
] | [
"model.py"
] | [
"########################################\n#### Licensed under the MIT license ####\n########################################\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom numpy import prod\nimport capsules as caps\n\nclass CapsuleNetwork(nn.Module):\n\tdef __init__(self, img_shape, channels, primary_dim, num_classes, out_dim, num_routing, kernel_size=9):\n\t\tsuper(CapsuleNetwork, self).__init__()\n\t\tself.img_shape = img_shape\n\t\tself.num_classes = num_classes\n\n\t\tself.conv1 = nn.Conv2d(img_shape[0], channels, kernel_size, stride=1, bias=True)\n\t\tself.relu = nn.ReLU(inplace=True)\n\n\t\tself.primary = caps.PrimaryCapsules(channels, channels, primary_dim, kernel_size)\n\t\t\n\t\tprimary_caps = int(channels / primary_dim * ( img_shape[1] - 2*(kernel_size-1) ) * ( img_shape[2] - 2*(kernel_size-1) ) / 4)\n\t\tself.digits = caps.RoutingCapsules(primary_dim, primary_caps, num_classes, out_dim, num_routing)\n\n\t\tself.decoder = nn.Sequential(\n\t\t\tnn.Linear(out_dim * num_classes, 512),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Linear(512, 1024),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Linear(1024, int(prod(img_shape)) ),\n\t\t\tnn.Sigmoid()\n\t\t)\n\n\tdef forward(self, x):\n\t\tout = self.conv1(x)\n\t\tout = self.relu(out)\n\t\tout = self.primary(out)\n\t\tout = self.digits(out)\n\t\tpreds = torch.norm(out, dim=-1)\n\n\t\t# Reconstruct the *predicted* image\n\t\t_, max_length_idx = preds.max(dim=1)\t\n\t\ty = Variable(torch.sparse.torch.eye(self.num_classes))\n\t\tif torch.cuda.is_available():\n\t\t\ty = y.cuda()\n\n\t\ty = y.index_select(dim=0, index=max_length_idx).unsqueeze(2)\n\n\t\treconstructions = self.decoder( (out*y).view(out.size(0), -1) )\n\t\treconstructions = reconstructions.view(-1, *self.img_shape)\n\n\t\treturn preds, reconstructions\n"
] | [
[
"torch.norm",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.sparse.torch.eye",
"torch.cuda.is_available",
"numpy.prod",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pengyuanzhuo/Detection | [
"18ac491a41f31978216e198c4d3bf4862564789a"
] | [
"data/voc.py"
] | [
"\"\"\"VOC Dataset Classes\n\nOriginal author: Francisco Massa\nhttps://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py\n\nUpdated by: Ellis Brown, Max deGroot\n\"\"\"\nimport os\nimport torch\nimport torch.utils.data as data\nimport cv2\nimport numpy as np\nimport sys\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\nimport data.augmentations as aug\nimport data.collate as collate\n\n\nclass VOCDetection(data.Dataset):\n '''\n voc detection dataset class\n\n Args:\n root: string, VOCdevkit path\n image_set: list, imageset to use ('train', 'val', 'test')\n [('2007', 'trainval'), ('2012', 'trainval')]\n transform (callable, optional): transformation to perform on the\n input image\n keep_difficult: bool, keep difficult object or not\n default: False\n do_norm: bool, bbox / (w, h) or not\n default: True\n '''\n def __init__(self, root, image_set=[('2007', 'trainval'), ('2012', 'trainval')],\n transform=None, keep_difficult=False, do_norm=True):\n super(data.Dataset, self).__init__()\n self.classes = ('background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'\n )\n self.class_index_dict = dict(zip(self.classes, range(len(self.classes))))\n self.root = root\n self.image_set = image_set\n self.transform = transform\n self.image_list = []\n self.ann_list = []\n for year, dataset in image_set:\n subdir = os.path.join(self.root, 'VOC' + year)\n ann_dir = os.path.join(subdir, 'Annotations')\n img_dir = os.path.join(subdir, 'JPEGImages')\n for line in open(os.path.join(subdir, 'ImageSets', 'Main', dataset + '.txt')):\n self.image_list.append(os.path.join(img_dir, line.strip() + '.jpg'))\n self.ann_list.append(os.path.join(ann_dir, line.strip() + '.xml'))\n self.keep_difficult = keep_difficult\n self.do_norm = do_norm\n\n def __len__(self):\n return len(self.image_list)\n\n def __getitem__(self, index):\n '''\n return img, target\n target: [[xmin, ymin, xmax, ymax, label],\n ...,\n ...]\n '''\n # assert ann and img exist\n # TODO\n\n img = cv2.imread(self.image_list[index])\n h, w, c = img.shape\n img = img[:, :, ::-1]\n\n xmlroot = ET.parse(self.ann_list[index]).getroot()\n\n target = []\n for obj in xmlroot.findall('object'):\n difficult = int(obj.find('difficult').text) == 1\n if difficult and (not self.keep_difficult):\n continue\n classname = obj.find('name').text.lower().strip()\n classlabel = self.class_index_dict[classname]\n bndbox = obj.find('bndbox')\n xmin = int(bndbox.find('xmin').text) - 1\n ymin = int(bndbox.find('ymin').text) - 1\n xmax = int(bndbox.find('xmax').text) - 1\n ymax = int(bndbox.find('ymax').text) - 1\n if self.do_norm:\n xmin /= w\n xmax /= w\n ymin /= h\n ymax /= h\n target.append([xmin, ymin, xmax, ymax, classlabel])\n target = np.array(target, dtype=np.float32)\n\n if self.transform:\n img, bbox, label = self.transform(img, target[:, :4], target[:, 4:])\n target = np.hstack((bbox, label))\n\n return img, target\n"
] | [
[
"numpy.hstack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sthagen/pyvtreat | [
"c32e7ce6db11a2ccdd63e545b25028cbec03a3ff"
] | [
"pkg/tests/test_outcome_name_required.py"
] | [
"\nimport numpy\n\nimport pytest\nimport pandas\n\nimport vtreat # https://github.com/WinVector/pyvtreat\n\n\ndef test_outcome_name_required():\n\n numpy.random.seed(235)\n d = pandas.DataFrame(\n {\"x\": ['1', '1', '1', '2', '2', '2']})\n y = [1, 2, 3, 4, 5, 6]\n\n transform = vtreat.NumericOutcomeTreatment(\n params=vtreat.vtreat_parameters({'filter_to_recommended': False})\n )\n transform.fit_transform(d, y)\n with pytest.raises(Exception):\n transform.fit_transform(d)\n\n transform = vtreat.BinomialOutcomeTreatment(\n params=vtreat.vtreat_parameters({'filter_to_recommended': False}),\n outcome_target=3)\n transform.fit_transform(d, y)\n with pytest.raises(Exception):\n transform.fit_transform(d)\n\n transform = vtreat.vtreat_api.MultinomialOutcomeTreatment(\n params=vtreat.vtreat_parameters({'filter_to_recommended': False})\n )\n transform.fit_transform(d, y)\n with pytest.raises(Exception):\n transform.fit_transform(d)\n"
] | [
[
"numpy.random.seed",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
angelikakw/e-commerce-reviews | [
"109ec5fb4e74bf37d4fd445c6fc3f2284fc1b6ba"
] | [
"main.py"
] | [
"import pandas as pd\nfrom dash_app import show_dashboard\n\n\ndef read_data():\n data = pd.read_csv('Womens Clothing E-Commerce Reviews.csv', index_col=0)\n return data\n\n\ndef main():\n data = read_data()\n show_dashboard(data)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
arlanschouwstra/Robotica-Groep-11 | [
"17cac9527c3cc3f94f5a8ab11741cb5e546c85c1"
] | [
"camera/shapedetection/colorred.py"
] | [
"import cv2\nimport numpy as np\nimport imutils\nfrom shapedetector import ShapeDetector\n\ncap = cv2.VideoCapture(0)\n\ndef findContours(mask, frame):\n # find contours in the treshholded frame and initialize the shape detector\n ratio = 1.0\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n sd = ShapeDetector()\n\n for c in cnts:\n #compute the center of the contour, then detect the rectangle of the\n # shape using only the contour\n M = cv2.moments(c)\n if M[\"m10\"] == 0.0 or M[\"m00\"] == 0.0 or M[\"m01\"] == 0.0:\n continue\n cX = int((M[\"m10\"] / M[\"m00\"]) * ratio)\n cY = int((M[\"m01\"] / M[\"m00\"]) * ratio)\n shape = sd.detect(c)\n if (shape != \"rectangle\"):\n continue\n \n # multiply the contour (x, y)-coordinates by the resize ratio,\n # then draw the contours and the name of the shape on the frame\n c = c.astype(\"float\")\n c *= ratio\n c = c.astype(\"int\")\n cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)\n cv2.putText(frame, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0),2)\n\n\nwhile True:\n _, frame = cap.read()\n \n #blurrs the video so more of the expected color is shown\n blur = cv2.GaussianBlur(frame, (7, 7), 3)\n \n #color wheel 1/4th degrees changed to the left\n hsv = cv2.cvtColor(blur,cv2.COLOR_RGB2HSV)\n lower_red = np.array([220 * 0.5, 50 * 2.55,40 * 2.55])\n upper_red = np.array([250 * 0.5, 100 * 2.55,100 * 2.55])\n lower_green = np.array([95 * 0.5, 32 * 2.55, 22 * 2.55])\n upper_green = np.array([125 * 0.5, 100 * 2.55, 100 * 2.55])\n lower_yellow = np.array([185 * 0.5, 55 * 2.55, 60 * 2.55])\n upper_yellow = np.array([220 * 0.5, 100 * 2.55, 100 * 2.55])\n lower_orange = np.array([200 * 0.5, 60 * 2.55, 60 * 2.55])\n upper_orange = np.array([250 * 0.5, 100 * 2.55, 100 * 2.55])\n \n mask1 = cv2.inRange(hsv, lower_red, upper_red)\n mask2 = cv2.inRange(hsv, lower_green, upper_green)\n mask3 = cv2.inRange(hsv, lower_yellow, upper_yellow)\n mask4 = cv2.inRange(hsv, lower_orange, upper_orange)\n \n # combining the masks together to see all blocks\n mergeMask = cv2.bitwise_or(mask1, mask2)\n mergeMask = cv2.bitwise_or(mergeMask, mask3)\n mergeMask = cv2.bitwise_or(mergeMask, mask4)\n \n #checking if the shape is rectangle\n findContours(mask1,blur)\n findContours(mask2,blur)\n findContours(mask3,blur)\n findContours(mask4,blur)\n \n #Showing the esolution\n res = cv2.bitwise_and(blur, blur, mask = mergeMask)\n \n #show video screens\n cv2.imshow('blurredFrame', blur)\n cv2.imshow('res', res)\n #cv2.imshow('mask', mask)\n \n #Closing the video capturing\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\ncv2.destroyAllWindows()\ncap.release()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Vipin3112/arviz | [
"000b2823b3b008845c2a2f6d4117acab034ebae5"
] | [
"arviz/plots/jointplot.py"
] | [
"\"\"\"Joint scatter plot of two variables.\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom ..data import convert_to_dataset\nfrom .kdeplot import plot_kde\nfrom .plot_utils import _scale_fig_size, get_bins, xarray_var_iter, make_label, get_coords\nfrom ..utils import _var_names\n\n\ndef plot_joint(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n kind=\"scatter\",\n gridsize=\"auto\",\n contour=True,\n fill_last=True,\n joint_kwargs=None,\n marginal_kwargs=None,\n):\n \"\"\"\n Plot a scatter or hexbin of two variables with their respective marginals distributions.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : Iter of 2 e.g. (var_1, var_2)\n Variables to be plotted, two variables are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n kind : str\n Type of plot to display (scatter, kde or hexbin)\n gridsize : int or (int, int), optional.\n The number of hexagons in the x-direction. Ignored when hexbin is False. See `plt.hexbin`\n for details\n contour : bool\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. Defaults to True.\n joint_kwargs : dicts, optional\n Additional keywords modifying the join distribution (central subplot)\n marginal_kwargs : dicts, optional\n Additional keywords modifying the marginals distributions (top and right subplot)\n\n Returns\n -------\n axjoin : matplotlib axes, join (central) distribution\n ax_hist_x : matplotlib axes, x (top) distribution\n ax_hist_y : matplotlib axes, y (right) distribution\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n if kind not in valid_kinds:\n raise ValueError(\n (\"Plot type {} not recognized.\" \"Plot type must be in {}\").format(kind, valid_kinds)\n )\n\n data = convert_to_dataset(data, group=\"posterior\")\n\n if coords is None:\n coords = {}\n\n var_names = _var_names(var_names, data)\n\n plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))\n\n if len(plotters) != 2:\n raise Exception(\n \"Number of variables to be plotted must 2 (you supplied {})\".format(len(plotters))\n )\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)\n\n if joint_kwargs is None:\n joint_kwargs = {}\n\n if marginal_kwargs is None:\n marginal_kwargs = {}\n\n # Instantiate figure and grid\n fig, _ = plt.subplots(0, 0, figsize=figsize, constrained_layout=True)\n grid = plt.GridSpec(4, 4, hspace=0.1, wspace=0.1, figure=fig)\n\n # Set up main plot\n axjoin = fig.add_subplot(grid[1:, :-1])\n\n # Set up top KDE\n ax_hist_x = fig.add_subplot(grid[0, :-1], sharex=axjoin)\n ax_hist_x.tick_params(labelleft=False, labelbottom=False)\n\n # Set up right KDE\n ax_hist_y = fig.add_subplot(grid[1:, -1], sharey=axjoin)\n ax_hist_y.tick_params(labelleft=False, labelbottom=False)\n\n # Set labels for axes\n x_var_name = make_label(plotters[0][0], plotters[0][1])\n y_var_name = make_label(plotters[1][0], plotters[1][1])\n\n axjoin.set_xlabel(x_var_name, fontsize=ax_labelsize)\n axjoin.set_ylabel(y_var_name, fontsize=ax_labelsize)\n 
axjoin.tick_params(labelsize=xt_labelsize)\n\n # Flatten data\n x = plotters[0][2].flatten()\n y = plotters[1][2].flatten()\n\n if kind == \"scatter\":\n axjoin.scatter(x, y, **joint_kwargs)\n elif kind == \"kde\":\n plot_kde(x, y, contour=contour, fill_last=fill_last, ax=axjoin, **joint_kwargs)\n else:\n if gridsize == \"auto\":\n gridsize = int(len(x) ** 0.35)\n axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)\n axjoin.grid(False)\n\n for val, ax, orient, rotate in (\n (x, ax_hist_x, \"vertical\", False),\n (y, ax_hist_y, \"horizontal\", True),\n ):\n if val.dtype.kind == \"i\":\n bins = get_bins(val)\n ax.hist(\n val, bins=bins, align=\"left\", density=True, orientation=orient, **marginal_kwargs\n )\n else:\n marginal_kwargs.setdefault(\"plot_kwargs\", {})\n marginal_kwargs[\"plot_kwargs\"][\"linewidth\"] = linewidth\n plot_kde(val, rotated=rotate, ax=ax, **marginal_kwargs)\n\n ax_hist_x.set_xlim(axjoin.get_xlim())\n ax_hist_y.set_ylim(axjoin.get_ylim())\n\n return axjoin, ax_hist_x, ax_hist_y\n"
] | [
[
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuga-n/ModelLearner | [
"3193efd5eb15172ba8231a34829942040fcb0fc5",
"3193efd5eb15172ba8231a34829942040fcb0fc5"
] | [
"network_model/builder/pytorch_builder.py",
"DataIO/data_loader.py"
] | [
"# -*- coding: utf-8 -*-\nimport keras.engine.training\nfrom typing import Callable\nfrom typing import Tuple\nfrom typing import List\nfrom typing import Union\nfrom util_types import types_of_loco\nfrom network_model.distillation.distillation_model_builder import DistllationModelIncubator\nfrom network_model.build_model import builder_pt, builder_with_merge\nfrom keras.callbacks import Callback\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim import SGD\nfrom torch.nn.modules.loss import _Loss\nfrom torch.nn import CrossEntropyLoss, Module\nfrom network_model.wrapper.pytorch.model_pt import ModelForPytorch\nfrom model_merger.pytorch.proc.distance.calculator import L1Norm\nfrom model_merger.pytorch.proc.distance.abs_calculator import AbstractDistanceCaluclator\nfrom model_merger.pytorch.proc.loss.calculator import AAEUMLoss\nfrom model_merger.pytorch.proc.loss.abstract_calculator import AbstractLossCalculator\nfrom model_merger.pytorch.proc.shiamese_loss import SiameseLoss, SiameseLossForInceptionV3\nfrom model_merger.pytorch.siamese import SiameseNetworkPT\n\n\nModelBuilderResult = Union[keras.engine.training.Model, List[Callback]]\n\nModelBuilder = Union[Callable[[int], ModelBuilderResult],\n Callable[[Union[str, Tuple[str, str]]], keras.engine.training.Model],\n DistllationModelIncubator]\n\nOptimizerBuilder = Callable[[Module], Optimizer]\n\n\ndef optimizer_builder(optimizer, **kwargs):\n def build(base_model: Module):\n kwargs[\"params\"] = base_model.parameters()\n return optimizer(**kwargs)\n return build\n\n\ndefault_optimizer_builder = optimizer_builder(SGD)\n\n\nclass PytorchModelBuilder(object):\n\n def __init__(self,\n img_size: types_of_loco.input_img_size = 28,\n channels: int = 3,\n model_name: str = \"model1\",\n opt_builder: OptimizerBuilder = default_optimizer_builder,\n loss: _Loss = None,\n decide_dataset_generator=None,\n nearest_data_ave_num=1,\n will_calc_rate_real_data_train=False):\n self.__img_size = img_size\n self.__channels = channels\n self.__model_name = model_name\n self.__opt_builder = opt_builder\n self.__loss = loss\n self.__decide_dataset_generator = decide_dataset_generator\n self.__nearest_data_ave_num = nearest_data_ave_num\n self.__will_calc_rate_real_data_train = will_calc_rate_real_data_train\n\n def build_raw_model(self, model_builder_input) -> torch.nn.Module:\n if self.__model_name == \"tempload\":\n return torch.jit.load(model_builder_input)\n return builder_pt(model_builder_input, self.__img_size, self.__model_name)\n\n def build_model_builder_wrapper(self, model_builder_input):\n base_model = self.build_raw_model(model_builder_input)\n optimizer = self.__opt_builder(base_model)\n return ModelForPytorch.build_wrapper(base_model,\n optimizer,\n self.__loss,\n decide_dataset_generator=self.__decide_dataset_generator,\n nearest_data_ave_num=self.__nearest_data_ave_num,\n will_calc_rate_real_data_train=self.__will_calc_rate_real_data_train)\n\n def __call__(self, model_builder_input):\n return self.build_model_builder_wrapper(model_builder_input)\n\n\nclass PytorchSiameseModelBuilder(PytorchModelBuilder):\n\n def __init__(self,\n q: float,\n img_size: types_of_loco.input_img_size = 28,\n channels: int = 3,\n model_name: str = \"model1\",\n opt_builder: OptimizerBuilder = default_optimizer_builder,\n loss_calculator: AbstractLossCalculator = None,\n calc_distance: AbstractDistanceCaluclator=L1Norm(),\n is_inceptionv3: bool = False,\n decide_dataset_generator=None,\n nearest_data_ave_num=1,\n 
will_calc_rate_real_data_train=False):\n use_loss_calculator = AAEUMLoss(q) if loss_calculator is None else loss_calculator\n loss = SiameseLossForInceptionV3(calc_distance, use_loss_calculator) if is_inceptionv3 else SiameseLoss(calc_distance, use_loss_calculator)\n super(PytorchSiameseModelBuilder, self).__init__(img_size,\n channels,\n model_name,\n opt_builder,\n loss,\n decide_dataset_generator,\n nearest_data_ave_num,\n will_calc_rate_real_data_train\n )\n\n def build_raw_model(self, model_builder_input) -> torch.nn.Module:\n original_model = super(PytorchSiameseModelBuilder, self).build_raw_model(model_builder_input)\n return SiameseNetworkPT(original_model)\n\n",
"# -*- coding: utf-8 -*-\nfrom keras.utils import np_utils\nimport os\nimport cv2\nimport numpy as np\nfrom typing import List\nfrom util_types import two_dim\nfrom typing import Optional\nfrom numba import jit\nfrom enum import Enum\n\nimg_size, size_converter = two_dim.init_pair_type(int)\n\n\nclass NormalizeType(Enum):\n NotNormalize = 0\n Div255 = 1\n Div127_5 = 2\n\n\ndef is_image(file_path:str) -> bool:\n \"\"\"\n 拡張子から画像ファイルかどうか判定\n :param file_path:\n :return:\n \"\"\"\n root, ext = os.path.splitext(file_path)\n return (ext == \".jpg\") or (ext == \".jpeg\") or (ext == \".JPG\") or (ext == \".JPEG\") or (ext == \".jfif\")\n\n\ndef count_data_num_in_dir(root_dir: str):\n path_set, label_set, class_names, class_num = load_dataset_path(root_dir)\n return len(path_set)\n\n\ndef load_dataset_path(root_dir: str):\n \"\"\"\n 画像データのパスを読み込む\n :param root_dir: 画像データの格納されているルートディレクトリ。直下に存在するディレクトリ名が各画像のクラス名に\n :return: 画像データのパスの配列とラベルの配列とクラスの総数のタプル\n \"\"\"\n class_names = os.listdir(root_dir)\n print(\"all classes\", class_names)\n encoder = label_encoder(class_names)\n result_path_set = []\n result_label_set = []\n for class_name in class_names:\n class_dir = os.path.join(root_dir, class_name)\n class_path_set = [os.path.join(class_dir, data_name) for data_name in os.listdir(class_dir)\n if is_image(os.path.join(class_dir, data_name))]\n label_converted = [encoder(class_name) for index in range(len(class_path_set))]\n result_path_set.extend(class_path_set)\n result_label_set.extend(label_converted)\n print(\"class\", class_name, \"loaded data_num\", len(class_path_set))\n return np.array(result_path_set), np.array(result_label_set), class_names, len(class_names)\n\n\ndef load_dataset(root_dir: str,\n normalize_type: NormalizeType = NormalizeType.Div255,\n img_resize_val: Optional[img_size] = None,\n color: str = \"RGB\"):\n \"\"\"\n 画像データを読み込む\n :param root_dir: 画像データの格納されているルートディレクトリ。直下に存在するディレクトリ名が各画像のクラス名に\n :param normalize_type: どのように正規化するか\n :param img_resize_val: 画像のサイズをリサイズする際のサイズ 指定しなければオリジナルのサイズのまま読み込み\n :param color: グレースケールかカラーで読み込むか デフォルトではカラー(RGB)\n :return: numpy形式の画像データの配列とラベルの配列とクラスの総数のタプル\n \"\"\"\n class_names = os.listdir(root_dir)\n print(\"all classes\", class_names)\n encoder = label_encoder(class_names)\n result_img_set = []\n result_label_set = []\n for class_name in class_names:\n got_data = load_data_in_dir(os.path.join(root_dir, class_name), normalize_type, img_resize_val, color)\n label_converted = [encoder(class_name) for index in range(len(got_data))]\n result_img_set.extend(got_data)\n result_label_set.extend(label_converted)\n print(\"class\", class_name, \"loaded data_num\", len(got_data))\n return np.array(result_img_set), np.array(result_label_set), class_names, len(class_names)\n\n\ndef load_data_in_dir(dir_path: str,\n normalize_type: NormalizeType = NormalizeType.Div255,\n img_resize_val: Optional[img_size] = None,\n color: str = \"RGB\"):\n \"\"\"\n 指定したディレクトリに存在するデータを読み込む\n :param dir_path: 画像データの格納されているディレクトリ。\n :param normalize_type: どのように正規化するか\n :param img_resize_val: 画像のサイズをリサイズする際のサイズ 指定しなければオリジナルのサイズのまま読み込み\n :param color: グレースケールかカラーで読み込むか デフォルトではカラー(RGB)\n :return: numpy形式の画像データの配列\n \"\"\"\n print(\"load\", dir_path)\n img_path_set = [os.path.join(dir_path, data_name) for data_name in os.listdir(dir_path)]\n img_set = [load_img(img_path, img_resize_val, color) for img_path in img_path_set]\n if normalize_type == NormalizeType.NotNormalize:\n return np.array(img_set)\n return normalise_img_set(np.array(img_set), 
normalize_type)\n\n\n@jit\ndef load_img(img_path: str, img_resize_val: Optional[img_size] = None, color: str = \"RGB\"):\n \"\"\"\n 指定されたパスの画像ファイルを読み込む\n :param img_path: 画像ファイル\n :param img_resize_val: 画像のサイズをリサイズする際のサイズ 指定しなければオリジナルのサイズのまま読み込み\n :param color: グレースケールかカラーで読み込むか デフォルトではカラー(RGB)\n :return:\n \"\"\"\n raw_img = cv2.imread(img_path)\n img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB) if color == \"RGB\" else cv2.cvtColor(raw_img, cv2.COLOR_RGB2GRAY)\n if img_resize_val is None:\n return img\n resize_val = size_converter(img_resize_val)\n return cv2.resize(img, resize_val)\n\n\ndef label_encoder(class_set: List[str]):\n \"\"\"\n ベースとなるクラスのリストを与えてエンコードする関数を返す\n :param class_set: ベースとなるクラスのリスト\n :return: keras形式でエンコードされたラベルのインデックスを返す関数\n \"\"\"\n @jit\n def encode(base_class: str):\n \"\"\"\n クラスとなる文字列を与えてkeras形式でエンコードされたクラスのインデックスを返す\n :param base_class: ベースとなるクラス名\n :return: エンコードされたクラス名\n \"\"\"\n label_encoded = class_set.index(base_class)\n return np_utils.to_categorical(label_encoded, len(class_set))\n return encode\n\n\n@jit\ndef normalise_img(img: np.ndarray, normalize_type: NormalizeType = NormalizeType.Div255) -> np.ndarray:\n \"\"\"\n 画像を正規化\n :param img: 正規化する対象\n :param normalize_type: どのように正規化するか\n :return: 正規化後の配列\n \"\"\"\n if NormalizeType(normalize_type) is NormalizeType.Div127_5:\n return (img.astype(np.float32) - 127.5) / 127.5\n if NormalizeType(normalize_type) is NormalizeType.Div255:\n return (img.astype(np.float32)) / 255.0\n return img.astype(np.float32)\n \n\ndef normalise_img_set(img_set: np.ndarray, normalize_type: NormalizeType = NormalizeType.Div255) -> np.ndarray:\n \"\"\"\n 画像を入力するために正規化\n :param img_set: ベースになるデータ\n :param normalize_type: どのように正規化するか\n :return: 正規化後のデータ\n \"\"\"\n return np.array([normalise_img(img, normalize_type) for img in img_set])\n\n\n@jit\ndef sampling_real_data_set(batch_num: int, img_set: np.ndarray) -> np.ndarray:\n \"\"\"\n 実際のデータセットからbatch_num分だけデータを復元抽出する\n :param batch_num: データを抽出する数\n :param img_set: 元となるデータセット\n :return: 抽出されたデータセット\n \"\"\"\n chosen_id_set = np.random.randint(0, img_set.shape[0], batch_num)\n return img_set[chosen_id_set]\n"
] | [
[
"torch.jit.load"
],
[
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wikeex/tensorflow-learning | [
"a6ab7c99455711e9f3c015e0abb04fa58342e0cb"
] | [
"tensorflow1.x/lstm_test.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport reader\nDATA_PATH = 'E:/datasets/ptb'\nVOCAB_SIZE = 10000\n\nHIDDEN_SIZE = 200 # lstm隐含层神经元数\nNUM_LAYERS = 2 # lstm结构层数\nLEARNING_RATE = 1.0 # 学习率\nKEEP_PROB = 0.5 # Dropout保留率\nMAX_GRAD_NORM = 5 # 控制梯度膨胀的系数\n\nTRAIN_BATCH_SIZE = 20 # 训练batch尺寸\nTRAIN_NUM_STEP = 35 # 训练数据的截断长度\n\nEVAL_BATCH_SIZE = 1 # 测试batch尺寸\nEVAL_NUM_STEP = 1 # 测试数据的截断长度\nNUM_EPOCH = 2 # 训练轮数\n\n\nclass PTBModel:\n def __init__(self, is_training, batch_size, num_steps):\n self.batch_size = batch_size # 语料库分集\n self.num_steps = num_steps # 时间步\n\n self.input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) # 输入数据placeholder\n self.targets = tf.placeholder(tf.int32, [batch_size, num_steps]) # 输出数据placeholder\n\n cells = []\n for _ in range(NUM_LAYERS):\n # 基本lstm单元,隐含状态数和输出特征维度都为HIDDEN_SIZE\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=HIDDEN_SIZE)\n if is_training:\n # 每个lstm单元外包裹一个DropoutWrapper\n lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=KEEP_PROB)\n\n cells.append(lstm_cell)\n cell = tf.contrib.rnn.MultiRNNCell(cells) # 构建多层rnn网络结构\n\n self.initial_state = cell.zero_state(batch_size, tf.float32) # 初始化网络参数为0\n embedding = tf.get_variable(\"embedding\", [VOCAB_SIZE, HIDDEN_SIZE]) # 创建词嵌入变量\n\n inputs = tf.nn.embedding_lookup(embedding, self.input_data) # 单词索引转化为词向量\n\n if is_training:\n inputs = tf.nn.dropout(inputs, KEEP_PROB) # 训练时执行dropout操作\n # 定义输出层\n outputs = [] # 定义lstm输出列表\n state = self.initial_state # 保存不同batch中lstm的状态,初始化为0\n with tf.variable_scope('RNN'):\n for time_step in range(num_steps):\n if time_step > 0:\n tf.get_variable_scope().reuse_variables() # 对下面的变量进行复用\n cell_output, state = cell(inputs[:, time_step, :], state) # 输入数据开始训练,state为历史信息\n outputs.append(cell_output) # 每个时间步的输出添加到列表中\n output = tf.reshape(tf.concat(outputs, 1), [-1, HIDDEN_SIZE]) # 将输出列表拼接成张量\n # 定义softmax层\n softmax_weight = tf.get_variable('softmax_w', [HIDDEN_SIZE, VOCAB_SIZE]) #\n softmax_bias = tf.get_variable('softmax_b', [VOCAB_SIZE])\n\n logits = tf.matmul(output, softmax_weight) + softmax_bias\n # 定义损失函数\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [logits], # 表示预测分类的置信度\n [tf.reshape(self.targets, [-1])], # 表示预期目标为one-hot类型\n [tf.ones([batch_size * num_steps], dtype=tf.float32)] # 各类损失计算权重均为1\n )\n self.cost = tf.reduce_sum(loss) / batch_size # 求得每batch的损失\n self.final_state = state # 更新整个lstm网络状态\n\n if not is_training:\n return\n trainable_variables = tf.trainable_variables() # 得到所有trainable=True的变量\n\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, trainable_variables), MAX_GRAD_NORM) # 梯度裁剪\n optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE) # 梯度下降优化器\n self.train_op = optimizer.apply_gradients(zip(grads, trainable_variables)) # 优化操作应用到变量上\n\n\ndef run_epoch(session, model, data, train_op, output_log, epoch_size):\n total_costs = 0.0 # 整体代价\n iters = 0 # 迭代次数\n state = session.run(model.initial_state) # 初始化模型状态\n\n for step in range(epoch_size):\n x, y = session.run(data) # 将训练数据拆分成训练部分和标签部分\n cost, state, _ = session.run(\n [model.cost, model.final_state, train_op],\n {model.input_data: x, model.targets: y, model.initial_state: state}\n ) # 开始训练\n total_costs += cost # 整体代价\n iters += model.num_steps #\n\n if output_log and step % 100 == 0:\n with open('lstm_run_recode.txt', 'a') as f:\n f.write('After %d steps, perplexity is %.3f\\n' % (step, np.exp(total_costs / iters)))\n print('After %d steps, perplexity is %.3f' % (step, np.exp(total_costs / iters)))\n return 
np.exp(total_costs / iters) # 计算混乱度\n\n\ndef main():\n train_data, valid_data, test_data, _ = reader.ptb_raw_data(DATA_PATH) # 读取数据集中的数据\n\n train_data_len = len(train_data) # 计算数据长度\n train_batch_len = train_data_len # 计算batch长度\n train_epoch_size = (train_batch_len - 1) # 计算该epoch训练次数\n\n valid_data_len = len(valid_data)\n valid_batch_len = valid_data_len\n valid_epoch_size = (valid_batch_len - 1)\n\n test_data_len = test_batch_len = len(test_data)\n test_epoch_size = (test_batch_len - 1)\n\n initializer = tf.random_uniform_initializer(-0.05, 0.05) # 随机数初始化\n with tf.variable_scope('language_model', reuse=None, initializer=initializer):\n train_model = PTBModel(True, TRAIN_BATCH_SIZE, TRAIN_NUM_STEP) # 实例化训练模型\n\n with tf.variable_scope('language_model', reuse=True, initializer=initializer):\n eval_model = PTBModel(False, EVAL_BATCH_SIZE, EVAL_NUM_STEP) # 实例化评估模型\n\n with tf.Session() as session:\n tf.global_variables_initializer().run()\n\n train_queue = reader.ptb_producer(train_data, train_model.batch_size, train_model.num_steps) # 生成训练数据序列\n eval_queue = reader.ptb_producer(valid_data, eval_model.batch_size, eval_model.num_steps) # 生成评估数据序列\n test_queue = reader.ptb_producer(test_data, eval_model.batch_size, eval_model.num_steps) # 生成测试数据序列\n\n coord = tf.train.Coordinator() # 管理多线程的协调器\n threads = tf.train.start_queue_runners(sess=session, coord=coord) # 启动多线程\n\n for i in range(NUM_EPOCH):\n with open('lstm_run_recode.txt', 'a') as f:\n f.write('In iteration: %d\\n' % (i + 1))\n print('In iteration: %d' % (i + 1))\n run_epoch(session, train_model, train_queue, train_model.train_op, True, train_epoch_size) # 训练模型\n\n valid_perplexity = run_epoch(session, eval_model, eval_queue, tf.no_op(), False, valid_epoch_size) # 评估\n with open('lstm_run_recode.txt', 'a') as f:\n f.write('In iteration: %d\\n' % (i + 1))\n print('Epoch: %d Validation Perplexity: %.3f' % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, eval_model, test_queue, tf.no_op(), False, test_epoch_size) # 测试\n with open('lstm_run_recode.txt', 'a') as f:\n f.write('In iteration: %d\\n' % (i + 1))\n print('Test Perplexity: %.3f' % test_perplexity)\n\n coord.request_stop() # 请求停止多线程\n coord.join(threads) # 直到所有线程结束\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.reduce_sum",
"numpy.exp",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.nn.dropout",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.matmul",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.no_op",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.train.start_queue_runners",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
pkol/metaworld | [
"718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9",
"718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9",
"718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9"
] | [
"metaworld/policies/sawyer_window_open_v2_policy.py",
"metaworld/envs/mujoco/sawyer_xyz/sawyer_disassemble_peg.py",
"metaworld/envs/mujoco/sawyer_xyz/sawyer_basketball_v2.py"
] | [
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerWindowOpenV2Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_xyz': obs[:3],\n 'wndw_xyz': obs[3:],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_pow': 3\n })\n\n action['delta_pos'] = move(o_d['hand_xyz'], to_xyz=self._desired_xyz(o_d), p=25.)\n action['grab_pow'] = 1.\n\n return action.array\n\n @staticmethod\n def _desired_xyz(o_d):\n pos_curr = o_d['hand_xyz']\n pos_wndw = o_d['wndw_xyz']\n pos_wndw += np.array([-0.03, -0.03, -0.1])\n\n if np.linalg.norm(pos_curr[:2] - pos_wndw[:2]) > 0.04:\n return pos_wndw + np.array([0., 0., 0.3])\n elif abs(pos_curr[2] - pos_wndw[2]) > 0.02:\n return pos_wndw\n else:\n return pos_wndw + np.array([0.1, 0., 0.])\n",
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv\n\n\nclass SawyerNutDisassembleEnv(SawyerXYZEnv):\n def __init__(self, random_init=True):\n\n liftThresh = 0.05\n hand_low = (-0.5, 0.40, 0.05)\n hand_high = (0.5, 1, 0.5)\n obj_low = (0.1, 0.75, 0.02)\n obj_high = (0., 0.85, 0.02)\n goal_low = (-0.1, 0.75, 0.17)\n goal_high = (0.1, 0.85, 0.17)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.random_init = random_init\n\n self.init_config = {\n 'obj_init_angle': 0.3,\n 'obj_init_pos': np.array([0, 0.8, 0.02]),\n 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),\n }\n self.goal = np.array([0, 0.8, 0.17])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self.liftThresh = liftThresh\n self.max_path_length = 200\n\n self.action_space = Box(\n np.array([-1, -1, -1, -1]),\n np.array([1, 1, 1, 1]),\n )\n\n self.obj_and_goal_space = Box(\n np.hstack((obj_low, goal_low)),\n np.hstack((obj_high, goal_high)),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n self.observation_space = Box(\n np.hstack((self.hand_low, obj_low,)),\n np.hstack((self.hand_high, obj_high,)),\n )\n\n self.reset()\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_assembly_peg.xml')\n\n def step(self, action):\n self.set_xyz_action(action[:3])\n self.do_simulation([action[-1], -action[-1]])\n # The marker seems to get reset every time you do a simulation\n self._set_goal_marker(self._state_goal)\n ob = self._get_obs()\n obs_dict = self._get_obs_dict()\n reward, _, reachDist, pickRew, _, placingDist, success = self.compute_reward(action, obs_dict)\n self.curr_path_length += 1\n info = {'reachDist': reachDist, 'pickRew':pickRew, 'epRew' : reward, 'goalDist': placingDist, 'success': success}\n info['goal'] = self.goal\n\n return ob, reward, False, info\n\n def _get_obs(self):\n hand = self.get_endeff_pos()\n graspPos = self.get_site_pos('RoundNut-8')\n flat_obs = np.concatenate((hand, graspPos))\n\n return np.concatenate([flat_obs,])\n\n def _get_obs_dict(self):\n hand = self.get_endeff_pos()\n graspPos = self.get_site_pos('RoundNut-8')\n objPos = self.get_body_com('RoundNut')\n flat_obs = np.concatenate((hand, graspPos))\n\n return dict(\n state_observation=flat_obs,\n state_desired_goal=self._state_goal,\n state_achieved_goal=objPos,\n )\n\n def _set_goal_marker(self, goal):\n self.data.site_xpos[self.model.site_name2id('pegTop')] = (\n goal[:3]\n )\n\n def reset_model(self):\n self._reset_hand()\n self._state_goal = self.goal.copy()\n self.obj_init_pos = np.array(self.init_config['obj_init_pos'])\n self.obj_init_angle = self.init_config['obj_init_angle']\n\n if self.random_init:\n goal_pos = np.random.uniform(\n self.obj_and_goal_space.low,\n self.obj_and_goal_space.high,\n size=(self.obj_and_goal_space.low.size),\n )\n while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.1:\n goal_pos = np.random.uniform(\n self.obj_and_goal_space.low,\n self.obj_and_goal_space.high,\n size=(self.obj_and_goal_space.low.size),\n )\n self.obj_init_pos = goal_pos[:3]\n self._state_goal = goal_pos[:3] + np.array([0, 0, 0.15])\n\n peg_pos = self.obj_init_pos + np.array([0., 0., 0.03])\n peg_top_pos = self.obj_init_pos + np.array([0., 0., 0.08])\n 
self.sim.model.body_pos[self.model.body_name2id('peg')] = peg_pos\n self.sim.model.site_pos[self.model.site_name2id('pegTop')] = peg_top_pos\n self._set_obj_xyz(self.obj_init_pos)\n self._set_goal_marker(self._state_goal)\n self.objHeight = self.data.get_geom_xpos('RoundNut-8')[2]\n self.heightTarget = self.objHeight + self.liftThresh\n self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget\n\n return self._get_obs()\n\n def _reset_hand(self):\n for _ in range(10):\n self.data.set_mocap_pos('mocap', self.hand_init_pos)\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n self.do_simulation([-1,1], self.frame_skip)\n\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.pickCompleted = False\n\n def compute_reward(self, actions, obs):\n obs = obs['state_observation']\n\n graspPos = obs[3:6]\n objPos = graspPos\n\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n fingerCOM = (rightFinger + leftFinger)/2\n\n heightTarget = self.heightTarget\n placingGoal = self._state_goal\n\n reachDist = np.linalg.norm(graspPos - fingerCOM)\n reachDistxy = np.linalg.norm(graspPos[:-1] - fingerCOM[:-1])\n zDist = np.abs(fingerCOM[-1] - self.init_fingerCOM[-1])\n\n placingDist = np.linalg.norm(objPos - placingGoal)\n\n def reachReward():\n reachRew = -reachDist\n if reachDistxy < 0.04:\n reachRew = -reachDist\n else:\n reachRew = -reachDistxy - 2*zDist\n\n # incentive to close fingers when reachDist is small\n if reachDist < 0.04:\n reachRew = -reachDist + max(actions[-1],0)/50\n return reachRew, reachDist\n\n def pickCompletionCriteria():\n tolerance = 0.01\n if objPos[2] >= (heightTarget- tolerance) and reachDist < 0.04:\n return True\n else:\n return False\n\n if pickCompletionCriteria():\n self.pickCompleted = True\n\n def objDropped():\n return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)\n\n def orig_pickReward():\n hScale = 100\n if self.pickCompleted and not(objDropped()):\n return hScale*heightTarget\n elif (reachDist < 0.04) and (objPos[2]> (self.objHeight + 0.005)) :\n return hScale* min(heightTarget, objPos[2])\n else:\n return 0\n\n def placeRewardMove():\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n\n placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))\n placeRew = max(placeRew,0)\n cond = self.pickCompleted and (reachDist < 0.03) and not(objDropped())\n if cond:\n return [placeRew, placingDist]\n else:\n return [0 , placingDist]\n\n\n reachRew, reachDist = reachReward()\n pickRew = orig_pickReward()\n\n peg_pos = self.sim.model.body_pos[self.model.body_name2id('peg')]\n nut_pos = self.get_body_com('RoundNut')\n if abs(nut_pos[0] - peg_pos[0]) > 0.05 or \\\n abs(nut_pos[1] - peg_pos[1]) > 0.05:\n placingDist = 0\n reachRew = 0\n reachDist = 0\n pickRew = heightTarget*100\n\n placeRew , placingDist = placeRewardMove()\n assert ((placeRew >=0) and (pickRew>=0))\n reward = reachRew + pickRew + placeRew\n success = (abs(nut_pos[0] - peg_pos[0]) > 0.05 or abs(nut_pos[1] - peg_pos[1]) > 0.05) or placingDist < 0.02\n\n return [reward, reachRew, reachDist, pickRew, placeRew, placingDist, float(success)]\n",
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv\n\n\nclass SawyerBasketballEnvV2(SawyerXYZEnv):\n \"\"\"\n Motivation for V2:\n V1 was difficult to solve because the observation didn't say where\n to drop the ball (the hoop's location).\n Changelog from V1 to V2:\n - (6/16/20) Added a 1 element vector to the observation. This vector\n points from the end effector to the hoop in the X direction.\n i.e. (self._state_goal - pos_hand)[0]\n \"\"\"\n def __init__(self, random_init=False):\n\n liftThresh = 0.3\n goal_low = (-0.1, 0.85, 0.15)\n goal_high = (0.1, 0.9+1e-7, 0.15)\n hand_low = (-0.5, 0.40, 0.05)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.6, 0.03)\n obj_high = (0.1, 0.7, 0.03)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.random_init = random_init\n\n self.init_config = {\n 'obj_init_angle': .3,\n 'obj_init_pos': np.array([0, 0.6, 0.03], dtype=np.float32),\n 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),\n }\n self.goal = np.array([0, 0.9, 0.15])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self.max_path_length = 150\n self.liftThresh = liftThresh\n\n self.action_space = Box(\n np.array([-1, -1, -1, -1]),\n np.array([1, 1, 1, 1]),\n )\n self.obj_and_goal_space = Box(\n np.hstack((obj_low, goal_low)),\n np.hstack((obj_high, goal_high)),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n self.observation_space = Box(\n np.hstack((self.hand_low, obj_low,)),\n np.hstack((self.hand_high, obj_high,)),\n )\n self.reset()\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_basketball.xml')\n\n def step(self, action):\n self.set_xyz_action(action[:3])\n self.do_simulation([action[-1], -action[-1]])\n # The marker seems to get reset every time you do a simulation\n self._set_goal_marker(self._state_goal)\n ob = self._get_obs()\n obs_dict = self._get_obs_dict()\n reward, reachDist, pickRew, placingDist = self.compute_reward(action, obs_dict)\n self.curr_path_length +=1\n info = {'reachDist': reachDist, 'goalDist': placingDist, 'epRew' : reward, 'pickRew':pickRew, 'success': float(placingDist <= 0.08)}\n info['goal'] = self.goal\n return ob, reward, False, info\n\n def _get_obs(self):\n pos_hand = self.get_endeff_pos()\n pos_obj = self.data.get_geom_xpos('objGeom')\n hand_to_goal = (self._state_goal - pos_hand)[0]\n\n flat_obs = np.hstack((pos_hand, pos_obj, hand_to_goal))\n return np.concatenate([flat_obs, ])\n\n def _get_obs_dict(self):\n return dict(\n state_observation=self._get_obs(),\n state_desired_goal=self._state_goal,\n state_achieved_goal=self.data.get_geom_xpos('objGeom'),\n )\n\n def _set_goal_marker(self, goal):\n self.data.site_xpos[self.model.site_name2id('goal')] = (\n goal[:3]\n )\n\n def _set_obj_xyz(self, pos):\n qpos = self.data.qpos.flat.copy()\n qvel = self.data.qvel.flat.copy()\n qpos[9:12] = pos.copy()\n qvel[9:15] = 0\n self.set_state(qpos, qvel)\n\n def reset_model(self):\n self._reset_hand()\n\n basket_pos = self.goal.copy()\n self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos\n self._state_goal = self.data.site_xpos[self.model.site_name2id('goal')]\n\n self.objHeight = self.data.get_geom_xpos('objGeom')[2]\n self.heightTarget = self.objHeight + self.liftThresh\n\n if 
self.random_init:\n goal_pos = np.random.uniform(\n self.obj_and_goal_space.low,\n self.obj_and_goal_space.high,\n size=(self.obj_and_goal_space.low.size),\n )\n basket_pos = goal_pos[3:]\n while np.linalg.norm(goal_pos[:2] - basket_pos[:2]) < 0.15:\n goal_pos = np.random.uniform(\n self.obj_and_goal_space.low,\n self.obj_and_goal_space.high,\n size=(self.obj_and_goal_space.low.size),\n )\n basket_pos = goal_pos[3:]\n self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))\n self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos\n self._state_goal = self.data.site_xpos[self.model.site_name2id('goal')]\n\n self._set_goal_marker(self._state_goal)\n self._set_obj_xyz(self.obj_init_pos)\n self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget\n return self._get_obs()\n\n def _reset_hand(self):\n for _ in range(10):\n self.data.set_mocap_pos('mocap', self.hand_init_pos)\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n self.do_simulation([-1,1], self.frame_skip)\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.pickCompleted = False\n\n def compute_reward(self, actions, obs):\n obs = obs['state_observation']\n\n objPos = obs[3:6]\n\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n fingerCOM = (rightFinger + leftFinger)/2\n\n heightTarget = self.heightTarget\n goal = self._state_goal\n\n reachDist = np.linalg.norm(objPos - fingerCOM)\n placingDist = np.linalg.norm(objPos - goal)\n assert np.all(goal == self.get_site_pos('goal'))\n\n def reachReward():\n reachRew = -reachDist\n reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])\n zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])\n if reachDistxy < 0.05:\n reachRew = -reachDist\n else:\n reachRew = -reachDistxy - 2*zRew\n\n #incentive to close fingers when reachDist is small\n if reachDist < 0.05:\n reachRew = -reachDist + max(actions[-1],0)/50\n return reachRew , reachDist\n\n def pickCompletionCriteria():\n tolerance = 0.01\n if objPos[2] >= (heightTarget - tolerance):\n return True\n else:\n return False\n\n if pickCompletionCriteria():\n self.pickCompleted = True\n\n\n def objDropped():\n return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)\n\n def orig_pickReward():\n hScale = 100\n if self.pickCompleted and not(objDropped()):\n return hScale*heightTarget\n elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :\n return hScale* min(heightTarget, objPos[2])\n else:\n return 0\n\n def placeReward():\n c1 = 1000 ; c2 = 0.01 ; c3 = 0.001\n cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())\n if cond:\n placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))\n placeRew = max(placeRew,0)\n return [placeRew , placingDist]\n else:\n return [0 , placingDist]\n\n reachRew, reachDist = reachReward()\n pickRew = orig_pickReward()\n placeRew , placingDist = placeReward()\n assert ((placeRew >=0) and (pickRew>=0))\n reward = reachRew + pickRew + placeRew\n return [reward, reachDist, pickRew, placingDist]\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.hstack",
"numpy.abs",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.exp",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.hstack",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.exp",
"numpy.random.uniform",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
morph-dev/self-learning-ai | [
"f94f3e86ab5bc088adf9fdcc320edc9debb4fb87"
] | [
"morphzero/games/connectfour/game.py"
] | [
"from __future__ import annotations\n\nfrom typing import Union, Iterator, Tuple, Optional\n\nimport numpy as np\n\nfrom morphzero.core.common.connect_on_matrix_board import ConnectOnMatrixBoardResult, ConnectOnMatrixBoardState, \\\n ConnectOnMatrixBoardRules, ConnectOnMatrixBoardEngine, ConnectOnMatrixBoardMove\nfrom morphzero.core.common.matrix_board import MatrixBoardSize, MatrixBoardCoordinates, MatrixBoard\nfrom morphzero.core.game import Player\n\n\nclass ConnectFourResult(ConnectOnMatrixBoardResult):\n \"\"\"Result for the Connect 4 game.\"\"\"\n\n\nclass ConnectFourState(ConnectOnMatrixBoardState):\n \"\"\"State for the Connect 4 game.\"\"\"\n\n def to_training_data(self) -> np.array:\n return np.array(self.board.data)\n\n\nclass ConnectFourMove(ConnectOnMatrixBoardMove):\n \"\"\"Move for the Connect 4 game.\"\"\"\n\n\nMoveOrMoveIndex = Union[ConnectFourMove, int]\n\n\nclass ConnectFourRules(ConnectOnMatrixBoardRules):\n def number_of_possible_moves(self) -> int:\n return ConnectFourEngine.number_of_possible_moves(self.board_size)\n\n def create_engine(self) -> ConnectFourEngine:\n return ConnectFourEngine(self)\n\n @classmethod\n def create_default_rules(cls) -> ConnectFourRules:\n return cls(board_size=MatrixBoardSize(6, 7), goal=4)\n\n\nclass ConnectFourEngine(ConnectOnMatrixBoardEngine):\n\n def new_game(self) -> ConnectFourState:\n return ConnectFourState(\n current_player=Player.FIRST_PLAYER,\n result=None,\n board=MatrixBoard.create_empty(self.rules.board_size, Player.NO_PLAYER))\n\n def create_move_from_move_index(self, move_index: int) -> ConnectFourMove:\n return ConnectFourMove(\n move_index=move_index,\n resign=move_index == self.get_move_index_for_resign(),\n coordinates=self.get_coordinates_for_move_index(move_index))\n\n def playable_moves(self, state: ConnectFourState) -> Iterator[ConnectFourMove]: # type: ignore[override]\n if state.is_game_over:\n return []\n\n for column in range(self.rules.board_size.columns):\n move = self.playable_move_for_column(state, column)\n if move:\n yield move\n\n yield ConnectFourMove(\n move_index=self.get_move_index_for_resign(),\n resign=True,\n coordinates=None,\n )\n\n def playable_move_for_column(self, state: ConnectFourState, column: int) -> Optional[ConnectFourMove]:\n # Going bottom to top, return first row that is empty.\n for row in reversed(range(self.rules.board_size.rows)):\n if state.board.rows[row][column] == Player.NO_PLAYER:\n coordinates = MatrixBoardCoordinates(row, column)\n return ConnectFourMove(\n move_index=self.get_move_index_for_coordinates(coordinates),\n resign=False,\n coordinates=coordinates,\n )\n return None\n\n def playable_moves_bitmap(self, state: ConnectFourState) -> Tuple[bool, ...]: # type: ignore[override]\n result = [False] * self.number_of_possible_moves(self.rules.board_size)\n for move in self.playable_moves(state):\n result[move.move_index] = True\n return tuple(result)\n\n def is_move_playable( # type: ignore[override]\n self, state: ConnectFourState, move: MoveOrMoveIndex) -> bool:\n if state.is_game_over:\n return False\n if isinstance(move, int):\n move = self.create_move_from_move_index(move)\n else:\n self.validate_move(move)\n if move.resign:\n return True\n assert move.coordinates\n if state.board[move.coordinates] == Player.NO_PLAYER:\n next_row_coordinates = move.coordinates + MatrixBoardCoordinates(row=1, column=0)\n if next_row_coordinates not in state.board:\n # move.coordinates is the last row\n return True\n else:\n # Move is playable if next row is full\n return 
state.board[next_row_coordinates] != Player.NO_PLAYER\n return False\n\n def play_move( # type: ignore[override]\n self, state: ConnectFourState, move: MoveOrMoveIndex) -> ConnectFourState:\n if isinstance(move, int):\n move = self.create_move_from_move_index(move)\n if not self.is_move_playable(state, move):\n raise ValueError(f\"Move {move} is not playable.\")\n\n board = state.board\n if move.resign:\n return ConnectFourState(\n current_player=state.current_player.other_player,\n result=ConnectFourResult.create_resignation(\n winner=state.current_player.other_player),\n board=board)\n assert move.coordinates\n\n board = board.replace({move.coordinates: state.current_player})\n result = ConnectFourResult.create_from_board_and_last_move(self.rules, board, move.coordinates)\n return ConnectFourState(\n current_player=state.current_player.other_player,\n result=result,\n board=board,\n )\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rahul0/spark | [
"057c051285ec32c665fb458d0670c1c16ba536b2"
] | [
"python/pyspark/pandas/internal.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAn internal immutable DataFrame with some metadata to manage indexes.\n\"\"\"\nimport re\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype # noqa: F401\nfrom pyspark._globals import _NoValue, _NoValueType\nfrom pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, Window\nfrom pyspark.sql.types import ( # noqa: F401\n BooleanType,\n DataType,\n LongType,\n StructField,\n StructType,\n StringType,\n)\nfrom pyspark.sql.utils import is_timestamp_ntz_preferred\n\n# For running doctests and reference resolution in PyCharm.\nfrom pyspark import pandas as ps\nfrom pyspark.pandas._typing import Label\n\nif TYPE_CHECKING:\n # This is required in old Python 3.5 to prevent circular reference.\n from pyspark.pandas.series import Series\nfrom pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale\nfrom pyspark.pandas.data_type_ops.base import DataTypeOps\nfrom pyspark.pandas.typedef import (\n Dtype,\n as_spark_type,\n extension_dtypes,\n infer_pd_series_spark_type,\n spark_type_to_pandas_dtype,\n)\nfrom pyspark.pandas.utils import (\n column_labels_level,\n default_session,\n is_name_like_tuple,\n is_testing,\n lazy_property,\n name_like_string,\n scol_for,\n spark_column_equals,\n)\n\n\n# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.\nSPARK_INDEX_NAME_FORMAT = \"__index_level_{}__\".format\nSPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)\n# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.\nSPARK_INDEX_NAME_PATTERN = re.compile(r\"__index_level_[0-9]+__\")\n\nNATURAL_ORDER_COLUMN_NAME = \"__natural_order__\"\n\nHIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}\n\nDEFAULT_SERIES_NAME = 0\nSPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)\n\n\nclass InternalField:\n \"\"\"\n The internal field to store the dtype as well as the Spark's StructField optionally.\n\n Parameters\n ----------\n dtype : numpy.dtype or pandas' ExtensionDtype\n The dtype for the field\n struct_field : StructField, optional\n The `StructField` for the field. 
If None, InternalFrame will properly set.\n \"\"\"\n\n def __init__(self, dtype: Dtype, struct_field: Optional[StructField] = None):\n self._dtype = dtype\n self._struct_field = struct_field\n\n @staticmethod\n def from_struct_field(\n struct_field: StructField, *, use_extension_dtypes: bool = False\n ) -> \"InternalField\":\n \"\"\"\n Returns a new InternalField object created from the given StructField.\n\n The dtype will be inferred from the data type of the given StructField.\n\n Parameters\n ----------\n struct_field : StructField\n The StructField used to create a new InternalField object.\n use_extension_dtypes : bool\n If True, try to use the extension dtypes.\n\n Returns\n -------\n InternalField\n \"\"\"\n return InternalField(\n dtype=spark_type_to_pandas_dtype(\n struct_field.dataType, use_extension_dtypes=use_extension_dtypes\n ),\n struct_field=struct_field,\n )\n\n @property\n def dtype(self) -> Dtype:\n \"\"\"Return the dtype for the field.\"\"\"\n return self._dtype\n\n @property\n def struct_field(self) -> Optional[StructField]:\n \"\"\"Return the StructField for the field.\"\"\"\n return self._struct_field\n\n @property\n def name(self) -> str:\n \"\"\"Return the field name if the StructField exists.\"\"\"\n assert self.struct_field is not None\n return self.struct_field.name\n\n @property\n def spark_type(self) -> DataType:\n \"\"\"Return the spark data type for the field if the StructField exists.\"\"\"\n assert self.struct_field is not None\n return self.struct_field.dataType\n\n @property\n def nullable(self) -> bool:\n \"\"\"Return the nullability for the field if the StructField exists.\"\"\"\n assert self.struct_field is not None\n return self.struct_field.nullable\n\n @property\n def metadata(self) -> Dict[str, Any]:\n \"\"\"Return the metadata for the field if the StructField exists.\"\"\"\n assert self.struct_field is not None\n return self.struct_field.metadata\n\n @property\n def is_extension_dtype(self) -> bool:\n \"\"\"Return whether the dtype for the field is an extension type or not.\"\"\"\n return isinstance(self.dtype, extension_dtypes)\n\n def normalize_spark_type(self) -> \"InternalField\":\n \"\"\"Return a new InternalField object with normalized Spark data type.\"\"\"\n assert self.struct_field is not None\n return self.copy(\n spark_type=force_decimal_precision_scale(as_nullable_spark_type(self.spark_type)),\n nullable=True,\n )\n\n def copy(\n self,\n *,\n name: Union[str, _NoValueType] = _NoValue,\n dtype: Union[Dtype, _NoValueType] = _NoValue,\n spark_type: Union[DataType, _NoValueType] = _NoValue,\n nullable: Union[bool, _NoValueType] = _NoValue,\n metadata: Union[Optional[Dict[str, Any]], _NoValueType] = _NoValue,\n ) -> \"InternalField\":\n \"\"\"Copy the InternalField object.\"\"\"\n if name is _NoValue:\n name = self.name\n if dtype is _NoValue:\n dtype = self.dtype\n if spark_type is _NoValue:\n spark_type = self.spark_type\n if nullable is _NoValue:\n nullable = self.nullable\n if metadata is _NoValue:\n metadata = self.metadata\n return InternalField(\n dtype=cast(Dtype, dtype),\n struct_field=StructField(\n name=cast(str, name),\n dataType=cast(DataType, spark_type),\n nullable=cast(bool, nullable),\n metadata=cast(Optional[Dict[str, Any]], metadata),\n ),\n )\n\n def __eq__(self, other: Any) -> bool:\n return (\n isinstance(other, InternalField)\n and self.dtype == other.dtype\n and self.struct_field == other.struct_field\n )\n\n def __repr__(self) -> str:\n return \"InternalField(dtype={dtype}, struct_field={struct_field})\".format(\n 
dtype=self.dtype, struct_field=self.struct_field\n )\n\n\nclass InternalFrame:\n \"\"\"\n The internal immutable DataFrame which manages Spark DataFrame and column names and index\n information.\n\n .. note:: this is an internal class. It is not supposed to be exposed to users and users\n should not directly access to it.\n\n The internal immutable DataFrame represents the index information for a DataFrame it belongs to.\n For instance, if we have a pandas-on-Spark DataFrame as below, pandas DataFrame does not\n store the index as columns.\n\n >>> psdf = ps.DataFrame({\n ... 'A': [1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8],\n ... 'C': [9, 10, 11, 12],\n ... 'D': [13, 14, 15, 16],\n ... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n A B C D E\n 0 1 5 9 13 17\n 1 2 6 10 14 18\n 2 3 7 11 15 19\n 3 4 8 12 16 20\n\n However, all columns including index column are also stored in Spark DataFrame internally\n as below.\n\n >>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +-----------------+---+---+---+---+---+\n |__index_level_0__| A| B| C| D| E|\n +-----------------+---+---+---+---+---+\n | 0| 1| 5| 9| 13| 17|\n | 1| 2| 6| 10| 14| 18|\n | 2| 3| 7| 11| 15| 19|\n | 3| 4| 8| 12| 16| 20|\n +-----------------+---+---+---+---+---+\n\n In order to fill this gap, the current metadata is used by mapping Spark's internal column\n to pandas-on-Spark's index. See the method below:\n\n * `spark_frame` represents the internal Spark DataFrame\n\n * `data_spark_column_names` represents non-indexing Spark column names\n\n * `data_spark_columns` represents non-indexing Spark columns\n\n * `data_fields` represents non-indexing InternalFields\n\n * `index_spark_column_names` represents internal index Spark column names\n\n * `index_spark_columns` represents internal index Spark columns\n\n * `index_fields` represents index InternalFields\n\n * `spark_column_names` represents all columns\n\n * `index_names` represents the external index name as a label\n\n * `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. 
Includes index.\n\n * `to_pandas_frame` represents pandas DataFrame derived by the metadata\n\n >>> internal = psdf._internal\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+---+---+---+---+---+-----------------+\n |__index_level_0__| A| B| C| D| E|__natural_order__|\n +-----------------+---+---+---+---+---+-----------------+\n | 0| 1| 5| 9| 13| 17| ...|\n | 1| 2| 6| 10| 14| 18| ...|\n | 2| 3| 7| 11| 15| 19| ...|\n | 3| 4| 8| 12| 16| 20| ...|\n +-----------------+---+---+---+---+---+-----------------+\n >>> internal.data_spark_column_names\n ['A', 'B', 'C', 'D', 'E']\n >>> internal.index_spark_column_names\n ['__index_level_0__']\n >>> internal.spark_column_names\n ['__index_level_0__', 'A', 'B', 'C', 'D', 'E']\n >>> internal.index_names\n [None]\n >>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('A', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('B', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('C', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('D', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('E', LongType(), False))]\n >>> internal.index_fields\n [InternalField(dtype=int64, struct_field=StructField('__index_level_0__', LongType(), False))]\n >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +-----------------+---+---+---+---+---+\n |__index_level_0__| A| B| C| D| E|\n +-----------------+---+---+---+---+---+\n | 0| 1| 5| 9| 13| 17|\n | 1| 2| 6| 10| 14| 18|\n | 2| 3| 7| 11| 15| 19|\n | 3| 4| 8| 12| 16| 20|\n +-----------------+---+---+---+---+---+\n >>> internal.to_pandas_frame\n A B C D E\n 0 1 5 9 13 17\n 1 2 6 10 14 18\n 2 3 7 11 15 19\n 3 4 8 12 16 20\n\n In case that index is set to one of the existing column as below:\n\n >>> psdf1 = psdf.set_index(\"A\")\n >>> psdf1 # doctest: +NORMALIZE_WHITESPACE\n B C D E\n A\n 1 5 9 13 17\n 2 6 10 14 18\n 3 7 11 15 19\n 4 8 12 16 20\n\n >>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +---+---+---+---+---+\n | A| B| C| D| E|\n +---+---+---+---+---+\n | 1| 5| 9| 13| 17|\n | 2| 6| 10| 14| 18|\n | 3| 7| 11| 15| 19|\n | 4| 8| 12| 16| 20|\n +---+---+---+---+---+\n\n >>> internal = psdf1._internal\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+---+---+---+---+---+-----------------+\n |__index_level_0__| A| B| C| D| E|__natural_order__|\n +-----------------+---+---+---+---+---+-----------------+\n | 0| 1| 5| 9| 13| 17| ...|\n | 1| 2| 6| 10| 14| 18| ...|\n | 2| 3| 7| 11| 15| 19| ...|\n | 3| 4| 8| 12| 16| 20| ...|\n +-----------------+---+---+---+---+---+-----------------+\n >>> internal.data_spark_column_names\n ['B', 'C', 'D', 'E']\n >>> internal.index_spark_column_names\n ['A']\n >>> internal.spark_column_names\n ['A', 'B', 'C', 'D', 'E']\n >>> internal.index_names\n [('A',)]\n >>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('B', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('C', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('D', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('E', LongType(), False))]\n >>> internal.index_fields\n [InternalField(dtype=int64, struct_field=StructField('A', LongType(), False))]\n >>> internal.to_internal_spark_frame.show() # 
doctest: +NORMALIZE_WHITESPACE\n +---+---+---+---+---+\n | A| B| C| D| E|\n +---+---+---+---+---+\n | 1| 5| 9| 13| 17|\n | 2| 6| 10| 14| 18|\n | 3| 7| 11| 15| 19|\n | 4| 8| 12| 16| 20|\n +---+---+---+---+---+\n >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE\n B C D E\n A\n 1 5 9 13 17\n 2 6 10 14 18\n 3 7 11 15 19\n 4 8 12 16 20\n\n In case that index becomes a multi index as below:\n\n >>> psdf2 = psdf.set_index(\"A\", append=True)\n >>> psdf2 # doctest: +NORMALIZE_WHITESPACE\n B C D E\n A\n 0 1 5 9 13 17\n 1 2 6 10 14 18\n 2 3 7 11 15 19\n 3 4 8 12 16 20\n\n >>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +-----------------+---+---+---+---+---+\n |__index_level_0__| A| B| C| D| E|\n +-----------------+---+---+---+---+---+\n | 0| 1| 5| 9| 13| 17|\n | 1| 2| 6| 10| 14| 18|\n | 2| 3| 7| 11| 15| 19|\n | 3| 4| 8| 12| 16| 20|\n +-----------------+---+---+---+---+---+\n\n >>> internal = psdf2._internal\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+---+---+---+---+---+-----------------+\n |__index_level_0__| A| B| C| D| E|__natural_order__|\n +-----------------+---+---+---+---+---+-----------------+\n | 0| 1| 5| 9| 13| 17| ...|\n | 1| 2| 6| 10| 14| 18| ...|\n | 2| 3| 7| 11| 15| 19| ...|\n | 3| 4| 8| 12| 16| 20| ...|\n +-----------------+---+---+---+---+---+-----------------+\n >>> internal.data_spark_column_names\n ['B', 'C', 'D', 'E']\n >>> internal.index_spark_column_names\n ['__index_level_0__', 'A']\n >>> internal.spark_column_names\n ['__index_level_0__', 'A', 'B', 'C', 'D', 'E']\n >>> internal.index_names\n [None, ('A',)]\n >>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('B', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('C', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('D', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('E', LongType(), False))]\n >>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('__index_level_0__', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('A', LongType(), False))]\n >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +-----------------+---+---+---+---+---+\n |__index_level_0__| A| B| C| D| E|\n +-----------------+---+---+---+---+---+\n | 0| 1| 5| 9| 13| 17|\n | 1| 2| 6| 10| 14| 18|\n | 2| 3| 7| 11| 15| 19|\n | 3| 4| 8| 12| 16| 20|\n +-----------------+---+---+---+---+---+\n >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE\n B C D E\n A\n 0 1 5 9 13 17\n 1 2 6 10 14 18\n 2 3 7 11 15 19\n 3 4 8 12 16 20\n\n For multi-level columns, it also holds column_labels\n\n >>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),\n ... ('Y', 'C'), ('Y', 'D')])\n >>> psdf3 = ps.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12],\n ... [13, 14, 15, 16],\n ... 
[17, 18, 19, 20]], columns = columns)\n >>> psdf3 # doctest: +NORMALIZE_WHITESPACE\n X Y\n A B C D\n 0 1 2 3 4\n 1 5 6 7 8\n 2 9 10 11 12\n 3 13 14 15 16\n 4 17 18 19 20\n\n >>> internal = psdf3._internal\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+------+------+------+------+-----------------+\n |__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|\n +-----------------+------+------+------+------+-----------------+\n | 0| 1| 2| 3| 4| ...|\n | 1| 5| 6| 7| 8| ...|\n | 2| 9| 10| 11| 12| ...|\n | 3| 13| 14| 15| 16| ...|\n | 4| 17| 18| 19| 20| ...|\n +-----------------+------+------+------+------+-----------------+\n >>> internal.data_spark_column_names\n ['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']\n >>> internal.column_labels\n [('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]\n\n For Series, it also holds scol to represent the column.\n\n >>> psseries = psdf1.B\n >>> psseries\n A\n 1 5\n 2 6\n 3 7\n 4 8\n Name: B, dtype: int64\n\n >>> internal = psseries._internal\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+---+---+---+---+---+-----------------+\n |__index_level_0__| A| B| C| D| E|__natural_order__|\n +-----------------+---+---+---+---+---+-----------------+\n | 0| 1| 5| 9| 13| 17| ...|\n | 1| 2| 6| 10| 14| 18| ...|\n | 2| 3| 7| 11| 15| 19| ...|\n | 3| 4| 8| 12| 16| 20| ...|\n +-----------------+---+---+---+---+---+-----------------+\n >>> internal.data_spark_column_names\n ['B']\n >>> internal.index_spark_column_names\n ['A']\n >>> internal.spark_column_names\n ['A', 'B']\n >>> internal.index_names\n [('A',)]\n >>> internal.data_fields\n [InternalField(dtype=int64, struct_field=StructField('B', LongType(), False))]\n >>> internal.index_fields\n [InternalField(dtype=int64, struct_field=StructField('A', LongType(), False))]\n >>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE\n +---+---+\n | A| B|\n +---+---+\n | 1| 5|\n | 2| 6|\n | 3| 7|\n | 4| 8|\n +---+---+\n >>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE\n B\n A\n 1 5\n 2 6\n 3 7\n 4 8\n \"\"\"\n\n def __init__(\n self,\n spark_frame: SparkDataFrame,\n index_spark_columns: Optional[List[Column]],\n index_names: Optional[List[Optional[Label]]] = None,\n index_fields: Optional[List[InternalField]] = None,\n column_labels: Optional[List[Label]] = None,\n data_spark_columns: Optional[List[Column]] = None,\n data_fields: Optional[List[InternalField]] = None,\n column_label_names: Optional[List[Optional[Label]]] = None,\n ):\n \"\"\"\n Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and\n index fields and names.\n\n :param spark_frame: Spark DataFrame to be managed.\n :param index_spark_columns: list of Spark Column\n Spark Columns for the index.\n :param index_names: list of tuples\n the index names.\n :param index_fields: list of InternalField\n the InternalFields for the index columns\n :param column_labels: list of tuples with the same length\n The multi-level values in the tuples.\n :param data_spark_columns: list of Spark Column\n Spark Columns to appear as columns. If this is None, calculated\n from spark_frame.\n :param data_fields: list of InternalField\n the InternalFields for the data columns\n :param column_label_names: Names for each of the column index levels.\n\n See the examples below to refer what each parameter means.\n\n >>> column_labels = pd.MultiIndex.from_tuples(\n ... 
[('a', 'x'), ('a', 'y'), ('b', 'z')], names=[\"column_labels_a\", \"column_labels_b\"])\n >>> row_index = pd.MultiIndex.from_tuples(\n ... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],\n ... names=[\"row_index_a\", \"row_index_b\"])\n >>> psdf = ps.DataFrame(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)\n >>> psdf.set_index(('a', 'x'), append=True, inplace=True)\n >>> psdf # doctest: +NORMALIZE_WHITESPACE\n column_labels_a a b\n column_labels_b y z\n row_index_a row_index_b (a, x)\n foo bar 1 2 3\n 4 5 6\n zoo bar 7 8 9\n\n >>> internal = psdf._internal\n\n >>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n +-----------------+-----------------+------+------+------+...\n |__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...\n +-----------------+-----------------+------+------+------+...\n | foo| bar| 1| 2| 3|...\n | foo| bar| 4| 5| 6|...\n | zoo| bar| 7| 8| 9|...\n +-----------------+-----------------+------+------+------+...\n\n >>> internal.index_spark_columns # doctest: +SKIP\n [Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]\n\n >>> internal.index_names\n [('row_index_a',), ('row_index_b',), ('a', 'x')]\n\n >>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=object,\n struct_field=StructField('__index_level_0__', StringType(), False)),\n InternalField(dtype=object,\n struct_field=StructField('__index_level_1__', StringType(), False)),\n InternalField(dtype=int64,\n struct_field=StructField('(a, x)', LongType(), False))]\n\n >>> internal.column_labels\n [('a', 'y'), ('b', 'z')]\n\n >>> internal.data_spark_columns # doctest: +SKIP\n [Column<'(a, y)'>, Column<'(b, z)'>]\n\n >>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('(a, y)', LongType(), False)),\n InternalField(dtype=int64, struct_field=StructField('(b, z)', LongType(), False))]\n\n >>> internal.column_label_names\n [('column_labels_a',), ('column_labels_b',)]\n \"\"\"\n\n assert isinstance(spark_frame, SparkDataFrame)\n assert not spark_frame.isStreaming, \"pandas-on-Spark does not support Structured Streaming.\"\n\n if not index_spark_columns:\n if data_spark_columns is not None:\n if column_labels is not None:\n data_spark_columns = [\n scol.alias(name_like_string(label))\n for scol, label in zip(data_spark_columns, column_labels)\n ]\n spark_frame = spark_frame.select(data_spark_columns)\n\n assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (\n \"Index columns should not appear in columns of the Spark DataFrame. 
Avoid \"\n \"index column names [%s].\" % SPARK_INDEX_NAME_PATTERN\n )\n\n # Create default index.\n spark_frame = InternalFrame.attach_default_index(spark_frame)\n index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]\n\n index_fields = [\n InternalField.from_struct_field(\n StructField(SPARK_DEFAULT_INDEX_NAME, LongType(), nullable=False)\n )\n ]\n\n if data_spark_columns is not None:\n data_struct_fields = [\n field\n for field in spark_frame.schema.fields\n if field.name != SPARK_DEFAULT_INDEX_NAME\n ]\n data_spark_columns = [\n scol_for(spark_frame, field.name) for field in data_struct_fields\n ]\n if data_fields is not None:\n data_fields = [\n field.copy(\n name=name_like_string(struct_field.name),\n )\n for field, struct_field in zip(data_fields, data_struct_fields)\n ]\n\n if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:\n spark_frame = spark_frame.withColumn(\n NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()\n )\n\n self._sdf: SparkDataFrame = spark_frame\n\n # index_spark_columns\n assert all(\n isinstance(index_scol, Column) for index_scol in index_spark_columns\n ), index_spark_columns\n\n self._index_spark_columns: List[Column] = index_spark_columns\n\n # data_spark_columns\n if data_spark_columns is None:\n data_spark_columns = [\n scol_for(spark_frame, col)\n for col in spark_frame.columns\n if all(\n not spark_column_equals(scol_for(spark_frame, col), index_scol)\n for index_scol in index_spark_columns\n )\n and col not in HIDDEN_COLUMNS\n ]\n else:\n assert all(isinstance(scol, Column) for scol in data_spark_columns)\n\n self._data_spark_columns: List[Column] = data_spark_columns\n\n # fields\n if index_fields is None:\n index_fields = [None] * len(index_spark_columns)\n if data_fields is None:\n data_fields = [None] * len(data_spark_columns)\n\n assert len(index_spark_columns) == len(index_fields), (\n len(index_spark_columns),\n len(index_fields),\n )\n assert len(data_spark_columns) == len(data_fields), (\n len(data_spark_columns),\n len(data_fields),\n )\n\n if any(field is None or field.struct_field is None for field in index_fields) and any(\n field is None or field.struct_field is None for field in data_fields\n ):\n schema = spark_frame.select(index_spark_columns + data_spark_columns).schema\n fields = [\n InternalField.from_struct_field(struct_field)\n if field is None\n else InternalField(field.dtype, struct_field)\n if field.struct_field is None\n else field\n for field, struct_field in zip(index_fields + data_fields, schema.fields)\n ]\n index_fields = fields[: len(index_spark_columns)]\n data_fields = fields[len(index_spark_columns) :]\n elif any(field is None or field.struct_field is None for field in index_fields):\n schema = spark_frame.select(index_spark_columns).schema\n index_fields = [\n InternalField.from_struct_field(struct_field)\n if field is None\n else InternalField(field.dtype, struct_field)\n if field.struct_field is None\n else field\n for field, struct_field in zip(index_fields, schema.fields)\n ]\n elif any(field is None or field.struct_field is None for field in data_fields):\n schema = spark_frame.select(data_spark_columns).schema\n data_fields = [\n InternalField.from_struct_field(struct_field)\n if field is None\n else InternalField(field.dtype, struct_field)\n if field.struct_field is None\n else field\n for field, struct_field in zip(data_fields, schema.fields)\n ]\n\n assert all(\n isinstance(ops.dtype, Dtype.__args__) # type: ignore[attr-defined]\n and (\n ops.dtype == np.dtype(\"object\")\n or 
as_spark_type(ops.dtype, raise_error=False) is not None\n )\n for ops in index_fields\n ), index_fields\n\n if is_testing():\n struct_fields = spark_frame.select(index_spark_columns).schema.fields\n assert all(\n index_field.struct_field == struct_field\n for index_field, struct_field in zip(index_fields, struct_fields)\n ), (index_fields, struct_fields)\n\n self._index_fields: List[InternalField] = index_fields\n\n assert all(\n isinstance(ops.dtype, Dtype.__args__) # type: ignore[attr-defined]\n and (\n ops.dtype == np.dtype(\"object\")\n or as_spark_type(ops.dtype, raise_error=False) is not None\n )\n for ops in data_fields\n ), data_fields\n\n if is_testing():\n struct_fields = spark_frame.select(data_spark_columns).schema.fields\n assert all(\n data_field.struct_field == struct_field\n for data_field, struct_field in zip(data_fields, struct_fields)\n ), (data_fields, struct_fields)\n\n self._data_fields: List[InternalField] = data_fields\n\n # index_names\n if not index_names:\n index_names = [None] * len(index_spark_columns)\n\n assert len(index_spark_columns) == len(index_names), (\n len(index_spark_columns),\n len(index_names),\n )\n assert all(\n is_name_like_tuple(index_name, check_type=True) for index_name in index_names\n ), index_names\n\n self._index_names: List[Optional[Label]] = index_names\n\n # column_labels\n if column_labels is None:\n column_labels = [(col,) for col in spark_frame.select(self._data_spark_columns).columns]\n else:\n assert len(column_labels) == len(self._data_spark_columns), (\n len(column_labels),\n len(self._data_spark_columns),\n )\n if len(column_labels) == 1:\n column_label = column_labels[0]\n assert is_name_like_tuple(column_label, check_type=True), column_label\n else:\n assert all(\n is_name_like_tuple(column_label, check_type=True)\n for column_label in column_labels\n ), column_labels\n assert len(set(len(label) for label in column_labels)) <= 1, column_labels\n\n self._column_labels: List[Label] = column_labels\n\n # column_label_names\n if column_label_names is None:\n column_label_names = [None] * column_labels_level(self._column_labels)\n else:\n if len(self._column_labels) > 0:\n assert len(column_label_names) == column_labels_level(self._column_labels), (\n len(column_label_names),\n column_labels_level(self._column_labels),\n )\n else:\n assert len(column_label_names) > 0, len(column_label_names)\n assert all(\n is_name_like_tuple(column_label_name, check_type=True)\n for column_label_name in column_label_names\n ), column_label_names\n\n self._column_label_names: List[Optional[Label]] = column_label_names\n\n @staticmethod\n def attach_default_index(\n sdf: SparkDataFrame, default_index_type: Optional[str] = None\n ) -> SparkDataFrame:\n \"\"\"\n This method attaches a default index to Spark DataFrame. Spark does not have the index\n notion so corresponding column should be generated.\n There are several types of default index can be configured by `compute.default_index_type`.\n\n >>> spark_frame = ps.range(10).to_spark()\n >>> spark_frame\n DataFrame[id: bigint]\n\n It adds the default index column '__index_level_0__'.\n\n >>> spark_frame = InternalFrame.attach_default_index(spark_frame)\n >>> spark_frame\n DataFrame[__index_level_0__: bigint, id: bigint]\n\n It throws an exception if the given column name already exists.\n\n >>> InternalFrame.attach_default_index(spark_frame)\n ... 
# doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n AssertionError: '__index_level_0__' already exists...\n \"\"\"\n index_column = SPARK_DEFAULT_INDEX_NAME\n assert (\n index_column not in sdf.columns\n ), \"'%s' already exists in the Spark column names '%s'\" % (index_column, sdf.columns)\n\n if default_index_type is None:\n default_index_type = ps.get_option(\"compute.default_index_type\")\n\n if default_index_type == \"sequence\":\n return InternalFrame.attach_sequence_column(sdf, column_name=index_column)\n elif default_index_type == \"distributed-sequence\":\n return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)\n elif default_index_type == \"distributed\":\n return InternalFrame.attach_distributed_column(sdf, column_name=index_column)\n else:\n raise ValueError(\n \"'compute.default_index_type' should be one of 'sequence',\"\n \" 'distributed-sequence' and 'distributed'\"\n )\n\n @staticmethod\n def attach_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:\n scols = [scol_for(sdf, column) for column in sdf.columns]\n sequential_index = (\n F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast(\"long\") - 1\n )\n return sdf.select(sequential_index.alias(column_name), *scols)\n\n @staticmethod\n def attach_distributed_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:\n scols = [scol_for(sdf, column) for column in sdf.columns]\n return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)\n\n @staticmethod\n def attach_distributed_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:\n \"\"\"\n This method attaches a Spark column that has a sequence in a distributed manner.\n This is equivalent to the column assigned when default index type 'distributed-sequence'.\n\n >>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()\n >>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name=\"sequence\")\n >>> sdf.show() # doctest: +NORMALIZE_WHITESPACE\n +--------+---+\n |sequence| 0|\n +--------+---+\n | 0| a|\n | 1| b|\n | 2| c|\n +--------+---+\n \"\"\"\n if len(sdf.columns) > 0:\n return SparkDataFrame(\n sdf._jdf.toDF().withSequenceColumn(column_name),\n sdf.sparkSession,\n )\n else:\n cnt = sdf.count()\n if cnt > 0:\n return default_session().range(cnt).toDF(column_name)\n else:\n return default_session().createDataFrame(\n [], schema=StructType().add(column_name, data_type=LongType(), nullable=False)\n )\n\n def spark_column_for(self, label: Label) -> Column:\n \"\"\"Return Spark Column for the given column label.\"\"\"\n column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))\n if label in column_labels_to_scol:\n return column_labels_to_scol[label]\n else:\n raise KeyError(name_like_string(label))\n\n def spark_column_name_for(self, label_or_scol: Union[Label, Column]) -> str:\n \"\"\"Return the actual Spark column name for the given column label.\"\"\"\n if isinstance(label_or_scol, Column):\n return self.spark_frame.select(label_or_scol).columns[0]\n else:\n return self.field_for(label_or_scol).name\n\n def spark_type_for(self, label_or_scol: Union[Label, Column]) -> DataType:\n \"\"\"Return DataType for the given column label.\"\"\"\n if isinstance(label_or_scol, Column):\n return self.spark_frame.select(label_or_scol).schema[0].dataType\n else:\n return self.field_for(label_or_scol).spark_type\n\n def spark_column_nullable_for(self, label_or_scol: Union[Label, Column]) -> bool:\n \"\"\"Return nullability 
for the given column label.\"\"\"\n if isinstance(label_or_scol, Column):\n return self.spark_frame.select(label_or_scol).schema[0].nullable\n else:\n return self.field_for(label_or_scol).nullable\n\n def field_for(self, label: Label) -> InternalField:\n \"\"\"Return InternalField for the given column label.\"\"\"\n column_labels_to_fields = dict(zip(self.column_labels, self.data_fields))\n if label in column_labels_to_fields:\n return column_labels_to_fields[label]\n else:\n raise KeyError(name_like_string(label))\n\n @property\n def spark_frame(self) -> SparkDataFrame:\n \"\"\"Return the managed Spark DataFrame.\"\"\"\n return self._sdf\n\n @lazy_property\n def data_spark_column_names(self) -> List[str]:\n \"\"\"Return the managed column field names.\"\"\"\n return [field.name for field in self.data_fields]\n\n @property\n def data_spark_columns(self) -> List[Column]:\n \"\"\"Return Spark Columns for the managed data columns.\"\"\"\n return self._data_spark_columns\n\n @property\n def index_spark_column_names(self) -> List[str]:\n \"\"\"Return the managed index field names.\"\"\"\n return [field.name for field in self.index_fields]\n\n @property\n def index_spark_columns(self) -> List[Column]:\n \"\"\"Return Spark Columns for the managed index columns.\"\"\"\n return self._index_spark_columns\n\n @lazy_property\n def spark_column_names(self) -> List[str]:\n \"\"\"Return all the field names including index field names.\"\"\"\n return self.spark_frame.select(self.spark_columns).columns\n\n @lazy_property\n def spark_columns(self) -> List[Column]:\n \"\"\"Return Spark Columns for the managed columns including index columns.\"\"\"\n index_spark_columns = self.index_spark_columns\n return index_spark_columns + [\n spark_column\n for spark_column in self.data_spark_columns\n if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns)\n ]\n\n @property\n def index_names(self) -> List[Optional[Label]]:\n \"\"\"Return the managed index names.\"\"\"\n return self._index_names\n\n @lazy_property\n def index_level(self) -> int:\n \"\"\"Return the level of the index.\"\"\"\n return len(self._index_names)\n\n @property\n def column_labels(self) -> List[Label]:\n \"\"\"Return the managed column index.\"\"\"\n return self._column_labels\n\n @lazy_property\n def column_labels_level(self) -> int:\n \"\"\"Return the level of the column index.\"\"\"\n return len(self._column_label_names)\n\n @property\n def column_label_names(self) -> List[Optional[Label]]:\n \"\"\"Return names of the index levels.\"\"\"\n return self._column_label_names\n\n @property\n def index_fields(self) -> List[InternalField]:\n \"\"\"Return InternalFields for the managed index columns.\"\"\"\n return self._index_fields\n\n @property\n def data_fields(self) -> List[InternalField]:\n \"\"\"Return InternalFields for the managed columns.\"\"\"\n return self._data_fields\n\n @lazy_property\n def to_internal_spark_frame(self) -> SparkDataFrame:\n \"\"\"\n Return as Spark DataFrame. 
This contains index columns as well\n and should be only used for internal purposes.\n \"\"\"\n index_spark_columns = self.index_spark_columns\n data_columns = []\n for spark_column in self.data_spark_columns:\n if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns):\n data_columns.append(spark_column)\n return self.spark_frame.select(index_spark_columns + data_columns)\n\n @lazy_property\n def to_pandas_frame(self) -> pd.DataFrame:\n \"\"\"Return as pandas DataFrame.\"\"\"\n sdf = self.to_internal_spark_frame\n pdf = sdf.toPandas()\n if len(pdf) == 0 and len(sdf.schema) > 0:\n pdf = pdf.astype(\n {field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}\n )\n\n return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)\n\n @lazy_property\n def arguments_for_restore_index(self) -> Dict:\n \"\"\"Create arguments for `restore_index`.\"\"\"\n column_names = []\n fields = self.index_fields.copy()\n\n for spark_column, column_name, field in zip(\n self.data_spark_columns, self.data_spark_column_names, self.data_fields\n ):\n for index_spark_column_name, index_spark_column in zip(\n self.index_spark_column_names, self.index_spark_columns\n ):\n if spark_column_equals(spark_column, index_spark_column):\n column_names.append(index_spark_column_name)\n break\n else:\n column_names.append(column_name)\n fields.append(field)\n\n return dict(\n index_columns=self.index_spark_column_names,\n index_names=self.index_names,\n data_columns=column_names,\n column_labels=self.column_labels,\n column_label_names=self.column_label_names,\n fields=fields,\n )\n\n @staticmethod\n def restore_index(\n pdf: pd.DataFrame,\n *,\n index_columns: List[str],\n index_names: List[Label],\n data_columns: List[str],\n column_labels: List[Label],\n column_label_names: List[Label],\n fields: List[InternalField] = None,\n ) -> pd.DataFrame:\n \"\"\"\n Restore pandas DataFrame indices using the metadata.\n\n :param pdf: the pandas DataFrame to be processed.\n :param index_columns: the original column names for index columns.\n :param index_names: the index names after restored.\n :param data_columns: the original column names for data columns.\n :param column_labels: the column labels after restored.\n :param column_label_names: the column label names after restored.\n :param fields: the fields after restored.\n :return: the restored pandas DataFrame\n\n >>> from numpy import dtype\n >>> pdf = pd.DataFrame({\"index\": [10, 20, 30], \"a\": ['a', 'b', 'c'], \"b\": [0, 2, 1]})\n >>> InternalFrame.restore_index(\n ... pdf,\n ... index_columns=[\"index\"],\n ... index_names=[(\"idx\",)],\n ... data_columns=[\"a\", \"b\", \"index\"],\n ... column_labels=[(\"x\",), (\"y\",), (\"z\",)],\n ... column_label_names=[(\"lv1\",)],\n ... fields=[\n ... InternalField(\n ... dtype=dtype('int64'),\n ... struct_field=StructField(name='index', dataType=LongType(), nullable=False),\n ... ),\n ... InternalField(\n ... dtype=dtype('object'),\n ... struct_field=StructField(name='a', dataType=StringType(), nullable=False),\n ... ),\n ... InternalField(\n ... dtype=CategoricalDtype(categories=[\"i\", \"j\", \"k\"]),\n ... struct_field=StructField(name='b', dataType=LongType(), nullable=False),\n ... ),\n ... ],\n ... 
) # doctest: +NORMALIZE_WHITESPACE\n lv1 x y z\n idx\n 10 a i 10\n 20 b k 20\n 30 c j 30\n \"\"\"\n for col, field in zip(pdf.columns, fields):\n pdf[col] = DataTypeOps(field.dtype, field.spark_type).restore(pdf[col])\n\n append = False\n for index_field in index_columns:\n drop = index_field not in data_columns\n pdf = pdf.set_index(index_field, drop=drop, append=append)\n append = True\n pdf = pdf[data_columns]\n\n pdf.index.names = [\n name if name is None or len(name) > 1 else name[0] for name in index_names\n ]\n\n names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]\n if len(column_label_names) > 1:\n pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)\n else:\n pdf.columns = pd.Index(\n [None if label is None else label[0] for label in column_labels],\n name=names[0],\n )\n\n return pdf\n\n @lazy_property\n def resolved_copy(self) -> \"InternalFrame\":\n \"\"\"Copy the immutable InternalFrame with the updates resolved.\"\"\"\n sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))\n return self.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],\n data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],\n )\n\n def with_new_sdf(\n self,\n spark_frame: SparkDataFrame,\n *,\n index_fields: Optional[List[InternalField]] = None,\n data_columns: Optional[List[str]] = None,\n data_fields: Optional[List[InternalField]] = None,\n ) -> \"InternalFrame\":\n \"\"\"Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.\n\n :param spark_frame: the new Spark DataFrame\n :param index_fields: the new InternalFields for the index columns.\n If None, the original dtyeps are used.\n :param data_columns: the new column names. 
If None, the original one is used.\n :param data_fields: the new InternalFields for the data columns.\n If None, the original dtyeps are used.\n :return: the copied InternalFrame.\n \"\"\"\n if index_fields is None:\n index_fields = self.index_fields\n else:\n assert len(index_fields) == len(self.index_fields), (\n len(index_fields),\n len(self.index_fields),\n )\n\n if data_columns is None:\n data_columns = self.data_spark_column_names\n else:\n assert len(data_columns) == len(self.column_labels), (\n len(data_columns),\n len(self.column_labels),\n )\n\n if data_fields is None:\n data_fields = self.data_fields\n else:\n assert len(data_fields) == len(self.column_labels), (\n len(data_fields),\n len(self.column_labels),\n )\n\n sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)\n return self.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],\n index_fields=index_fields,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n data_fields=data_fields,\n )\n\n def with_new_columns(\n self,\n scols_or_pssers: Sequence[Union[Column, \"Series\"]],\n *,\n column_labels: Optional[List[Label]] = None,\n data_fields: Optional[List[InternalField]] = None,\n column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,\n keep_order: bool = True,\n ) -> \"InternalFrame\":\n \"\"\"\n Copy the immutable InternalFrame with the updates by the specified Spark Columns or Series.\n\n :param scols_or_pssers: the new Spark Columns or Series.\n :param column_labels: the new column index.\n If None, the column_labels of the corresponding `scols_or_pssers` is used if it is\n Series; otherwise the original one is used.\n :param data_fields: the new InternalFields for the data columns.\n If None, the dtypes of the corresponding `scols_or_pssers` is used if it is Series;\n otherwise the dtypes will be inferred from the corresponding `scols_or_pssers`.\n :param column_label_names: the new names of the column index levels.\n :return: the copied InternalFrame.\n \"\"\"\n from pyspark.pandas.series import Series\n\n if column_labels is None:\n if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):\n column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]\n else:\n assert len(scols_or_pssers) == len(self.column_labels), (\n len(scols_or_pssers),\n len(self.column_labels),\n )\n column_labels = []\n for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):\n if isinstance(scol_or_psser, Series):\n column_labels.append(scol_or_psser._column_label)\n else:\n column_labels.append(label)\n else:\n assert len(scols_or_pssers) == len(column_labels), (\n len(scols_or_pssers),\n len(column_labels),\n )\n\n data_spark_columns = []\n for scol_or_psser in scols_or_pssers:\n if isinstance(scol_or_psser, Series):\n scol = scol_or_psser.spark.column\n else:\n scol = scol_or_psser\n data_spark_columns.append(scol)\n\n if data_fields is None:\n data_fields = []\n for scol_or_psser in scols_or_pssers:\n if isinstance(scol_or_psser, Series):\n data_fields.append(scol_or_psser._internal.data_fields[0])\n else:\n data_fields.append(None)\n else:\n assert len(scols_or_pssers) == len(data_fields), (\n len(scols_or_pssers),\n len(data_fields),\n )\n\n sdf = self.spark_frame\n if not keep_order:\n sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)\n index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]\n data_spark_columns = [\n 
scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns\n ]\n else:\n index_spark_columns = self.index_spark_columns\n\n if column_label_names is _NoValue:\n column_label_names = self._column_label_names\n\n return self.copy(\n spark_frame=sdf,\n index_spark_columns=index_spark_columns,\n column_labels=column_labels,\n data_spark_columns=data_spark_columns,\n data_fields=data_fields,\n column_label_names=column_label_names,\n )\n\n def with_filter(self, pred: Union[Column, \"Series\"]) -> \"InternalFrame\":\n \"\"\"\n Copy the immutable InternalFrame with the updates by the predicate.\n\n :param pred: the predicate to filter.\n :return: the copied InternalFrame.\n \"\"\"\n from pyspark.pandas.series import Series\n\n if isinstance(pred, Series):\n assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type\n condition = pred.spark.column\n else:\n condition = pred\n spark_type = self.spark_frame.select(condition).schema[0].dataType\n assert isinstance(spark_type, BooleanType), spark_type\n\n return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))\n\n def with_new_spark_column(\n self,\n column_label: Label,\n scol: Column,\n *,\n field: Optional[InternalField] = None,\n keep_order: bool = True,\n ) -> \"InternalFrame\":\n \"\"\"\n Copy the immutable InternalFrame with the updates by the specified Spark Column.\n\n :param column_label: the column label to be updated.\n :param scol: the new Spark Column\n :param field: the new InternalField for the data column.\n If not specified, the InternalField will be inferred from the spark Column.\n :return: the copied InternalFrame.\n \"\"\"\n assert column_label in self.column_labels, column_label\n\n idx = self.column_labels.index(column_label)\n data_spark_columns = self.data_spark_columns.copy()\n data_spark_columns[idx] = scol\n data_fields = self.data_fields.copy()\n data_fields[idx] = field\n return self.with_new_columns(\n data_spark_columns, data_fields=data_fields, keep_order=keep_order\n )\n\n def select_column(self, column_label: Label) -> \"InternalFrame\":\n \"\"\"\n Copy the immutable InternalFrame with the specified column.\n\n :param column_label: the column label to use.\n :return: the copied InternalFrame.\n \"\"\"\n assert column_label in self.column_labels, column_label\n\n return self.copy(\n column_labels=[column_label],\n data_spark_columns=[self.spark_column_for(column_label)],\n data_fields=[self.field_for(column_label)],\n column_label_names=None,\n )\n\n def copy(\n self,\n *,\n spark_frame: Union[SparkDataFrame, _NoValueType] = _NoValue,\n index_spark_columns: Union[List[Column], _NoValueType] = _NoValue,\n index_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,\n index_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,\n column_labels: Union[Optional[List[Label]], _NoValueType] = _NoValue,\n data_spark_columns: Union[Optional[List[Column]], _NoValueType] = _NoValue,\n data_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,\n column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,\n ) -> \"InternalFrame\":\n \"\"\"\n Copy the immutable InternalFrame.\n\n :param spark_frame: the new Spark DataFrame. If not specified, the original one is used.\n :param index_spark_columns: the list of Spark Column.\n If not specified, the original ones are used.\n :param index_names: the index names. 
If not specified, the original ones are used.\n :param index_fields: the new InternalFields for the index columns.\n If not specified, the original metadata are used.\n :param column_labels: the new column labels. If not specified, the original ones are used.\n :param data_spark_columns: the new Spark Columns.\n If not specified, the original ones are used.\n :param data_fields: the new InternalFields for the data columns.\n If not specified, the original metadata are used.\n :param column_label_names: the new names of the column index levels.\n If not specified, the original ones are used.\n :return: the copied immutable InternalFrame.\n \"\"\"\n if spark_frame is _NoValue:\n spark_frame = self.spark_frame\n if index_spark_columns is _NoValue:\n index_spark_columns = self.index_spark_columns\n if index_names is _NoValue:\n index_names = self.index_names\n if index_fields is _NoValue:\n index_fields = self.index_fields\n if column_labels is _NoValue:\n column_labels = self.column_labels\n if data_spark_columns is _NoValue:\n data_spark_columns = self.data_spark_columns\n if data_fields is _NoValue:\n data_fields = self.data_fields\n if column_label_names is _NoValue:\n column_label_names = self.column_label_names\n return InternalFrame(\n spark_frame=cast(SparkDataFrame, spark_frame),\n index_spark_columns=cast(List[Column], index_spark_columns),\n index_names=cast(Optional[List[Optional[Label]]], index_names),\n index_fields=cast(Optional[List[InternalField]], index_fields),\n column_labels=cast(Optional[List[Label]], column_labels),\n data_spark_columns=cast(Optional[List[Column]], data_spark_columns),\n data_fields=cast(Optional[List[InternalField]], data_fields),\n column_label_names=cast(Optional[List[Optional[Label]]], column_label_names),\n )\n\n @staticmethod\n def from_pandas(pdf: pd.DataFrame) -> \"InternalFrame\":\n \"\"\"Create an immutable DataFrame from pandas DataFrame.\n\n :param pdf: :class:`pd.DataFrame`\n :return: the created immutable DataFrame\n \"\"\"\n\n index_names: List[Optional[Label]] = [\n name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names\n ]\n\n columns = pdf.columns\n column_labels: List[Label]\n if isinstance(columns, pd.MultiIndex):\n column_labels = columns.tolist()\n else:\n column_labels = [(col,) for col in columns]\n\n column_label_names: List[Optional[Label]] = [\n name if name is None or isinstance(name, tuple) else (name,) for name in columns.names\n ]\n\n prefer_timestamp_ntz = is_timestamp_ntz_preferred()\n\n (\n pdf,\n index_columns,\n index_fields,\n data_columns,\n data_fields,\n ) = InternalFrame.prepare_pandas_frame(pdf, prefer_timestamp_ntz=prefer_timestamp_ntz)\n\n schema = StructType([field.struct_field for field in index_fields + data_fields])\n\n sdf = default_session().createDataFrame(pdf, schema=schema)\n return InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_columns],\n index_names=index_names,\n index_fields=index_fields,\n column_labels=column_labels,\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n data_fields=data_fields,\n column_label_names=column_label_names,\n )\n\n @staticmethod\n def prepare_pandas_frame(\n pdf: pd.DataFrame, *, retain_index: bool = True, prefer_timestamp_ntz: bool = False\n ) -> Tuple[pd.DataFrame, List[str], List[InternalField], List[str], List[InternalField]]:\n \"\"\"\n Prepare pandas DataFrame for creating Spark DataFrame.\n\n :param pdf: the pandas DataFrame to be prepared.\n :param retain_index: 
whether the indices should be retained.\n :return: the tuple of\n - the prepared pandas dataFrame\n - index column names for Spark DataFrame\n - the InternalFields for the index columns of the given pandas DataFrame\n - data column names for Spark DataFrame\n - the InternalFields for the data columns of the given pandas DataFrame\n\n >>> pdf = pd.DataFrame(\n ... {(\"x\", \"a\"): ['a', 'b', 'c'],\n ... (\"y\", \"b\"): pd.Categorical([\"i\", \"k\", \"j\"], categories=[\"i\", \"j\", \"k\"])},\n ... index=[10, 20, 30])\n >>> prepared, index_columns, index_fields, data_columns, data_fields = (\n ... InternalFrame.prepare_pandas_frame(pdf)\n ... )\n >>> prepared\n __index_level_0__ (x, a) (y, b)\n 0 10 a 0\n 1 20 b 2\n 2 30 c 1\n >>> index_columns\n ['__index_level_0__']\n >>> index_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=int64, struct_field=StructField('__index_level_0__',\n LongType(), False))]\n >>> data_columns\n ['(x, a)', '(y, b)']\n >>> data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=object, struct_field=StructField('(x, a)', StringType(), False)),\n InternalField(dtype=category, struct_field=StructField('(y, b)', ByteType(), False))]\n\n >>> import datetime\n >>> pdf = pd.DataFrame({\n ... \"dt\": [datetime.datetime(1970, 1, 1)], \"dt_obj\": [datetime.datetime(1970, 1, 1)]\n ... })\n >>> pdf.dt_obj = pdf.dt_obj.astype(\"object\")\n >>> _, _, _, _, data_fields = (\n ... InternalFrame.prepare_pandas_frame(pdf, prefer_timestamp_ntz=True)\n ... )\n >>> data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=datetime64[ns],\n struct_field=StructField('dt', TimestampNTZType(), False)),\n InternalField(dtype=object,\n struct_field=StructField('dt_obj', TimestampNTZType(), False))]\n\n >>> pdf = pd.DataFrame({\n ... \"td\": [datetime.timedelta(0)], \"td_obj\": [datetime.timedelta(0)]\n ... })\n >>> pdf.td_obj = pdf.td_obj.astype(\"object\")\n >>> _, _, _, _, data_fields = (\n ... InternalFrame.prepare_pandas_frame(pdf)\n ... 
)\n >>> data_fields # doctest: +NORMALIZE_WHITESPACE\n [InternalField(dtype=timedelta64[ns],\n struct_field=StructField('td', DayTimeIntervalType(0, 3), False)),\n InternalField(dtype=object,\n struct_field=StructField('td_obj', DayTimeIntervalType(0, 3), False))]\n \"\"\"\n pdf = pdf.copy()\n\n data_columns = [name_like_string(col) for col in pdf.columns]\n pdf.columns = data_columns\n\n if retain_index:\n index_nlevels = pdf.index.nlevels\n index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]\n pdf.index.names = index_columns\n reset_index = pdf.reset_index()\n else:\n index_nlevels = 0\n index_columns = []\n reset_index = pdf\n\n index_dtypes = list(reset_index.dtypes)[:index_nlevels]\n data_dtypes = list(reset_index.dtypes)[index_nlevels:]\n\n for col, dtype in zip(reset_index.columns, reset_index.dtypes):\n spark_type = infer_pd_series_spark_type(reset_index[col], dtype, prefer_timestamp_ntz)\n reset_index[col] = DataTypeOps(dtype, spark_type).prepare(reset_index[col])\n\n fields = [\n InternalField(\n dtype=dtype,\n struct_field=StructField(\n name=str(name),\n dataType=infer_pd_series_spark_type(col, dtype, prefer_timestamp_ntz),\n nullable=bool(col.isnull().any()),\n ),\n )\n for (name, col), dtype in zip(reset_index.iteritems(), index_dtypes + data_dtypes)\n ]\n\n return (\n reset_index,\n index_columns,\n fields[:index_nlevels],\n data_columns,\n fields[index_nlevels:],\n )\n\n\ndef _test() -> None:\n import os\n import doctest\n import sys\n from pyspark.sql import SparkSession\n import pyspark.pandas.internal\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.internal.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.internal tests\")\n .getOrCreate()\n )\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.internal,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"pandas.Index",
"pandas.MultiIndex.from_tuples",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
akhilagrawal1001/model-zoo | [
"984ee5091b2835c9555b2cea2745ce36229efa92",
"984ee5091b2835c9555b2cea2745ce36229efa92"
] | [
"multimodal_models/ShowAndTellTensorflow/data_process.py",
"object_detection/SSD300_VGG16_TensorFlow/iou.py"
] | [
"from tensorflow.keras.preprocessing.text import Tokenizer\nimport numpy as np\n\n\n# Loading all the captions data into a dictionary with the image_name as key\n# Enter the path of \"Flickr8k.token.txt\" present in Dataset in \"caps_path\" variable\ncaps_path = \"../input/flickr8k/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt\"\nf = open(caps_path, 'r')\ncaps = f.read()\n\n# For easy preprocessing of data, let us store it in a dictionary\ncaptions_dict = dict()\n\nfor line in caps.split('\\n'):\n txt = line.split(\" \")\n img_name = txt[0].split('#')[0]\n\n if img_name not in captions_dict.keys():\n captions_dict[img_name] = list()\n\n # Appending the start and end tokens in the captions while loading them\n captions_dict[img_name].append(\"startseq \" + \" \".join(txt[1:]) + \" endseq\")\n\n\n# Enter the path of file \"Flickr_8k.trainImages.txt\" present in Dataset, in \"train_image_names_path\" variable\ntrain_image_names_path = \"../input/flickr8k/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt\"\ng = open(train_image_names_path, 'r')\ntrain_image_names = g.read()\ntrain_image_names = train_image_names.split('\\n')\n\ntrain_image_names.remove('')\n\n# Store the captions of training images in a different list\ntrain_caps = []\nfor img in train_image_names:\n train_caps.append(captions_dict[img])\n\n# Since each image has 5 captions, to get a 1 D array of captions, flattening is required\ntrain_flat = [cap for caps in train_caps for cap in caps]\n\n# Converting the text data into sequence data, which can be padded and fed to neural network\ntokenizer = Tokenizer(num_words=5000,\n oov_token=\"<unk>\",\n filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~ ')\ntokenizer.fit_on_texts(train_flat)\nword_index = tokenizer.word_index\nindex_word = dict((word_index[k], k) for k in word_index.keys())\n\ntrain_tokens = [tokenizer.texts_to_sequences(caps) for caps in train_caps]\n\n\n# This is a custom function that picks a caption at random out 5, for any given image index\ndef one_of_five_caps(temp):\n caps1 = []\n for x in temp:\n y = np.random.choice(5)\n caps1.append(train_tokens[x][y])\n return caps1\n",
"import numpy as np\n\n# function to evaluate IOU between two boxes\ndef iou(bbox1, bbox2):\n\n # shape of both tensor is (num_box, 4) \n # value in format (xmin, ymin, xmax, ymax)\n \n xmin_inter = np.maximum(bbox1[..., 0], bbox2[..., 0])\n ymin_inter = np.maximum(bbox1[..., 1], bbox2[..., 1])\n\n xmax_inter = np.minimum(bbox1[..., 2], bbox2[..., 2])\n ymax_inter = np.minimum(bbox1[..., 3], bbox2[..., 3])\n\n inter = (xmax_inter - xmin_inter) * (ymax_inter - ymin_inter)\n bb1_ar = (bbox1[..., 2] - bbox1[..., 0]) * (bbox1[..., 3] - bbox1[..., 1])\n bb2_ar = (bbox2[..., 2] - bbox2[..., 0]) * (bbox2[..., 3] - bbox2[..., 1])\n union_ar = bb1_ar + bb2_ar - inter\n\n iou_res = inter/union_ar\n\n iou_res[xmax_inter < xmin_inter] = 0\n iou_res[ymax_inter < ymin_inter] = 0\n iou_res[iou_res < 0] = 0\n iou_res[iou_res > 1] = 0\n\n return iou_res"
] | [
[
"tensorflow.keras.preprocessing.text.Tokenizer",
"numpy.random.choice"
],
[
"numpy.maximum",
"numpy.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
damazz/HQCA | [
"b013ba68f86e42350913c4abc2e1c91695a429b7",
"b013ba68f86e42350913c4abc2e1c91695a429b7",
"b013ba68f86e42350913c4abc2e1c91695a429b7",
"b013ba68f86e42350913c4abc2e1c91695a429b7",
"b013ba68f86e42350913c4abc2e1c91695a429b7"
] | [
"hqca/tomography/__constant_project.py",
"examples/r2021_arxiv_qcase_benzyne/_instruct_22.py",
"examples/r2021_pra_tomography/02_pra_example_2.py",
"hqca/acse/_qubit_A.py",
"hqca/transforms/_functions.py"
] | [
"'''\nfrom a given operator, looking to project the operator into the constant number\nspace\n'''\nfrom hqca.tools import *\nimport numpy as np\nimport sys\nfrom copy import deepcopy as copy\nfrom hqca.operators import *\nimport scipy as sp\n\nclass ConstantNumberProjection:\n def __init__(self,op,transform,verbose=False):\n '''\n takes a fermionic operator, and performs the following.\n\n First, determine the order of E and C within the operators. The set E\n has the elements + and -, and the set C has p and h.\n\n Then, we look at elements of the operator in the Pauli basis, projected\n onto the constant dimension space, which has dimension of the order of\n E.\n\n Using this, we can look for a potentially minimal representation if\n there is over redundancy in the basis set. Note we also have to order\n the list of operators that is generated? (Maybe).\n '''\n if isinstance(op,type(QuantumString())):\n E,C=[],[]\n for n,i in enumerate(op.s):\n if i in ['+','-']:\n E.append(n)\n elif i in ['p','h']:\n C.append(n)\n nE = len(E)\n nC = len(C)\n op = ''.join(E)\n perm = Recursive(choices=E)\n perm.choose()\n perm.simplify()\n dimNe = len(perm.total)\n\n elif isinstance(op,type(Operator())):\n first = copy(op[0])\n newop1 = FermiString(coeff=1,\n indices=[0,4,7,3],\n ops='++--',\n N=8)\n newop2 = FermiString(coeff=1,\n indices=[3,7,4,0],\n ops='++--',\n N=8)\n #if first==newop1:\n # print(first)\n #elif first==newop2:\n # sys.exit('Found it! 2')\n #print(op)\n ops = []\n for j in op:\n E,C,e = [],[],[]\n for n,i in enumerate(j.s):\n if i in ['+','-']:\n E.append(i)\n elif i in ['p','h']:\n C.append(i)\n ops.append([E,C])\n if len(ops[0][0])==0:\n self.qubOp = op.transform(transform)\n else:\n print('here!')\n perm.simplify()\n dimNe = len(perm.total)\n dimCe = 2**(len(ops[0][1]))\n def bin_to_ph(binary):\n ret = ''\n for item in binary:\n if item=='0':\n ret+='p'\n else:\n ret+='h'\n return ret\n permC = [bin_to_ph(bin(i)[2:]) for i in range(dimCe)]\n new = op.transform(transform)\n if new.null():\n self.qubOp = new\n return None\n # \n # subroutine using first\n # getting Pauli basis \n # generators \n #\n temp = copy(first)\n ind = first.inds()\n ts = copy(temp.s)\n op_basis = {}\n dimNull = 0\n initial=False #propery, 'initialized'\n for j,p in enumerate(perm.total):\n for k,q in enumerate(permC):\n top = ''. join(first.ops())\n m,l=0,0\n for n in range(len(top)):\n if top[n] in ['+','-']:\n top = top[:n]+p[m]+top[n+1:]\n m+=1 \n elif top[n] in ['p','h']:\n top = top[:n]+q[l]+top[n+1:]\n l+=1 \n if not initial:\n new = FermiString(\n coeff=2**(len(first)),\n ops=top,\n indices=ind,\n N=first.N(),\n )\n new = (Operator()+ new).transform(transform)\n new = FermiString(\n coeff=len(new),\n ops=top,\n indices=ind,\n N=first.N(),\n )\n else:\n new = FermiString(\n coeff=len(pauli_basis.keys()),\n ops=top,\n indices=ind,\n N=first.N(),\n )\n #print(new)\n new = (Operator()+ new).transform(transform)\n if dimNe*dimCe>len(new):\n # i.e., \n self.qubOp = op.transform(transform)\n return None\n\n\n # check for null vectors? 
\n # i.e., because of transformation\n #print(new)\n if new.null():\n dimNull+=1 \n continue\n op_basis[top]=j*dimCe+k-dimNull\n if not initial:\n initial=True\n # if initial, then we generate basis and transformation\n # matrix pauli_to_op\n pauli_basis = {o.s:n for n,o in enumerate(new)}\n pauli_to_op = np.zeros(\n (\n dimNe*dimCe,\n len(pauli_basis.keys())),\n dtype=np.complex_)\n # now, expressing\n for pauli in new:\n pauli_to_op[\n dimCe*j+k-dimNull,\n pauli_basis[pauli.s]\n ]=pauli.c\n #if first==newop1:\n # print(dimCe,dimNe,dimNull)\n # print(pauli_basis)\n # print(pauli_to_op)\n\n # now, remove null\n pauli_to_op = pauli_to_op[:(dimCe*dimNe-dimNull),:]\n # not pauli to op is a.....something\n #print(pauli_to_op)\n #print(dimCe,dimNe )\n # now, look for a square matrix\n sq_pauli_to_op = np.zeros(\n (\n dimNe*dimCe-dimNull,\n dimNe*dimCe-dimNull),\n dtype=np.complex_)\n # optional sorting should be done here\n #print(dimNull)\n added = []\n done=False\n #print(pauli_to_op)\n error=False\n while not done:\n if len(added)==(dimNe*dimCe-dimNull):\n # then, we are done\n done=True\n break\n elif len(added)==0:\n sq_pauli_to_op[:,len(added)]=pauli_to_op[:,0]\n added.append(0)\n continue\n for i in range(added[-1],len(pauli_basis.keys())):\n if i in added:\n continue\n vec = pauli_to_op[:,i]\n use=True\n # check linear dependence through rank\n temp = copy(sq_pauli_to_op)\n temp[:,len(added)]=pauli_to_op[:,i]\n if not np.linalg.matrix_rank(temp.T)==len(added)+1:\n use=False\n continue\n if use:\n sq_pauli_to_op[:,len(added)]=vec[:]\n added.append(i)\n break\n else:\n continue\n if use:\n continue\n else:\n print('ran into something...?')\n print(sq_pauli_to_op)\n # if we reach the end of this iteration, then we are done :\n print('Could not find linearly independent basis.')\n done=True\n error=True\n if error:\n self.qubOp = op.transform(transform)\n print('Error generating operator: ')\n print(op)\n else:\n # now, express original operator as a vector in op basis\n v_f = np.zeros((dimNe*dimCe-dimNull,1))\n for fermi in op:\n v_f[op_basis[''.join(fermi.ops())]]=fermi.c\n x = np.linalg.solve(sq_pauli_to_op,v_f)\n #\n n_to_pauli = {v:k for k,v in pauli_basis.items()}\n final = Operator()\n for n,i in enumerate(added):\n if abs(x[n])>1e-10:\n final+= PauliString(n_to_pauli[i],x[n])\n self.qubOp= final\n #if first==newop1:\n # print(self.qubOp)\n else:\n sys.exit('Not implemented yet for operators.')\n\n\n\n\n\n\ndef sign(c):\n if abs(c.real)>1e-14:\n if c.real<0:\n return -1\n elif c.real>0:\n return 1\n elif abs(c.imag)>1e-14:\n if c.imag<0:\n return -1j\n elif c.imag>0:\n return 1j\n else:\n return 0\n\ndef trim(b):\n use = []\n for i in range(len(b)):\n if abs(b[i])>1e-10:\n use.append(i)\n return use\n\nclass SimplifyTwoBody:\n def __init__(self,\n indices,\n weight='default',\n **kw\n ):\n self.ind = indices\n self.w = weight\n self._sorting_procedure(**kw)\n if len(set(self.ind))==4:\n self._simplify_double_excitation(**kw)\n elif len(set(self.ind))==3:\n self._simplify_number_excitation(**kw)\n elif len(set(self.ind))==2:\n self._simplify_number_operator(**kw)\n\n def _sorting_procedure(self,\n criteria='pauli-weighting',\n key_list=[],\n **kw,\n ):\n self.crit = criteria\n if self.crit=='pauli-weighting':\n pass\n elif self.crit=='mc':\n # maximally commutative sets\n self.sort_crit = key_list\n\n def _commutative_paulis(self,A,B):\n k=0\n for i in range(len(A)):\n if not ((A[i]=='I' or B[i]=='I') or A[i]==B[i]):\n k+=1\n return k%2\n\n def _get_keys(self,x):\n if 
self.crit=='pauli-weighting':\n if type(self.w)==type('A'):\n return self.fermi_map[x]['weight'][self.w]\n elif type(self.w)==type((1,2)) or type(self.w)==type([1,2]):\n new = []\n for i in self.w:\n new.append(self.fermi_map[x]['weight'][i])\n return new\n elif self.crit=='mc':\n n=0\n for pauli in self.sort_crit:\n n+=self._commutative_paulis(x,pauli)\n return (n,self.fermi_map[x]['weight']['I'])\n\n def _simplify_number_operator(self,\n spin='aabb',\n mapping='jw',\n real=True,\n imag=False,\n Nq=0,\n **kw):\n '''\n Given p<q, generate tomography\n '''\n ops = ['ph','hp','pp','hh']\n self.key_ops = {j:i for i,j in zip(range(len(ops)),ops)}\n self.ops = ops\n pauli_map = {}\n for op in ops:\n new = FermionicOperator(\n coeff=1,\n indices=self.ind,\n sqOp=op,\n spin=spin)\n new.generateOperators(Nq,mapping=mapping,**kw)\n newop = new.formOperator()\n pauli_map[op]={}\n for item in newop.op:\n pauli_map[op][item.p]=sign(item.c)\n fermi_map = {}\n temp = {}\n for so,v in pauli_map.items():\n for p,c in v.items():\n try:\n temp[p].append(c)\n except Exception:\n temp[p]=[c]\n for k,v in temp.items():\n v = np.asarray(v)\n for k,v in temp.items():\n fermi_map[k] = {\n 'coeff':v,\n 'real':np.isreal(v[0]),\n 'imag':np.iscomplex(v[0]),\n 'weight':self._weights(k)\n }\n key_list = [k for k,v in fermi_map.items()]\n self.fermi_map = fermi_map\n if self.w=='default':\n self.w = []\n self.w = ['Z','X']\n self.kl = sorted(\n key_list,\n key=lambda x:self._get_keys(x),\n reverse=True)\n n = len(self.kl)\n lens = len(self.kl[0])\n try:\n done = False\n for l in range(0,n-3):\n if done:\n continue\n for i in range(l+1,n-2):\n if done:\n continue\n for j in range(i+1,n-1):\n if done:\n continue\n for k in range(j+1,n):\n v1 = self.fermi_map[self.kl[l]]['coeff']\n v2 = self.fermi_map[self.kl[i]]['coeff']\n v3 = self.fermi_map[self.kl[j]]['coeff']\n v4 = self.fermi_map[self.kl[k]]['coeff']\n mat = np.matrix([v1,v2,v3,v4])\n d = np.linalg.det(mat)\n if abs(d)>1e-10:\n c1,c2,c3,c4 = copy(l),copy(i),copy(j),copy(k)\n done = True\n break\n v1 = self.fermi_map[self.kl[c1]]['coeff']\n v2 = self.fermi_map[self.kl[c2]]['coeff']\n v3 = self.fermi_map[self.kl[c3]]['coeff']\n v4 = self.fermi_map[self.kl[c4]]['coeff']\n mat = np.matrix([v1,v2,v3,v4]).T\n r1 = np.array([1,0,0,0])\n r2 = np.array([0,1,0,0])\n r3 = np.array([0,0,1,0])\n r4 = np.array([0,0,0,1])\n a1 = np.linalg.solve(mat,r1)\n a2 = np.linalg.solve(mat,r2)\n a3 = np.linalg.solve(mat,r3)\n a4 = np.linalg.solve(mat,r4)\n inds = [c1,c2,c3,c4]\n self.real = {\n 'ph':[[self.kl[inds[i]],a1[i]] for i in trim(a1)],\n 'hp':[[self.kl[inds[i]],a2[i]] for i in trim(a2)],\n 'pp':[[self.kl[inds[i]],a3[i]] for i in trim(a3)],\n 'hh':[[self.kl[inds[i]],a4[i]] for i in trim(a4)],\n }\n self.imag = {}\n for op in ['pp','ph','hp','hh']:\n self.imag[op]=[['I'*lens,0]]\n except Exception as e:\n self.real = {}\n self.imag = {}\n for op in ops:\n new = FermionicOperator(\n coeff=1,\n indices=self.ind,\n sqOp=op,\n spin=spin)\n new.generateOperators(Nq,mapping=mapping,**kw)\n newop = new.formOperator()\n self.real[op]=[[o.p,o.c] for o in newop.op]\n self.imag[op]=[['I'*lens,0]]\n\n\n def _simplify_number_excitation(self,\n spin='aabb',\n real=True,\n imag=False,\n mapping='jw',\n Nq=0,\n **kw):\n self.real = {}\n self.imag = {}\n '''\n Given p<q<r\n '''\n ops = [\n ['+-h','+-p','-+h','-+p',],\n ['+h-','+p-','-h+','-p+',],\n ['h+-','p+-','h-+','p-+',],\n ]\n self.ops =ops\n for place in ops:\n pauli_map = {}\n self.key_ops = {j:i for i,j in zip(range(len(place)),place)}\n for op 
in place:\n new = FermionicOperator(\n coeff=1,\n indices=self.ind,\n sqOp=op,\n spin=spin)\n new.generateOperators(Nq,mapping=mapping,**kw)\n newop = new.formOperator()\n pauli_map[op]={}\n # \n # pauli map is a dict with keys: opeartor, val: paulis \n # basis of pauli opeartors\n # \n for item in newop.op:\n pauli_map[op][item.p]=sign(item.c)\n fermi_map = {}\n temp = {}\n for so,v in pauli_map.items():\n for p,c in v.items():\n try:\n temp[p].append(c)\n except Exception:\n temp[p]=[c]\n #\n # now, we want to move to the reverse: i.e., key: pauli\n # value: operator\n #\n for k,v in temp.items():\n v = np.asarray(v)\n for k,v in temp.items():\n fermi_map[k] = {\n 'coeff':v,\n 'real':np.isreal(v[0]),\n 'imag':np.iscomplex(v[0]),\n 'weight':self._weights(k)\n }\n # fermi_map just has dict with:\n # key:val, pauli:sqops\n key_list = [k for k,v in fermi_map.items()]\n self.fermi_map = fermi_map\n # sorting\n if self.w=='default':\n self.w = []\n self.w = ['Z','X']\n self.kl = sorted(\n key_list,\n key=lambda x:self._get_keys(x),\n reverse=True)\n ####### \n try:\n done=False\n n = len(self.kl)\n for i in range(0,n-1):\n if done or self.fermi_map[self.kl[i]]['imag']:\n continue\n for j in range(i+1,n):\n if done or self.fermi_map[self.kl[j]]['imag']:\n continue\n for k in range(0,n-1):\n if done or self.fermi_map[self.kl[k]]['real']:\n continue\n for l in range(k+1,n):\n if done or self.fermi_map[self.kl[l]]['real']:\n continue\n v1 = self.fermi_map[self.kl[i]]['coeff']\n v2 = self.fermi_map[self.kl[j]]['coeff']\n v3 = self.fermi_map[self.kl[k]]['coeff']\n v4 = self.fermi_map[self.kl[l]]['coeff']\n # note, each v is a vector of coefficients in\n # the op basis \n mat = np.matrix([v1,v2,v3,v4])\n # mat has each row being a different pauli, \n # and each col being the op\n d = np.linalg.det(mat)\n if abs(d)>1e-10:\n c1,c2,c3,c4 = copy(i),copy(j),copy(k),copy(l)\n done = True\n break\n v1 = self.fermi_map[self.kl[c1]]['coeff']\n v2 = self.fermi_map[self.kl[c2]]['coeff']\n v3 = self.fermi_map[self.kl[c3]]['coeff']\n v4 = self.fermi_map[self.kl[c4]]['coeff']\n mat = np.matrix([v1,v2,v3,v4]).T\n # now, mat is transposed, and so has the row being the op and \n # cols being the pauli\n # so, r1 is a vec in the op basis\n # and a1, or the solution, is a vec in the pauli basis \n r1 = np.array([0.5,0,-0.5,0])\n r2 = np.array([0,0.5,0,-0.5])\n i1 = np.array([0.5,0,0.5,0])\n i2 = np.array([0,0.5,0,0.5])\n a1 = np.linalg.solve(mat,r1)\n a2 = np.linalg.solve(mat,r2)\n b1 = np.linalg.solve(mat,i1)\n b2 = np.linalg.solve(mat,i2)\n inds = [c1,c2,c3,c4] # # c1 is....\n vRe = [a1,a2,-a1,-a2] # this is now...each row is a vector \n # in the pauli basis, giving the paulis to give that op\n vIm = [-b1,-b2,-b1,-b2] # # \n for n,op in enumerate(place):\n # iterating through place with n,\n # we look to find the real opeartors from vRe that are non\n # zero, i.e. 
the correct Pauli matrices\n self.real[op]=[\n [self.kl[inds[i]],vRe[n][i]\n ] for i in trim(vRe[n])]\n self.imag[op]=[\n [self.kl[inds[i]],vIm[n][i]\n ] for i in trim(vIm[n])]\n except Exception as e:\n mat = np.asmatrix(\n [self.fermi_map[v]['coeff'] for v in self.kl]).T\n inds = [0,1,2,3]\n\n r1 = np.array([0.5,0,-0.5,0])\n r2 = np.array([0,0.5,0,-0.5])\n i1 = np.array([0.5,0,0.5,0])\n i2 = np.array([0,0.5,0,0.5])\n #r1 = np.array([1,0,-1,0])\n #r2 = np.array([0,1,0,-1])\n #i1 = np.array([1,0,1,0])\n #i2 = np.array([0,1,0,1])\n #for vec in [r1,r2,i1,i2]:\n # try:\n # x,res,rank,s = np.linalg.lstsq(mat,vec)\n # except Exception:\n # sys.exit()\n # ans = np.linalg.solve(mat,vec)\n # print(x)\n # print(res)\n # print(rank)\n # print(s)\n a1,res1,rank1,s1 = np.linalg.lstsq(mat,r1)\n a2,res2,rank2,s2 = np.linalg.lstsq(mat,r2)\n b1,res3,rank3,s3 = np.linalg.lstsq(mat,i1)\n b2,res4,rank4,s4 = np.linalg.lstsq(mat,i2)\n vRe = [a1,a2,-a1,-a2] # #\n vIm = [-b1,-b2,-b1,-b2] # # \n lens = len(self.kl[0])\n for n,op in enumerate(place):\n # iterating through place with n,\n # we look to find the real opeartors from vRe that are non\n # zero, i.e. the correct Pauli matrices\n if len(trim(vRe[n]))==0:\n self.real[op]=[['I'*lens,0]]\n else:\n self.real[op]=[\n [self.kl[inds[i]],vRe[n][i]\n ] for i in trim(vRe[n])]\n if len(trim(vIm[n]))==0:\n self.imag[op]=[['I'*lens,0]]\n else:\n self.imag[op]=[\n [self.kl[inds[i]],vIm[n][i]\n ] for i in trim(vIm[n])]\n '''\n conjOp = {\n '+-h':'-+h',\n '+-p':'-+p',\n '-+h':'+-h',\n '-+p':'+-p',\n '+h-':'-h+',\n '+p-':'-p+',\n '-h+':'+h-',\n '-p+':'+p-',\n 'h+-':'h-+',\n 'p+-':'p-+',\n 'h-+':'h+-',\n 'p-+':'p+-',}\n '''\n '''\n for op in place:\n pass\n for op in place:\n new1 = FermionicOperator(\n coeff=0.5,\n indices=self.ind,\n sqOp=op,\n spin=spin)\n new1.generateOperators(Nq,mapping=mapping,**kw)\n print(new1)\n new = new1.formOperator()\n print(new)\n new2 = FermionicOperator(\n coeff=-0.5,\n indices=self.ind,\n sqOp=conjOp[op],\n spin=spin)\n new2.generateOperators(Nq,mapping=mapping,**kw)\n new += new2.formOperator()\n new.clean()\n if len(new.op)==0:\n self.real[op]=[[Nq*'I',0]]\n else:\n self.real[op]=[[o.p,o.c] for o in new.op]\n # now, imag\n new1 = FermionicOperator(\n coeff=0.5,\n indices=self.ind,\n sqOp=op,\n spin=spin)\n new1.generateOperators(Nq,mapping=mapping,**kw)\n new = new1.formOperator()\n new2 = FermionicOperator(\n coeff=0.5,\n indices=self.ind,\n sqOp=conjOp[op],\n spin=spin)\n new2.generateOperators(Nq,mapping=mapping,**kw)\n new += new2.formOperator()\n new.clean()\n if len(new.op)==0:\n self.imag[op]=[[Nq*'I',0]]\n else:\n self.imag[op]=[[o.p,o.c] for o in new.op]\n '''\n\n def _simplify_double_excitation(self,\n spin='aabb',\n real=True,\n imag=False,\n mapping='jw',\n Nq=0,\n **kw):\n '''\n given p<q<r<s\n '''\n ops = [\n '++--','+-+-','+--+',\n '--++','-+-+','-++-']\n self.key_ops = {j:i for i,j in zip(range(len(ops)),ops)}\n self.ops = ops\n pauli_map = {}\n cont = True\n for op in ops:\n # generate the operators \n new = FermionicOperator(\n coeff=1,\n indices=self.ind,\n sqOp=op,\n spin=spin,\n )\n new.generateOperators(Nq,mapping=mapping,**kw)\n newop = new.formOperator()\n pauli_map[op]={}\n if len(newop.op)<=6:\n cont = False\n for item in newop.op:\n pauli_map[op][item.p]=sign(item.c)\n fermi_map = {}\n temp = {}\n for so,v in pauli_map.items():\n #so, second quantized opeartor \n for p,c in v.items():\n if abs(c)==0:\n continue\n try:\n temp[p].append(c)\n except Exception:\n temp[p]=[c]\n for k,v in temp.items():\n v = 
np.asarray(v)\n for k,v in temp.items():\n fermi_map[k] = {\n 'coeff':v,\n 'real':np.isreal(v[0]),\n 'imag':np.iscomplex(v[0]),\n 'weight':self._weights(k)\n }\n key_list = [k for k,v in fermi_map.items()]\n self.fermi_map = fermi_map\n if self.w=='default':\n self.w = []\n self.w = ['Z','X']\n self.kl = sorted(\n key_list,\n key=lambda x:self._get_keys(x),\n reverse=True)\n if len(key_list)==0:\n self.real = {}\n self.imag = {}\n for op in ops:\n lens = len(list(pauli_map[op].keys())[0])\n self.real[op]=[['I'*lens,0]]\n self.imag[op]=[['I'*lens,0]]\n elif cont:\n self._standard_subproblem()\n else:\n self._degenerate_subproblem()\n\n def _degenerate_subproblem(self):\n ops = [\n '+-+-','+--+',\n '-+-+','-++-']\n if not len(self.kl)==4:\n sys.exit('Error in degenerate subproblem.')\n done=False\n n = len(self.kl)\n use = [self.kl[0]]\n # get reals\n done = False\n self.f = self.fermi_map\n v1 = self.fermi_map[self.kl[0]]['coeff']\n v2 = self.fermi_map[self.kl[1]]['coeff']\n v3 = self.fermi_map[self.kl[2]]['coeff']\n v4 = self.fermi_map[self.kl[3]]['coeff']\n #print(v1,v2,v3,v4)\n v1 = [i for i in v1 if abs(i)>0]\n v2 = [i for i in v2 if abs(i)>0]\n v3 = [i for i in v3 if abs(i)>0]\n v4 = [i for i in v4 if abs(i)>0]\n mat = np.matrix([v1,v2,v3,v4]).T\n #print(mat)\n r1 = np.array([0.5,0,0.5,0])\n r2 = np.array([0,0.5,0,0.5])\n i1 = np.array([0.5,0,-0.5,0])\n i2 = np.array([0,0.5,0,-0.5])\n a1 = np.linalg.solve(mat,r1)\n a2 = np.linalg.solve(mat,r2)\n b1 = np.linalg.solve(mat,i1)\n b2 = np.linalg.solve(mat,i2)\n inds = [0,1,2,3] # # c1 is....\n vRe = [a1,a2,a1,a2] # #\n vIm = [b1,+b2,-b1,-b2] # # \n self.real = {}\n self.imag = {}\n for n,op in enumerate(ops):\n self.real[op]=[\n [self.kl[inds[i]],vRe[n][i]\n ] for i in trim(vRe[n])]\n self.imag[op]=[\n [self.kl[inds[i]],vIm[n][i]\n ] for i in trim(vIm[n])]\n\n def _standard_subproblem(self):\n ops = [\n '++--','+-+-','+--+',\n '--++','-+-+','-++-']\n done=False\n n = len(self.kl)\n use = [self.kl[0]]\n # get reals\n done = False\n self.f = self.fermi_map\n for i in range(0,n-2):\n if self.fermi_map[self.kl[i]]['imag'] or done:\n continue\n for j in range(i+1,n-1):\n if self.fermi_map[self.kl[j]]['imag'] or done:\n continue\n for k in range(j+1,n):\n #print(i,j)\n if self.fermi_map[self.kl[k]]['imag'] or done:\n continue\n for p in range(0,n-2):\n if self.f[self.kl[p]]['real']:\n continue\n elif done:\n continue\n for q in range(p+1,n-1):\n if self.f[self.kl[q]]['real']:\n continue\n elif done:\n continue\n for r in range(q+1,n):\n if self.f[self.kl[r]]['real']:\n continue\n w1 = self.f[self.kl[i]]['coeff']\n w2 = self.f[self.kl[j]]['coeff']\n w3 = self.f[self.kl[k]]['coeff']\n w4 = self.f[self.kl[p]]['coeff']\n w5 = self.f[self.kl[q]]['coeff']\n w6 = self.f[self.kl[r]]['coeff']\n mat = np.matrix(\n [w1,w2,w3,w4,w5,w6],\n )\n d = np.linalg.det(mat)\n if abs(d)>1e-10:\n c1,c2,c3 = copy(i),copy(j),copy(k),\n c4,c5,c6 = copy(p),copy(q),copy(r)\n done = True\n break\n inds = [c1,c2,c3,c4,c5,c6]\n v1 = self.f[self.kl[c1]]['coeff']\n v2 = self.f[self.kl[c2]]['coeff']\n v3 = self.f[self.kl[c3]]['coeff']\n u1 = self.f[self.kl[c4]]['coeff']\n u2 = self.f[self.kl[c5]]['coeff']\n u3 = self.f[self.kl[c6]]['coeff']\n mat = np.matrix([v1,v2,v3,u1,u2,u3]).T\n r1 = np.array([0.5,0,0,+0.5,0,0])\n r2 = np.array([0,0.5,0,0,+0.5,0])\n r3 = np.array([0,0,0.5,0,0,+0.5])\n i1 = np.array([0.5,0,0,-0.5,0,0])\n i2 = np.array([0,0.5,0,0,-0.5,0])\n i3 = np.array([0,0,0.5,0,0,-0.5])\n a1 = np.linalg.solve(mat,r1)\n a2 = np.linalg.solve(mat,r2)\n a3 = 
np.linalg.solve(mat,r3)\n b1 = np.linalg.solve(mat,i1)\n b2 = np.linalg.solve(mat,i2)\n b3 = np.linalg.solve(mat,i3)\n #ops = [\n # '++--','+-+-','+--+',\n # '--++','-+-+','-++-']\n vRe = [a1,a2,a3,a1,a2,a3] # #\n vIm = [-b1,-b2,-b3,b1,b2,b3] # # \n self.real = {}\n self.imag = {}\n for n,op in enumerate(ops):\n self.real[op]=[\n [self.kl[inds[i]],vRe[n][i]\n ] for i in trim(vRe[n])]\n self.imag[op]=[\n [self.kl[inds[i]],vIm[n][i]\n ] for i in trim(vIm[n])]\n #self.real = {\n # '++--':[[self.kl[inds[i]],a1[i]] for i in trim(a1)],\n # '--++':[[self.kl[inds[i]],a1[i]] for i in trim(a1)],\n # '+-+-':[[self.kl[inds[i]],a2[i]] for i in trim(a2)],\n # '-+-+':[[self.kl[inds[i]],a2[i]] for i in trim(a2)],\n # '+--+':[[self.kl[inds[i]],a3[i]] for i in trim(a3)],\n # '-++-':[[self.kl[inds[i]],a3[i]] for i in trim(a3)],\n # }\n #self.imag = {\n # '++--':[[self.kl[inds[i]],+1*b1[i]] for i in trim(b1)],\n # '--++':[[self.kl[inds[i]],-1*b1[i]] for i in trim(b1)],\n # '+-+-':[[self.kl[inds[i]],+1*b2[i]] for i in trim(b2)],\n # '-+-+':[[self.kl[inds[i]],-1*b2[i]] for i in trim(b2)],\n # '+--+':[[self.kl[inds[i]],+1*b3[i]] for i in trim(b3)],\n # '-++-':[[self.kl[inds[i]],-1*b3[i]] for i in trim(b3)],\n # }\n\n\n\n def _weights(self,string):\n count = {'I':0,'X':0,'Y':0,'Z':0}\n for i in string:\n count[i]+=1\n return count\n\n\n\n",
"import numpy as np\nfrom hqca.core import *\nfrom hqca.core.primitives import *\nfrom hqca.tools import *\nimport sys\nfrom numpy import sin as sin\nfrom numpy import cos as cos\nfrom copy import deepcopy as copy\n\nclass ExpPauli:\n def __init__(self,vec):\n v = np.asmatrix(vec)\n if v.shape[0]>v.shape[1]:\n v = v.T\n if np.linalg.norm(v)==0:\n self.iden=True\n self.a = 0\n self.v = v\n else:\n self.iden=False\n self.a = np.linalg.norm(v)\n self.v = v/self.a\n \n def __mul__(self,w):\n if self.iden:\n return w\n if w.iden:\n return self\n cc = np.cos(self.a)*np.cos(w.a)\n cs = np.cos(self.a)*np.sin(w.a)\n sc = np.sin(self.a)*np.cos(w.a)\n ss = np.sin(self.a)*np.sin(w.a)\n c = np.arccos(cc-np.dot(self.v,w.v.T)*ss)\n k1 = self.v*sc\n k2 = w.v*cs\n k3 = -np.cross(self.v,w.v)*ss\n k = (1/np.sin(c))*(k1+k2+k3)\n return ExpPauli(c*k)\n\n def __str__(self):\n t = '||v||: {:.5f}, '.format(self.a)\n t+= 'nx: {:+.5f}, '.format(self.v[0,0])\n t+= 'ny: {:+.5f}, '.format(self.v[0,1])\n t+= 'nz: {:+.5f}'.format(self.v[0,2])\n return t\n\n def matrix(self):\n x = np.matrix([[0,1],[1,0]],dtype=np.complex_)\n y = np.matrix([[0,-1j],[1j,0]],dtype=np.complex_)\n z = np.matrix([[1,0],[0,-1]],dtype=np.complex_)\n nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]\n i = np.identity(2)\n if self.iden:\n return np.identity(2)\n return np.cos(self.a)*i + (x*nx+y*ny+z*nz)*1j*np.sin(self.a)\n\n def U3(self):\n if self.iden:\n return 0,0,0\n A = np.sin(self.a)**2\n nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]\n part = nx**2+ny**2\n vd = np.cos(self.a)+1j*nz*np.sin(self.a)\n vo = (1j*nx-ny)*np.sin(self.a)\n if abs(part-0)<=1e-10:\n theta= 0\n sigma = (1j*np.log(vd)).real\n delta= 0\n else:\n theta = 2*np.arcsin(np.sqrt((nx**2+ny**2)*A))\n aleph=-ny*np.sin(self.a)/np.sin(theta/2)\n beta = nx*np.sin(self.a)/np.sin(theta/2)\n delta = (-1j*np.log(vo/np.sin(theta/2))).real\n sigma = (1j*np.log(vd/np.cos(theta/2))).real\n return theta,sigma+delta,sigma-delta\n\nclass BenzyneInstruct(Instructions):\n '''\n type 1, 2 and 3\n '''\n def __init__(self,operator,\n Nq,\n propagate=False,\n HamiltonianOperator=[],\n scaleH=1,\n **kw):\n if not Nq==1:\n sys.exit('Did not 1 qubit in instructions...')\n para = np.array([0.0,0.0,0.0])\n expS = ExpPauli(para)\n for A in operator:\n para = np.array([0.0,0.0,0.0])\n for o in A:\n if o.s=='X':\n para[0]=np.imag(o.c)\n elif o.s=='Y':\n para[1]=np.imag(o.c)\n elif o.s=='Z':\n para[2]=np.imag(o.c)\n expS = ExpPauli(para)*expS\n #\n paraH = np.array([0.0,0.0,0.0])\n for o in HamiltonianOperator:\n if o.s=='X':\n paraH[0]= np.real(o.c)*scaleH\n elif o.s=='Y':\n paraH[1]=np.real(o.c)*scaleH\n elif o.s=='Z':\n paraH[2]=np.real(o.c)*scaleH\n expiH = ExpPauli(paraH)\n exp = expiH*expS\n self._gates = [\n [(exp,),self._U3]\n ]\n\n @property\n def gates(self):\n return self._gates\n\n @gates.setter\n def gates(self,a):\n self._gates = a\n\n def _U3(self,Q,exp):\n theta,phi,lamb = exp.U3()\n Q.U3(0,theta,phi,lamb)\n\n",
"'''\nSecond example calculation from:\n\n Smart, S. E., & Mazziotti, D. A. (2021). Lowering tomography costs in quantum simulation \n with a symmetry projected operator basis. Physical Review A, 103(1), 012420. \n https://doi.org/10.1103/PhysRevA.103.012420\n\nHere we are simuatling a noisy quantum system using a tunable noise model provided from an actual quantum device, and comparing the tomography of the 2-RDM under the default and symmetry projected techniques with the ideal 2-RDM.\n\n'''\n\nimport numpy as np\nimport sys\nfrom math import pi\nimport qiskit.providers.aer.noise as noise\nfrom noise_model.deconstruct import *\nfrom hqca.hamiltonian import *\nfrom hqca.instructions import *\nfrom hqca.processes import *\nfrom hqca.acse import *\nfrom hqca.core import *\nfrom hqca.core.primitives import *\nfrom pyscf import gto\nfrom hqca.transforms import *\nfrom functools import partial\nfrom hqca.tools import *\nfrom hqca.state_tomography import *\nnp.set_printoptions(precision=3)\nimport qiskit\n\nclass Ins(Instructions):\n def __init__(self,coeff):\n self._gates =[[(coeff,),self._test]]\n\n def _test(self,Q,coeff):\n Q.si(0)\n Q.Cx(1,0)\n Q.Cx(2,1)\n Q.Cx(3,2)\n Q.Rx(3,coeff[0])\n Q.Rx(1,coeff[1])\n Q.Cx(3,2)\n Q.Cx(2,1)\n Q.Cx(1,0)\n Q.Cx(3,2)\n Q.Ry(3,coeff[2])\n Q.Cx(3,2)\n Q.s(0)\n\n @property\n def gates(self):\n return self._gates\n\n @gates.setter\n def gates(self,a):\n self._gates = a\n\ndef split_matrix(rdm):\n N = rdm.rdm.shape[0]\n R = int(np.sqrt(N))\n nn = np.zeros(rdm.rdm.shape,dtype=np.complex_)\n ne = np.zeros(rdm.rdm.shape,dtype=np.complex_)\n ee = np.zeros(rdm.rdm.shape,dtype=np.complex_)\n for i in range(N):\n p,r = i//R,i%R\n for j in range(N):\n q,s = j//R,j%R\n ind = tuple([p,q,r,s])\n if len(set(ind))==2:\n nn[i,j]=rdm.rdm[i,j]\n elif len(set(ind))==3:\n ne[i,j]=rdm.rdm[i,j]\n elif len(set(ind))==4:\n ee[i,j]=rdm.rdm[i,j]\n return nn,ne,ee\n\nn = 0\n# generate mol object\nmol = gto.Mole()\nmol.atom=[['H',(0,0,0)],['H',(2.0,0,0)]]\nmol.basis='sto-3g'\nmol.spin=0\nmol.build()\nN = []\neig = []\nnorm = []\nham = MolecularHamiltonian(mol,transform=JordanWigner)\nst = StorageACSE(ham)\nqs = QuantumStorage()\nqs0 = QuantumStorage()\npr = StandardProcess()\nqs0.set_algorithm(st)\n\n# set Nq, number of shots, and error strength\n\nNq = 4\nNs = 8192\nerror = 0.0\n\n\n# qs0, ideal\n# qs, noisy simulated \nqs0.set_backend(\n backend='statevector_simulator',\n Nq=Nq,\n Nq_ancilla=0,\n num_shots=Ns,\n provider='Aer')\nqs.set_algorithm(st)\n# can specify provider='IBMQ' and an appropriate backend if desired\nqs.set_backend(\n backend='qasm_simulator',\n Nq=Nq,\n num_shots=Ns,\n provider='Aer')\nnm = model_v2(scaling=error,name='./noise_model/110220_ibmq_bogota')\nqs.set_noise_model(custom=True,\n noise_model=nm)\ntomo = []\ntomo_sim = []\ncoefficients = np.load('./noise_model/coefficients.npy')\n# runs the tomography in sets of 5...suited for particular constraints on quantum device access\n# but can be easily modified\nfor q in range(5):\n coeffs = coefficients[q*5:q*5+5,:]\n for coeff in coeffs:\n print(coeff)\n # run 1\n tomo0 = StandardTomography(qs0,verbose=False)\n tomo0.generate(real=True,imag=True,\n simplify=True,transform=JordanWigner,\n method='gt',strategy='lf')\n ins0 = Ins(coeff)\n tomo0.set(ins0)\n\n tomo1 = StandardTomography(qs,verbose=False)\n tomo1.generate(real=True,imag=True,\n simplify=True,transform=JordanWigner,\n method='gt',strategy='lf')\n ins = Ins(coeff)\n tomo1.set(ins)\n \n tomo2 = ReducedTomography(qs,verbose=False)\n 
tomo2.generate(real=True,imag=True,\n simplify=True,transform=JordanWigner,\n method='gt',strategy='lf')\n ins = Ins(coeff)\n tomo2.set(ins)\n tomo_sim.append(tomo0)\n tomo.append(tomo1)\n tomo.append(tomo2)\n run_multiple(tomo[q*10:(q*10+10)],qs)\n run_multiple(tomo_sim[q*5:(q*5+5)],qs0)\nfor item in tomo:\n print(item.counts['ZZZZ'])\nprint('Constructing..')\nfor t in tomo:\n t.construct(processor=pr)\nfor t in tomo_sim:\n t.construct(processor=pr)\nfor i in range(len(coefficients)):\n print(coefficients[i,:])\n tomo0 = tomo_sim[i]\n tomo1 = tomo[i*2]\n tomo2 = tomo[i*2+1]\n st.analysis(tomo0.rdm)\n st.analysis(tomo1.rdm)\n st.analysis(tomo2.rdm)\n tomo0.rdm.contract()\n tomo1.rdm.contract()\n tomo2.rdm.contract()\n e0 = np.linalg.eigvalsh(tomo0.rdm.rdm)\n e1 = np.linalg.eigvalsh(tomo1.rdm.rdm)\n e2 = np.linalg.eigvalsh(tomo2.rdm.rdm)\n d01 = tomo0.rdm-tomo1.rdm\n d02 = tomo0.rdm-tomo2.rdm\n d12 = tomo1.rdm-tomo2.rdm\n d01.contract()\n d12.contract()\n d02.contract()\n N01 = np.linalg.norm(d01.rdm,ord='fro') \n N02 = np.linalg.norm(d02.rdm,ord='fro')\n N12 = np.linalg.norm(d12.rdm,ord='fro')\n print('Difference D0-D1: {}'.format(N01))\n print('Difference D0-D2: {}'.format(N02))\n print('Difference D1-D2: {}'.format(N12))\n norm.append([N01,N02,N12])\n\nprint('--- --- --- --- --- ---')\nprint('Frombenius norm of D01, D02, and D12 for each run')\nnorm = np.asmatrix(norm)\nprint(norm)\nprint('--- --- --- --- --- ---')\nprint(' average (std dev)')\nfor i,l in zip(range(norm.shape[1]),['D01','D02','D12']):\n print('{}: {:.6f} {:.6f}'.format(l,np.average(norm[:,i]),np.std(norm[:,i])))\n\n\n",
"import numpy as np\nimport sys\nfrom hqca.tools import *\nfrom hqca.tomography import *\nfrom hqca.operators import *\nimport traceback\n\n'''\n/hqca/acse/_quant_S_acse.py\n\nWill generate elements of the A matrix according to the quantum solution. Requires tomography of the auxillary 2-RDM, aquired with an additional propagator sequence appended to the ansatz.\n'''\n\ndef findQubitAQuantum(\n operator=None,\n instruct=None,\n process=None,\n store=None,\n quantstore=None,\n verbose=False,\n S_min=1e-10,\n hamiltonian_step_size=1.0,\n depth=1,\n parallel=False,\n commutative=True,\n tomo=None,\n transform=None,\n matrix=False,\n **kw\n ):\n '''\n need to do following:\n 3. find S from resulting matrix\n\n '''\n if verbose:\n print('Generating new S pairs with Hamiltonian step.')\n newPsi = instruct(\n operator=operator,\n Nq=quantstore.Nq,\n quantstore=quantstore,\n propagate=True,\n HamiltonianOperator=store.H.qubit_operator,\n scaleH=hamiltonian_step_size,\n depth=depth,\n **kw\n )\n if type(tomo)==type(None):\n newCirc = QubitTomography(\n quantstore,\n verbose=verbose,\n )\n newCirc.generate(real=False,imag=True)\n else:\n newCirc = QubitTomography(\n quantstore,\n preset=True,\n Tomo=tomo,\n verbose=verbose,\n )\n newCirc.set(newPsi)\n hss = (1/hamiltonian_step_size)\n if verbose:\n print('Running circuits...')\n newCirc.simulate(verbose=verbose)\n if verbose:\n print('Constructing the RDMs...')\n if matrix:\n newCirc.construct(processor=process, compact=True)\n rdm = np.imag(newCirc.rdm) * hss\n return rdm\n else:\n newCirc.construct(processor=process)\n rdm = np.imag(newCirc.rdm.rdm) * hss\n\n #rdm = newCirc.rdm.rdm-store.rdm.rdm\n\n rdm = np.imag(newCirc.rdm.rdm)\n new = np.transpose(np.nonzero(rdm))\n if verbose:\n print('Elements of A from quantum generation: ')\n newF = Operator()\n for index in new:\n ind = tuple(index)\n val = rdm[ind]*hss\n if abs(val)>S_min:\n l = len(ind)\n sop = l//2*'+'+l//2*'-'\n newF+= QubitString(\n val,\n indices=list(ind),\n ops=sop,\n N=quantstore.dim,\n )\n #fullS = newF.transform(quantstore.transform)\n if verbose:\n print('Qubit A operator:')\n print(newF)\n return newF\n\n",
"from copy import deepcopy as copy\nimport sys\nfrom functools import partial\nimport numpy as np\nfrom hqca.operators import *\n\n\n\ndef trim_operator(ops,\n qubits,\n paulis,\n eigvals,\n null=0,\n ):\n new = Operator()\n if not qubits==sorted(qubits)[::-1]:\n sys.exit('Reorder your trimming operations to ensure qubit ordering.')\n for op in ops:\n s,c = op.s,op.c\n for q,p,e in zip(qubits,paulis,eigvals):\n if s[q]==p:\n c*= e\n elif s[q]=='I':\n pass\n else:\n c*=null\n s = s[:q]+s[q+1:]\n new+= PauliString(s,c,symbolic=op.sym)\n return new\n\ndef change_basis(op,\n U,\n Ut=None,\n **kw):\n if type(Ut)==type(None):\n Ut = copy(U)\n return (U*op)*Ut\n\ndef modify(ops,\n fermi,\n U,Ut,\n qubits,\n paulis,\n eigvals,\n initial=False):\n T = fermi(ops,initial=initial)\n # initialize\n T = change_basis(T,U,Ut)\n # apply Pauli change of basis\n T = trim_operator(T,\n qubits=qubits,\n paulis=paulis,\n null=int(initial),\n eigvals=eigvals)\n # perform trimming\n return T\n\ndef clifford(ops,\n fermi,\n U,\n **kw\n ):\n new = fermi(ops,**kw)\n return new.clifford(U)\n\ndef get_transform_from_symmetries(\n Transform,\n symmetries,\n qubits,\n eigvals,\n ):\n cTr = copy(Transform)\n for i in range(len(symmetries)):\n x = 'I'*len(symmetries[i])\n ind = qubits[i]\n x = x[:ind] + 'X' + x[ind+1:]\n op = Operator([\n PauliString(symmetries[i],1/np.sqrt(2)),\n PauliString(x,1/np.sqrt(2))\n ])\n cTr = partial(\n modify,\n fermi=copy(cTr),\n U=op,Ut=op,\n qubits=[qubits[i]],\n eigvals=[eigvals[i]],\n paulis=['X'],\n )\n ciTr = partial(cTr,initial=True)\n return cTr, ciTr\n\ndef parity_free(Na,Nb,paritya,parityb,transform):\n Z1 = 'Z'*(Na+Nb)\n Z2 = 'Z'*Na + 'I'*(Nb-1)\n Tr,iTr = get_transform_from_symmetries(\n transform,\n [Z1,Z2],\n [Na+Nb-1,Na-1],\n [paritya,parityb])\n return Tr,iTr\n\n'''\ndef find_initial_symmetries(fermi):\n # what is the quickest way......hrm. \n rho = Op([fermi])\n\ndef tapered_transform(Transform,hamiltonian,\n initial_state,\n verbose=False,\n ):\n\n print(stab)\n rho = initial_state\n cH = hamiltonian.transform(Transform)\n def _remaining_symmetries(stab):\n return len(cstab.null_basis)\n\n cstab = Stabilizer(cH,verbose=False)\n cstab.gaussian_elmination()\n cstab.find_symmetry_generators()\n cTr = copy(Transform)\n appended_symmetries = []\n while _remaining_symmetries(cstab)>0:\n for S in cstab.null_basis:\n # check compatibility with previous\n use = False\n if len(appended_symmetries)==0:\n appended_symmetries.append(S)\n else:\n for a in appended_symmetries:\n pass\n if use:\n cTr = partial(\n modify,\n fermi=copy(cTr),\n U=U,\n qubits=[]\n paulis=[]\n eigvals=[]\n cH = hamiltonian.tranform(cTr)\n cstab = Stabilizer(cH,verbose=False)\n cstab.gaussian_elmination()\n cstab.find_symmetry_generators()\n\n\n\n\n pass\n\n nTr= None\n iTr= None\n return nTr,iTr\n'''\n"
] | [
[
"numpy.matrix",
"numpy.linalg.solve",
"numpy.linalg.matrix_rank",
"numpy.asarray",
"numpy.asmatrix",
"numpy.linalg.lstsq",
"numpy.linalg.det",
"numpy.iscomplex",
"numpy.array",
"numpy.isreal",
"numpy.zeros"
],
[
"numpy.matrix",
"numpy.dot",
"numpy.log",
"numpy.imag",
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.asmatrix",
"numpy.real",
"numpy.identity",
"numpy.cross",
"numpy.array"
],
[
"numpy.sqrt",
"numpy.set_printoptions",
"numpy.linalg.norm",
"numpy.asmatrix",
"numpy.std",
"numpy.load",
"numpy.linalg.eigvalsh",
"numpy.average",
"numpy.zeros"
],
[
"numpy.imag",
"numpy.nonzero"
],
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
seeekr/lingvo | [
"3be069de2795cc6d12475bec60a1d96ba521ef16"
] | [
"lingvo/core/lr_schedule_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lr_schedule.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\n\nfrom six.moves import range\n\nimport tensorflow as tf\n\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import early_stop\nfrom lingvo.core import lr_schedule\n\n\nclass LearningRateScheduleTest(tf.test.TestCase):\n\n def testConstantOne(self):\n with self.session(use_gpu=False):\n p = lr_schedule.ConstantOne.Params()\n lrs = p.cls(p)\n for x in [0, 10, 100, 1000000]:\n self.assertAllClose(lrs.Value(x).eval(), 1.0)\n\n def testPiecewiseConstant(self):\n cls = lr_schedule.PiecewiseConstantLearningRateSchedule\n with self.session(use_gpu=False):\n bs = [300000, 400000, 500000]\n vs = [1.0, 0.1, 0.01, 0.001]\n x_ins = [tf.constant(x) for x in [299999, 399999, 499999, 599999]]\n outs = []\n for x in x_ins:\n lrs = cls(cls.Params().Set(boundaries=bs, values=vs))\n outs.append(lrs.Value(x).eval())\n self.assertAllClose([1.0, 0.1, 0.01, 0.001], outs)\n\n def testContinuousLearningRateSchedule(self):\n p = lr_schedule.ContinuousLearningRateSchedule.Params()\n p.start_step = 1000\n p.half_life_steps = 100\n p.min = 0.1\n decay = p.cls(p)\n with self.session():\n self.assertAllClose(decay.Value(0).eval(), 1.0)\n self.assertAllClose(decay.Value(500).eval(), 1.0)\n self.assertAllClose(decay.Value(1000).eval(), 1.0)\n self.assertAllClose(decay.Value(1100).eval(), 0.5)\n self.assertAllClose(decay.Value(1200).eval(), 0.25)\n self.assertAllClose(decay.Value(1300).eval(), 0.125)\n self.assertAllClose(decay.Value(1400).eval(), 0.1)\n self.assertAllClose(decay.Value(2000).eval(), 0.1)\n\n # Tests that the decay consistently decreases by half per 100\n # steps.\n for step in range(1000, 1200, 25):\n self.assertGreater(\n decay.Value(step).eval(),\n decay.Value(step + 10).eval())\n self.assertAllClose(\n decay.Value(step).eval(),\n decay.Value(step + 100).eval() * 2.)\n\n def testContinuousLearningRateSchedule_CanOverrideStart(self):\n p = lr_schedule.ContinuousLearningRateSchedule.Params()\n p.initial_value = 2.0\n p.start_step = 1000\n p.half_life_steps = 100\n decay = p.cls(p)\n with self.session():\n self.assertAllClose(decay.Value(0).eval(), 2.0)\n self.assertAllClose(decay.Value(1000).eval(), 2.0)\n self.assertAllClose(decay.Value(1100).eval(), 1.0)\n self.assertAllClose(decay.Value(1200).eval(), 0.5)\n self.assertAllClose(decay.Value(1300).eval(), 0.25)\n\n def testTransformerLearningRateSchedule(self):\n p = lr_schedule.TransformerLearningRateSchedule.Params()\n p.warmup_steps = 4000\n p.model_dim = 512\n lrs = p.cls(p)\n with self.session():\n print(lrs.Value(0).eval())\n print(lrs.Value(1000).eval())\n print(lrs.Value(2000).eval())\n print(lrs.Value(3000).eval())\n print(lrs.Value(4000).eval())\n 
print(lrs.Value(4500).eval())\n print(lrs.Value(5000).eval())\n self.assertAllClose(lrs.Value(0).eval(), 1.74693e-07)\n self.assertAllClose(lrs.Value(1000).eval(), 0.000174867)\n self.assertAllClose(lrs.Value(2000).eval(), 0.00034956)\n self.assertAllClose(lrs.Value(3000).eval(), 0.000524253)\n self.assertAllClose(lrs.Value(4000).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(4500).eval(), 0.000658735)\n self.assertAllClose(lrs.Value(5000).eval(), 0.000624937)\n # Tests that the schedule peaks at 4000 steps.\n self.assertGreater(lrs.Value(4000).eval(), lrs.Value(3990).eval())\n self.assertGreater(lrs.Value(4000).eval(), lrs.Value(4010).eval())\n\n # Tests that the schedule increases linearly before 4000 steps.\n for step in range(300, 4000, 200):\n self.assertAllClose(\n lrs.Value(step).eval() * 2.,\n lrs.Value(step + 10).eval() + lrs.Value(step - 10).eval())\n\n def testTransformerLearningRateScheduleWithDecayEnd(self):\n p = lr_schedule.TransformerLearningRateSchedule.Params()\n p.warmup_steps = 4000\n p.model_dim = 512\n p.decay_end = 5000\n lrs = p.cls(p)\n with self.session():\n self.assertAllClose(lrs.Value(0).eval(), 1.74693e-07)\n self.assertAllClose(lrs.Value(3000).eval(), 0.000524253)\n self.assertAllClose(lrs.Value(5000).eval(), 0.000624937)\n\n # Tests that the schedule peaks at 4000 steps.\n self.assertGreater(lrs.Value(4000).eval(), lrs.Value(3990).eval())\n self.assertGreater(lrs.Value(4000).eval(), lrs.Value(4010).eval())\n\n # Tests that the schedule increases linearly before 4000 steps.\n for step in range(300, 4000, 200):\n self.assertAllClose(\n lrs.Value(step).eval() * 2.,\n lrs.Value(step + 10).eval() + lrs.Value(step - 10).eval())\n\n print(lrs.Value(4999).eval())\n print(lrs.Value(5000).eval())\n print(lrs.Value(5001).eval())\n print(lrs.Value(6000).eval())\n # Tests that the schedule is fixed after decay end steps.\n self.assertGreater(lrs.Value(4999).eval(), lrs.Value(5000).eval())\n self.assertAllClose(lrs.Value(5000).eval(), lrs.Value(5001).eval())\n self.assertAllClose(lrs.Value(5000).eval(), lrs.Value(6000).eval())\n\n def testTransformerLearningRateScheduleNoWarmUp(self):\n params = lr_schedule.TransformerLearningRateScheduleNoWarmUp.Params().Set(\n decay_start=4000, model_dim=512)\n lrs = params.cls(params)\n\n base_params = lr_schedule.TransformerLearningRateSchedule.Params().Set(\n warmup_steps=4000, model_dim=512)\n base_lrs = base_params.cls(base_params)\n\n with self.session():\n\n # Tests that the schedule is flat up until 4000 steps.\n self.assertAllClose(lrs.Value(0).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(1000).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(2000).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(3000).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(4000).eval(), 0.000698684)\n self.assertAllClose(lrs.Value(4500).eval(), 0.000658735)\n self.assertAllClose(lrs.Value(5000).eval(), 0.000624937)\n\n # Test that the schedule is identical with transformer-lr after 4k steps\n self.assertAllClose(base_lrs.Value(4000).eval(), lrs.Value(4000).eval())\n self.assertAllClose(base_lrs.Value(4010).eval(), lrs.Value(4010).eval())\n self.assertAllClose(base_lrs.Value(5000).eval(), lrs.Value(5000).eval())\n\n def testPolynomialLRSchedule(self):\n p = lr_schedule.PolynomialLearningRateSchedule.Params().Set(\n power=2, start=(0, 0.), limit=(20000, 2.))\n with self.session():\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in [0, 10000, 20000]]\n self.assertAllClose(\n pts,\n [\n [0, 0.0],\n [10000, 0.5], # 2 * 
(0.5 ** 2)\n [20000, 2.0],\n ])\n\n def testCombinedLRSchedule(self):\n p = lr_schedule.CombinedMinimumLearningRateSchedule.Params().Set(schedules=[\n lr_schedule.LinearLearningRateSchedule.Params().Set(\n start=(0., 1.), limit=(2000000, 8.)),\n lr_schedule.LinearLearningRateSchedule.Params().Set(\n start=(2000000., 8.), limit=(4000000, 8.)),\n lr_schedule.ExponentialLearningRateSchedule.Params().Set(\n start=(4000000., 8.), limit=(8000000, 0.5))\n ])\n with self.session():\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 10000000, 1000000)]\n self.assertAllClose(\n pts,\n [\n # Linear increasing.\n [0, 1.0],\n [1000000, 4.5],\n # Constant\n [2000000, 8.0],\n [3000000, 8.0],\n # Exponentially decreasing.\n [4000000, 8.0],\n [5000000, 4.0],\n [6000000, 2.0],\n [7000000, 1.0],\n [8000000, 0.5],\n [9000000, 0.5]\n ])\n\n def testLinearRampupExponentialDecayScaledByNumSplitSchedule(self):\n p = lr_schedule.LinearRampupExponentialDecayScaledByNumSplitSchedule.Params(\n ).Set(\n warmup=250000, decay_start=32000000, decay_end=64000000, min=0.5)\n with self.session(), cluster_factory.ForTestingWorker(\n mode='sync', job='trainer_client', gpus=8):\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 10000000, 1000000)]\n self.assertAllClose(\n pts,\n [\n # Linear increasing.\n [0, 1.0],\n [1000000, 4.5],\n # Constant\n [2000000, 8.0],\n [3000000, 8.0],\n # Exponentially decreasing.\n [4000000, 8.0],\n [5000000, 4.0],\n [6000000, 2.0],\n [7000000, 1.0],\n [8000000, 0.5],\n [9000000, 0.5]\n ])\n\n def testLinearRampupExponentialDecayScaledByNumSplitScheduleWarmUpInit(self):\n p = lr_schedule.LinearRampupExponentialDecayScaledByNumSplitSchedule.Params(\n ).Set(\n warmup_init=0,\n warmup=250000,\n decay_start=32000000,\n decay_end=64000000,\n min=0.5)\n with self.session(), cluster_factory.ForTestingWorker(\n mode='sync', job='trainer_client', gpus=8):\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 10000000, 1000000)]\n self.assertAllClose(\n pts,\n [\n # Linear increasing from warmup_init=0.\n [0, 0],\n [1000000, 4.0],\n # Constant\n [2000000, 8.0],\n [3000000, 8.0],\n # Exponentially decreasing.\n [4000000, 8.0],\n [5000000, 4.0],\n [6000000, 2.0],\n [7000000, 1.0],\n [8000000, 0.5],\n [9000000, 0.5]\n ])\n\n def testLinearRampupExponentialDecayScaledByNumSplitScheduleWithCap(self):\n p = lr_schedule.LinearRampupExponentialDecayScaledByNumSplitSchedule.Params(\n ).Set(\n warmup=250000,\n decay_start=32000000,\n decay_end=64000000,\n min=0.5,\n max=5.0)\n with self.session(), cluster_factory.ForTestingWorker(\n mode='sync', job='trainer_client', gpus=8):\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 10000000, 1000000)]\n self.assertAllClose(\n pts,\n [\n # Linear increasing.\n [0, 1.0],\n [1000000, 4.5],\n # Constant\n [2000000, 5.0],\n [3000000, 5.0],\n # Exponentially decreasing.\n [4000000, 5.0],\n [5000000, 4.0],\n [6000000, 2.0],\n [7000000, 1.0],\n [8000000, 0.5],\n [9000000, 0.5]\n ])\n\n def testLinearRampupExponentialDecayScaledByNumSplitScheduleWithNumSplits(\n self):\n p = lr_schedule.LinearRampupExponentialDecayScaledByNumSplitSchedule.Params(\n ).Set(\n warmup=250000,\n decay_start=32000000,\n decay_end=64000000,\n min=0.5,\n max=5.0,\n num_splits=8)\n # Increases the number of splits to 32.\n with self.session(), cluster_factory.ForTestingWorker(\n mode='sync', job='trainer_client', gpus=8, split_size=4):\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 10000000, 1000000)]\n # Values are 
copied from\n # testLinearRampupExponentialDecayScaledByNumSplitScheduleWithCap.\n self.assertAllClose(\n pts,\n [\n # Linear increasing.\n [0, 1.0],\n [1000000, 4.5],\n # Constant\n [2000000, 5.0],\n [3000000, 5.0],\n # Exponentially decreasing.\n [4000000, 5.0],\n [5000000, 4.0],\n [6000000, 2.0],\n [7000000, 1.0],\n [8000000, 0.5],\n [9000000, 0.5]\n ])\n\n def testDevBasedSchedule(self):\n logdir = tf.test.get_temp_dir()\n tf.gfile.MkDir(os.path.join(logdir, 'eval_dev'))\n\n p = lr_schedule.DevBasedSchedule.Params()\n p.tolerance = 1.0\n p.window = 2\n p.decay = 0.5\n p.min_factor = 0.20\n early_stop.MetricHistory.SetLogdirInMetricHistories(p, logdir)\n\n lrs = p.cls(p)\n mh = lrs._metric_history\n mh.params.local_filesystem = True\n with self.session():\n tf.global_variables_initializer().run()\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 1, 10.0)\n # best = 1\n self.assertAllClose(lrs.Value(0).eval(), 1.0)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 2, 5.0)\n # best = 2\n self.assertAllClose(lrs.Value(0).eval(), 1.0)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 5, 4.0)\n # best = 2, out of window\n self.assertAllClose(lrs.Value(0).eval(), 0.5)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 6, 4.0)\n # best = 2, ref = 5, in window\n self.assertAllClose(lrs.Value(0).eval(), 0.5)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 9, 4.0)\n # best = 2, ref = 5, out of window\n self.assertAllClose(lrs.Value(0).eval(), 0.25)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 10, 3.9)\n # best = 10\n self.assertAllClose(lrs.Value(0).eval(), 0.25)\n\n mh.ConditionalAppend(mh.params.jobname, mh.params.metric, 13, 3.0)\n # best = 10, out of window, min factor\n self.assertAllClose(lrs.Value(0).eval(), 0.20)\n\n def testLinearRampupPiecewiseConstantSchedule(self):\n p = lr_schedule.LinearRampupPiecewiseConstantSchedule.Params().Set(\n boundaries=[40, 64, 80, 96],\n lrs=[1.0, 0.1, 0.01, 0.001],\n )\n with self.session(), cluster_factory.ForTestingWorker(\n mode='sync', job='trainer_client', tpus=8):\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 15, 1)]\n\n self.assertAllClose(\n pts, [[0, 0.0], [1, 1.6], [2, 3.2], [3, 4.8], [4, 6.4], [5, 8.0],\n [6, 8.0], [7, 8.0], [8, 8.], [9, 0.8], [10, 0.8], [11, 0.08],\n [12, 0.08], [13, 0.008], [14, 0.008]])\n\n def testCosineSchedule(self):\n p = lr_schedule.CosineSchedule.Params().Set(\n initial_value=2.0, total_steps=400000)\n with self.session():\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 500000, 100000)]\n self.assertAllClose(\n pts,\n [\n [0, 2.0],\n [100000, math.cos(math.pi / 4) + 1.], # angle=pi/4\n [200000, 1.0], # angle=pi/2, half-way\n [300000, math.cos(math.pi * 3 / 4) + 1.], # angle=pi*3/4\n [400000, 0.0],\n ])\n\n def testPiecewiseSchedule(self):\n # Linear ramp-up in 20000 steps, cosine decay in 40000 steps.\n p0 = lr_schedule.LinearLearningRateSchedule.Params().Set(\n start=(0, 0.), limit=(20000, 2.))\n p1 = lr_schedule.CosineSchedule.Params().Set(\n initial_value=2.0, total_steps=40000)\n p = lr_schedule.PiecewiseSchedule.Params().Set(\n boundaries=[20000], schedules=[p0, p1])\n with self.session():\n lrs = p.cls(p)\n pts = [[i, lrs.Value(i).eval()] for i in range(0, 70000, 10000)]\n self.assertAllClose(\n pts,\n [\n [0, 0.0],\n [10000, 1.0], # half-way in linear ramp-up.\n [20000, 2.0], # completed linear ramp-up.\n [30000, math.cos(math.pi / 4) + 1.], # pi/4.\n [40000, 1.0], # pi/2.\n [50000, 
math.cos(math.pi * 3 / 4) + 1.], # pi*3/4.\n [60000, 0.0], # pi.\n ])\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rovinyu/xalpha | [
"218281552d7f243ee0cb393938a00a041e618246"
] | [
"xalpha/info.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodules of info class, including cashinfo, indexinfo and fundinfo class\n\"\"\"\n\nimport os\nimport csv\nimport datetime as dt\nimport json\nimport re\nimport logging\nfrom functools import lru_cache\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import exc\n\nimport xalpha.remain as rm\nfrom xalpha.cons import (\n convert_date,\n droplist,\n myround,\n opendate,\n yesterday,\n yesterdaydash,\n yesterdayobj,\n today_obj,\n rget,\n _float,\n)\nfrom xalpha.exceptions import FundTypeError, TradeBehaviorError, ParserFailure\nfrom xalpha.indicator import indicator\n\n_warnmess = \"Something weird on redem fee, please adjust self.segment by hand\"\nlogger = logging.getLogger(__name__)\n\n\ndef _shengoucal(sg, sgf, value, label):\n \"\"\"\n Infer the share of buying fund by money input, the rate of fee in the unit of %,\n and netvalue of fund\n\n :param sg: positive float, 申购金额\n :param sgf: positive float, 申购费,以%为单位,如 0.15 表示 0.15%\n :param value: positive float, 对应产品的单位净值\n :param label: integer, 1 代表份额正常进行四舍五入, 2 代表份额直接舍去小数点两位之后。金额部分都是四舍五入\n :returns: tuple of two positive float, 净申购金额和申购份额\n \"\"\"\n jsg = myround(sg / (1 + sgf * 1e-2))\n share = myround(jsg / value, label)\n return (jsg, share)\n\n\ndef _nfloat(string):\n \"\"\"\n deal with comment column in fundinfo price table,\n positive value for fenhong and negative value for chaifen,\n keep other unrocognized pattern as original string\n\n :param string: string of input from original data\n :returns: make fenhong and songpei as float number\n \"\"\"\n result = 0\n if string:\n try:\n result = float(string)\n except ValueError:\n if re.match(r'\"分红\\D*(\\d*\\.\\d*)\\D*\"', string):\n result = float(re.match(r'\"分红\\D*(\\d*\\.\\d*)\\D*\"', string).group(1))\n elif re.match(r\".*现金(\\d*\\.\\d*)\\D*\", string):\n result = float(re.match(r\".*现金(\\d*\\.\\d*)\\D*\", string).group(1))\n elif re.match(r\".*折算(\\d*\\.\\d*)\\D*\", string):\n result = -float(re.match(r\".*折算(\\d*\\.\\d*)\\D*\", string).group(1))\n elif re.match(r'\"拆分\\D*(\\d*\\.\\d*)\\D*\"', string):\n result = -float(re.match(r'\"拆分\\D*(\\d*\\.\\d*)\\D*\"', string).group(1))\n elif re.match(r\"\\D*分拆(\\d*\\.\\d*)\\D*\", string):\n result = -float(re.match(r\"\\D*分拆(\\d*\\.\\d*)\\D*\", string).group(1))\n else:\n logger.warning(\"The comment col cannot be converted: %s\" % string)\n result = string\n return result\n\n\nclass FundReport:\n \"\"\"\n 提供查看各种基金报告的接口\n \"\"\"\n\n def __init__(self, code):\n self.code = code\n r = rget(\n \"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}\".format(\n code=code, type_=\"3\"\n ),\n headers={\n \"Referer\": \"http://fundf10.eastmoney.com/jjgg_{code}_3.html\".format(\n code=code\n )\n },\n )\n self.report_list = r.json()[\"Data\"]\n self.report_detail = {}\n\n def get_report(self, no=0, id_=None):\n \"\"\"\n\n :param no: int。在type_=3 中的第no个报告。\n :param id_: id 可由 :meth:`show_report_list` 中条目的对应 ID 得到\n :return:\n \"\"\"\n if id_:\n report_url = \"http://fund.eastmoney.com/gonggao/{code},{id_}.html\".format(\n code=self.code, id_=id_\n )\n r = rget(report_url)\n b = BeautifulSoup(r.text, \"lxml\")\n seasonr = b.find(\"pre\")\n sr = [s.string.strip() for s in seasonr.findAll(\"p\") if s.string]\n return sr\n\n if not self.report_detail.get(no):\n report_url = \"http://fund.eastmoney.com/gonggao/{code},{id_}.html\".format(\n code=self.code, id_=self.report_list[no][\"ID\"]\n )\n r = rget(report_url)\n b = BeautifulSoup(r.text, 
\"lxml\")\n seasonr = b.find(\"pre\")\n sr = [s.string.strip() for s in seasonr.findAll(\"p\") if s.string]\n self.report_detail[no] = sr\n\n return self.report_detail[no]\n\n def show_report_list(self, type_=3):\n \"\"\"\n\n :param type_: int。第0栏,第1栏,每栏的含义,请参照天天基金基金报告的页面。\n :return:\n \"\"\"\n r = rget(\n \"http://api.fund.eastmoney.com/f10/JJGG?callback=&fundcode={code}&pageIndex=1&pageSize=20&type={type_}\".format(\n code=self.code, type_=str(type_)\n ),\n headers={\n \"Referer\": \"http://fundf10.eastmoney.com/jjgg_{code}_3.html\".format(\n code=self.code\n )\n },\n )\n return r.json()[\"Data\"]\n\n def analyse_report(self, no=0):\n l = self.get_report(no)\n d = {}\n d[\"title\"] = \"\"\n for s in l[:5]:\n if s.startswith(\"基金管理\"):\n break\n d[\"title\"] += s + \" \"\n for i, s in enumerate(l):\n if s.startswith(\"业绩比较基准\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n if l[i + 1][0] != \"本\":\n d[\"benchmark\"] = ss[-1] + l[i + 1]\n else:\n d[\"benchmark\"] = ss[-1]\n elif s.startswith(\"基金管理人\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"company\"] = ss[-1]\n elif s.startswith(\"基金托管人\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"bank\"] = ss[-1]\n elif s.startswith(\"场内简称\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"shortname\"] = ss[-1]\n elif s.startswith(\"基金主代码\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"code\"] = ss[-1]\n elif s.startswith(\"报告期末基金份额总额\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"share\"] = ss[-1]\n elif s.startswith(\"基金合同生效日\"):\n ss = [s for s in s.split(\" \") if s.strip()]\n if len(ss) == 2:\n d[\"start_date\"] = ss[-1]\n return d\n\n\n@lru_cache()\ndef get_fund_holdings(code, year=\"\", season=\"\", month=\"\", category=\"jjcc\"):\n \"\"\"\n 获取基金详细的底层持仓信息\n\n :param code: str. 6 位基金代码\n :param year: int. eg. 2019\n :param season: int, 1,2,3,4\n :param month: Optional[int]. 指定 season 即可,一般不需理会\n :param category: str. stock 股票持仓, bond 债券持仓,天天基金无法自动处理海外基金持仓,暂未兼容 FOF 的国内基金持仓\n :return: pd.DataFrame or None. 
没有对应持仓时返回 None。\n \"\"\"\n if not month and season:\n month = 3 * int(season)\n if category in [\"stock\", \"stocks\", \"jjcc\", \"\", \"gp\", \"s\"]:\n category = \"jjcc\"\n elif category in [\"bond\", \"bonds\", \"zq\", \"zqcc\", \"b\"]:\n category = \"zqcc\"\n else:\n raise ParserFailure(\"unrecognized category %s\" % category)\n if code.startswith(\"F\"):\n code = code[1:]\n r = rget(\n \"http://fundf10.eastmoney.com/FundArchivesDatas.aspx?type={category}&code={code}&topline=10&\\\nyear={year}&month={month}\".format(\n year=str(year), month=str(month), code=code, category=category\n ),\n headers={\n \"Host\": \"fundf10.eastmoney.com\",\n \"Referer\": \"http://fundf10.eastmoney.com/ccmx_{code}.html\".format(\n code=code\n ),\n },\n )\n if len(r.text) < 50:\n return\n # raise ParserFailure(\n # \"This fund has no holdings on stock or bonds in this period\"\n # )\n s = BeautifulSoup(\n re.match(\"[\\s\\S]*apidata={ content:(.*),arryear:\", r.text).groups()[0], \"lxml\"\n )\n if len(s.text) < 30:\n return\n # raise ParserFailure(\n # \"This fund has no holdings on stock or bonds in this period\"\n # )\n timeline = [\n i.string for i in s.findAll(\"font\", class_=\"px12\") if i.text.startswith(\"2\")\n ]\n ind = 0\n if month:\n for i, d in enumerate(timeline):\n if d.split(\"-\")[1][-1] == str(month)[-1]: # avoid 09 compare to 9\n ind = i\n break\n else:\n return # not update to this month\n t1 = s.findAll(\"table\")[ind]\n main = [[j.text for j in i.contents] for i in t1.findAll(\"tr\")[1:]]\n cols = [j.text for j in t1.findAll(\"tr\")[0].contents if j.text.strip()]\n icode = 1\n iname = 2\n iratio = 4\n ishare = 5\n ivalue = 6\n for j, col in enumerate(cols):\n if col.endswith(\"代码\"):\n icode = j\n elif col.endswith(\"名称\"):\n iname = j\n elif col.endswith(\"比例\"):\n iratio = j\n elif col.startswith(\"持股数\"):\n ishare = j\n elif col.startswith(\"持仓市值\"):\n ivalue = j\n if category == \"jjcc\":\n result = {\"code\": [], \"name\": [], \"ratio\": [], \"share\": [], \"value\": []}\n for l in main:\n if l[iratio][0] == '-':\n continue\n result[\"code\"].append(l[icode])\n result[\"name\"].append(l[iname])\n result[\"ratio\"].append(float(l[iratio][:-1]))\n result[\"share\"].append(_float(l[ishare]))\n result[\"value\"].append(_float(l[ivalue]))\n elif category == \"zqcc\":\n result = {\"code\": [], \"name\": [], \"ratio\": [], \"value\": []}\n for l in main:\n result[\"code\"].append(l[1])\n result[\"name\"].append(l[2])\n result[\"ratio\"].append(float(l[3][:-1]))\n result[\"value\"].append(_float(l[4]))\n return pd.DataFrame(result)\n\n\nclass basicinfo(indicator):\n \"\"\"\n Base class for info of fund, index or even cash,\n which cannot be directly instantiate, the basic implementation consider\n redemption fee as zero when shuhui() function is implemented\n\n :param code: string of code for specific product\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO. Or in sql case, path is the engine from sqlalchemy.\n :param form: string, the format of IO, options including: 'csv','sql'\n :param round_label: int, default 0 or 1, label to the different round scheme of shares, reserved for fundinfo class. 1 代表全舍而非四舍五入。\n :param dividend_label: int, default 0 or 1. 0 代表默认现金分红,1代表红利再投。两者均可通过记账单上的 0.05 来改变单次的默认。\n :param value_label: int, default 0 or 1. 
1 代表记账单上的赎回数目是按金额而非份额的,只能完美支持货币基金。其他净值型基金本质上无法精确到分支持这一选项,因此不开放支持。\n \"\"\"\n\n def __init__(\n self,\n code,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n round_label=0,\n dividend_label=0,\n value_label=0,\n ):\n # 增量 IO 的逻辑都由 basicinfo 类来处理,对于具体的子类,只需实现_save_form 和 _fetch_form 以及 update 函数即可\n self.code = code\n\n self.round_label = round_label\n self.dividend_label = dividend_label\n self.value_label = value_label\n self.specialdate = []\n self.fenhongdate = []\n self.zhesuandate = []\n\n # compatible with new ``xa.set_backend()`` API\n import xalpha.universal as xu\n\n if (xu.ioconf[\"backend\"] in [\"csv\", \"sql\"]) and (not path):\n fetch = True\n save = True\n form = xu.ioconf[\"backend\"]\n path = xu.ioconf[\"path\"]\n if xu.ioconf[\"backend\"] == \"csv\":\n path = os.path.join(path, xu.ioconf[\"prefix\"] + \"INFO-\")\n self.format = form\n if fetch is False:\n self._basic_init() # update self. name rate and price table\n else:\n try:\n self.fetch(path, self.format)\n df = self.update() # update the price table as well as the file\n if (df is not None) and save is True:\n self.save(path, self.format, option=\"a\", delta=df)\n\n except (FileNotFoundError, exc.ProgrammingError) as e:\n logger.info(\"no saved copy of %s\" % self.code)\n fetch = False\n self._basic_init()\n\n if (save is True) and (fetch is False):\n self.save(path, self.format)\n\n def _basic_init(self):\n \"\"\"\n set self. name rate and price (dataframe) as well as other necessary attr of info()\n \"\"\"\n # below lines are just showcase, this function must be rewrite by child classes\n # self.name = 'unknown'\n # self.rate = 0\n # self.price = pd.DataFrame(data={'date':[],'netvalue':[],'comment':[]})\n raise NotImplementedError\n\n def shengou(self, value, date, fee=None):\n \"\"\"\n give the realdate deltacash deltashare tuple based on purchase date and purchase amount\n if the date is not a trade date, then the purchase would happen on the next trade day, if the date is\n in the furture, then the trade date is taken as yesterday.\n\n :param value: the money for purchase\n :param date: string or object of date\n :param fee: the rate for shengou, default None and info.rate will be used, ok for most cases\n :returns: three elements tuple, the first is the actual dateobj of commit\n the second is a negative float for cashin,\n the third is a positive float for share increase\n \"\"\"\n if fee is None:\n fee = self.rate\n row = self.price[self.price[\"date\"] >= date].iloc[0]\n share = _shengoucal(value, fee, row.netvalue, label=self.round_label + 1)[1]\n return (row.date, -myround(value), share)\n\n def shuhui(self, share, date, rem, value_label=None, fee=None):\n \"\"\"\n give the cashout considering redemption rates as zero.\n if the date is not a trade date, then the purchase would happen on the next trade day, if the date is\n in the furture, then the trade date is taken as yesterday.\n\n :param share: float or int, number of shares to be sold. if value_label=1, its cash to be sold.\n :param date: string or object of date\n :param rem: positions with time list\n :param value_label: default None, value_label will be chosen by info.value_label, determining\n whether shuhui by share 0 or value 1. 
value_label = 0 will rewrite self.value_label = 1\n :param fee: default None, determined automatically, suggested for most of the cases.\n Otherwise 0.015 means 1.5% in shuhui\n :returns: three elements tuple, the first is dateobj\n the second is a positive float for cashout,\n the third is a negative float for share decrease\n \"\"\"\n if self.value_label == 0 or value_label == 0:\n return self._shuhui_by_share(share, date, rem)\n elif self.value_label == 1: # 按金额赎回,仅支持无赎回费的货币基金\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n share = share / row.netvalue\n return self._shuhui_by_share(share, date, rem, fee=fee)\n\n def _shuhui_by_share(self, share, date, rem, fee=None):\n date = convert_date(date)\n tots = sum([remitem[1] for remitem in rem if remitem[0] <= date])\n if share > tots:\n sh = tots\n else:\n sh = share\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n value = myround(sh * row.netvalue)\n if fee is not None:\n value = (1 - fee) * value\n return (\n row.date,\n value,\n -myround(sh),\n ) # TODO: 这里 myround 是否也和 round_label 有关,有待考证\n\n def info(self):\n \"\"\"\n print basic info on the class\n \"\"\"\n print(\"fund name: %s\" % self.name)\n print(\"fund code: %s\" % self.code)\n print(\"fund purchase fee: %s%%\" % self.rate)\n\n def __repr__(self):\n return self.name\n\n def save(self, path, form=None, option=\"r\", delta=None):\n \"\"\"\n save info to files, this function is designed to redirect to more specific functions\n\n :param path: string of the folder path prefix! or engine obj from sqlalchemy\n :param form: string, option:'csv'\n :param option: string, r for replace and a for append output\n :param delta: if option is a, you have to specify the delta which is the incremental part of price table\n \"\"\"\n if form is None:\n form = self.format\n if form == \"csv\" and option == \"r\":\n self._save_csv(path)\n elif form == \"csv\" and option == \"a\":\n self._save_csv_a(path, delta)\n elif form == \"sql\" and option == \"r\":\n self._save_sql(path)\n elif form == \"sql\" and option == \"a\":\n self._save_sql_a(path, delta)\n\n def _save_csv_a(self, path, df):\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\",\n mode=\"a\",\n header=None,\n index=False,\n date_format=\"%Y-%m-%d\",\n )\n\n def _save_sql_a(self, path, df):\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, path, if_exists=\"append\", index=False\n )\n\n def fetch(self, path, form=None):\n \"\"\"\n fetch info from files\n\n :param path: string of the folder path prefix! 
end with / in csv case;\n engine from sqlalchemy.create_engine() in sql case.\n :param form: string, option:'csv' or 'sql\n \"\"\"\n if form is None:\n form = self.format\n if form == \"csv\":\n self._fetch_csv(path)\n elif form == \"sql\":\n self._fetch_sql(path)\n\n def update(self):\n \"\"\"\n 对类的价格表进行增量更新,并进行增量存储,适合 fetch 打开的情形\n\n :returns: the incremental part of price table or None if no incremental part exsits\n \"\"\"\n raise NotImplementedError\n\n\nclass fundinfo(basicinfo):\n \"\"\"\n class for specific fund with basic info and every day values\n 所获得的基金净值数据一般截止到昨日。但注意QDII基金的净值数据会截止的更早,因此部分时间默认昨日的函数可能出现问题,\n 处理QDII基金时,需要额外注意。\n\n :param code: str, 基金六位代码字符\n :param round_label: integer 0 or 1, 取1表示基金申购时份额直接舍掉小数点两位之后。当基金处于 cons.droplist 名单中时,\n label 总会被自动设置为1。非名单内基金可以显式令 round_label=1.\n :param dividend_label: int, default 0 or 1. 0 代表默认现金分红,1代表红利再投。两者均可通过记账单上的 0.05 来改变单次的默认。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n \"\"\"\n\n def __init__(\n self,\n code,\n round_label=0,\n dividend_label=0,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n priceonly=False,\n ):\n if round_label == 1 or (code in droplist):\n label = 1 # the scheme of round down on share purchase\n else:\n label = 0\n if code.startswith(\"F\") and code[1:].isdigit():\n code = code[1:]\n elif code.startswith(\"M\") and code[1:].isdigit():\n raise FundTypeError(\n \"This code seems to be a mfund, use ``mfundinfo`` instead\"\n )\n code = code.zfill(6) # 1234 is the same as 001234\n self._url = (\n \"http://fund.eastmoney.com/pingzhongdata/\" + code + \".js\"\n ) # js url api for info of certain fund\n self._feeurl = (\n \"http://fund.eastmoney.com/f10/jjfl_\" + code + \".html\"\n ) # html url for trade fees info of certain fund\n self.priceonly = priceonly\n\n super().__init__(\n code,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n round_label=label,\n dividend_label=dividend_label,\n )\n\n self.special = self.price[self.price[\"comment\"] != 0]\n self.specialdate = list(self.special[\"date\"])\n # date with nonvanishing comment, usually fenhong or zhesuan\n try:\n self.fenhongdate = list(self.price[self.price[\"comment\"] > 0][\"date\"])\n self.zhesuandate = list(self.price[self.price[\"comment\"] < 0][\"date\"])\n except TypeError:\n print(\"There are still string comments for the fund!\")\n\n def _basic_init(self):\n self._page = rget(self._url)\n if self._page.status_code == 404:\n raise ParserFailure(\"Unrecognized fund, please check fund code you input.\")\n if self._page.text[:800].find(\"Data_millionCopiesIncome\") >= 0:\n raise FundTypeError(\"This code seems to be a mfund, use mfundinfo instead\")\n\n l = re.match(\n r\"[\\s\\S]*Data_netWorthTrend = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n l = l.replace(\"null\", \"None\") # 暂未发现基金净值有 null 的基金,若有,其他地方也很可能出问题!\n l = eval(l)\n ltot = re.match(\n r\"[\\s\\S]*Data_ACWorthTrend = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[\n 0\n ] # .* doesn't match \\n\n ltot = ltot.replace(\"null\", \"None\") ## 096001 总值数据中有 null!\n ltot = eval(ltot)\n ## timestamp transform tzinfo must be taken into consideration\n tz_bj = dt.timezone(dt.timedelta(hours=8))\n infodict = {\n \"date\": [\n dt.datetime.fromtimestamp(int(d[\"x\"]) / 1e3, tz=tz_bj).replace(\n 
tzinfo=None\n )\n for d in l\n ],\n \"netvalue\": [float(d[\"y\"]) for d in l],\n \"comment\": [_nfloat(d[\"unitMoney\"]) for d in l],\n }\n\n if len(l) == len(ltot): # 防止总值和净值数据量不匹配,已知有该问题的基金:502010\n infodict[\"totvalue\"] = [d[1] for d in ltot]\n\n try:\n rate = float(\n eval(\n re.match(\n r\"[\\s\\S]*fund_Rate=([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n )\n )\n except ValueError:\n rate = 0\n logger.info(\"warning: this fund has no data for rate\") # know cases: ETF\n\n name = eval(\n re.match(r\"[\\s\\S]*fS_name = ([^;]*);[\\s\\S]*\", self._page.text).groups()[0]\n )\n\n self.rate = rate\n # shengou rate in tiantianjijin, daeshengou rate discount is not considered\n self.name = name # the name of the fund\n df = pd.DataFrame(data=infodict)\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n if len(df) == 0:\n raise ParserFailure(\"no price table found for this fund %s\" % self.code)\n self.price = df[df[\"date\"] <= yesterdaydash()]\n # deal with the redemption fee attrs finally\n if not self.priceonly:\n self._feepreprocess()\n\n def _feepreprocess(self):\n \"\"\"\n Preprocess to add self.feeinfo and self.segment attr according to redemption fee info\n \"\"\"\n feepage = rget(self._feeurl)\n soup = BeautifulSoup(\n feepage.text, \"lxml\"\n ) # parse the redemption fee html page with beautiful soup\n somethingwrong = False\n if not soup.findAll(\"a\", {\"name\": \"shfl\"}):\n somethingwrong = True\n logger.warning(\"%s 基金赎回信息为空,可能由于该基金已终止运作\" % self.code)\n self.feeinfo = []\n else:\n self.feeinfo = [\n item.string\n for item in soup.findAll(\"a\", {\"name\": \"shfl\"})[\n 0\n ].parent.parent.next_sibling.next_sibling.find_all(\"td\")\n if item.string != \"---\"\n ]\n # this could be [], known case 510030\n\n if not self.feeinfo or len(self.feeinfo) % 2 != 0:\n somethingwrong = True\n else:\n for item in self.feeinfo:\n if \"开放期\" in item or \"封闭\" in item or \"开放日期\" in item or \"运作期\" in item:\n # 暂时没有完美维护定开基金赎回费处理的计划\n somethingwrong = True\n if somethingwrong:\n logger.warning(\n \"%s 赎回费信息异常,多是因为定开基金,封闭基金或场内 ETF: %s\" % (self.code, self.feeinfo)\n )\n self.feeinfo = [\"小于7天\", \"1.50%\", \"大于等于7天\", \"0.00%\"]\n # print(self.feeinfo)\n try:\n self.segment = fundinfo._piecewise(self.feeinfo)\n except (ValueError, IndexError) as e:\n logger.warning(\n \"%s 赎回费信息抓取异常,请手动设定 ``self.segment`` 和 ``self.feeinfo``: %s\"\n % (self.code, self.feeinfo)\n )\n # below is default one\n self.feeinfo = [\"小于7天\", \"1.50%\", \"大于等于7天\", \"0.00%\"]\n self.segment = fundinfo._piecewise(self.feeinfo)\n\n @staticmethod\n def _piecewise(a):\n \"\"\"\n Transform the words list into a pure number segment list for redemption fee, eg. 
[[0,7],[7,365],[365]]\n \"\"\"\n\n b = [\n (\n a[2 * i]\n .replace(\"持有期限\", \"\")\n .replace(\"开放运作期时持有\", \"\")\n .replace(\"不少于\", \"\")\n .replace(\"小于\", \"\")\n .replace(\"大于\", \"\")\n .replace(\"等于\", \"\")\n .replace(\"个\", \"\")\n .replace(\"持有\", \"\")\n .replace(\"以上\", \"\")\n .replace(\"以内\", \"\")\n .replace(\"的\", \"\")\n .replace(\"(含7天)\", \"\")\n .replace(\"份额持有时间\", \"\")\n ).split(\",\")\n for i in range(int(len(a) / 2))\n ]\n # ['赎回时份额持有7天以内的', '1.50%', '持有7天以上(含7天),30天以内的', '0.10%', '赎回时份额持有满30天以上(含30天)的', '0.00%']\n # ['由于本基金最短持有期限为三年,赎回费率设置为零。', '0.00%', '对持续持有期少于7日的投资者收取不低于1.5%的赎回费。', '1.50%']\n # ['对持续持有期少于7日的投资者收取1.5%的赎回费并全额计入基金财产', '1.50%', '对于持续持有期大于等于7日的投资者不收取赎回费用。', '0.00%']\n # print(b)\n for j, tem in enumerate(b):\n for i, num in enumerate(tem):\n if num[-1] == \"天\":\n num = int(num[:-1])\n elif num[-1] == \"月\":\n num = int(num[:-1]) * 30\n elif num == \".5年\":\n num = 183\n else:\n num = int(float(num[:-1]) * 365)\n b[j][i] = num\n if len(b[0]) == 1: # 有时赎回费会写大于等于一天\n b[0].insert(0, 0)\n elif len(b[0]) == 2:\n b[0][0] = 0\n else:\n print(_warnmess)\n for i in range(len(b) - 1): # 有时赎回费两区间都是闭区间\n if b[i][1] - b[i + 1][0] == -1:\n b[i][1] = b[i + 1][0]\n elif b[i][1] == b[i + 1][0]:\n pass\n else:\n print(_warnmess)\n\n return b\n\n def feedecision(self, day):\n \"\"\"\n give the redemption rate in percent unit based on the days difference between purchase and redemption\n\n :param day: integer, 赎回与申购时间之差的自然日数\n :returns: float,赎回费率,以%为单位\n \"\"\"\n i = -1\n for seg in self.segment:\n i += 2\n if day - seg[0] >= 0 and (len(seg) == 1 or day - seg[-1] < 0):\n return float(self.feeinfo[i].strip(\"%\"))\n return 0 # error backup, in case there is sth wrong in segment\n\n def shuhui(self, share, date, rem, value_label=None, fee=None):\n \"\"\"\n give the cashout based on rem term considering redemption rates\n\n :returns: three elements tuple, the first is dateobj\n the second is a positive float for cashout,\n the third is a negative float for share decrease\n \"\"\"\n # \t\t value = myround(share*self.price[self.price['date']==date].iloc[0].netvalue)\n date = convert_date(date)\n partprice = self.price[self.price[\"date\"] >= date]\n if len(partprice) == 0:\n row = self.price[self.price[\"date\"] < date].iloc[-1]\n else:\n row = partprice.iloc[0]\n soldrem, _ = rm.sell(rem, share, row.date)\n value = 0\n sh = myround(sum([item[1] for item in soldrem]))\n for d, s in soldrem:\n if fee is None:\n tmpfee = self.feedecision((row.date - d).days) * 1e-2\n else:\n tmpfee = fee\n value += myround(\n s * row.netvalue * (1 - tmpfee)\n ) # TODO: round_label whether play a role here?\n return (row.date, value, -sh)\n\n def info(self):\n super().info()\n print(\"fund redemption fee info: %s\" % self.feeinfo)\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n s = json.dumps(\n {\n \"feeinfo\": self.feeinfo,\n \"name\": self.name,\n \"rate\": self.rate,\n \"segment\": self.segment,\n }\n )\n df = pd.DataFrame(\n [[s, 0, 0, 0]], columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"]\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to use manually,\n just 
set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n content = pd.read_csv(path + self.code + \".csv\")\n pricetable = content.iloc[1:]\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n saveinfo = json.loads(content.iloc[0].date)\n if not isinstance(saveinfo, dict):\n raise FundTypeError(\"This csv doesn't looks like from fundinfo\")\n self.segment = saveinfo[\"segment\"]\n self.feeinfo = saveinfo[\"feeinfo\"]\n self.name = saveinfo[\"name\"]\n self.rate = saveinfo[\"rate\"]\n except FileNotFoundError as e:\n # print('no saved copy of fund %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n s = json.dumps(\n {\n \"feeinfo\": self.feeinfo,\n \"name\": self.name,\n \"rate\": self.rate,\n \"segment\": self.segment,\n }\n )\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n content = pd.read_sql(\"xa\" + self.code, path)\n pricetable = content.iloc[1:]\n commentl = [float(com) for com in pricetable.comment]\n self.price = pricetable[[\"date\", \"netvalue\", \"totvalue\"]]\n self.price[\"comment\"] = commentl\n saveinfo = json.loads(content.iloc[0].comment)\n if not isinstance(saveinfo, dict):\n raise FundTypeError(\"This csv doesn't looks like from fundinfo\")\n self.segment = saveinfo[\"segment\"]\n self.feeinfo = saveinfo[\"feeinfo\"]\n self.name = saveinfo[\"name\"]\n self.rate = saveinfo[\"rate\"]\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def update(self):\n \"\"\"\n function to incrementally update the pricetable after fetch the old one\n \"\"\"\n lastdate = self.price.iloc[-1].date\n diffdays = (yesterdayobj() - lastdate).days\n if (\n diffdays == 0\n ): ## for some QDII, this value is 1, anyways, trying update is compatible (d+2 update)\n return None\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=1\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n if dt.datetime.strptime(str(items[0].string), \"%Y-%m-%d\") == today_obj():\n diffdays += 1\n if diffdays <= 10:\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=\"\n + str(diffdays)\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n elif (\n diffdays > 10\n ): ## there is a 20 item per page limit in the API, so to be safe, we query each page by 10 items only\n items = []\n for pg in range(1, int(diffdays / 10) + 2):\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=\"\n + str(pg)\n + \"&per=10\"\n )\n con = 
rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items.extend(soup.findAll(\"td\"))\n else:\n raise TradeBehaviorError(\n \"Weird incremental update: the saved copy has future records\"\n )\n\n date = []\n netvalue = []\n totvalue = []\n comment = []\n for i in range(int(len(items) / 7)):\n ts = pd.Timestamp(str(items[7 * i].string))\n if (ts - lastdate).days > 0:\n date.append(ts)\n netvalue.append(_float(items[7 * i + 1].string))\n totvalue.append(_float(items[7 * i + 2].string))\n comment.append(_nfloat(items[7 * i + 6].string))\n else:\n break\n df = pd.DataFrame(\n {\n \"date\": date,\n \"netvalue\": netvalue,\n \"totvalue\": totvalue,\n \"comment\": comment,\n }\n )\n df = df.iloc[::-1] ## reverse the time order\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n if len(df) != 0:\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n def get_holdings(self, year=\"\", season=\"\", month=\"\", category=\"stock\"):\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=category\n )\n\n def get_stock_holdings(self, year=\"\", season=\"\", month=\"\"):\n \"\"\"\n 持仓个股细节\n\n :param year:\n :param season:\n :param month:\n :return: pd.DataFrame\n \"\"\"\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=\"stock\"\n )\n\n def get_bond_holdings(self, year=\"\", season=\"\", month=\"\"):\n \"\"\"\n 持仓债券细节\n\n :param year:\n :param season:\n :param month:\n :return: pd.DataFrame\n \"\"\"\n return get_fund_holdings(\n self.code, year, season=season, month=month, category=\"bond\"\n )\n\n def get_portfolio_holdings(self, date=None):\n \"\"\"\n 持仓股债现金占比\n\n :param date:\n :return: Dict\n \"\"\"\n if date is None:\n date = dt.datetime.now().strftime(\"%Y-%m-%d\")\n import xalpha.universal as xu\n\n df = xu.get_daily(\"pt-F\" + self.code, end=date)\n if df is not None and len(df) > 0:\n d = dict(df.iloc[-1])\n del d[\"assets\"], d[\"date\"]\n return d\n else:\n logger.warning(\"no portfolio information before %s\" % date)\n return\n\n def get_industry_holdings(self, year=\"\", season=\"\", month=\"\", threhold=0.5):\n \"\"\"\n 持仓行业占比\n\n :param year:\n :param season:\n :param month:\n :param threhold: float, 持仓小于该百分数的个股行业不再统计,加快速度\n :return: Dict\n \"\"\"\n # 注意该 API 未直接使用天天基金的行业数据,其数据行业划分比较奇怪,大量行业都划分进了笼统的制造业,\n # 用于分析代表性不强,甚至没有消费,医药等行业划分方式\n\n from xalpha.universal import ttjjcode, get_industry_fromxq\n\n df = self.get_stock_holdings(year=year, season=season, month=month)\n if df is None:\n logger.warning(\n \"%s has no stock holdings in %s y %s s. 
(Possible reason: 链接基金,债券基金)\"\n % (self.code, year, season)\n )\n return\n d = {}\n for i, row in df.iterrows():\n if row[\"ratio\"] < threhold:\n continue\n code = ttjjcode(row[\"code\"])\n industry = get_industry_fromxq(code)[\"industryname\"]\n if not industry.strip():\n logger.warning(\n \"%s has no industry information, cannot be classfied\" % code\n )\n else:\n if industry not in d:\n d[industry] = 0\n d[industry] += row[\"ratio\"]\n return d\n\n def which_industry(self, threhold=1.0):\n \"\"\"\n Experimental API\n 当单一行业占比较其他行业的 threhold 倍还多时,自动判定为对应的行业基金\n 注意这里的行业可能比较细分,导致持仓多个行业其实是同一大行业从而误判为宽基基金的可能\n\n :param threhold: float\n :return: str\n \"\"\"\n d = self.get_industry_holdings()\n l = sorted([(k, v) for k, v in d.items()], key=lambda s: -s[1])\n s0 = 0\n if l and l[0] and l[0][1]:\n s0 = l[0][1]\n s1 = sum([l[i][1] for i in range(1, len(l))])\n if s0 > threhold * s1:\n return \"行业基金: \" + l[0][0]\n else:\n return \"宽基基金\"\n\n\nclass indexinfo(basicinfo):\n \"\"\"\n Get everyday close price of specific index.\n In self.price table, totvalue column is the real index\n while netvalue comlumn is normalized to 1 for the start date.\n In principle, this class can also be used to save stock prices but the price is without adjusted.\n\n :param code: string with seven digitals! note the code here has an extra digit at the beginning,\n 0 for sh and 1 for sz.\n :param value_label: int, default 0 or 1. If set to 1, 记账单数字按金额赎回。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n \"\"\"\n\n def __init__(\n self, code, value_label=0, fetch=False, save=False, path=\"\", form=\"csv\"\n ):\n date = yesterday()\n if code.startswith(\"SH\") and code[2:].isdigit():\n code = \"0\" + code[2:]\n elif code.startswith(\"SZ\") and code[2:].isdigit():\n code = \"1\" + code[2:]\n self.rate = 0\n self._url = (\n \"http://quotes.money.163.com/service/chddata.html?code=\"\n + code\n + \"&start=19901219&end=\"\n + date\n + \"&fields=TCLOSE\"\n )\n super().__init__(\n code, value_label=value_label, fetch=fetch, save=save, path=path, form=form\n )\n\n def _basic_init(self):\n raw = rget(self._url)\n cr = csv.reader(raw.text.splitlines(), delimiter=\",\")\n my_list = list(cr)\n factor = float(my_list[-1][3])\n dd = {\n \"date\": [\n dt.datetime.strptime(my_list[i + 1][0], \"%Y-%m-%d\")\n for i in range(len(my_list) - 1)\n ],\n \"netvalue\": [\n float(my_list[i + 1][3]) / factor for i in range(len(my_list) - 1)\n ],\n \"totvalue\": [float(my_list[i + 1][3]) for i in range(len(my_list) - 1)],\n \"comment\": [0 for _ in range(len(my_list) - 1)],\n }\n index = pd.DataFrame(data=dd)\n index = index.iloc[::-1]\n index = index.reset_index(drop=True)\n self.price = index[index[\"date\"].isin(opendate)]\n self.price = self.price[self.price[\"date\"] <= yesterdaydash()]\n self.name = my_list[-1][2]\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n self.price.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to 
use manually,\n just set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n pricetable = pd.read_csv(path + self.code + \".csv\")\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n\n except FileNotFoundError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n self.price.sort_index(axis=1).to_sql(\n \"xa\" + self.code, con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n pricetable = pd.read_sql(\"xa\" + self.code, path)\n self.price = pricetable\n\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def update(self):\n lastdate = self.price.iloc[-1].date\n lastdatestr = lastdate.strftime(\"%Y%m%d\")\n weight = self.price.iloc[1].totvalue\n self._updateurl = (\n \"http://quotes.money.163.com/service/chddata.html?code=\"\n + self.code\n + \"&start=\"\n + lastdatestr\n + \"&end=\"\n + yesterday()\n + \"&fields=TCLOSE\"\n )\n df = pd.read_csv(self._updateurl, encoding=\"gb2312\")\n self.name = df.iloc[0].loc[\"名称\"]\n if len(df) > 1:\n df = df.rename(columns={\"收盘价\": \"totvalue\"})\n df[\"date\"] = pd.to_datetime(df.日期)\n df = df.drop([\"股票代码\", \"名称\", \"日期\"], axis=1)\n df[\"netvalue\"] = df.totvalue / weight\n df[\"comment\"] = [0 for _ in range(len(df))]\n df = df.iloc[::-1].iloc[1:]\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n\nclass cashinfo(basicinfo):\n \"\"\"\n A virtual class for remaining cash manage: behave like monetary fund\n\n :param interest: float, daily rate in the unit of 100%, note this is not a year return rate!\n :param start: str of date or dateobj, the virtual starting date of the cash fund\n :param value_label: int, default 0 or 1. If set to 1, 记账单数字按金额赎回。\n \"\"\"\n\n def __init__(self, interest=0.0001, start=\"2012-01-01\", value_label=0):\n self.interest = interest\n start = convert_date(start)\n self.start = start\n super().__init__(\n \"mf\", value_label=value_label, fetch=False, save=False, path=\"nobackend\"\n ) # 永远不缓存 cashinfo\n\n def _basic_init(self):\n self.name = \"货币基金\"\n self.rate = 0\n datel = list(\n pd.date_range(dt.datetime.strftime(self.start, \"%Y-%m-%d\"), yesterdaydash())\n )\n valuel = []\n for i, date in enumerate(datel):\n valuel.append((1 + self.interest) ** i)\n dfdict = {\n \"date\": datel,\n \"netvalue\": valuel,\n \"totvalue\": valuel,\n \"comment\": [0 for _ in datel],\n }\n df = pd.DataFrame(data=dfdict)\n self.price = df[df[\"date\"].isin(opendate)]\n\n\nclass mfundinfo(basicinfo):\n \"\"\"\n 真实的货币基金类,可以通过货币基金六位代码,来获取真实的货币基金业绩,并进行交易回测等\n\n :param code: string of six digitals, code of real monetnary fund\n :param round_label: int, default 0 or 1, label to the different round scheme of shares, reserved for fundinfo class. 1 代表全舍而非四舍五入。\n :param value_label: int, default 0 or 1. 
1 代表记账单上的赎回数目是按金额而非份额的,只能完美支持货币基金。\n :param fetch: boolean, when open the fetch option, the class will try fetching from local files first in the init\n :param save: boolean, when open the save option, automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n\n \"\"\"\n\n def __init__(\n self,\n code,\n round_label=0,\n value_label=0,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n ):\n if code.startswith(\"M\") and code[1:].isdigit():\n code = code[1:]\n code = code.zfill(6)\n self._url = \"http://fund.eastmoney.com/pingzhongdata/\" + code + \".js\"\n self.rate = 0\n super().__init__(\n code,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n round_label=round_label,\n value_label=value_label,\n )\n\n def _basic_init(self):\n self._page = rget(self._url)\n if self._page.text[:800].find(\"Data_fundSharesPositions\") >= 0:\n raise FundTypeError(\"This code seems to be a fund, use fundinfo instead\")\n l = eval(\n re.match(\n r\"[\\s\\S]*Data_millionCopiesIncome = ([^;]*);[\\s\\S]*\", self._page.text\n ).groups()[0]\n )\n self.name = re.match(\n r\"[\\s\\S]*fS_name = \\\"([^;]*)\\\";[\\s\\S]*\", self._page.text\n ).groups()[0]\n tz_bj = dt.timezone(dt.timedelta(hours=8))\n datel = [\n dt.datetime.fromtimestamp(int(d[0]) / 1e3, tz=tz_bj).replace(tzinfo=None)\n for d in l\n ]\n ratel = [float(d[1]) for d in l]\n netvalue = [1]\n for dailyrate in ratel:\n netvalue.append(netvalue[-1] * (1 + dailyrate * 1e-4))\n netvalue.remove(1)\n\n df = pd.DataFrame(\n data={\n \"date\": datel,\n \"netvalue\": netvalue,\n \"totvalue\": netvalue,\n \"comment\": [0 for _ in datel],\n }\n )\n df = df[df[\"date\"].isin(opendate)]\n if len(df) == 0:\n raise ParserFailure(\"no price table for %s\" % self.code)\n df = df.reset_index(drop=True)\n self.price = df[df[\"date\"] <= yesterdaydash()]\n\n def _save_csv(self, path):\n \"\"\"\n save the information and pricetable into path+code.csv, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n df = pd.DataFrame(\n [[0, 0, self.name, 0]], columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"]\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_csv(\n path + self.code + \".csv\", index=False, date_format=\"%Y-%m-%d\"\n )\n\n def _fetch_csv(self, path):\n \"\"\"\n fetch the information and pricetable from path+code.csv, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: string of folder path\n \"\"\"\n try:\n content = pd.read_csv(path + self.code + \".csv\")\n pricetable = content.iloc[1:]\n datel = list(pd.to_datetime(pricetable.date))\n self.price = pricetable[[\"netvalue\", \"totvalue\", \"comment\"]]\n self.price[\"date\"] = datel\n self.name = content.iloc[0].comment\n except FileNotFoundError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def _save_sql(self, path):\n \"\"\"\n save the information and pricetable into sql, not recommend to use manually,\n just set the save label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n s = json.dumps({\"name\": self.name})\n df = pd.DataFrame(\n [[pd.Timestamp(\"1990-01-01\"), 0, s, 0]],\n columns=[\"date\", \"netvalue\", \"comment\", \"totvalue\"],\n )\n df = df.append(self.price, ignore_index=True, sort=True)\n df.sort_index(axis=1).to_sql(\n \"xa\" + self.code, 
con=path, if_exists=\"replace\", index=False\n )\n\n def _fetch_sql(self, path):\n \"\"\"\n fetch the information and pricetable from sql, not recommend to use manually,\n just set the fetch label to be true when init the object\n\n :param path: engine object from sqlalchemy\n \"\"\"\n try:\n content = pd.read_sql(\"xa\" + self.code, path)\n pricetable = content.iloc[1:]\n commentl = [float(com) for com in pricetable.comment]\n self.price = pricetable[[\"date\", \"netvalue\", \"totvalue\"]]\n self.price[\"comment\"] = commentl\n self.name = json.loads(content.iloc[0].comment)[\"name\"]\n except exc.ProgrammingError as e:\n # print('no saved copy of %s' % self.code)\n raise e\n\n def update(self):\n \"\"\"\n function to incrementally update the pricetable after fetch the old one\n \"\"\"\n lastdate = self.price.iloc[-1].date\n startvalue = self.price.iloc[-1].totvalue\n diffdays = (yesterdayobj() - lastdate).days\n if diffdays == 0:\n return None\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=1\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n if dt.datetime.strptime(str(items[0].string), \"%Y-%m-%d\") == today_obj():\n diffdays += 1\n if diffdays <= 10:\n # caution: there may be today data!! then a day gap will be in table\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=1&per=\"\n + str(diffdays)\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items = soup.findAll(\"td\")\n elif (\n diffdays > 10\n ): ## there is a 20 item per page limit in the API, so to be safe, we query each page by 10 items only\n items = []\n for pg in range(1, int(diffdays / 10) + 2):\n self._updateurl = (\n \"http://fund.eastmoney.com/f10/F10DataApi.aspx?type=lsjz&code=\"\n + self.code\n + \"&page=\"\n + str(pg)\n + \"&per=10\"\n )\n con = rget(self._updateurl)\n soup = BeautifulSoup(con.text, \"lxml\")\n items.extend(soup.findAll(\"td\"))\n else:\n raise TradeBehaviorError(\n \"Weird incremental update: the saved copy has future records\"\n )\n\n date = []\n earnrate = []\n comment = []\n for i in range(int(len(items) / 6)):\n ts = pd.Timestamp(str(items[6 * i].string))\n if (ts - lastdate).days > 0:\n date.append(ts)\n earnrate.append(float(items[6 * i + 1].string) * 1e-4)\n comment.append(_nfloat(items[6 * i + 5].string))\n date = date[::-1]\n earnrate = earnrate[::-1]\n comment = comment[::-1]\n netvalue = [startvalue]\n for earn in earnrate:\n netvalue.append(netvalue[-1] * (1 + earn))\n netvalue.remove(startvalue)\n\n df = pd.DataFrame(\n {\n \"date\": date,\n \"netvalue\": netvalue,\n \"totvalue\": netvalue,\n \"comment\": comment,\n }\n )\n df = df[df[\"date\"].isin(opendate)]\n df = df.reset_index(drop=True)\n df = df[df[\"date\"] <= yesterdayobj()]\n if len(df) != 0:\n self.price = self.price.append(df, ignore_index=True, sort=True)\n return df\n\n\nFundInfo = fundinfo\nMFundInfo = mfundinfo\nCashInfo = cashinfo\nIndexInfo = indexinfo\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
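The xalpha record above defines a helper, _shengoucal, that turns a gross purchase amount into a net amount and a share count given a subscription fee in percent and the fund's unit net value. The following is a minimal standalone sketch of that calculation, not part of the dataset record itself; plain round(x, 2) stands in for the record's myround/round_label handling, so it only illustrates the formula.

def shengou_sketch(gross, fee_pct, netvalue):
    # gross: purchase amount before fees
    # fee_pct: subscription fee in percent, e.g. 0.15 means 0.15%
    # netvalue: unit net value of the fund
    # The net amount strips the fee, then shares follow from the net value.
    net_amount = round(gross / (1 + fee_pct * 1e-2), 2)
    shares = round(net_amount / netvalue, 2)
    return net_amount, shares

# Buying 1000 at a 0.15% fee when the unit net value is 1.25
print(shengou_sketch(1000, 0.15, 1.25))  # (998.5, 798.8)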
TimWeaving/CS-VQE-Ansatz | [
"42b4db51e8639e7bcc014af67f353c18ab8b01a5"
] | [
"utils/molecule_tools.py"
] | [
"import numpy as np\nimport json\nfrom collections import Counter\nimport utils.qonversion_tools as qonvert\nimport utils.bit_tools as bit\nimport utils.linalg_tools as la\nimport itertools\nfrom copy import deepcopy\n# Qiskit libraries\nfrom qiskit_nature.operators.second_quantization.fermionic_op import FermionicOp\nfrom qiskit_nature.drivers import UnitsType, Molecule\nfrom qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver\nfrom qiskit_nature.problems.second_quantization import ElectronicStructureProblem\nfrom qiskit_nature.converters.second_quantization import QubitConverter\nfrom qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper\nfrom qiskit_nature.circuit.library.ansatzes.ucc import UCC\nfrom qiskit_nature.circuit.library.initial_states.hartree_fock import hartree_fock_bitstring\nfrom qiskit.opflow.primitive_ops import PauliSumOp\nfrom qiskit.quantum_info import Pauli, SparsePauliOp\nfrom qiskit.opflow.primitive_ops.tapered_pauli_sum_op import TaperedPauliSumOp\n\n\ndef taper_it(operator, Z2symmetries, taper_type='hamiltonian'):\n \"\"\" Perform qubit tapering, adapted from Qiskit Nature \n \"\"\"\n assert(taper_type in ['hamiltonian', 'ansatz'])\n assert(isinstance(operator, PauliSumOp))\n \n cliffords = Z2symmetries.cliffords\n taper_qubits = Z2symmetries.sq_list\n leave_qubits = list(set(range(operator.num_qubits))-set(taper_qubits))\n taper_eigvals = Z2symmetries.tapering_values\n \n op_temp = deepcopy(operator)\n if taper_type == 'hamiltonian':\n rot_op = la.rotate_operator(op_temp, cliffords)\n elif taper_type == 'ansatz':\n rot_op = la.rotate_operator(op_temp, cliffords, apply_on='left') \n \n pauli_list = []\n for pauli_term in rot_op:\n coeff_out = pauli_term.primitive.coeffs[0]\n if (\n not np.any(np.delete(pauli_term.primitive.table.Z.copy(), \n np.asarray(leave_qubits)))\n or taper_type=='hamiltonian'\n ):\n for idx, qubit_idx in enumerate(taper_qubits):\n if (\n pauli_term.primitive.table.Z[0][qubit_idx]\n or pauli_term.primitive.table.X[0][qubit_idx]\n ):\n coeff_out = taper_eigvals[idx] * coeff_out\n z_temp = np.delete(pauli_term.primitive.table.Z[0].copy(), np.asarray(taper_qubits))\n x_temp = np.delete(pauli_term.primitive.table.X[0].copy(), np.asarray(taper_qubits))\n pauli_list.append((Pauli((z_temp, x_temp)).to_label(), coeff_out))\n spo = SparsePauliOp.from_list(pauli_list).simplify(atol=0.0)\n taper_out = TaperedPauliSumOp(spo, Z2symmetries)\n\n return taper_out\n\n\ndef check_ansatz_expressibility(ref_state, ansatz, n_q, obsrv=None, spectrum=None, threshold=1e-4):\n \"\"\"\n Exactly one of obsrv and spectrum must not be None\n \"\"\"\n if obsrv is None:\n assert(spectrum is not None)\n else:\n assert(spectrum is None)\n \n if spectrum is None:\n try:\n gs_nrg, spectrum = la.get_ground_state(obsrv.to_spmatrix()) # true ground state for reference\n except:\n gs_nrg, spectrum = la.get_ground_state(obsrv.to_matrix()) \n gsprobs = [abs(amp)**2 for amp in spectrum]\n bstates = [bit.int_to_bin(i, n_q) for i in range(2**n_q)]\n sig_state_order = [s for s in sorted(list(zip(gsprobs, bstates)), key=lambda x:-x[0]) if s[0]>threshold]\n unexpressed_states=[]\n for amp, b in sig_state_order:\n if b!=ref_state:\n diff = [index for index,compare in enumerate(zip(ref_state, b)) if len(set(compare))!=1]\n matches=[]\n for op in ansatz:\n flip = set([op[i] for i in diff])\n stick = set([op[i] for i in range(n_q) if i not in diff])\n if (flip in [{'X'}, {'Y'}, {'X','Y'}] and\n stick in 
[{'I'}, {'Z'}, {'I','Z'}]):\n matches.append(op)\n if matches == []:\n unexpressed_states.append(b)\n \n if unexpressed_states == []:\n print('The Ansatz is fully expressible at a significance level of {}'.format(threshold))\n else:\n print('The Ansatz cannot express the following states:')\n print(unexpressed_states)\n\n return unexpressed_states\n\n\ndef construct_molecule(atoms, coords, charge, multiplicity, basis, \n excite_threshold=None, excite_type=[1,2], \n taper=False, sym_sector=None, check_expressibility=False):\n \"\"\"\n atoms: list\n - list of atom names\n coords: list\n - list of cartesian coordinates corresponding with each atom in atoms\n charge: int\n - molecule charge, either -1, 0, +1\n multiplicity: int\n - Molecule multiplicity (1=singlet, 2=doublet etc.)\n basis: str\n - Molecular basis, such as 'sto-3g', '3-21g', '6-31g', ... etc.\n excite_threshold: int\n - Will include Ansatz terms of weight up to 1e-<excite_threshold> in the spectrum of H\n excite_type: list\n - list of integers representing excitations, eg. 1=single, 2=double, 3=triple, ... etc. \n By default set to singles and doubles\n taper: bool\n - Flag determining whether qubit tapering is performed\n sym_sector: list:\n - Manually specify the symmetry sector (List of integers +-1) into which we taper \n (if None will try to determine the sector automatically)\n \"\"\"\n # generate speciesname string from atoms\n mult = Counter(atoms)\n speciesname = ''\n for a in mult.keys():\n speciesname += (a+str(mult[a])+'_')\n speciesname += str(basis)\n \n # ensure correct format for geometry\n geometry = []\n for index, a in enumerate(atoms):\n geometry.append((a, coords[index]))\n\n # now we duplicate this electronic structure problem in Qiskit Nature for Z2 symmetry identification\n molecule_qiskit = Molecule(geometry=geometry, charge=charge, multiplicity=multiplicity) \n driver = ElectronicStructureMoleculeDriver(molecule_qiskit, basis=basis, driver_type=ElectronicStructureDriverType.PYSCF)\n es_problem = ElectronicStructureProblem(driver)\n ham_2ndQ = es_problem.second_q_ops()[0]\n num_qubits = ham_2ndQ.register_length\n \n # Hartree-Fock state\n alpha_beta_nums = list(es_problem.num_particles)\n if charge==-1:\n alpha_beta_nums[1]=alpha_beta_nums[1]-1\n if charge==+1:\n alpha_beta_nums[0]=alpha_beta_nums[0]+1\n hf_config_bool = hartree_fock_bitstring(num_particles=alpha_beta_nums,num_spin_orbitals=num_qubits)\n hf_config = ''.join([str(int(b)) for b in hf_config_bool])[::-1]\n\n # Determine tapering stabilisers\n qubit_converter = QubitConverter(JordanWignerMapper(), z2symmetry_reduction='auto')\n ham_ref = qubit_converter.convert(ham_2ndQ) # stores the Z2 symmetries in qubit_converter \n true_gs_nrg, true_gs_vec = la.get_ground_state(ham_ref.to_spmatrix()) # true ground state for reference\n\n # Here we collate a dictionary of Ansatze of varying expressibility, starting with UCC...\n ansatze={}\n excite_type_map = {1:'s', 2:'d', 3:'t', 4:'q'}\n for i in excite_type:\n excitations = list(range(1, i+1))\n ucc_excitations=UCC(num_particles=alpha_beta_nums, \n num_spin_orbitals=num_qubits,\n excitations=excitations).excitation_ops()\n ansatze['ucc'+''.join([excite_type_map[j] for j in excitations])] = 1j*sum(ucc_excitations)\n\n # determine most significant excitation terms\n if excite_threshold is not None:\n gsprobs = [abs(amp)**2 for amp in true_gs_vec]\n bstates = [bit.int_to_bin(i, num_qubits) for i in range(2**num_qubits)]\n threshold_list = [('excite'+str(n), 1/(10**n)) for n in range(1, 
excite_threshold)]\n for anz_name, threshold in threshold_list:\n sig_state_order = [s for s in sorted(list(zip(gsprobs, bstates)), key=lambda x:-x[0]) if s[0]>threshold]\n reduccsd = []\n for amp, state in sig_state_order:\n bit_compare = list(zip(hf_config, state))\n annihilate = [str(num_qubits-1-index) for index, b in enumerate(bit_compare) if (b[0]=='1' and b[1]=='0')]\n create = [str(num_qubits-1-index) for index, b in enumerate(bit_compare) if (b[0]=='0' and b[1]=='1')]\n exCite = annihilate+create\n if exCite != [] and len(exCite) in np.array(excite_type)*2:\n dags = ['-_' for i in range(len(annihilate))]+['+_' for i in range(len(create))]\n fo_string = ' '.join([''.join(x) for x in zip(dags, exCite)])\n excite_op = amp*FermionicOp(fo_string, register_length=num_qubits, display_format='sparse')\n reduccsd.append(excite_op)\n reduccsd.append(-excite_op.adjoint())\n ansatz=sum(reduccsd)\n if ansatz!=0:\n ansatze[anz_name] = ansatz\n\n if not taper:\n q_convert = QubitConverter(JordanWignerMapper())\n ham_q = q_convert.convert(ham_2ndQ)\n ham = qonvert.PauliOp_to_dict(ham_q, num_qubits)\n for a in ansatze.keys():\n ucc_q = q_convert.convert(ansatze[a])\n ansatze[a] = {op:coeff.imag for op,coeff in qonvert.PauliOp_to_dict(ucc_q, num_qubits).items()}\n num_tapered=0\n sym_sector=None\n \n else:\n hf_config_tap = ''.join([str(int(b)) for b in np.delete(hf_config_bool, qubit_converter.z2symmetries.sq_list)])[::-1]\n if sym_sector is None:\n Z2ref = es_problem.symmetry_sector_locator(qubit_converter.z2symmetries) #try to find the correct sector\n else:\n Z2ref = sym_sector\n num_tapered = len(Z2ref)\n\n # list all possible sectors\n sectors = []\n for c in list(itertools.combinations_with_replacement([+1, -1], len(Z2ref))):\n sectors+=set(itertools.permutations(c))\n # order by hamming distance from the reference sector\n sectors_order=[]\n for s in sectors:\n ham_dist=0\n for a,b in zip(Z2ref, s):\n if a!=b:\n ham_dist+=1\n sectors_order.append((s, ham_dist))\n sectors=[a for a,b in sorted(sectors_order, key=lambda x:x[1])]\n\n print('Attempting to taper %i --> %i qubits' % (num_qubits, num_qubits-num_tapered))\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n pretap=true_gs_nrg\n for sym_sector in sectors:\n # Perform Jordan-Wigner transformation and taper\n qubit_taper = QubitConverter(JordanWignerMapper(), z2symmetry_reduction=sym_sector)\n ham_tap = qubit_taper.convert(ham_2ndQ)\n postap, tap_gs_vec=la.get_ground_state(ham_tap.to_spmatrix())\n\n if postap-pretap<1e-10:\n print('Energies match in sector %s, tapering successful!\\n' % str(sym_sector))\n ham = taper_it( operator = ham_ref, \n Z2symmetries = qubit_taper.z2symmetries)\n break\n else:\n print('Energy mismatch with target problem in sector %s, trying another...' 
% str(sym_sector))\n \n # taper the Ansatze \n expressibility={}\n for a in ansatze.keys():\n anz = QubitConverter(JordanWignerMapper()).convert(ansatze[a])\n anz_tap = taper_it( operator = anz, \n Z2symmetries = qubit_taper.z2symmetries,\n taper_type = 'ansatz')\n anz_tap = qonvert.PauliOp_to_dict(anz_tap)\n anz_tap = {op:coeff.imag for op,coeff in anz_tap.items()}\n ansatze[a] = anz_tap\n if check_expressibility:\n print('Tapering {} Ansatz...'.format(a))\n unexpressed_states = check_ansatz_expressibility(ref_state=hf_config_tap, \n ansatz = anz_tap,\n n_q=ham.num_qubits,\n spectrum=tap_gs_vec)\n if unexpressed_states == []:\n expressibility[a] = 'full'\n else:\n expressibility[a]=unexpressed_states\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n \n ham = qonvert.PauliOp_to_dict(ham)\n ham = {op:coeff.real for op,coeff in ham.items()}\n num_qubits -= num_tapered\n \n\n return {'speciesname':speciesname,\n 'num_qubits' :num_qubits,\n 'num_tapered':num_tapered,\n 'hamiltonian':ham,\n 'ansatze' :ansatze,\n 'expressible':expressibility,\n 'hf_config' :hf_config_tap,\n #'hf_energy' :hf_energy,\n 'true_gs_nrg':true_gs_nrg,\n 'true_gs_vec':true_gs_vec,\n 'tapersector':sym_sector}\n\n\ndef get_molecule(speciesname, taper=False):\n \"\"\" Wrapper for construct molecule\n \"\"\"\n file = 'molecule_data'\n with open('data/'+file+'.json', 'r') as json_file:\n molecule_data = json.load(json_file)\n\n atoms, coords, multiplicity, charge, basis, sym_sector = molecule_data[speciesname].values()\n if taper:\n if sym_sector==\"None\":\n sym_sector=None\n print('*** sector not specified in molecule data, searching now ***')\n else:\n print('*** sector saved, will check tapered ground state energy matches target problem ***')\n \n mol_out = construct_molecule(atoms=atoms, \n coords=coords, \n charge=charge, \n multiplicity=multiplicity, \n basis=basis, \n taper=taper, \n sym_sector=sym_sector)\n \n return mol_out\n"
] | [
[
"numpy.asarray",
"numpy.delete",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
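The CS-VQE-Ansatz record above attempts qubit tapering sector by sector: it enumerates every +/-1 symmetry sector and orders the candidates by Hamming distance from a reference sector before testing each one. Below is a small self-contained sketch of that ordering step, not part of the dataset record; the length-3 reference sector is purely illustrative.

import itertools

def order_sectors(reference):
    # Enumerate all +/-1 sign vectors of the same length as the reference
    # and sort them by Hamming distance from it, mirroring the sector loop
    # in construct_molecule above (duplicates are removed via a set).
    n = len(reference)
    sectors = set()
    for combo in itertools.combinations_with_replacement([+1, -1], n):
        sectors.update(itertools.permutations(combo))
    return sorted(sectors, key=lambda s: sum(a != b for a, b in zip(reference, s)))

# Illustrative reference only; real sectors come from the Z2 symmetry locator.
print(order_sectors((+1, -1, +1)))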
jiang1997/mmaction2 | [
"3cc2e807d91c6f829ec7b6debfaf76ce97858b40",
"ccc759dcf159969aa93865bc2a0b18a0c3494044",
"ccc759dcf159969aa93865bc2a0b18a0c3494044"
] | [
"mmaction/datasets/pipelines/augmentations.py",
"tests/test_data/test_pipelines/test_augmentations/test_crop.py",
"demo/demo_posec3d.py"
] | [
"import random\nimport warnings\nfrom collections.abc import Sequence\nfrom distutils.version import LooseVersion\n\nimport cv2\nimport mmcv\nimport numpy as np\nfrom torch.nn.modules.utils import _pair\n\nfrom ..builder import PIPELINES\nfrom .formating import to_tensor\n\n\ndef _combine_quadruple(a, b):\n return (a[0] + a[2] * b[0], a[1] + a[3] * b[1], a[2] * b[2], a[3] * b[3])\n\n\ndef _flip_quadruple(a):\n return (1 - a[0] - a[2], a[1], a[2], a[3])\n\n\ndef _init_lazy_if_proper(results, lazy):\n \"\"\"Initialize lazy operation properly.\n\n Make sure that a lazy operation is properly initialized,\n and avoid a non-lazy operation accidentally getting mixed in.\n\n Required keys in results are \"imgs\" if \"img_shape\" not in results,\n otherwise, Required keys in results are \"img_shape\", add or modified keys\n are \"img_shape\", \"lazy\".\n Add or modified keys in \"lazy\" are \"original_shape\", \"crop_bbox\", \"flip\",\n \"flip_direction\", \"interpolation\".\n\n Args:\n results (dict): A dict stores data pipeline result.\n lazy (bool): Determine whether to apply lazy operation. Default: False.\n \"\"\"\n\n if 'img_shape' not in results:\n results['img_shape'] = results['imgs'][0].shape[:2]\n if lazy:\n if 'lazy' not in results:\n img_h, img_w = results['img_shape']\n lazyop = dict()\n lazyop['original_shape'] = results['img_shape']\n lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],\n dtype=np.float32)\n lazyop['flip'] = False\n lazyop['flip_direction'] = None\n lazyop['interpolation'] = None\n results['lazy'] = lazyop\n else:\n assert 'lazy' not in results, 'Use Fuse after lazy operations'\n\n\[email protected]_module()\nclass TorchvisionTrans:\n \"\"\"Torchvision Augmentations, under torchvision.transforms.\n\n Args:\n type (str): The name of the torchvision transformation.\n \"\"\"\n\n def __init__(self, type, **kwargs):\n try:\n import torchvision\n import torchvision.transforms as tv_trans\n except ImportError:\n raise RuntimeError('Install torchvision to use TorchvisionTrans')\n if LooseVersion(torchvision.__version__) < LooseVersion('0.8.0'):\n raise RuntimeError('The version of torchvision should be at least '\n '0.8.0')\n\n trans = getattr(tv_trans, type, None)\n assert trans, f'Transform {type} not in torchvision'\n self.trans = trans(**kwargs)\n\n def __call__(self, results):\n assert 'imgs' in results\n\n imgs = [x.transpose(2, 0, 1) for x in results['imgs']]\n imgs = to_tensor(np.stack(imgs))\n\n imgs = self.trans(imgs).data.numpy()\n imgs[imgs > 255] = 255\n imgs[imgs < 0] = 0\n imgs = imgs.astype(np.uint8)\n imgs = [x.transpose(1, 2, 0) for x in imgs]\n results['imgs'] = imgs\n return results\n\n\[email protected]_module()\nclass PytorchVideoTrans:\n \"\"\"PytorchVideoTrans Augmentations, under pytorchvideo.transforms.\n\n Args:\n type (str): The name of the pytorchvideo transformation.\n \"\"\"\n\n def __init__(self, type, **kwargs):\n try:\n import torch\n import pytorchvideo.transforms as ptv_trans\n except ImportError:\n raise RuntimeError('Install pytorchvideo to use PytorchVideoTrans')\n if LooseVersion(torch.__version__) < LooseVersion('1.8.0'):\n raise RuntimeError(\n 'The version of PyTorch should be at least 1.8.0')\n\n trans = getattr(ptv_trans, type, None)\n assert trans, f'Transform {type} not in pytorchvideo'\n\n supported_pytorchvideo_trans = ('AugMix', 'RandAugment',\n 'RandomResizedCrop', 'ShortSideScale',\n 'RandomShortSideScale')\n assert type in supported_pytorchvideo_trans,\\\n f'PytorchVideo Transform {type} is not supported in MMAction2'\n\n 
self.trans = trans(**kwargs)\n self.type = type\n\n def __call__(self, results):\n assert 'imgs' in results\n\n assert 'gt_bboxes' not in results,\\\n f'PytorchVideo {self.type} doesn\\'t support bboxes yet.'\n assert 'proposals' not in results,\\\n f'PytorchVideo {self.type} doesn\\'t support bboxes yet.'\n\n if self.type in ('AugMix', 'RandAugment'):\n # list[ndarray(h, w, 3)] -> torch.tensor(t, c, h, w)\n imgs = [x.transpose(2, 0, 1) for x in results['imgs']]\n imgs = to_tensor(np.stack(imgs))\n else:\n # list[ndarray(h, w, 3)] -> torch.tensor(c, t, h, w)\n # uint8 -> float32\n imgs = to_tensor((np.stack(results['imgs']).transpose(3, 0, 1, 2) /\n 255.).astype(np.float32))\n\n imgs = self.trans(imgs).data.numpy()\n\n if self.type in ('AugMix', 'RandAugment'):\n imgs[imgs > 255] = 255\n imgs[imgs < 0] = 0\n imgs = imgs.astype(np.uint8)\n\n # torch.tensor(t, c, h, w) -> list[ndarray(h, w, 3)]\n imgs = [x.transpose(1, 2, 0) for x in imgs]\n else:\n # float32 -> uint8\n imgs = imgs * 255\n imgs[imgs > 255] = 255\n imgs[imgs < 0] = 0\n imgs = imgs.astype(np.uint8)\n\n # torch.tensor(c, t, h, w) -> list[ndarray(h, w, 3)]\n imgs = [x for x in imgs.transpose(1, 2, 3, 0)]\n\n results['imgs'] = imgs\n\n return results\n\n\[email protected]_module()\nclass PoseCompact:\n \"\"\"Convert the coordinates of keypoints to make it more compact.\n Specifically, it first find a tight bounding box that surrounds all joints\n in each frame, then we expand the tight box by a given padding ratio. For\n example, if 'padding == 0.25', then the expanded box has unchanged center,\n and 1.25x width and height.\n\n Required keys in results are \"img_shape\", \"keypoint\", add or modified keys\n are \"img_shape\", \"keypoint\", \"crop_quadruple\".\n\n Args:\n padding (float): The padding size. Default: 0.25.\n threshold (int): The threshold for the tight bounding box. If the width\n or height of the tight bounding box is smaller than the threshold,\n we do not perform the compact operation. Default: 10.\n hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded\n box. Float indicates the specific ratio and tuple indicates a\n ratio range. If set as None, it means there is no requirement on\n hw_ratio. Default: None.\n allow_imgpad (bool): Whether to allow expanding the box outside the\n image to meet the hw_ratio requirement. 
Default: True.\n\n Returns:\n type: Description of returned object.\n \"\"\"\n\n def __init__(self,\n padding=0.25,\n threshold=10,\n hw_ratio=None,\n allow_imgpad=True):\n\n self.padding = padding\n self.threshold = threshold\n if hw_ratio is not None:\n hw_ratio = _pair(hw_ratio)\n\n self.hw_ratio = hw_ratio\n\n self.allow_imgpad = allow_imgpad\n assert self.padding >= 0\n\n def __call__(self, results):\n img_shape = results['img_shape']\n h, w = img_shape\n kp = results['keypoint']\n\n # Make NaN zero\n kp[np.isnan(kp)] = 0.\n kp_x = kp[..., 0]\n kp_y = kp[..., 1]\n\n min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)\n min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)\n max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)\n max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)\n\n # The compact area is too small\n if max_x - min_x < self.threshold or max_y - min_y < self.threshold:\n return results\n\n center = ((max_x + min_x) / 2, (max_y + min_y) / 2)\n half_width = (max_x - min_x) / 2 * (1 + self.padding)\n half_height = (max_y - min_y) / 2 * (1 + self.padding)\n\n if self.hw_ratio is not None:\n half_height = max(self.hw_ratio[0] * half_width, half_height)\n half_width = max(1 / self.hw_ratio[1] * half_height, half_width)\n\n min_x, max_x = center[0] - half_width, center[0] + half_width\n min_y, max_y = center[1] - half_height, center[1] + half_height\n\n # hot update\n if not self.allow_imgpad:\n min_x, min_y = int(max(0, min_x)), int(max(0, min_y))\n max_x, max_y = int(min(w, max_x)), int(min(h, max_y))\n else:\n min_x, min_y = int(min_x), int(min_y)\n max_x, max_y = int(max_x), int(max_y)\n\n kp_x[kp_x != 0] -= min_x\n kp_y[kp_y != 0] -= min_y\n\n new_shape = (max_y - min_y, max_x - min_x)\n results['img_shape'] = new_shape\n\n # the order is x, y, w, h (in [0, 1]), a tuple\n crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))\n new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,\n (max_y - min_y) / h)\n crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)\n results['crop_quadruple'] = crop_quadruple\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '\n f'threshold={self.threshold}, '\n f'hw_ratio={self.hw_ratio}, '\n f'allow_imgpad={self.allow_imgpad})')\n return repr_str\n\n\[email protected]_module()\nclass Imgaug:\n \"\"\"Imgaug augmentation.\n\n Adds custom transformations from imgaug library.\n Please visit `https://imgaug.readthedocs.io/en/latest/index.html`\n to get more information. Two demo configs could be found in tsn and i3d\n config folder.\n\n It's better to use uint8 images as inputs since imgaug works best with\n numpy dtype uint8 and isn't well tested with other dtypes. It should be\n noted that not all of the augmenters have the same input and output dtype,\n which may cause unexpected results.\n\n Required keys are \"imgs\", \"img_shape\"(if \"gt_bboxes\" is not None) and\n \"modality\", added or modified keys are \"imgs\", \"img_shape\", \"gt_bboxes\"\n and \"proposals\".\n\n It is worth mentioning that `Imgaug` will NOT create custom keys like\n \"interpolation\", \"crop_bbox\", \"flip_direction\", etc. So when using\n `Imgaug` along with other mmaction2 pipelines, we should pay more attention\n to required keys.\n\n Two steps to use `Imgaug` pipeline:\n 1. Create initialization parameter `transforms`. There are three ways\n to create `transforms`.\n 1) string: only support `default` for now.\n e.g. 
`transforms='default'`\n 2) list[dict]: create a list of augmenters by a list of dicts, each\n dict corresponds to one augmenter. Every dict MUST contain a key\n named `type`. `type` should be a string(iaa.Augmenter's name) or\n an iaa.Augmenter subclass.\n e.g. `transforms=[dict(type='Rotate', rotate=(-20, 20))]`\n e.g. `transforms=[dict(type=iaa.Rotate, rotate=(-20, 20))]`\n 3) iaa.Augmenter: create an imgaug.Augmenter object.\n e.g. `transforms=iaa.Rotate(rotate=(-20, 20))`\n 2. Add `Imgaug` in dataset pipeline. It is recommended to insert imgaug\n pipeline before `Normalize`. A demo pipeline is listed as follows.\n ```\n pipeline = [\n dict(\n type='SampleFrames',\n clip_len=1,\n frame_interval=1,\n num_clips=16,\n ),\n dict(type='RawFrameDecode'),\n dict(type='Resize', scale=(-1, 256)),\n dict(\n type='MultiScaleCrop',\n input_size=224,\n scales=(1, 0.875, 0.75, 0.66),\n random_crop=False,\n max_wh_scale_gap=1,\n num_fixed_crops=13),\n dict(type='Resize', scale=(224, 224), keep_ratio=False),\n dict(type='Flip', flip_ratio=0.5),\n dict(type='Imgaug', transforms='default'),\n # dict(type='Imgaug', transforms=[\n # dict(type='Rotate', rotate=(-20, 20))\n # ]),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='FormatShape', input_format='NCHW'),\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\n dict(type='ToTensor', keys=['imgs', 'label'])\n ]\n ```\n\n Args:\n transforms (str | list[dict] | :obj:`iaa.Augmenter`): Three different\n ways to create imgaug augmenter.\n \"\"\"\n\n def __init__(self, transforms):\n import imgaug.augmenters as iaa\n\n if transforms == 'default':\n self.transforms = self.default_transforms()\n elif isinstance(transforms, list):\n assert all(isinstance(trans, dict) for trans in transforms)\n self.transforms = transforms\n elif isinstance(transforms, iaa.Augmenter):\n self.aug = self.transforms = transforms\n else:\n raise ValueError('transforms must be `default` or a list of dicts'\n ' or iaa.Augmenter object')\n\n if not isinstance(transforms, iaa.Augmenter):\n self.aug = iaa.Sequential(\n [self.imgaug_builder(t) for t in self.transforms])\n\n @staticmethod\n def default_transforms():\n \"\"\"Default transforms for imgaug.\n\n Implement RandAugment by imgaug.\n Plase visit `https://arxiv.org/abs/1909.13719` for more information.\n\n Augmenters and hyper parameters are borrowed from the following repo:\n https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa\n\n Miss one augmenter ``SolarizeAdd`` since imgaug doesn't support this.\n\n Returns:\n dict: The constructed RandAugment transforms.\n \"\"\"\n # RandAugment hyper params\n num_augmenters = 2\n cur_magnitude, max_magnitude = 9, 10\n cur_level = 1.0 * cur_magnitude / max_magnitude\n\n return [\n dict(\n type='SomeOf',\n n=num_augmenters,\n children=[\n dict(\n type='ShearX',\n shear=17.19 * cur_level * random.choice([-1, 1])),\n dict(\n type='ShearY',\n shear=17.19 * cur_level * random.choice([-1, 1])),\n dict(\n type='TranslateX',\n percent=.2 * cur_level * random.choice([-1, 1])),\n dict(\n type='TranslateY',\n percent=.2 * cur_level * random.choice([-1, 1])),\n dict(\n type='Rotate',\n rotate=30 * cur_level * random.choice([-1, 1])),\n dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),\n dict(type='Solarize', threshold=256 * cur_level),\n dict(type='EnhanceColor', factor=1.8 * cur_level + .1),\n dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),\n dict(\n type='EnhanceBrightness', factor=1.8 * cur_level + .1),\n 
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),\n dict(type='Autocontrast', cutoff=0),\n dict(type='Equalize'),\n dict(type='Invert', p=1.),\n dict(\n type='Cutout',\n nb_iterations=1,\n size=0.2 * cur_level,\n squared=True)\n ])\n ]\n\n def imgaug_builder(self, cfg):\n \"\"\"Import a module from imgaug.\n\n It follows the logic of :func:`build_from_cfg`. Use a dict object to\n create an iaa.Augmenter object.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n\n Returns:\n obj:`iaa.Augmenter`: The constructed imgaug augmenter.\n \"\"\"\n import imgaug.augmenters as iaa\n\n assert isinstance(cfg, dict) and 'type' in cfg\n args = cfg.copy()\n\n obj_type = args.pop('type')\n if mmcv.is_str(obj_type):\n obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \\\n else getattr(iaa.pillike, obj_type)\n elif issubclass(obj_type, iaa.Augmenter):\n obj_cls = obj_type\n else:\n raise TypeError(\n f'type must be a str or valid type, but got {type(obj_type)}')\n\n if 'children' in args:\n args['children'] = [\n self.imgaug_builder(child) for child in args['children']\n ]\n\n return obj_cls(**args)\n\n def __repr__(self):\n repr_str = self.__class__.__name__ + f'(transforms={self.aug})'\n return repr_str\n\n def __call__(self, results):\n assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'\n in_type = results['imgs'][0].dtype.type\n\n cur_aug = self.aug.to_deterministic()\n\n results['imgs'] = [\n cur_aug.augment_image(frame) for frame in results['imgs']\n ]\n img_h, img_w, _ = results['imgs'][0].shape\n\n out_type = results['imgs'][0].dtype.type\n assert in_type == out_type, \\\n ('Imgaug input dtype and output dtype are not the same. ',\n f'Convert from {in_type} to {out_type}')\n\n if 'gt_bboxes' in results:\n from imgaug.augmentables import bbs\n bbox_list = [\n bbs.BoundingBox(\n x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])\n for bbox in results['gt_bboxes']\n ]\n bboxes = bbs.BoundingBoxesOnImage(\n bbox_list, shape=results['img_shape'])\n bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])\n results['gt_bboxes'] = [[\n max(bbox.x1, 0),\n max(bbox.y1, 0),\n min(bbox.x2, img_w),\n min(bbox.y2, img_h)\n ] for bbox in bbox_aug.items]\n if 'proposals' in results:\n bbox_list = [\n bbs.BoundingBox(\n x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])\n for bbox in results['proposals']\n ]\n bboxes = bbs.BoundingBoxesOnImage(\n bbox_list, shape=results['img_shape'])\n bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])\n results['proposals'] = [[\n max(bbox.x1, 0),\n max(bbox.y1, 0),\n min(bbox.x2, img_w),\n min(bbox.y2, img_h)\n ] for bbox in bbox_aug.items]\n\n results['img_shape'] = (img_h, img_w)\n\n return results\n\n\[email protected]_module()\nclass Fuse:\n \"\"\"Fuse lazy operations.\n\n Fusion order:\n crop -> resize -> flip\n\n Required keys are \"imgs\", \"img_shape\" and \"lazy\", added or modified keys\n are \"imgs\", \"lazy\".\n Required keys in \"lazy\" are \"crop_bbox\", \"interpolation\", \"flip_direction\".\n \"\"\"\n\n def __call__(self, results):\n if 'lazy' not in results:\n raise ValueError('No lazy operation detected')\n lazyop = results['lazy']\n imgs = results['imgs']\n\n # crop\n left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)\n imgs = [img[top:bottom, left:right] for img in imgs]\n\n # resize\n img_h, img_w = results['img_shape']\n if lazyop['interpolation'] is None:\n interpolation = 'bilinear'\n else:\n interpolation = lazyop['interpolation']\n imgs = [\n mmcv.imresize(img, (img_w, 
img_h), interpolation=interpolation)\n for img in imgs\n ]\n\n # flip\n if lazyop['flip']:\n for img in imgs:\n mmcv.imflip_(img, lazyop['flip_direction'])\n\n results['imgs'] = imgs\n del results['lazy']\n\n return results\n\n\[email protected]_module()\nclass RandomScale:\n \"\"\"Resize images by a random scale.\n\n Required keys are \"imgs\", \"img_shape\", \"modality\", added or modified\n keys are \"imgs\", \"img_shape\", \"keep_ratio\", \"scale_factor\", \"lazy\",\n \"scale\", \"resize_size\". Required keys in \"lazy\" is None, added or\n modified key is \"interpolation\".\n\n Args:\n scales (tuple[int]): Tuple of scales to be chosen for resize.\n mode (str): Selection mode for choosing the scale. Options are \"range\"\n and \"value\". If set to \"range\", The short edge will be randomly\n chosen from the range of minimum and maximum on the shorter one\n in all tuples. Otherwise, the longer edge will be randomly chosen\n from the range of minimum and maximum on the longer one in all\n tuples. Default: 'range'.\n \"\"\"\n\n def __init__(self, scales, mode='range', **kwargs):\n warnings.warn('\"RandomScale\" is deprecated and will be removed in '\n 'later versions. It is currently not used in MMAction2')\n self.mode = mode\n if self.mode not in ['range', 'value']:\n raise ValueError(f\"mode should be 'range' or 'value', \"\n f'but got {self.mode}')\n self.scales = scales\n self.kwargs = kwargs\n\n def select_scale(self, scales):\n num_scales = len(scales)\n if num_scales == 1:\n # specify a fixed scale\n scale = scales[0]\n elif num_scales == 2:\n if self.mode == 'range':\n scale_long = [max(s) for s in scales]\n scale_short = [min(s) for s in scales]\n long_edge = np.random.randint(\n min(scale_long),\n max(scale_long) + 1)\n short_edge = np.random.randint(\n min(scale_short),\n max(scale_short) + 1)\n scale = (long_edge, short_edge)\n elif self.mode == 'value':\n scale = random.choice(scales)\n else:\n if self.mode != 'value':\n raise ValueError(\"Only 'value' mode supports more than \"\n '2 image scales')\n scale = random.choice(scales)\n\n return scale\n\n def __call__(self, results):\n scale = self.select_scale(self.scales)\n results['scale'] = scale\n resize = Resize(scale, **self.kwargs)\n results = resize(results)\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'scales={self.scales}, mode={self.mode})')\n return repr_str\n\n\[email protected]_module()\nclass RandomCrop:\n \"\"\"Vanilla square random crop that specifics the output size.\n\n Required keys in results are \"img_shape\", \"keypoint\" (optional), \"imgs\"\n (optional), added or modified keys are \"keypoint\", \"imgs\", \"lazy\"; Required\n keys in \"lazy\" are \"flip\", \"crop_bbox\", added or modified key is\n \"crop_bbox\".\n\n Args:\n size (int): The output size of the images.\n lazy (bool): Determine whether to apply lazy operation. 
Default: False.\n \"\"\"\n\n def __init__(self, size, lazy=False):\n if not isinstance(size, int):\n raise TypeError(f'Size must be an int, but got {type(size)}')\n self.size = size\n self.lazy = lazy\n\n @staticmethod\n def _crop_kps(kps, crop_bbox):\n return kps - crop_bbox[:2]\n\n @staticmethod\n def _crop_imgs(imgs, crop_bbox):\n x1, y1, x2, y2 = crop_bbox\n return [img[y1:y2, x1:x2] for img in imgs]\n\n @staticmethod\n def _box_crop(box, crop_bbox):\n \"\"\"Crop the bounding boxes according to the crop_bbox.\n\n Args:\n box (np.ndarray): The bounding boxes.\n crop_bbox(np.ndarray): The bbox used to crop the original image.\n \"\"\"\n\n x1, y1, x2, y2 = crop_bbox\n img_w, img_h = x2 - x1, y2 - y1\n\n box_ = box.copy()\n box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)\n box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)\n return box_\n\n def _all_box_crop(self, results, crop_bbox):\n \"\"\"Crop the gt_bboxes and proposals in results according to crop_bbox.\n\n Args:\n results (dict): All information about the sample, which contain\n 'gt_bboxes' and 'proposals' (optional).\n crop_bbox(np.ndarray): The bbox used to crop the original image.\n \"\"\"\n results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)\n if 'proposals' in results and results['proposals'] is not None:\n assert results['proposals'].shape[1] == 4\n results['proposals'] = self._box_crop(results['proposals'],\n crop_bbox)\n return results\n\n def __call__(self, results):\n \"\"\"Performs the RandomCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n\n img_h, img_w = results['img_shape']\n assert self.size <= img_h and self.size <= img_w\n\n y_offset = 0\n x_offset = 0\n if img_h > self.size:\n y_offset = int(np.random.randint(0, img_h - self.size))\n if img_w > self.size:\n x_offset = int(np.random.randint(0, img_w - self.size))\n\n if 'crop_quadruple' not in results:\n results['crop_quadruple'] = np.array(\n [0, 0, 1, 1], # x, y, w, h\n dtype=np.float32)\n\n x_ratio, y_ratio = x_offset / img_w, y_offset / img_h\n w_ratio, h_ratio = self.size / img_w, self.size / img_h\n\n old_crop_quadruple = results['crop_quadruple']\n old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]\n old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]\n new_crop_quadruple = [\n old_x_ratio + x_ratio * old_w_ratio,\n old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,\n h_ratio * old_x_ratio\n ]\n results['crop_quadruple'] = np.array(\n new_crop_quadruple, dtype=np.float32)\n\n new_h, new_w = self.size, self.size\n\n crop_bbox = np.array(\n [x_offset, y_offset, x_offset + new_w, y_offset + new_h])\n results['crop_bbox'] = crop_bbox\n\n results['img_shape'] = (new_h, new_w)\n\n if not self.lazy:\n if 'keypoint' in results:\n results['keypoint'] = self._crop_kps(results['keypoint'],\n crop_bbox)\n if 'imgs' in results:\n results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Put Flip at last for now')\n\n # record crop_bbox in lazyop dict to ensure only crop once in Fuse\n lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']\n left = x_offset * (lazy_right - lazy_left) / img_w\n right = (x_offset + new_w) * (lazy_right 
- lazy_left) / img_w\n top = y_offset * (lazy_bottom - lazy_top) / img_h\n bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h\n lazyop['crop_bbox'] = np.array([(lazy_left + left),\n (lazy_top + top),\n (lazy_left + right),\n (lazy_top + bottom)],\n dtype=np.float32)\n\n # Process entity boxes\n if 'gt_bboxes' in results:\n assert not self.lazy\n results = self._all_box_crop(results, results['crop_bbox'])\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}(size={self.size}, '\n f'lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass RandomResizedCrop(RandomCrop):\n \"\"\"Random crop that specifics the area and height-weight ratio range.\n\n Required keys in results are \"img_shape\", \"crop_bbox\", \"imgs\" (optional),\n \"keypoint\" (optional), added or modified keys are \"imgs\", \"keypoint\",\n \"crop_bbox\" and \"lazy\"; Required keys in \"lazy\" are \"flip\", \"crop_bbox\",\n added or modified key is \"crop_bbox\".\n\n Args:\n area_range (Tuple[float]): The candidate area scales range of\n output cropped images. Default: (0.08, 1.0).\n aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of\n output cropped images. Default: (3 / 4, 4 / 3).\n lazy (bool): Determine whether to apply lazy operation. Default: False.\n \"\"\"\n\n def __init__(self,\n area_range=(0.08, 1.0),\n aspect_ratio_range=(3 / 4, 4 / 3),\n lazy=False):\n self.area_range = area_range\n self.aspect_ratio_range = aspect_ratio_range\n self.lazy = lazy\n if not mmcv.is_tuple_of(self.area_range, float):\n raise TypeError(f'Area_range must be a tuple of float, '\n f'but got {type(area_range)}')\n if not mmcv.is_tuple_of(self.aspect_ratio_range, float):\n raise TypeError(f'Aspect_ratio_range must be a tuple of float, '\n f'but got {type(aspect_ratio_range)}')\n\n @staticmethod\n def get_crop_bbox(img_shape,\n area_range,\n aspect_ratio_range,\n max_attempts=10):\n \"\"\"Get a crop bbox given the area range and aspect ratio range.\n\n Args:\n img_shape (Tuple[int]): Image shape\n area_range (Tuple[float]): The candidate area scales range of\n output cropped images. Default: (0.08, 1.0).\n aspect_ratio_range (Tuple[float]): The candidate aspect\n ratio range of output cropped images. Default: (3 / 4, 4 / 3).\n max_attempts (int): The maximum of attempts. Default: 10.\n max_attempts (int): Max attempts times to generate random candidate\n bounding box. 
If it doesn't qualified one, the center bounding\n box will be used.\n Returns:\n (list[int]) A random crop bbox within the area range and aspect\n ratio range.\n \"\"\"\n assert 0 < area_range[0] <= area_range[1] <= 1\n assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]\n\n img_h, img_w = img_shape\n area = img_h * img_w\n\n min_ar, max_ar = aspect_ratio_range\n aspect_ratios = np.exp(\n np.random.uniform(\n np.log(min_ar), np.log(max_ar), size=max_attempts))\n target_areas = np.random.uniform(*area_range, size=max_attempts) * area\n candidate_crop_w = np.round(np.sqrt(target_areas *\n aspect_ratios)).astype(np.int32)\n candidate_crop_h = np.round(np.sqrt(target_areas /\n aspect_ratios)).astype(np.int32)\n\n for i in range(max_attempts):\n crop_w = candidate_crop_w[i]\n crop_h = candidate_crop_h[i]\n if crop_h <= img_h and crop_w <= img_w:\n x_offset = random.randint(0, img_w - crop_w)\n y_offset = random.randint(0, img_h - crop_h)\n return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h\n\n # Fallback\n crop_size = min(img_h, img_w)\n x_offset = (img_w - crop_size) // 2\n y_offset = (img_h - crop_size) // 2\n return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size\n\n def __call__(self, results):\n \"\"\"Performs the RandomResizeCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n\n img_h, img_w = results['img_shape']\n\n left, top, right, bottom = self.get_crop_bbox(\n (img_h, img_w), self.area_range, self.aspect_ratio_range)\n new_h, new_w = bottom - top, right - left\n\n if 'crop_quadruple' not in results:\n results['crop_quadruple'] = np.array(\n [0, 0, 1, 1], # x, y, w, h\n dtype=np.float32)\n\n x_ratio, y_ratio = left / img_w, top / img_h\n w_ratio, h_ratio = new_w / img_w, new_h / img_h\n\n old_crop_quadruple = results['crop_quadruple']\n old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]\n old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]\n new_crop_quadruple = [\n old_x_ratio + x_ratio * old_w_ratio,\n old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,\n h_ratio * old_x_ratio\n ]\n results['crop_quadruple'] = np.array(\n new_crop_quadruple, dtype=np.float32)\n\n crop_bbox = np.array([left, top, right, bottom])\n results['crop_bbox'] = crop_bbox\n results['img_shape'] = (new_h, new_w)\n\n if not self.lazy:\n if 'keypoint' in results:\n results['keypoint'] = self._crop_kps(results['keypoint'],\n crop_bbox)\n if 'imgs' in results:\n results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Put Flip at last for now')\n\n # record crop_bbox in lazyop dict to ensure only crop once in Fuse\n lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']\n left = left * (lazy_right - lazy_left) / img_w\n right = right * (lazy_right - lazy_left) / img_w\n top = top * (lazy_bottom - lazy_top) / img_h\n bottom = bottom * (lazy_bottom - lazy_top) / img_h\n lazyop['crop_bbox'] = np.array([(lazy_left + left),\n (lazy_top + top),\n (lazy_left + right),\n (lazy_top + bottom)],\n dtype=np.float32)\n\n if 'gt_bboxes' in results:\n assert not self.lazy\n results = self._all_box_crop(results, results['crop_bbox'])\n\n return results\n\n def __repr__(self):\n repr_str = 
(f'{self.__class__.__name__}('\n f'area_range={self.area_range}, '\n f'aspect_ratio_range={self.aspect_ratio_range}, '\n f'lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass MultiScaleCrop(RandomCrop):\n \"\"\"Crop images with a list of randomly selected scales.\n\n Randomly select the w and h scales from a list of scales. Scale of 1 means\n the base size, which is the minimal of image width and height. The scale\n level of w and h is controlled to be smaller than a certain value to\n prevent too large or small aspect ratio.\n\n Required keys are \"img_shape\", \"imgs\" (optional), \"keypoint\" (optional),\n added or modified keys are \"imgs\", \"crop_bbox\", \"img_shape\", \"lazy\" and\n \"scales\". Required keys in \"lazy\" are \"crop_bbox\", added or modified key is\n \"crop_bbox\".\n\n Args:\n input_size (int | tuple[int]): (w, h) of network input.\n scales (tuple[float]): width and height scales to be selected.\n max_wh_scale_gap (int): Maximum gap of w and h scale levels.\n Default: 1.\n random_crop (bool): If set to True, the cropping bbox will be randomly\n sampled, otherwise it will be sampler from fixed regions.\n Default: False.\n num_fixed_crops (int): If set to 5, the cropping bbox will keep 5\n basic fixed regions: \"upper left\", \"upper right\", \"lower left\",\n \"lower right\", \"center\". If set to 13, the cropping bbox will\n append another 8 fix regions: \"center left\", \"center right\",\n \"lower center\", \"upper center\", \"upper left quarter\",\n \"upper right quarter\", \"lower left quarter\", \"lower right quarter\".\n Default: 5.\n lazy (bool): Determine whether to apply lazy operation. Default: False.\n \"\"\"\n\n def __init__(self,\n input_size,\n scales=(1, ),\n max_wh_scale_gap=1,\n random_crop=False,\n num_fixed_crops=5,\n lazy=False):\n self.input_size = _pair(input_size)\n if not mmcv.is_tuple_of(self.input_size, int):\n raise TypeError(f'Input_size must be int or tuple of int, '\n f'but got {type(input_size)}')\n\n if not isinstance(scales, tuple):\n raise TypeError(f'Scales must be tuple, but got {type(scales)}')\n\n if num_fixed_crops not in [5, 13]:\n raise ValueError(f'Num_fix_crops must be in {[5, 13]}, '\n f'but got {num_fixed_crops}')\n\n self.scales = scales\n self.max_wh_scale_gap = max_wh_scale_gap\n self.random_crop = random_crop\n self.num_fixed_crops = num_fixed_crops\n self.lazy = lazy\n\n def __call__(self, results):\n \"\"\"Performs the MultiScaleCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n\n img_h, img_w = results['img_shape']\n base_size = min(img_h, img_w)\n crop_sizes = [int(base_size * s) for s in self.scales]\n\n candidate_sizes = []\n for i, h in enumerate(crop_sizes):\n for j, w in enumerate(crop_sizes):\n if abs(i - j) <= self.max_wh_scale_gap:\n candidate_sizes.append([w, h])\n\n crop_size = random.choice(candidate_sizes)\n for i in range(2):\n if abs(crop_size[i] - self.input_size[i]) < 3:\n crop_size[i] = self.input_size[i]\n\n crop_w, crop_h = crop_size\n\n if self.random_crop:\n x_offset = random.randint(0, img_w - crop_w)\n y_offset = random.randint(0, img_h - crop_h)\n else:\n w_step = (img_w - crop_w) // 4\n h_step = (img_h - crop_h) // 4\n candidate_offsets = [\n (0, 0), # upper left\n (4 * w_step, 0), # upper right\n (0, 4 * h_step), # 
lower left\n (4 * w_step, 4 * h_step), # lower right\n (2 * w_step, 2 * h_step), # center\n ]\n if self.num_fixed_crops == 13:\n extra_candidate_offsets = [\n (0, 2 * h_step), # center left\n (4 * w_step, 2 * h_step), # center right\n (2 * w_step, 4 * h_step), # lower center\n (2 * w_step, 0 * h_step), # upper center\n (1 * w_step, 1 * h_step), # upper left quarter\n (3 * w_step, 1 * h_step), # upper right quarter\n (1 * w_step, 3 * h_step), # lower left quarter\n (3 * w_step, 3 * h_step) # lower right quarter\n ]\n candidate_offsets.extend(extra_candidate_offsets)\n x_offset, y_offset = random.choice(candidate_offsets)\n\n new_h, new_w = crop_h, crop_w\n\n crop_bbox = np.array(\n [x_offset, y_offset, x_offset + new_w, y_offset + new_h])\n results['crop_bbox'] = crop_bbox\n results['img_shape'] = (new_h, new_w)\n results['scales'] = self.scales\n\n if 'crop_quadruple' not in results:\n results['crop_quadruple'] = np.array(\n [0, 0, 1, 1], # x, y, w, h\n dtype=np.float32)\n\n x_ratio, y_ratio = x_offset / img_w, y_offset / img_h\n w_ratio, h_ratio = new_w / img_w, new_h / img_h\n\n old_crop_quadruple = results['crop_quadruple']\n old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]\n old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]\n new_crop_quadruple = [\n old_x_ratio + x_ratio * old_w_ratio,\n old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,\n h_ratio * old_x_ratio\n ]\n results['crop_quadruple'] = np.array(\n new_crop_quadruple, dtype=np.float32)\n\n if not self.lazy:\n if 'keypoint' in results:\n results['keypoint'] = self._crop_kps(results['keypoint'],\n crop_bbox)\n if 'imgs' in results:\n results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Put Flip at last for now')\n\n # record crop_bbox in lazyop dict to ensure only crop once in Fuse\n lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']\n left = x_offset * (lazy_right - lazy_left) / img_w\n right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w\n top = y_offset * (lazy_bottom - lazy_top) / img_h\n bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h\n lazyop['crop_bbox'] = np.array([(lazy_left + left),\n (lazy_top + top),\n (lazy_left + right),\n (lazy_top + bottom)],\n dtype=np.float32)\n\n if 'gt_bboxes' in results:\n assert not self.lazy\n results = self._all_box_crop(results, results['crop_bbox'])\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'input_size={self.input_size}, scales={self.scales}, '\n f'max_wh_scale_gap={self.max_wh_scale_gap}, '\n f'random_crop={self.random_crop}, '\n f'num_fixed_crops={self.num_fixed_crops}, '\n f'lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass Resize:\n \"\"\"Resize images to a specific size.\n\n Required keys are \"img_shape\", \"modality\", \"imgs\" (optional), \"keypoint\"\n (optional), added or modified keys are \"imgs\", \"img_shape\", \"keep_ratio\",\n \"scale_factor\", \"lazy\", \"resize_size\". 
Required keys in \"lazy\" is None,\n added or modified key is \"interpolation\".\n\n Args:\n scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling\n factor or maximum size:\n If it is a float number, the image will be rescaled by this\n factor, else if it is a tuple of 2 integers, the image will\n be rescaled as large as possible within the scale.\n Otherwise, it serves as (w, h) of output size.\n keep_ratio (bool): If set to True, Images will be resized without\n changing the aspect ratio. Otherwise, it will resize images to a\n given size. Default: True.\n interpolation (str): Algorithm used for interpolation:\n \"nearest\" | \"bilinear\". Default: \"bilinear\".\n lazy (bool): Determine whether to apply lazy operation. Default: False.\n \"\"\"\n\n def __init__(self,\n scale,\n keep_ratio=True,\n interpolation='bilinear',\n lazy=False):\n if isinstance(scale, float):\n if scale <= 0:\n raise ValueError(f'Invalid scale {scale}, must be positive.')\n elif isinstance(scale, tuple):\n max_long_edge = max(scale)\n max_short_edge = min(scale)\n if max_short_edge == -1:\n # assign np.inf to long edge for rescaling short edge later.\n scale = (np.inf, max_long_edge)\n else:\n raise TypeError(\n f'Scale must be float or tuple of int, but got {type(scale)}')\n self.scale = scale\n self.keep_ratio = keep_ratio\n self.interpolation = interpolation\n self.lazy = lazy\n\n def _resize_imgs(self, imgs, new_w, new_h):\n return [\n mmcv.imresize(\n img, (new_w, new_h), interpolation=self.interpolation)\n for img in imgs\n ]\n\n @staticmethod\n def _resize_kps(kps, scale_factor):\n return kps * scale_factor\n\n @staticmethod\n def _box_resize(box, scale_factor):\n \"\"\"Rescale the bounding boxes according to the scale_factor.\n\n Args:\n box (np.ndarray): The bounding boxes.\n scale_factor (np.ndarray): The scale factor used for rescaling.\n \"\"\"\n assert len(scale_factor) == 2\n scale_factor = np.concatenate([scale_factor, scale_factor])\n return box * scale_factor\n\n def __call__(self, results):\n \"\"\"Performs the Resize augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n\n if 'scale_factor' not in results:\n results['scale_factor'] = np.array([1, 1], dtype=np.float32)\n img_h, img_w = results['img_shape']\n\n if self.keep_ratio:\n new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)\n else:\n new_w, new_h = self.scale\n\n self.scale_factor = np.array([new_w / img_w, new_h / img_h],\n dtype=np.float32)\n\n results['img_shape'] = (new_h, new_w)\n results['keep_ratio'] = self.keep_ratio\n results['scale_factor'] = results['scale_factor'] * self.scale_factor\n\n if not self.lazy:\n if 'imgs' in results:\n results['imgs'] = self._resize_imgs(results['imgs'], new_w,\n new_h)\n if 'keypoint' in results:\n results['keypoint'] = self._resize_kps(results['keypoint'],\n self.scale_factor)\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Put Flip at last for now')\n lazyop['interpolation'] = self.interpolation\n\n if 'gt_bboxes' in results:\n assert not self.lazy\n results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],\n self.scale_factor)\n if 'proposals' in results and results['proposals'] is not None:\n assert results['proposals'].shape[1] == 4\n results['proposals'] = self._box_resize(\n 
results['proposals'], self.scale_factor)\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'scale={self.scale}, keep_ratio={self.keep_ratio}, '\n f'interpolation={self.interpolation}, '\n f'lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass RandomRescale:\n \"\"\"Randomly resize images so that the short_edge is resized to a specific\n size in a given range. The scale ratio is unchanged after resizing.\n\n Required keys are \"imgs\", \"img_shape\", \"modality\", added or modified\n keys are \"imgs\", \"img_shape\", \"keep_ratio\", \"scale_factor\", \"resize_size\",\n \"short_edge\".\n\n Args:\n scale_range (tuple[int]): The range of short edge length. A closed\n interval.\n interpolation (str): Algorithm used for interpolation:\n \"nearest\" | \"bilinear\". Default: \"bilinear\".\n \"\"\"\n\n def __init__(self, scale_range, interpolation='bilinear'):\n self.scale_range = scale_range\n # make sure scale_range is legal, first make sure the type is OK\n assert mmcv.is_tuple_of(scale_range, int)\n assert len(scale_range) == 2\n assert scale_range[0] < scale_range[1]\n assert np.all([x > 0 for x in scale_range])\n\n self.keep_ratio = True\n self.interpolation = interpolation\n\n def __call__(self, results):\n \"\"\"Performs the Resize augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n short_edge = np.random.randint(self.scale_range[0],\n self.scale_range[1] + 1)\n resize = Resize((-1, short_edge),\n keep_ratio=True,\n interpolation=self.interpolation,\n lazy=False)\n results = resize(results)\n\n results['short_edge'] = short_edge\n return results\n\n def __repr__(self):\n scale_range = self.scale_range\n repr_str = (f'{self.__class__.__name__}('\n f'scale_range=({scale_range[0]}, {scale_range[1]}), '\n f'interpolation={self.interpolation})')\n return repr_str\n\n\[email protected]_module()\nclass Flip:\n \"\"\"Flip the input images with a probability.\n\n Reverse the order of elements in the given imgs with a specific direction.\n The shape of the imgs is preserved, but the elements are reordered.\n\n Required keys are \"img_shape\", \"modality\", \"imgs\" (optional), \"keypoint\"\n (optional), added or modified keys are \"imgs\", \"keypoint\", \"lazy\" and\n \"flip_direction\". Required keys in \"lazy\" is None, added or modified key\n are \"flip\" and \"flip_direction\". The Flip augmentation should be placed\n after any cropping / reshaping augmentations, to make sure crop_quadruple\n is calculated properly.\n\n Args:\n flip_ratio (float): Probability of implementing flip. Default: 0.5.\n direction (str): Flip imgs horizontally or vertically. Options are\n \"horizontal\" | \"vertical\". Default: \"horizontal\".\n flip_label_map (Dict[int, int] | None): Transform the label of the\n flipped image with the specific label. Default: None.\n left_kp (list[int]): Indexes of left keypoints, used to flip keypoints.\n Default: None.\n right_kp (list[ind]): Indexes of right keypoints, used to flip\n keypoints. Default: None.\n lazy (bool): Determine whether to apply lazy operation. Default: False.\n \"\"\"\n _directions = ['horizontal', 'vertical']\n\n def __init__(self,\n flip_ratio=0.5,\n direction='horizontal',\n flip_label_map=None,\n left_kp=None,\n right_kp=None,\n lazy=False):\n if direction not in self._directions:\n raise ValueError(f'Direction {direction} is not supported. 
'\n f'Currently support ones are {self._directions}')\n self.flip_ratio = flip_ratio\n self.direction = direction\n self.flip_label_map = flip_label_map\n self.left_kp = left_kp\n self.right_kp = right_kp\n self.lazy = lazy\n\n def _flip_imgs(self, imgs, modality):\n _ = [mmcv.imflip_(img, self.direction) for img in imgs]\n lt = len(imgs)\n if modality == 'Flow':\n # The 1st frame of each 2 frames is flow-x\n for i in range(0, lt, 2):\n imgs[i] = mmcv.iminvert(imgs[i])\n return imgs\n\n def _flip_kps(self, kps, kpscores, img_width):\n kp_x = kps[..., 0]\n kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0]\n new_order = list(range(kps.shape[2]))\n if self.left_kp is not None and self.right_kp is not None:\n for left, right in zip(self.left_kp, self.right_kp):\n new_order[left] = right\n new_order[right] = left\n kps = kps[:, :, new_order]\n if kpscores is not None:\n kpscores = kpscores[:, :, new_order]\n return kps, kpscores\n\n @staticmethod\n def _box_flip(box, img_width):\n \"\"\"Flip the bounding boxes given the width of the image.\n\n Args:\n box (np.ndarray): The bounding boxes.\n img_width (int): The img width.\n \"\"\"\n box_ = box.copy()\n box_[..., 0::4] = img_width - box[..., 2::4]\n box_[..., 2::4] = img_width - box[..., 0::4]\n return box_\n\n def __call__(self, results):\n \"\"\"Performs the Flip augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n assert self.direction == 'horizontal', (\n 'Only horizontal flips are'\n 'supported for human keypoints')\n\n modality = results['modality']\n if modality == 'Flow':\n assert self.direction == 'horizontal'\n\n flip = np.random.rand() < self.flip_ratio\n\n results['flip'] = flip\n results['flip_direction'] = self.direction\n img_width = results['img_shape'][1]\n\n if self.flip_label_map is not None and flip:\n results['label'] = self.flip_label_map.get(results['label'],\n results['label'])\n\n if not self.lazy:\n if flip:\n if 'imgs' in results:\n results['imgs'] = self._flip_imgs(results['imgs'],\n modality)\n if 'keypoint' in results:\n kp = results['keypoint']\n kpscore = results.get('keypoint_score', None)\n kp, kpscore = self._flip_kps(kp, kpscore, img_width)\n results['keypoint'] = kp\n if 'keypoint_score' in results:\n results['keypoint_score'] = kpscore\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Use one Flip please')\n lazyop['flip'] = flip\n lazyop['flip_direction'] = self.direction\n\n if 'gt_bboxes' in results and flip:\n assert not self.lazy and self.direction == 'horizontal'\n width = results['img_shape'][1]\n results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)\n if 'proposals' in results and results['proposals'] is not None:\n assert results['proposals'].shape[1] == 4\n results['proposals'] = self._box_flip(results['proposals'],\n width)\n\n return results\n\n def __repr__(self):\n repr_str = (\n f'{self.__class__.__name__}('\n f'flip_ratio={self.flip_ratio}, direction={self.direction}, '\n f'flip_label_map={self.flip_label_map}, lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass Normalize:\n \"\"\"Normalize images with the given mean and std value.\n\n Required keys are \"imgs\", \"img_shape\", \"modality\", added or modified\n keys are \"imgs\" and \"img_norm_cfg\". 
If modality is 'Flow', additional\n keys \"scale_factor\" is required\n\n Args:\n mean (Sequence[float]): Mean values of different channels.\n std (Sequence[float]): Std values of different channels.\n to_bgr (bool): Whether to convert channels from RGB to BGR.\n Default: False.\n adjust_magnitude (bool): Indicate whether to adjust the flow magnitude\n on 'scale_factor' when modality is 'Flow'. Default: False.\n \"\"\"\n\n def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):\n if not isinstance(mean, Sequence):\n raise TypeError(\n f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'\n )\n\n if not isinstance(std, Sequence):\n raise TypeError(\n f'Std must be list, tuple or np.ndarray, but got {type(std)}')\n\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n self.to_bgr = to_bgr\n self.adjust_magnitude = adjust_magnitude\n\n def __call__(self, results):\n modality = results['modality']\n\n if modality == 'RGB':\n n = len(results['imgs'])\n h, w, c = results['imgs'][0].shape\n imgs = np.empty((n, h, w, c), dtype=np.float32)\n for i, img in enumerate(results['imgs']):\n imgs[i] = img\n\n for img in imgs:\n mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr)\n\n results['imgs'] = imgs\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_bgr=self.to_bgr)\n return results\n if modality == 'Flow':\n num_imgs = len(results['imgs'])\n assert num_imgs % 2 == 0\n assert self.mean.shape[0] == 2\n assert self.std.shape[0] == 2\n n = num_imgs // 2\n h, w = results['imgs'][0].shape\n x_flow = np.empty((n, h, w), dtype=np.float32)\n y_flow = np.empty((n, h, w), dtype=np.float32)\n for i in range(n):\n x_flow[i] = results['imgs'][2 * i]\n y_flow[i] = results['imgs'][2 * i + 1]\n x_flow = (x_flow - self.mean[0]) / self.std[0]\n y_flow = (y_flow - self.mean[1]) / self.std[1]\n if self.adjust_magnitude:\n x_flow = x_flow * results['scale_factor'][0]\n y_flow = y_flow * results['scale_factor'][1]\n imgs = np.stack([x_flow, y_flow], axis=-1)\n results['imgs'] = imgs\n args = dict(\n mean=self.mean,\n std=self.std,\n to_bgr=self.to_bgr,\n adjust_magnitude=self.adjust_magnitude)\n results['img_norm_cfg'] = args\n return results\n raise NotImplementedError\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'mean={self.mean}, '\n f'std={self.std}, '\n f'to_bgr={self.to_bgr}, '\n f'adjust_magnitude={self.adjust_magnitude})')\n return repr_str\n\n\[email protected]_module()\nclass ColorJitter:\n \"\"\"Perform ColorJitter to each img.\n\n Required keys are \"imgs\", added or modified keys are \"imgs\".\n\n Args:\n brightness (float | tuple[float]): The jitter range for brightness, if\n set as a float, the range will be (1 - brightness, 1 + brightness).\n Default: 0.5.\n contrast (float | tuple[float]): The jitter range for contrast, if set\n as a float, the range will be (1 - contrast, 1 + contrast).\n Default: 0.5.\n saturation (float | tuple[float]): The jitter range for saturation, if\n set as a float, the range will be (1 - saturation, 1 + saturation).\n Default: 0.5.\n hue (float | tuple[float]): The jitter range for hue, if set as a\n float, the range will be (-hue, hue). 
Default: 0.1.\n \"\"\"\n\n @staticmethod\n def check_input(val, max, base):\n if isinstance(val, tuple):\n assert base - max <= val[0] <= val[1] <= base + max\n return val\n assert val <= max\n return (base - val, base + val)\n\n @staticmethod\n def rgb_to_grayscale(img):\n return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]\n\n @staticmethod\n def adjust_contrast(img, factor):\n val = np.mean(ColorJitter.rgb_to_grayscale(img))\n return factor * img + (1 - factor) * val\n\n @staticmethod\n def adjust_saturation(img, factor):\n gray = np.stack([ColorJitter.rgb_to_grayscale(img)] * 3, axis=-1)\n return factor * img + (1 - factor) * gray\n\n @staticmethod\n def adjust_hue(img, factor):\n img = np.clip(img, 0, 255).astype(np.uint8)\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n offset = int(factor * 255)\n hsv[..., 0] = (hsv[..., 0] + offset) % 180\n img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n return img.astype(np.float32)\n\n def __init__(self, brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1):\n self.brightness = self.check_input(brightness, 1, 1)\n self.contrast = self.check_input(contrast, 1, 1)\n self.saturation = self.check_input(saturation, 1, 1)\n self.hue = self.check_input(hue, 0.5, 0)\n self.fn_idx = np.random.permutation(4)\n\n def __call__(self, results):\n imgs = results['imgs']\n num_clips, clip_len = 1, len(imgs)\n\n new_imgs = []\n for i in range(num_clips):\n b = np.random.uniform(\n low=self.brightness[0], high=self.brightness[1])\n c = np.random.uniform(low=self.contrast[0], high=self.contrast[1])\n s = np.random.uniform(\n low=self.saturation[0], high=self.saturation[1])\n h = np.random.uniform(low=self.hue[0], high=self.hue[1])\n start, end = i * clip_len, (i + 1) * clip_len\n\n for img in imgs[start:end]:\n img = img.astype(np.float32)\n for fn_id in self.fn_idx:\n if fn_id == 0 and b != 1:\n img *= b\n if fn_id == 1 and c != 1:\n img = self.adjust_contrast(img, c)\n if fn_id == 2 and s != 1:\n img = self.adjust_saturation(img, s)\n if fn_id == 3 and h != 0:\n img = self.adjust_hue(img, h)\n img = np.clip(img, 0, 255).astype(np.uint8)\n new_imgs.append(img)\n results['imgs'] = new_imgs\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'brightness={self.brightness}, '\n f'contrast={self.contrast}, '\n f'saturation={self.saturation}, '\n f'hue={self.hue})')\n return repr_str\n\n\[email protected]_module()\nclass CenterCrop(RandomCrop):\n \"\"\"Crop the center area from images.\n\n Required keys are \"img_shape\", \"imgs\" (optional), \"keypoint\" (optional),\n added or modified keys are \"imgs\", \"keypoint\", \"crop_bbox\", \"lazy\" and\n \"img_shape\". Required keys in \"lazy\" is \"crop_bbox\", added or modified key\n is \"crop_bbox\".\n\n Args:\n crop_size (int | tuple[int]): (w, h) of crop size.\n lazy (bool): Determine whether to apply lazy operation. 
Default: False.\n \"\"\"\n\n def __init__(self, crop_size, lazy=False):\n self.crop_size = _pair(crop_size)\n self.lazy = lazy\n if not mmcv.is_tuple_of(self.crop_size, int):\n raise TypeError(f'Crop_size must be int or tuple of int, '\n f'but got {type(crop_size)}')\n\n def __call__(self, results):\n \"\"\"Performs the CenterCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, self.lazy)\n if 'keypoint' in results:\n assert not self.lazy, ('Keypoint Augmentations are not compatible '\n 'with lazy == True')\n\n img_h, img_w = results['img_shape']\n crop_w, crop_h = self.crop_size\n\n left = (img_w - crop_w) // 2\n top = (img_h - crop_h) // 2\n right = left + crop_w\n bottom = top + crop_h\n new_h, new_w = bottom - top, right - left\n\n crop_bbox = np.array([left, top, right, bottom])\n results['crop_bbox'] = crop_bbox\n results['img_shape'] = (new_h, new_w)\n\n if 'crop_quadruple' not in results:\n results['crop_quadruple'] = np.array(\n [0, 0, 1, 1], # x, y, w, h\n dtype=np.float32)\n\n x_ratio, y_ratio = left / img_w, top / img_h\n w_ratio, h_ratio = new_w / img_w, new_h / img_h\n\n old_crop_quadruple = results['crop_quadruple']\n old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]\n old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]\n new_crop_quadruple = [\n old_x_ratio + x_ratio * old_w_ratio,\n old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,\n h_ratio * old_x_ratio\n ]\n results['crop_quadruple'] = np.array(\n new_crop_quadruple, dtype=np.float32)\n\n if not self.lazy:\n if 'keypoint' in results:\n results['keypoint'] = self._crop_kps(results['keypoint'],\n crop_bbox)\n if 'imgs' in results:\n results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)\n else:\n lazyop = results['lazy']\n if lazyop['flip']:\n raise NotImplementedError('Put Flip at last for now')\n\n # record crop_bbox in lazyop dict to ensure only crop once in Fuse\n lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']\n left = left * (lazy_right - lazy_left) / img_w\n right = right * (lazy_right - lazy_left) / img_w\n top = top * (lazy_bottom - lazy_top) / img_h\n bottom = bottom * (lazy_bottom - lazy_top) / img_h\n lazyop['crop_bbox'] = np.array([(lazy_left + left),\n (lazy_top + top),\n (lazy_left + right),\n (lazy_top + bottom)],\n dtype=np.float32)\n\n if 'gt_bboxes' in results:\n assert not self.lazy\n results = self._all_box_crop(results, results['crop_bbox'])\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, '\n f'lazy={self.lazy})')\n return repr_str\n\n\[email protected]_module()\nclass ThreeCrop:\n \"\"\"Crop images into three crops.\n\n Crop the images equally into three crops with equal intervals along the\n shorter side.\n Required keys are \"imgs\", \"img_shape\", added or modified keys are \"imgs\",\n \"crop_bbox\" and \"img_shape\".\n\n Args:\n crop_size(int | tuple[int]): (w, h) of crop size.\n \"\"\"\n\n def __init__(self, crop_size):\n self.crop_size = _pair(crop_size)\n if not mmcv.is_tuple_of(self.crop_size, int):\n raise TypeError(f'Crop_size must be int or tuple of int, '\n f'but got {type(crop_size)}')\n\n def __call__(self, results):\n \"\"\"Performs the ThreeCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, False)\n if 
'gt_bboxes' in results or 'proposals' in results:\n warnings.warn('ThreeCrop cannot process bounding boxes')\n\n imgs = results['imgs']\n img_h, img_w = results['imgs'][0].shape[:2]\n crop_w, crop_h = self.crop_size\n assert crop_h == img_h or crop_w == img_w\n\n if crop_h == img_h:\n w_step = (img_w - crop_w) // 2\n offsets = [\n (0, 0), # left\n (2 * w_step, 0), # right\n (w_step, 0), # middle\n ]\n elif crop_w == img_w:\n h_step = (img_h - crop_h) // 2\n offsets = [\n (0, 0), # top\n (0, 2 * h_step), # down\n (0, h_step), # middle\n ]\n\n cropped = []\n crop_bboxes = []\n for x_offset, y_offset in offsets:\n bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]\n crop = [\n img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]\n for img in imgs\n ]\n cropped.extend(crop)\n crop_bboxes.extend([bbox for _ in range(len(imgs))])\n\n crop_bboxes = np.array(crop_bboxes)\n results['imgs'] = cropped\n results['crop_bbox'] = crop_bboxes\n results['img_shape'] = results['imgs'][0].shape[:2]\n\n return results\n\n def __repr__(self):\n repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'\n return repr_str\n\n\[email protected]_module()\nclass TenCrop:\n \"\"\"Crop the images into 10 crops (corner + center + flip).\n\n Crop the four corners and the center part of the image with the same\n given crop_size, and flip it horizontally.\n Required keys are \"imgs\", \"img_shape\", added or modified keys are \"imgs\",\n \"crop_bbox\" and \"img_shape\".\n\n Args:\n crop_size(int | tuple[int]): (w, h) of crop size.\n \"\"\"\n\n def __init__(self, crop_size):\n self.crop_size = _pair(crop_size)\n if not mmcv.is_tuple_of(self.crop_size, int):\n raise TypeError(f'Crop_size must be int or tuple of int, '\n f'but got {type(crop_size)}')\n\n def __call__(self, results):\n \"\"\"Performs the TenCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n _init_lazy_if_proper(results, False)\n\n if 'gt_bboxes' in results or 'proposals' in results:\n warnings.warn('TenCrop cannot process bounding boxes')\n\n imgs = results['imgs']\n\n img_h, img_w = results['imgs'][0].shape[:2]\n crop_w, crop_h = self.crop_size\n\n w_step = (img_w - crop_w) // 4\n h_step = (img_h - crop_h) // 4\n\n offsets = [\n (0, 0), # upper left\n (4 * w_step, 0), # upper right\n (0, 4 * h_step), # lower left\n (4 * w_step, 4 * h_step), # lower right\n (2 * w_step, 2 * h_step), # center\n ]\n\n img_crops = list()\n crop_bboxes = list()\n for x_offset, y_offsets in offsets:\n crop = [\n img[y_offsets:y_offsets + crop_h, x_offset:x_offset + crop_w]\n for img in imgs\n ]\n flip_crop = [np.flip(c, axis=1).copy() for c in crop]\n bbox = [x_offset, y_offsets, x_offset + crop_w, y_offsets + crop_h]\n img_crops.extend(crop)\n img_crops.extend(flip_crop)\n crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])\n\n crop_bboxes = np.array(crop_bboxes)\n results['imgs'] = img_crops\n results['crop_bbox'] = crop_bboxes\n results['img_shape'] = results['imgs'][0].shape[:2]\n\n return results\n\n def __repr__(self):\n repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'\n return repr_str\n\n\[email protected]_module()\nclass MultiGroupCrop:\n \"\"\"Randomly crop the images into several groups.\n\n Crop the random region with the same given crop_size and bounding box\n into several groups.\n Required keys are \"imgs\", added or modified keys are \"imgs\", \"crop_bbox\"\n and \"img_shape\".\n\n Args:\n crop_size(int | tuple[int]): (w, 
h) of crop size.\n groups(int): Number of groups.\n \"\"\"\n\n def __init__(self, crop_size, groups):\n self.crop_size = _pair(crop_size)\n self.groups = groups\n if not mmcv.is_tuple_of(self.crop_size, int):\n raise TypeError('Crop size must be int or tuple of int, '\n f'but got {type(crop_size)}')\n\n if not isinstance(groups, int):\n raise TypeError(f'Groups must be int, but got {type(groups)}.')\n\n if groups <= 0:\n raise ValueError('Groups must be positive.')\n\n def __call__(self, results):\n \"\"\"Performs the MultiGroupCrop augmentation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n if 'gt_bboxes' in results or 'proposals' in results:\n warnings.warn('MultiGroupCrop cannot process bounding boxes')\n\n imgs = results['imgs']\n img_h, img_w = imgs[0].shape[:2]\n crop_w, crop_h = self.crop_size\n\n img_crops = []\n crop_bboxes = []\n for _ in range(self.groups):\n x_offset = random.randint(0, img_w - crop_w)\n y_offset = random.randint(0, img_h - crop_h)\n\n bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]\n crop = [\n img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]\n for img in imgs\n ]\n img_crops.extend(crop)\n crop_bboxes.extend([bbox for _ in range(len(imgs))])\n\n crop_bboxes = np.array(crop_bboxes)\n results['imgs'] = img_crops\n results['crop_bbox'] = crop_bboxes\n results['img_shape'] = results['imgs'][0].shape[:2]\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}'\n f'(crop_size={self.crop_size}, '\n f'groups={self.groups})')\n return repr_str\n\n\[email protected]_module()\nclass AudioAmplify:\n \"\"\"Amplify the waveform.\n\n Required keys are \"audios\", added or modified keys are \"audios\",\n \"amplify_ratio\".\n\n Args:\n ratio (float): The ratio used to amplify the audio waveform.\n \"\"\"\n\n def __init__(self, ratio):\n if isinstance(ratio, float):\n self.ratio = ratio\n else:\n raise TypeError('Amplification ratio should be float.')\n\n def __call__(self, results):\n \"\"\"Perfrom the audio amplification.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n\n assert 'audios' in results\n results['audios'] *= self.ratio\n results['amplify_ratio'] = self.ratio\n\n return results\n\n def __repr__(self):\n repr_str = f'{self.__class__.__name__}(ratio={self.ratio})'\n return repr_str\n\n\[email protected]_module()\nclass MelSpectrogram:\n \"\"\"MelSpectrogram. Transfer an audio wave into a melspectogram figure.\n\n Required keys are \"audios\", \"sample_rate\", \"num_clips\", added or modified\n keys are \"audios\".\n\n Args:\n window_size (int): The window size in milisecond. Default: 32.\n step_size (int): The step size in milisecond. Default: 16.\n n_mels (int): Number of mels. Default: 80.\n fixed_length (int): The sample length of melspectrogram maybe not\n exactly as wished due to different fps, fix the length for batch\n collation by truncating or padding. 
Default: 128.\n \"\"\"\n\n def __init__(self,\n window_size=32,\n step_size=16,\n n_mels=80,\n fixed_length=128):\n if all(\n isinstance(x, int)\n for x in [window_size, step_size, n_mels, fixed_length]):\n self.window_size = window_size\n self.step_size = step_size\n self.n_mels = n_mels\n self.fixed_length = fixed_length\n else:\n raise TypeError('All arguments should be int.')\n\n def __call__(self, results):\n \"\"\"Perform MelSpectrogram transformation.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n try:\n import librosa\n except ImportError:\n raise ImportError('Install librosa first.')\n signals = results['audios']\n sample_rate = results['sample_rate']\n n_fft = int(round(sample_rate * self.window_size / 1000))\n hop_length = int(round(sample_rate * self.step_size / 1000))\n melspectrograms = list()\n for clip_idx in range(results['num_clips']):\n clip_signal = signals[clip_idx]\n mel = librosa.feature.melspectrogram(\n y=clip_signal,\n sr=sample_rate,\n n_fft=n_fft,\n hop_length=hop_length,\n n_mels=self.n_mels)\n if mel.shape[0] >= self.fixed_length:\n mel = mel[:self.fixed_length, :]\n else:\n mel = np.pad(\n mel, ((0, mel.shape[-1] - self.fixed_length), (0, 0)),\n mode='edge')\n melspectrograms.append(mel)\n\n results['audios'] = np.array(melspectrograms)\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}'\n f'(window_size={self.window_size}), '\n f'step_size={self.step_size}, '\n f'n_mels={self.n_mels}, '\n f'fixed_length={self.fixed_length})')\n return repr_str\n",
"import numpy as np\nimport pytest\nfrom mmcv.utils import assert_dict_has_keys\n\nfrom mmaction.datasets.pipelines import (CenterCrop, MultiGroupCrop,\n MultiScaleCrop, RandomCrop,\n RandomResizedCrop, TenCrop, ThreeCrop)\nfrom .base import check_crop\n\n\nclass TestCrops:\n\n @staticmethod\n def test_random_crop():\n with pytest.raises(TypeError):\n # size must be an int\n RandomCrop(size=(112, 112))\n with pytest.raises(AssertionError):\n # \"size > height\" or \"size > width\" is not allowed\n imgs = list(np.random.rand(2, 224, 341, 3))\n results = dict(imgs=imgs)\n random_crop = RandomCrop(size=320)\n random_crop(results)\n\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n\n # General case\n imgs = list(np.random.rand(2, 224, 341, 3))\n results = dict(imgs=imgs)\n random_crop = RandomCrop(size=224)\n results['gt_bboxes'] = np.array([[0, 0, 340, 224]])\n results['proposals'] = np.array([[0, 0, 340, 224]])\n kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])\n results['keypoint'] = kp\n random_crop_result = random_crop(results)\n assert assert_dict_has_keys(random_crop_result, target_keys)\n assert check_crop(imgs, random_crop_result['imgs'],\n results['crop_bbox'])\n h, w = random_crop_result['img_shape']\n assert h == w == 224\n\n # Test the case that no need for cropping\n imgs = list(np.random.rand(2, 224, 224, 3))\n results = dict(imgs=imgs)\n random_crop = RandomCrop(size=224)\n random_crop_result = random_crop(results)\n assert assert_dict_has_keys(random_crop_result, target_keys)\n assert check_crop(imgs, random_crop_result['imgs'],\n results['crop_bbox'])\n h, w = random_crop_result['img_shape']\n assert h == w == 224\n\n # Test the one-side-equal case\n imgs = list(np.random.rand(2, 224, 225, 3))\n results = dict(imgs=imgs)\n random_crop = RandomCrop(size=224)\n random_crop_result = random_crop(results)\n assert assert_dict_has_keys(random_crop_result, target_keys)\n assert check_crop(imgs, random_crop_result['imgs'],\n results['crop_bbox'])\n h, w = random_crop_result['img_shape']\n assert h == w == 224\n\n assert repr(random_crop) == (f'{random_crop.__class__.__name__}'\n f'(size={224}, lazy={False})')\n\n @staticmethod\n def test_random_resized_crop():\n with pytest.raises(TypeError):\n # area_range must be a tuple of float\n RandomResizedCrop(area_range=0.5)\n with pytest.raises(TypeError):\n # aspect_ratio_range must be a tuple of float\n RandomResizedCrop(area_range=(0.08, 1.0), aspect_ratio_range=0.1)\n\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n # There will be a slight difference because of rounding\n eps = 0.01\n imgs = list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n results['gt_bboxes'] = np.array([[0, 0, 340, 256]])\n results['proposals'] = np.array([[0, 0, 340, 256]])\n kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])\n results['keypoint'] = kp\n\n with pytest.raises(AssertionError):\n # area_range[0] > area_range[1], which is wrong\n random_crop = RandomResizedCrop(area_range=(0.9, 0.7))\n random_crop(results)\n with pytest.raises(AssertionError):\n # 0 > area_range[0] and area_range[1] > 1, which is wrong\n random_crop = RandomResizedCrop(aspect_ratio_range=(-0.1, 2.0))\n random_crop(results)\n\n random_crop = RandomResizedCrop()\n random_crop_result = random_crop(results)\n assert assert_dict_has_keys(random_crop_result, target_keys)\n assert check_crop(imgs, random_crop_result['imgs'],\n results['crop_bbox'])\n h, w = random_crop_result['img_shape']\n assert ((0.08 - eps <= h * w / 256 / 341)\n and (h * w 
/ 256 / 341 <= 1 + eps))\n assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.)\n assert repr(random_crop) == (f'{random_crop.__class__.__name__}'\n f'(area_range={(0.08, 1.0)}, '\n f'aspect_ratio_range={(3 / 4, 4 / 3)}, '\n f'lazy={False})')\n\n random_crop = RandomResizedCrop(\n area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1))\n # Test fallback cases by very big area range\n imgs = list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n random_crop_result = random_crop(results)\n assert assert_dict_has_keys(random_crop_result, target_keys)\n assert check_crop(imgs, random_crop_result['imgs'],\n results['crop_bbox'])\n h, w = random_crop_result['img_shape']\n assert h == w == 256\n\n @staticmethod\n def test_multi_scale_crop():\n with pytest.raises(TypeError):\n # input_size must be int or tuple of int\n MultiScaleCrop(0.5)\n\n with pytest.raises(TypeError):\n # input_size must be int or tuple of int\n MultiScaleCrop('224')\n\n with pytest.raises(TypeError):\n # scales must be tuple.\n MultiScaleCrop(\n 224, scales=[\n 1,\n ])\n\n with pytest.raises(ValueError):\n # num_fix_crops must be in [5, 13]\n MultiScaleCrop(224, num_fixed_crops=6)\n\n target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales']\n\n # MultiScaleCrop with normal crops.\n imgs = list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n results['gt_bboxes'] = np.array([[0, 0, 340, 256]])\n results['proposals'] = np.array([[0, 0, 340, 256]])\n kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])\n results['keypoint'] = kp\n config = dict(\n input_size=224,\n scales=(1, 0.8),\n random_crop=False,\n max_wh_scale_gap=0)\n multi_scale_crop = MultiScaleCrop(**config)\n multi_scale_crop_results = multi_scale_crop(results)\n assert assert_dict_has_keys(multi_scale_crop_results, target_keys)\n assert check_crop(imgs, multi_scale_crop_results['imgs'],\n multi_scale_crop_results['crop_bbox'])\n assert multi_scale_crop_results['img_shape'] in [(256, 256),\n (204, 204)]\n\n # MultiScaleCrop with more fixed crops.\n imgs = list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n config = dict(\n input_size=224,\n scales=(1, 0.8),\n random_crop=False,\n max_wh_scale_gap=0,\n num_fixed_crops=13)\n multi_scale_crop = MultiScaleCrop(**config)\n multi_scale_crop_results = multi_scale_crop(results)\n assert assert_dict_has_keys(multi_scale_crop_results, target_keys)\n assert check_crop(imgs, multi_scale_crop_results['imgs'],\n multi_scale_crop_results['crop_bbox'])\n assert multi_scale_crop_results['img_shape'] in [(256, 256),\n (204, 204)]\n\n # MultiScaleCrop with random crop.\n imgs = list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n config = dict(\n input_size=224,\n scales=(1, 0.8),\n random_crop=True,\n max_wh_scale_gap=0)\n multi_scale_crop = MultiScaleCrop(**config)\n multi_scale_crop_results = multi_scale_crop(results)\n assert assert_dict_has_keys(multi_scale_crop_results, target_keys)\n assert check_crop(imgs, multi_scale_crop_results['imgs'],\n multi_scale_crop_results['crop_bbox'])\n assert (multi_scale_crop_results['img_shape'] in [(256, 256),\n (204, 204)])\n\n assert repr(multi_scale_crop) == (\n f'{multi_scale_crop.__class__.__name__}'\n f'(input_size={(224, 224)}, scales={(1, 0.8)}, '\n f'max_wh_scale_gap={0}, random_crop={True}, '\n f'num_fixed_crops=5, lazy={False})')\n\n @staticmethod\n def test_center_crop():\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n CenterCrop(0.5)\n\n with pytest.raises(TypeError):\n # crop_size 
must be int or tuple of int\n CenterCrop('224')\n\n # center crop with crop_size 224\n # add kps in test_center_crop\n imgs = list(np.random.rand(2, 240, 320, 3))\n results = dict(imgs=imgs)\n kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])\n results['keypoint'] = kp\n\n results['gt_bboxes'] = np.array([[0, 0, 320, 240]])\n results['proposals'] = np.array([[0, 0, 320, 240]])\n center_crop = CenterCrop(crop_size=224)\n center_crop_results = center_crop(results)\n target_keys = ['imgs', 'crop_bbox', 'img_shape', 'keypoint']\n assert assert_dict_has_keys(center_crop_results, target_keys)\n assert check_crop(imgs, center_crop_results['imgs'],\n center_crop_results['crop_bbox'])\n assert np.all(\n center_crop_results['crop_bbox'] == np.array([48, 8, 272, 232]))\n assert center_crop_results['img_shape'] == (224, 224)\n assert np.all(center_crop_results['keypoint'] == 112)\n\n assert repr(center_crop) == (f'{center_crop.__class__.__name__}'\n f'(crop_size={(224, 224)}, lazy={False})')\n\n @staticmethod\n def test_three_crop():\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n ThreeCrop(0.5)\n\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n ThreeCrop('224')\n\n # three crop with crop_size 120\n imgs = list(np.random.rand(2, 240, 120, 3))\n results = dict(imgs=imgs)\n three_crop = ThreeCrop(crop_size=120)\n three_crop_results = three_crop(results)\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n assert assert_dict_has_keys(three_crop_results, target_keys)\n assert check_crop(imgs, three_crop_results['imgs'],\n three_crop_results['crop_bbox'], 3)\n assert three_crop_results['img_shape'] == (120, 120)\n\n # three crop with crop_size 224\n imgs = list(np.random.rand(2, 224, 224, 3))\n results = dict(imgs=imgs)\n three_crop = ThreeCrop(crop_size=224)\n three_crop_results = three_crop(results)\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n assert assert_dict_has_keys(three_crop_results, target_keys)\n assert check_crop(imgs, three_crop_results['imgs'],\n three_crop_results['crop_bbox'], 3)\n assert three_crop_results['img_shape'] == (224, 224)\n\n assert repr(three_crop) == (f'{three_crop.__class__.__name__}'\n f'(crop_size={(224, 224)})')\n\n @staticmethod\n def test_ten_crop():\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n TenCrop(0.5)\n\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n TenCrop('224')\n\n # ten crop with crop_size 256\n imgs = list(np.random.rand(2, 256, 256, 3))\n results = dict(imgs=imgs)\n ten_crop = TenCrop(crop_size=224)\n ten_crop_results = ten_crop(results)\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n assert assert_dict_has_keys(ten_crop_results, target_keys)\n assert check_crop(imgs, ten_crop_results['imgs'],\n ten_crop_results['crop_bbox'], 10)\n assert ten_crop_results['img_shape'] == (224, 224)\n\n assert repr(ten_crop) == (f'{ten_crop.__class__.__name__}'\n f'(crop_size={(224, 224)})')\n\n @staticmethod\n def test_multi_group_crop():\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n MultiGroupCrop(0.5, 1)\n\n with pytest.raises(TypeError):\n # crop_size must be int or tuple of int\n MultiGroupCrop('224', 1)\n\n with pytest.raises(TypeError):\n # groups must be int\n MultiGroupCrop(224, '1')\n\n with pytest.raises(ValueError):\n # groups must be positive\n MultiGroupCrop(224, 0)\n\n target_keys = ['imgs', 'crop_bbox', 'img_shape']\n\n # multi_group_crop with crop_size 224, groups 3\n imgs = 
list(np.random.rand(2, 256, 341, 3))\n results = dict(imgs=imgs)\n multi_group_crop = MultiGroupCrop(224, 3)\n multi_group_crop_result = multi_group_crop(results)\n assert assert_dict_has_keys(multi_group_crop_result, target_keys)\n assert check_crop(imgs, multi_group_crop_result['imgs'],\n multi_group_crop_result['crop_bbox'],\n multi_group_crop.groups)\n assert multi_group_crop_result['img_shape'] == (224, 224)\n\n assert repr(multi_group_crop) == (\n f'{multi_group_crop.__class__.__name__}'\n f'(crop_size={(224, 224)}, groups={3})')\n",
"import argparse\nimport os\nimport os.path as osp\nimport shutil\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv import DictAction\nfrom mmcv.runner import load_checkpoint\n\nfrom mmaction.datasets.pipelines import Compose\nfrom mmaction.models import build_model\nfrom mmaction.utils import import_module_error_func\n\ntry:\n from mmdet.apis import inference_detector, init_detector\n from mmpose.apis import (init_pose_model, inference_top_down_pose_model,\n vis_pose_result)\nexcept (ImportError, ModuleNotFoundError):\n\n @import_module_error_func('mmdet')\n def inference_detector(*args, **kwargs):\n pass\n\n @import_module_error_func('mmdet')\n def init_detector(*args, **kwargs):\n pass\n\n @import_module_error_func('mmpose')\n def init_pose_model(*args, **kwargs):\n pass\n\n @import_module_error_func('mmpose')\n def inference_top_down_pose_model(*args, **kwargs):\n pass\n\n @import_module_error_func('mmpose')\n def vis_pose_result(*args, **kwargs):\n pass\n\n\ntry:\n import moviepy.editor as mpy\nexcept ImportError:\n raise ImportError('Please install moviepy to enable output file')\n\nFONTFACE = cv2.FONT_HERSHEY_DUPLEX\nFONTSCALE = 0.75\nFONTCOLOR = (255, 255, 255) # BGR, white\nTHICKNESS = 1\nLINETYPE = 1\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMAction2 demo')\n parser.add_argument('video', help='video file/url')\n parser.add_argument('out_filename', help='output filename')\n parser.add_argument(\n '--config',\n default=('configs/skeleton/posec3d/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint.py'),\n help='posec3d config file path')\n parser.add_argument(\n '--checkpoint',\n default=('https://download.openmmlab.com/mmaction/skeleton/posec3d/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint-6736b03f.pth'),\n help='posec3d checkpoint file/url')\n parser.add_argument(\n '--det-config',\n default='demo/faster_rcnn_r50_fpn_2x_coco.py',\n help='human detection config file path (from mmdet)')\n parser.add_argument(\n '--det-checkpoint',\n default=('http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/'\n 'faster_rcnn_r50_fpn_2x_coco/'\n 'faster_rcnn_r50_fpn_2x_coco_'\n 'bbox_mAP-0.384_20200504_210434-a5d8aa15.pth'),\n help='human detection checkpoint file/url')\n parser.add_argument(\n '--pose-config',\n default='demo/hrnet_w32_coco_256x192.py',\n help='human pose estimation config file path (from mmpose)')\n parser.add_argument(\n '--pose-checkpoint',\n default=('https://download.openmmlab.com/mmpose/top_down/hrnet/'\n 'hrnet_w32_coco_256x192-c78dce93_20200708.pth'),\n help='human pose estimation checkpoint file/url')\n parser.add_argument(\n '--det-score-thr',\n type=float,\n default=0.9,\n help='the threshold of human detection score')\n parser.add_argument(\n '--label-map',\n default='demo/label_map_ntu120.txt',\n help='label map file')\n parser.add_argument(\n '--device', type=str, default='cuda:0', help='CPU/CUDA device option')\n parser.add_argument(\n '--short-side',\n type=int,\n default=480,\n help='specify the short-side length of the image')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n default={},\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. 
For example, '\n \"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'\")\n args = parser.parse_args()\n return args\n\n\ndef frame_extraction(video_path, short_side):\n \"\"\"Extract frames given video_path.\n\n Args:\n video_path (str): The video_path.\n \"\"\"\n # Load the video, extract frames into ./tmp/video_name\n target_dir = osp.join('./tmp', osp.basename(osp.splitext(video_path)[0]))\n os.makedirs(target_dir, exist_ok=True)\n # Should be able to handle videos up to several hours\n frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')\n vid = cv2.VideoCapture(video_path)\n frames = []\n frame_paths = []\n flag, frame = vid.read()\n cnt = 0\n new_h, new_w = None, None\n while flag:\n if new_h is None:\n h, w, _ = frame.shape\n new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.Inf))\n\n frame = mmcv.imresize(frame, (new_w, new_h))\n\n frames.append(frame)\n frame_path = frame_tmpl.format(cnt + 1)\n frame_paths.append(frame_path)\n\n cv2.imwrite(frame_path, frame)\n cnt += 1\n flag, frame = vid.read()\n\n return frame_paths, frames\n\n\ndef detection_inference(args, frame_paths):\n \"\"\"Detect human boxes given frame paths.\n\n Args:\n args (argparse.Namespace): The arguments.\n frame_paths (list[str]): The paths of frames to do detection inference.\n\n Returns:\n list[np.ndarray]: The human detection results.\n \"\"\"\n model = init_detector(args.det_config, args.det_checkpoint, args.device)\n assert model.CLASSES[0] == 'person', ('We require you to use a detector '\n 'trained on COCO')\n results = []\n print('Performing Human Detection for each frame')\n prog_bar = mmcv.ProgressBar(len(frame_paths))\n for frame_path in frame_paths:\n result = inference_detector(model, frame_path)\n # We only keep human detections with score larger than det_score_thr\n result = result[0][result[0][:, 4] >= args.det_score_thr]\n results.append(result)\n prog_bar.update()\n return results\n\n\ndef pose_inference(args, frame_paths, det_results):\n model = init_pose_model(args.pose_config, args.pose_checkpoint,\n args.device)\n ret = []\n print('Performing Human Pose Estimation for each frame')\n prog_bar = mmcv.ProgressBar(len(frame_paths))\n for f, d in zip(frame_paths, det_results):\n # Align input format\n d = [dict(bbox=x) for x in list(d)]\n pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]\n ret.append(pose)\n prog_bar.update()\n return ret\n\n\ndef main():\n args = parse_args()\n\n frame_paths, original_frames = frame_extraction(args.video,\n args.short_side)\n num_frame = len(frame_paths)\n h, w, _ = original_frames[0].shape\n\n # Get clip_len, frame_interval and calculate center index of each clip\n config = mmcv.Config.fromfile(args.config)\n config.merge_from_dict(args.cfg_options)\n\n test_pipeline = Compose(config.data.test.pipeline)\n\n # Load label_map\n label_map = [x.strip() for x in open(args.label_map).readlines()]\n\n # Get Human detection results\n det_results = detection_inference(args, frame_paths)\n torch.cuda.empty_cache()\n\n pose_results = pose_inference(args, frame_paths, det_results)\n torch.cuda.empty_cache()\n\n fake_anno = dict(\n frame_dir='',\n label=-1,\n img_shape=(h, w),\n original_shape=(h, w),\n start_index=0,\n modality='Pose',\n total_frames=num_frame)\n num_person = max([len(x) for x in pose_results])\n # Current PoseC3D models are trained on COCO-keypoints (17 keypoints)\n num_keypoint = 17\n keypoint = np.zeros((num_person, num_frame, num_keypoint, 2),\n dtype=np.float16)\n keypoint_score = np.zeros((num_person, 
num_frame, num_keypoint),\n dtype=np.float16)\n for i, poses in enumerate(pose_results):\n for j, pose in enumerate(poses):\n pose = pose['keypoints']\n keypoint[j, i] = pose[:, :2]\n keypoint_score[j, i] = pose[:, 2]\n fake_anno['keypoint'] = keypoint\n fake_anno['keypoint_score'] = keypoint_score\n\n imgs = test_pipeline(fake_anno)['imgs'][None]\n imgs = imgs.to(args.device)\n\n model = build_model(config.model)\n load_checkpoint(model, args.checkpoint, map_location=args.device)\n model.to(args.device)\n model.eval()\n\n with torch.no_grad():\n output = model(return_loss=False, imgs=imgs)\n\n action_idx = np.argmax(output)\n action_label = label_map[action_idx]\n\n pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,\n args.device)\n vis_frames = [\n vis_pose_result(pose_model, frame_paths[i], pose_results[i])\n for i in range(num_frame)\n ]\n for frame in vis_frames:\n cv2.putText(frame, action_label, (10, 30), FONTFACE, FONTSCALE,\n FONTCOLOR, THICKNESS, LINETYPE)\n\n cv2.imwrite('frame.jpg', vis_frames[0])\n vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames], fps=24)\n vid.write_videofile(args.out_filename, remove_temp=True)\n\n tmp_frame_dir = osp.dirname(frame_paths[0])\n shutil.rmtree(tmp_frame_dir)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.log",
"numpy.pad",
"numpy.sqrt",
"numpy.clip",
"numpy.min",
"numpy.isnan",
"numpy.stack",
"numpy.concatenate",
"numpy.max",
"numpy.all",
"numpy.random.permutation",
"numpy.random.rand",
"torch.nn.modules.utils._pair",
"numpy.random.uniform",
"numpy.array",
"numpy.flip",
"numpy.empty",
"numpy.random.randint"
],
[
"numpy.all",
"numpy.array",
"numpy.random.rand"
],
[
"torch.no_grad",
"numpy.argmax",
"numpy.zeros",
"torch.cuda.empty_cache"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akhandait/curiosity-driven-world-models | [
"544326f1ed4274c2e96addebd414f05dead721a9"
] | [
"models_sep.py"
] | [
"import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport math\nfrom torch.nn import init\nfrom torch.distributions.normal import Normal\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\nclass CnnActorCriticNetwork(nn.Module):\n def __init__(self, input_size, output_size, use_noisy_net=False):\n super(CnnActorCriticNetwork, self).__init__()\n\n # if use_noisy_net:\n # print('use NoisyNet')\n # linear = NoisyLinear\n # else:\n linear = nn.Linear\n\n self.feature = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=32,\n kernel_size=8,\n stride=4),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1),\n nn.LeakyReLU(),\n Flatten(),\n linear(\n 7 * 7 * 64,\n 512),\n nn.LeakyReLU()\n )\n\n self.actor = nn.Sequential(\n linear(512, 512),\n nn.LeakyReLU(),\n linear(512, output_size)\n )\n\n self.critic = nn.Sequential(\n linear(512, 512),\n nn.LeakyReLU(),\n linear(512, 1)\n )\n\n # self.actor = nn.Sequential(\n # linear(512, output_size)\n # )\n\n # self.critic = nn.Sequential(\n # linear(512, 1)\n # )\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.orthogonal_(p.weight, np.sqrt(2))\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.orthogonal_(p.weight, np.sqrt(2))\n p.bias.data.zero_()\n\n for i in range(len(self.actor)):\n if type(self.actor[i]) == nn.Linear:\n init.orthogonal_(self.actor[i].weight, 0.01)\n self.actor[i].bias.data.zero_()\n\n for i in range(len(self.critic)):\n if type(self.critic[i]) == nn.Linear:\n init.orthogonal_(self.critic[i].weight, 0.01)\n self.critic[i].bias.data.zero_()\n\n def forward(self, state, icm, rnn, prev_state, prev_action):\n x = self.feature(state)\n\n # feat_prev_s = icm.features_forward(prev_state).reshape(1, prev_state.shape[0], 256)\n # rnn_h = F.leaky_relu(rnn.get_hidden(prev_action, feat_prev_s).detach()).reshape(feat_prev_s.shape[1], 256)\n # feat_s = icm.features_forward(state).detach()\n\n # x = torch.cat([rnn_h, feat_s], dim=-1)\n\n policy = self.actor(x)\n value = self.critic(x)\n return policy, value\n\n\nclass ICMModel(nn.Module):\n def __init__(self, input_size, output_size, use_cuda=True):\n super(ICMModel, self).__init__()\n\n self.input_size = input_size\n self.output_size = output_size\n self.device = torch.device('cuda' if use_cuda else 'cpu')\n\n self.feature = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=32,\n kernel_size=8,\n stride=4),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2),\n nn.LeakyReLU(),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1),\n nn.LeakyReLU(),\n Flatten(),\n nn.Linear(7 * 7 * 64, 256)\n )\n\n self.inverse_net = nn.Sequential(\n nn.Linear(256 * 2, 512),\n nn.ReLU(),\n nn.Linear(512, output_size)\n )\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.kaiming_uniform_(p.weight)\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.kaiming_uniform_(p.weight, a=1.0)\n p.bias.data.zero_()\n\n def features_forward(self, state):\n return F.leaky_relu(self.feature(state))\n\n def forward(self, inputs):\n state, next_state, action = inputs\n\n encode_state = self.feature(state)\n encode_next_state = self.feature(next_state)\n # get pred action\n pred_action = torch.cat((encode_state, encode_next_state), 
1)\n pred_action = self.inverse_net(pred_action)\n\n return pred_action\n\n\nclass _MDRNNBase(nn.Module):\n def __init__(self, latents, actions, hiddens, gaussians):\n super().__init__()\n self.latents = latents\n self.actions = actions\n self.hiddens = hiddens\n self.gaussians = gaussians\n\n self.gmm_linear = nn.Linear(\n hiddens, (2 * latents + 1) * gaussians + 1)\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.kaiming_uniform_(p.weight)\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.kaiming_uniform_(p.weight, a=1.0)\n p.bias.data.zero_()\n\n def forward(self, *inputs):\n pass\n\nclass MDRNN(_MDRNNBase):\n \"\"\" MDRNN model for multi steps forward \"\"\"\n def __init__(self, latents, actions, hiddens, gaussians):\n super().__init__(latents, actions, hiddens, gaussians)\n self.rnn = nn.LSTM(latents + actions, hiddens)\n\n for name, param in self.rnn.named_parameters():\n init.uniform_(param)\n if 'weight' in name:\n init.kaiming_uniform_(param, a=1.0)\n else:\n param.data.zero_()\n\n def forward(self, actions, latents): # pylint: disable=arguments-differ\n \"\"\" MULTI STEPS forward.\n\n :args actions: (SEQ_LEN, BSIZE, ASIZE) torch tensor # NOTE: not needed, use batch_first in LSTM\n :args latents: (SEQ_LEN, BSIZE, LSIZE) torch tensor\n\n :returns: mu_nlat, sig_nlat, pi_nlat, rs, ds, parameters of the GMM\n prediction for the next latent, gaussian prediction of the reward and\n logit prediction of terminality.\n - mu_nlat: (SEQ_LEN, BSIZE, N_GAUSS, LSIZE) torch tensor\n - sigma_nlat: (SEQ_LEN, BSIZE, N_GAUSS, LSIZE) torch tensor\n - logpi_nlat: (SEQ_LEN, BSIZE, N_GAUSS) torch tensor\n - rs: (SEQ_LEN, BSIZE) torch tensor\n - ds: (SEQ_LEN, BSIZE) torch tensor\n \"\"\"\n seq_len, bs = actions.size(0), actions.size(1)\n\n ins = torch.cat([actions, latents], dim=-1)\n outs, _ = self.rnn(ins)\n gmm_outs = self.gmm_linear(outs)\n\n stride = self.gaussians * self.latents\n\n mus = gmm_outs[:, :, :stride]\n mus = mus.view(seq_len, bs, self.gaussians, self.latents)\n\n sigmas = gmm_outs[:, :, stride:2 * stride]\n sigmas = sigmas.view(seq_len, bs, self.gaussians, self.latents)\n sigmas = torch.exp(sigmas)\n\n pi = gmm_outs[:, :, 2 * stride: 2 * stride + self.gaussians]\n pi = pi.view(seq_len, bs, self.gaussians)\n logpi = F.log_softmax(pi, dim=-1)\n\n dones = gmm_outs[:, :, -1]\n\n return mus, sigmas, logpi, dones\n\n def get_hidden(self, actions, latents):\n ins = torch.cat([actions, latents], dim=-1)\n outs, (h, c) = self.rnn(ins)\n\n return h\n\ndef gmm_loss(batch, mus, sigmas, logpi, reduce=True): # pylint: disable=too-many-arguments\n \"\"\" Computes the gmm loss.\n\n Compute minus the log probability of batch under the GMM model described\n by mus, sigmas, pi. Precisely, with bs1, bs2, ... the sizes of the batch\n dimensions (several batch dimension are useful when you have both a batch\n axis and a time step axis), gs the number of mixtures and fs the number of\n features.\n\n :args batch: (bs1, bs2, *, fs) torch tensor\n :args mus: (bs1, bs2, *, gs, fs) torch tensor\n :args sigmas: (bs1, bs2, *, gs, fs) torch tensor\n :args logpi: (bs1, bs2, *, gs) torch tensor\n :args reduce: if not reduce, the mean in the following formula is ommited\n\n :returns:\n loss(batch) = - mean_{i1=0..bs1, i2=0..bs2, ...} log(\n sum_{k=1..gs} pi[i1, i2, ..., k] * N(\n batch[i1, i2, ..., :] | mus[i1, i2, ..., k, :], sigmas[i1, i2, ..., k, :]))\n\n NOTE: The loss is not reduced along the feature dimension (i.e. 
it should scale ~linearily\n with fs).\n \"\"\"\n batch = batch.unsqueeze(-2)\n normal_dist = Normal(mus, sigmas)\n g_log_probs = normal_dist.log_prob(batch)\n g_log_probs = logpi + torch.sum(g_log_probs, dim=-1)\n max_log_probs = torch.max(g_log_probs, dim=-1, keepdim=True)[0]\n g_log_probs = g_log_probs - max_log_probs\n\n g_probs = torch.exp(g_log_probs)\n probs = torch.sum(g_probs, dim=-1)\n\n log_prob = max_log_probs.squeeze() + torch.log(probs)\n if reduce:\n return - torch.mean(log_prob)\n return - log_prob\n"
] | [
[
"torch.mean",
"torch.nn.init.uniform_",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.LSTM",
"numpy.sqrt",
"torch.nn.Conv2d",
"torch.sum",
"torch.exp",
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.log",
"torch.nn.LeakyReLU",
"torch.nn.init.orthogonal_",
"torch.device",
"torch.distributions.normal.Normal",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cdawei/stellargraph | [
"53206a0bf133b47261d5f96f5325aa72ad424138",
"53206a0bf133b47261d5f96f5325aa72ad424138",
"60edf4a6268f29b49b7c768c382e235af4108506"
] | [
"tests/core/test_utils.py",
"stellargraph/data/explorer.py",
"tests/data/test_epgm.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2019 Data61, CSIROß\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nUtils tests:\n\n\"\"\"\nimport pytest\nimport random\nimport networkx as nx\nimport numpy as np\nimport scipy as sp\n\nfrom stellargraph.core.utils import *\nfrom stellargraph.core.graph import *\n\n\ndef example_graph(feature_size=None, n_edges=20, n_nodes=6, n_isolates=1):\n G = nx.Graph()\n n_noniso = n_nodes - n_isolates\n edges = [\n (random.randint(0, n_noniso - 1), random.randint(0, n_noniso - 1))\n for _ in range(n_edges)\n ]\n G.add_nodes_from(range(n_nodes))\n G.add_edges_from(edges, label=\"default\")\n\n # Add example features\n if feature_size is not None:\n for v in G.nodes():\n G.node[v][\"feature\"] = int(v) * np.ones(feature_size, dtype=\"int\")\n return StellarGraph(G, node_features=\"feature\")\n\n else:\n return StellarGraph(G)\n\n\[email protected](scope=\"session\", autouse=True)\ndef beforeall():\n G = example_graph(feature_size=4, n_nodes=6, n_isolates=1, n_edges=20)\n pytest.G = G\n\n\ndef test_normalize_adj():\n node_list = list(pytest.G.nodes())\n Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)\n csr = normalize_adj(Aadj)\n dense = csr.todense()\n assert 5 == pytest.approx(dense.sum(), 0.1)\n assert csr.get_shape() == Aadj.get_shape()\n\n csr = normalize_adj(Aadj, symmetric=False)\n dense = csr.todense()\n assert 5 == pytest.approx(dense.sum(), 0.1)\n assert csr.get_shape() == Aadj.get_shape()\n\n\ndef test_normalized_laplacian():\n node_list = list(pytest.G.nodes())\n Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)\n laplacian = normalized_laplacian(Aadj)\n assert 1 == pytest.approx(laplacian.sum(), 0.2)\n assert laplacian.get_shape() == Aadj.get_shape()\n\n laplacian = normalized_laplacian(Aadj, symmetric=False)\n assert 1 == pytest.approx(laplacian.sum(), 0.2)\n assert laplacian.get_shape() == Aadj.get_shape()\n\n\ndef test_rescale_laplacian():\n node_list = list(pytest.G.nodes())\n Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)\n rl = rescale_laplacian(normalized_laplacian(Aadj))\n assert rl.max() < 1\n assert rl.get_shape() == Aadj.get_shape()\n\n\ndef test_chebyshev_polynomial():\n node_list = list(pytest.G.nodes())\n Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)\n\n k = 2\n cp = chebyshev_polynomial(rescale_laplacian(normalized_laplacian(Aadj)), k)\n assert len(cp) == k + 1\n assert np.array_equal(cp[0].todense(), sp.eye(Aadj.shape[0]).todense())\n assert cp[1].max() < 1\n assert 5 == pytest.approx(cp[2].todense()[:5, :5].sum(), 0.1)\n\n\ndef test_GCN_Aadj_feats_op():\n node_list = list(pytest.G.nodes())\n Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)\n features = pytest.G.get_feature_for_nodes(node_list)\n\n features_, Aadj_ = GCN_Aadj_feats_op(features=features, A=Aadj, method=\"gcn\")\n assert np.array_equal(features, features_)\n assert 6 == pytest.approx(Aadj_.todense().sum(), 0.1)\n\n features_, Aadj_ = GCN_Aadj_feats_op(\n features=features, A=Aadj, method=\"chebyshev\", 
k=2\n )\n assert len(features_) == 4\n assert np.array_equal(features_[0], features_[0])\n assert np.array_equal(features_[1].todense(), sp.eye(Aadj.shape[0]).todense())\n assert features_[2].max() < 1\n assert 5 == pytest.approx(features_[3].todense()[:5, :5].sum(), 0.1)\n assert Aadj.get_shape() == Aadj_.get_shape()\n\n # k must an integer greater than or equal to 2\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"chebyshev\", k=1)\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"chebyshev\", k=2.0)\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"chebyshev\", k=None)\n\n # k must be positive integer\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"sgc\", k=None)\n\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"sgc\", k=0)\n\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"sgc\", k=-191)\n\n with pytest.raises(ValueError):\n GCN_Aadj_feats_op(features=features, A=Aadj, method=\"sgc\", k=2.0)\n\n features_, Aadj_ = GCN_Aadj_feats_op(features=features, A=Aadj, method=\"sgc\", k=2)\n\n assert len(features_) == 6\n assert np.array_equal(features, features_)\n assert Aadj.get_shape() == Aadj_.get_shape()\n\n # Check if the power of the normalised adjacency matrix is calculated correctly.\n # First retrieve the normalised adjacency matrix using localpool filter.\n features_, Aadj_norm = GCN_Aadj_feats_op(features=features, A=Aadj, method=\"gcn\")\n Aadj_norm = Aadj_norm.todense()\n Aadj_power_2 = np.linalg.matrix_power(Aadj_norm, 2) # raise it to the power of 2\n # Both matrices should have the same shape\n assert Aadj_power_2.shape == Aadj_.get_shape()\n # and the same values.\n assert pytest.approx(Aadj_power_2) == Aadj_.todense()\n",
"# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2018 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n \"UniformRandomWalk\",\n \"BiasedRandomWalk\",\n \"UniformRandomMetaPathWalk\",\n \"DepthFirstWalk\",\n \"BreadthFirstWalk\",\n \"SampledBreadthFirstWalk\",\n \"SampledHeterogeneousBreadthFirstWalk\",\n]\n\n\nimport networkx as nx\nimport numpy as np\nimport random\nfrom collections import defaultdict\n\nfrom ..core.schema import GraphSchema\nfrom ..core.graph import StellarGraphBase\nfrom ..core.utils import is_real_iterable\n\n\nclass GraphWalk(object):\n \"\"\"\n Base class for exploring graphs.\n \"\"\"\n\n def __init__(self, graph, graph_schema=None, seed=None):\n self.graph = graph\n\n # Initialize the random state\n self._random_state = random.Random(seed)\n\n # Initialize a numpy random state (for numpy random methods)\n self._np_random_state = np.random.RandomState(seed=seed)\n\n # We require a StellarGraph for this\n if not isinstance(graph, StellarGraphBase):\n raise TypeError(\n \"Graph must be a StellarGraph or StellarDiGraph to use heterogeneous sampling.\"\n )\n\n if not graph_schema:\n self.graph_schema = self.graph.create_graph_schema(create_type_maps=True)\n else:\n self.graph_schema = graph_schema\n\n if self.graph_schema is not None and type(self.graph_schema) is not GraphSchema:\n raise ValueError(\n \"({}) The parameter graph_schema should be either None or of type GraphSchema.\".format(\n type(self).__name__\n )\n )\n\n # Create a dict of adjacency lists per edge type, for faster neighbour sampling from graph in SampledHeteroBFS:\n # TODO: this could be better placed inside StellarGraph class\n edge_types = self.graph_schema.edge_types\n self.adj = dict()\n for et in edge_types:\n self.adj.update({et: defaultdict(lambda: [None])})\n\n for n1, nbrdict in graph.adjacency():\n for et in edge_types:\n neigh_et = [\n n2\n for n2, nkeys in nbrdict.items()\n for k in iter(nkeys)\n if self.graph_schema.is_of_edge_type((n1, n2, k), et)\n ]\n # Create adjacency list in lexographical order\n # Otherwise sampling methods will not be deterministic\n # even when the seed is set.\n self.adj[et][n1] = sorted(neigh_et, key=str)\n\n def neighbors(self, node):\n if node not in self.graph:\n print(\"node {} not in graph\".format(node))\n print(\"Graph nodes {}\".format(self.graph.nodes()))\n return list(nx.neighbors(self.graph, node))\n\n def run(self, **kwargs):\n \"\"\"\n To be overridden by subclasses. 
It is the main entry point for performing random walks on the given\n graph.\n It should return the sequences of nodes in each random walk.\n\n Args:\n **kwargs:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n\nclass UniformRandomWalk(GraphWalk):\n \"\"\"\n Performs uniform random walks on the given graph\n \"\"\"\n\n def run(self, nodes=None, n=None, length=None, seed=None):\n \"\"\"\n Perform a random walk starting from the root nodes.\n\n Args:\n nodes: <list> The root nodes as a list of node IDs\n n: <int> Total number of random walks per root node\n length: <int> Maximum length of each random walk\n seed: <int> Random number generator seed; default is None\n\n Returns:\n <list> List of lists of nodes ids for each of the random walks\n\n \"\"\"\n self._check_parameter_values(nodes=nodes, n=n, length=length, seed=seed)\n\n if seed:\n # seed the random number generator\n rs = random.Random(seed)\n else:\n # Restore the random state\n rs = self._random_state\n\n walks = []\n for node in nodes: # iterate over root nodes\n for walk_number in range(n): # generate n walks per root node\n walk = list()\n current_node = node\n for _ in range(length):\n walk.extend([current_node])\n neighbours = self.neighbors(current_node)\n if (\n len(neighbours) == 0\n ): # for whatever reason this node has no neighbours so stop\n break\n else:\n rs.shuffle(neighbours) # shuffles the list in place\n current_node = neighbours[0] # select the first node to follow\n\n walks.append(walk)\n\n return walks\n\n def _check_parameter_values(self, nodes, n, length, seed):\n \"\"\"\n Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the\n parameter (the first one encountered in the checks) with invalid value.\n\n Args:\n nodes: <list> A list of root node ids such that from each node a uniform random walk of up to length l\n will be generated.\n n: <int> Number of walks per node id.\n length: <int> Maximum length of walk measured as the number of edges followed from root node.\n seed: <int> Random number generator seed\n\n \"\"\"\n if nodes is None:\n raise ValueError(\n \"({}) A list of root node IDs was not provided.\".format(\n type(self).__name__\n )\n )\n if not is_real_iterable(nodes):\n raise ValueError(\"nodes parameter should be an iterable of node IDs.\")\n if (\n len(nodes) == 0\n ): # this is not an error but maybe a warning should be printed to inform the caller\n print(\n \"WARNING: ({}) No root node IDs given. 
An empty list will be returned as a result.\".format(\n type(self).__name__\n )\n )\n\n if type(n) != int:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be integer type.\".format(\n type(self).__name__\n )\n )\n if n <= 0:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be a positive integer.\".format(\n type(self).__name__\n )\n )\n\n if type(length) != int:\n raise ValueError(\n \"({}) The walk length, length, should be integer type.\".format(\n type(self).__name__\n )\n )\n if length <= 0:\n raise ValueError(\n \"({}) The walk length, length, should be positive integer.\".format(\n type(self).__name__\n )\n )\n\n if seed is not None:\n if type(seed) != int:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be integer type or None.\".format(\n type(self).__name__\n )\n )\n if seed < 0:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be positive integer or None.\".format(\n type(self).__name__\n )\n )\n\n\ndef naive_weighted_choices(rs, weights):\n \"\"\"\n Select an index at random, weighted by the iterator `weights` of\n arbitrary (non-negative) floats. That is, `x` will be returned\n with probability `weights[x]/sum(weights)`.\n\n For doing a single sample with arbitrary weights, this is much (5x\n or more) faster than numpy.random.choice, because the latter\n requires a lot of preprocessing (normalized probabilties), and\n does a lot of conversions/checks/preprocessing internally.\n \"\"\"\n\n # divide the interval [0, sum(weights)) into len(weights)\n # subintervals [x_i, x_{i+1}), where the width x_{i+1} - x_i ==\n # weights[i]\n subinterval_ends = []\n running_total = 0\n for w in weights:\n assert w >= 0\n running_total += w\n subinterval_ends.append(running_total)\n\n # pick a place in the overall interval\n x = rs.random() * running_total\n\n # find the subinterval that contains the place, by looking for the\n # first subinterval where the end is (strictly) after it\n for idx, end in enumerate(subinterval_ends):\n if x < end:\n break\n\n return idx\n\n\nclass BiasedRandomWalk(GraphWalk):\n \"\"\"\n Performs biased second order random walks (like those used in Node2Vec algorithm\n https://snap.stanford.edu/node2vec/) controlled by the values of two parameters p and q.\n \"\"\"\n\n def run(\n self,\n nodes=None,\n n=None,\n p=1.0,\n q=1.0,\n length=None,\n seed=None,\n weighted=False,\n edge_weight_label=\"weight\",\n ):\n\n \"\"\"\n Perform a random walk starting from the root nodes.\n\n Args:\n nodes: <list> The root nodes as a list of node IDs\n n: <int> Total number of random walks per root node\n p: <float> Defines probability, 1/p, of returning to source node\n q: <float> Defines probability, 1/q, for moving to a node away from the source node\n length: <int> Maximum length of each random walk\n seed: <int> Random number generator seed; default is None\n weighted: <False or True> Indicates whether the walk is unweighted or weighted\n edge_weight_label: <string> Label of the edge weight property.\n\n Returns:\n <list> List of lists of nodes ids for each of the random walks\n\n \"\"\"\n self._check_parameter_values(\n nodes=nodes,\n n=n,\n p=p,\n q=q,\n length=length,\n seed=seed,\n weighted=weighted,\n edge_weight_label=edge_weight_label,\n )\n\n if seed:\n # seed a new random number generator\n rs = random.Random(seed)\n else:\n # Restore the random state\n rs = self._random_state\n\n if weighted:\n # Check that all edge weights are greater than or equal to 
0.\n # Also, if the given graph is a MultiGraph, then check that there are no two edges between\n # the same two nodes with different weights.\n for node in self.graph.nodes():\n for neighbor in self.neighbors(node):\n\n wts = set()\n for k, v in self.graph[node][neighbor].items():\n weight = v.get(edge_weight_label)\n if weight is None or np.isnan(weight) or weight == np.inf:\n raise ValueError(\n \"Missing or invalid edge weight ({}) between ({}) and ({}).\".format(\n weight, node, neighbor\n )\n )\n if not isinstance(weight, (int, float)):\n raise ValueError(\n \"Edge weight between nodes ({}) and ({}) is not numeric ({}).\".format(\n node, neighbor, weight\n )\n )\n if weight < 0: # check if edge has a negative weight\n raise ValueError(\n \"An edge weight between nodes ({}) and ({}) is negative ({}).\".format(\n node, neighbor, weight\n )\n )\n\n wts.add(weight)\n if (\n len(wts) > 1\n ): # multigraph with different weights on edges between same pair of nodes\n raise ValueError(\n \"({}) and ({}) have multiple edges with weights ({}). Ambiguous to choose an edge for the random walk.\".format(\n node, neighbor, list(wts)\n )\n )\n\n ip = 1.0 / p\n iq = 1.0 / q\n\n walks = []\n for node in nodes: # iterate over root nodes\n for walk_number in range(n): # generate n walks per root node\n # the walk starts at the root\n walk = [node]\n\n neighbours = self.neighbors(node)\n\n previous_node = node\n previous_node_neighbours = neighbours\n\n # calculate the appropriate unnormalised transition\n # probability, given the history of the walk\n def transition_probability(\n nn, current_node, weighted, edge_weight_label\n ):\n\n if weighted:\n weight_cn = self.graph[current_node][nn][0].get(\n edge_weight_label\n )\n else:\n weight_cn = 1.0\n\n if nn == previous_node: # d_tx = 0\n return ip * weight_cn\n elif nn in previous_node_neighbours: # d_tx = 1\n return 1.0 * weight_cn\n else: # d_tx = 2\n return iq * weight_cn\n\n if neighbours:\n current_node = rs.choice(neighbours)\n for _ in range(length - 1):\n walk.append(current_node)\n neighbours = self.neighbors(current_node)\n\n if not neighbours:\n break\n\n # select one of the neighbours using the\n # appropriate transition probabilities\n choice = naive_weighted_choices(\n rs,\n (\n transition_probability(\n nn, current_node, weighted, edge_weight_label\n )\n for nn in neighbours\n ),\n )\n\n previous_node = current_node\n previous_node_neighbours = neighbours\n current_node = neighbours[choice]\n\n walks.append(walk)\n\n return walks\n\n def _check_parameter_values(\n self, nodes, n, p, q, length, seed, weighted, edge_weight_label\n ):\n \"\"\"\n Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the\n parameter (the first one encountered in the checks) with invalid value.\n\n Args:\n nodes: <list> A list of root node ids such that from each node a uniform random walk of up to length l\n will be generated.\n n: <int> Number of walks per node id.\n p: <float>\n q: <float>\n length: <int> Maximum length of walk measured as the number of edges followed from root node.\n seed: <int> Random number generator seed.\n weighted: <False or True> Indicates whether the walk is unweighted or weighted.\n edge_weight_label: <string> Label of the edge weight property.\n\n \"\"\"\n if nodes is None:\n raise ValueError(\n \"({}) A list of root node IDs was not provided.\".format(\n type(self).__name__\n )\n )\n if not is_real_iterable(nodes):\n raise ValueError(\"nodes parameter should be an iterableof node 
IDs.\")\n if (\n len(nodes) == 0\n ): # this is not an error but maybe a warning should be printed to inform the caller\n print(\n \"WARNING: ({}) No root node IDs given. An empty list will be returned as a result.\".format(\n type(self).__name__\n )\n )\n\n if type(n) != int:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if n <= 0:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be a positive integer.\".format(\n type(self).__name__\n )\n )\n\n if p <= 0.0:\n raise ValueError(\n \"({}) Parameter p should be greater than 0.\".format(type(self).__name__)\n )\n\n if q <= 0.0:\n raise ValueError(\n \"({}) Parameter q should be greater than 0.\".format(type(self).__name__)\n )\n\n if type(length) != int:\n raise ValueError(\n \"({}) The walk length, length, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if length <= 0:\n raise ValueError(\n \"({}) The walk length, length, should be positive integer.\".format(\n type(self).__name__\n )\n )\n\n if seed is not None:\n if seed < 0:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be positive integer or None.\".format(\n type(self).__name__\n )\n )\n if type(seed) != int:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be integer type or None.\".format(\n type(self).__name__\n )\n )\n\n if type(weighted) != bool:\n raise ValueError(\n \"({}) Parameter weighted has to be either False (unweighted random walks) or True (weighted random walks).\".format(\n type(self).__name__\n )\n )\n\n if not isinstance(edge_weight_label, str):\n raise ValueError(\n \"({}) The edge weight property label has to be of type string\".format(\n type(self).__name__\n )\n )\n\n\nclass UniformRandomMetaPathWalk(GraphWalk):\n \"\"\"\n For heterogeneous graphs, it performs uniform random walks based on given metapaths.\n \"\"\"\n\n def run(\n self,\n nodes=None,\n n=None,\n length=None,\n metapaths=None,\n node_type_attribute=\"label\",\n seed=None,\n ):\n \"\"\"\n Performs metapath-driven uniform random walks on heterogeneous graphs.\n\n Args:\n nodes: <list> The root nodes as a list of node IDs\n n: <int> Total number of random walks per root node\n length: <int> Maximum length of each random walk\n metapaths: <list> List of lists of node labels that specify a metapath schema, e.g.,\n [['Author', 'Paper', 'Author'], ['Author, 'Paper', 'Venue', 'Paper', 'Author']] specifies two metapath\n schemas of length 3 and 5 respectively.\n node_type_attribute: <str> The node attribute name that stores the node's type\n seed: <int> Random number generator seed; default is None\n\n Returns:\n <list> List of lists of nodes ids for each of the random walks generated\n \"\"\"\n self._check_parameter_values(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=metapaths,\n node_type_attribute=node_type_attribute,\n seed=seed,\n )\n\n if seed:\n # seed the random number generator\n rs = random.Random(seed)\n else:\n # Restore the random state\n rs = self._random_state\n\n walks = []\n\n for node in nodes:\n # retrieve node type\n label = self.graph.node[node][node_type_attribute]\n filtered_metapaths = [\n metapath\n for metapath in metapaths\n if len(metapath) > 0 and metapath[0] == label\n ]\n\n for metapath in filtered_metapaths:\n # augment metapath to be length long\n # if (\n # len(metapath) == 1\n # ): # special case for random walks like in a homogeneous graphs\n # metapath = metapath * 
length\n # else:\n metapath = metapath[1:] * ((length // (len(metapath) - 1)) + 1)\n for _ in range(n):\n walk = (\n []\n ) # holds the walk data for this walk; first node is the starting node\n current_node = node\n for d in range(length):\n walk.append(current_node)\n # d+1 can also be used to index metapath to retrieve the node type for the next step in the walk\n neighbours = self.neighbors(current_node)\n # filter these by node type\n neighbours = [\n n_node\n for n_node in neighbours\n if self.graph.node[n_node][node_type_attribute]\n == metapath[d]\n ]\n if len(neighbours) == 0:\n # if no neighbours of the required type as dictated by the metapath exist, then stop.\n break\n # select one of the neighbours uniformly at random\n current_node = rs.choice(\n neighbours\n ) # the next node in the walk\n\n walks.append(walk) # store the walk\n\n return walks\n\n def _check_parameter_values(\n self, nodes, n, length, metapaths, node_type_attribute, seed\n ):\n \"\"\"\n Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the\n parameter (the first one encountered in the checks) with invalid value.\n\n Args:\n nodes: <list> The starting nodes as a list of node IDs.\n n: <int> Number of walks per node id.\n length: <int> Maximum length of of each random walk\n metapaths: <list> List of lists of node labels that specify a metapath schema, e.g.,\n [['Author', 'Paper', 'Author'], ['Author, 'Paper', 'Venue', 'Paper', 'Author']] specifies two metapath\n schemas of length 3 and 5 respectively.\n node_type_attribute: <str> The node attribute name that stores the node's type\n seed: <int> Random number generator seed\n\n \"\"\"\n if nodes is None:\n raise ValueError(\n \"({}) A list of starting node IDs was not provided (parameter nodes is None).\".format(\n type(self).__name__\n )\n )\n if not is_real_iterable(nodes):\n raise ValueError(\n \"({}) The nodes parameter should be an iterable of node IDs.\".format(\n type(self).__name__\n )\n )\n if (\n len(nodes) == 0\n ): # this is not an error but maybe a warning should be printed to inform the caller\n print(\n \"WARNING: ({}) No starting node IDs given. 
An empty list will be returned as a result.\".format(\n type(self).__name__\n )\n )\n if n <= 0:\n raise ValueError(\n \"({}) The number of walks per starting node, n, should be a positive integer.\".format(\n type(self).__name__\n )\n )\n if type(n) != int:\n raise ValueError(\n \"({}) The number of walks per starting node, n, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if length <= 0:\n raise ValueError(\n \"({}) The walk length parameter, length, should be positive integer.\".format(\n type(self).__name__\n )\n )\n if type(length) != int:\n raise ValueError(\n \"({}) The walk length parameter, length, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if type(metapaths) != list:\n raise ValueError(\n \"({}) The metapaths parameter must be a list of lists.\".format(\n type(self).__name__\n )\n )\n for metapath in metapaths:\n if type(metapath) != list:\n raise ValueError(\n \"({}) Each metapath must be list type of node labels\".format(\n type(self).__name__\n )\n )\n if len(metapath) < 2:\n raise ValueError(\n \"({}) Each metapath must specify at least two node types\".format(\n type(self).__name__\n )\n )\n\n for node_label in metapath:\n if type(node_label) != str:\n raise ValueError(\n \"({}) Node labels in metapaths must be string type.\".format(\n type(self).__name__\n )\n )\n if metapath[0] != metapath[-1]:\n raise ValueError(\n \"({} The first and last node type in a metapath should be the same.\".format(\n type(self).__name__\n )\n )\n\n if type(node_type_attribute) != str:\n raise ValueError(\n \"({}) The parameter label should be string type not {} as given\".format(\n type(self).__name__, type(node_type_attribute).__name__\n )\n )\n\n if seed is not None:\n if seed < 0:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be positive integer or None.\".format(\n type(self).__name__\n )\n )\n if type(seed) != int:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be integer type or None.\".format(\n type(self).__name__\n )\n )\n\n\nclass DepthFirstWalk(GraphWalk):\n \"\"\"\n Depth First Walk that generates all paths from a starting node to a given depth.\n It can be used to extract, in a memory efficient way, a sub-graph starting from a node and up to a given depth.\n \"\"\"\n\n # TODO: Implement the run method\n pass\n\n\nclass BreadthFirstWalk(GraphWalk):\n \"\"\"\n Breadth First Walk that generates all paths from a starting node to a given depth.\n It can be used to extract a sub-graph starting from a node and up to a given depth.\n \"\"\"\n\n # TODO: Implement the run method\n pass\n\n\nclass SampledBreadthFirstWalk(GraphWalk):\n \"\"\"\n Breadth First Walk that generates a sampled number of paths from a starting node.\n It can be used to extract a random sub-graph starting from a set of initial nodes.\n \"\"\"\n\n def run(self, nodes=None, n=1, n_size=None, seed=None):\n \"\"\"\n Performs a sampled breadth-first walk starting from the root nodes.\n\n Args:\n nodes: <list> A list of root node ids such that from each node n BFWs will be generated up to the\n given depth d.\n n: <int> Number of walks per node id.\n n_size: <list> The number of neighbouring nodes to expand at each depth of the walk. 
Sampling of\n neighbours with replacement is always used regardless of the node degree and number of neighbours\n requested.\n seed: <int> Random number generator seed; default is None\n\n Returns:\n A list of lists such that each list element is a sequence of ids corresponding to a BFW.\n \"\"\"\n self._check_parameter_values(nodes=nodes, n=n, n_size=n_size, seed=seed)\n\n walks = []\n d = len(n_size) # depth of search\n\n if seed:\n # seed the random number generator\n rs = random.Random(seed)\n else:\n # Restore the random state\n rs = self._random_state\n\n for node in nodes: # iterate over root nodes\n for _ in range(n): # do n bounded breadth first walks from each root node\n q = list() # the queue of neighbours\n walk = list() # the list of nodes in the subgraph of node\n # extend() needs iterable as parameter; we use list of tuples (node id, depth)\n q.extend([(node, 0)])\n\n while len(q) > 0:\n # remove the top element in the queue\n # index 0 pop the item from the front of the list\n frontier = q.pop(0)\n depth = frontier[1] + 1 # the depth of the neighbouring nodes\n walk.extend([frontier[0]]) # add to the walk\n\n # consider the subgraph up to and including depth d from root node\n if depth <= d:\n neighbours = self.neighbors(frontier[0])\n if len(neighbours) == 0:\n break\n else:\n # sample with replacement\n neighbours = [\n rs.choice(neighbours) for _ in range(n_size[depth - 1])\n ]\n\n # add them to the back of the queue\n q.extend([(sampled_node, depth) for sampled_node in neighbours])\n\n # finished i-th walk from node so add it to the list of walks as a list\n walks.append(walk)\n\n return walks\n\n def _check_parameter_values(self, nodes, n, n_size, seed):\n \"\"\"\n Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the\n parameter (the first one encountered in the checks) with invalid value.\n\n Args:\n nodes: <list> A list of root node ids such that from each node n BFWs will be generated up to the\n given depth d.\n n: <int> Number of walks per node id.\n n_size: <list> The number of neighbouring nodes to expand at each depth of the walk.\n seed: <int> Random number generator seed; default is None\n\n \"\"\"\n if nodes is None:\n raise ValueError(\n \"({}) A list of root node IDs was not provided (nodes parameter is None).\".format(\n type(self).__name__\n )\n )\n if not is_real_iterable(nodes):\n raise ValueError(\n \"({}) The nodes parameter should be an iterable of node IDs.\".format(\n type(self).__name__\n )\n )\n if (\n len(nodes) == 0\n ): # this is not an error but maybe a warning should be printed to inform the caller\n print(\n \"WARNING: ({}) No root node IDs given. 
An empty list will be returned as a result.\".format(\n type(self).__name__\n )\n )\n\n if type(n) != int:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if n <= 0:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be a positive integer.\".format(\n type(self).__name__\n )\n )\n\n if n_size is None:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be a list of integers not None.\".format(\n type(self).__name__\n )\n )\n if type(n_size) != list:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be a list of integers.\".format(\n type(self).__name__\n )\n )\n\n if len(n_size) == 0:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, should not be empty list.\".format(\n type(self).__name__\n )\n )\n\n for d in n_size:\n if type(d) != int:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be list of positive integers or 0.\".format(\n type(self).__name__\n )\n )\n if d < 0:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be list of positive integers or 0.\".format(\n type(self).__name__\n )\n )\n\n if seed is not None:\n if type(seed) != int:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be integer type or None.\".format(\n type(self).__name__\n )\n )\n if seed < 0:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be positive integer or None.\".format(\n type(self).__name__\n )\n )\n\n\nclass SampledHeterogeneousBreadthFirstWalk(GraphWalk):\n \"\"\"\n Breadth First Walk for heterogeneous graphs that generates a sampled number of paths from a starting node.\n It can be used to extract a random sub-graph starting from a set of initial nodes.\n \"\"\"\n\n def run(self, nodes=None, n=1, n_size=None, seed=None):\n \"\"\"\n Performs a sampled breadth-first walk starting from the root nodes.\n\n Args:\n nodes: <list> A list of root node ids such that from each node n BFWs will be generated\n with the number of samples per hop specified in n_size.\n n: <int> Number of walks per node id.\n n_size: <list> The number of neighbouring nodes to expand at each depth of the walk. 
Sampling of\n neighbours with replacement is always used regardless of the node degree and number of neighbours\n requested.\n graph_schema: <GraphSchema> If None then the graph schema is extracted from self.graph\n seed: <int> Random number generator seed; default is None\n\n Returns:\n A list of lists such that each list element is a sequence of ids corresponding to a sampled Heterogeneous\n BFW.\n \"\"\"\n self._check_parameter_values(\n nodes=nodes, n=n, n_size=n_size, graph_schema=self.graph_schema, seed=seed\n )\n\n walks = []\n d = len(n_size) # depth of search\n\n if seed:\n # seed the random number generator\n rs = random.Random(seed)\n else:\n # Restore the random state\n rs = self._random_state\n\n for node in nodes: # iterate over root nodes\n for _ in range(n): # do n bounded breadth first walks from each root node\n q = list() # the queue of neighbours\n walk = list() # the list of nodes in the subgraph of node\n\n # Start the walk by adding the head node, and node type to the frontier list q\n node_type = self.graph_schema.get_node_type(node)\n q.extend([(node, node_type, 0)])\n\n # add the root node to the walks\n walk.append([node])\n while len(q) > 0:\n # remove the top element in the queue and pop the item from the front of the list\n frontier = q.pop(0)\n current_node, current_node_type, depth = frontier\n depth = depth + 1 # the depth of the neighbouring nodes\n\n # consider the subgraph up to and including depth d from root node\n if depth <= d:\n # Find edge types for current node type\n current_edge_types = self.graph_schema.schema[current_node_type]\n\n # Create samples of neigbhours for all edge types\n for et in current_edge_types:\n neigh_et = self.adj[et][current_node]\n\n # If there are no neighbours of this type then we return None\n # in the place of the nodes that would have been sampled\n # YT update: with the new way to get neigh_et from self.adj[et][current_node], len(neigh_et) is always > 0.\n # In case of no neighbours of the current node for et, neigh_et == [None],\n # and samples automatically becomes [None]*n_size[depth-1]\n if len(neigh_et) > 0:\n samples = [\n rs.choice(neigh_et)\n for _ in range(n_size[depth - 1])\n ]\n # Choices limits us to Python 3.6+\n # samples = random.choices(neigh_et, k=n_size[depth - 1])\n else: # this doesn't happen anymore, see the comment above\n samples = [None] * n_size[depth - 1]\n\n walk.append(samples)\n q.extend(\n [\n (sampled_node, et.n2, depth)\n for sampled_node in samples\n ]\n )\n\n # finished i-th walk from node so add it to the list of walks as a list\n walks.append(walk)\n\n return walks\n\n def _check_parameter_values(self, nodes, n, n_size, graph_schema, seed):\n \"\"\"\n Checks that the parameter values are valid or raises ValueError exceptions with a message indicating the\n parameter (the first one encountered in the checks) with invalid value.\n\n Args:\n nodes: <list> A list of root node ids such that from each node n BFWs will be generated up to the\n given depth d.\n n: <int> Number of walks per node id.\n n_size: <list> The number of neighbouring nodes to expand at each depth of the walk.\n graph_schema: <GraphSchema> None or a stellargraph graph schema object\n seed: <int> Random number generator seed; default is None\n\n \"\"\"\n if nodes is None:\n raise ValueError(\n \"({}) A list of root node IDs was not provided (nodes parameter is None).\".format(\n type(self).__name__\n )\n )\n if not is_real_iterable(nodes):\n raise ValueError(\n \"({}) The nodes parameter should be an iterable of 
node IDs.\".format(\n type(self).__name__\n )\n )\n if (\n len(nodes) == 0\n ): # this is not an error but maybe a warning should be printed to inform the caller\n print(\n \"WARNING: ({}) No root node IDs given. An empty list will be returned as a result.\".format(\n type(self).__name__\n )\n )\n\n if type(n) != int:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be integer type.\".format(\n type(self).__name__\n )\n )\n\n if n <= 0:\n raise ValueError(\n \"({}) The number of walks per root node, n, should be a positive integer.\".format(\n type(self).__name__\n )\n )\n\n if n_size is None:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be a list of integers not None.\".format(\n type(self).__name__\n )\n )\n if type(n_size) != list:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be a list of integers.\".format(\n type(self).__name__\n )\n )\n\n if len(n_size) == 0:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, should not be empty list.\".format(\n type(self).__name__\n )\n )\n\n for d in n_size:\n if type(d) != int:\n raise ValueError(\n \"({}) The neighbourhood size, n_size, must be list of integers.\".format(\n type(self).__name__\n )\n )\n if d < 0:\n raise ValueError(\n \"({}) n_sie should be positive integer or 0.\".format(\n type(self).__name__\n )\n )\n\n if graph_schema is not None and type(graph_schema) is not GraphSchema:\n raise ValueError(\n \"({}) The parameter graph_schema should be either None or of type GraphSchema.\".format(\n type(self).__name__\n )\n )\n\n if seed is not None:\n if type(seed) != int:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be integer type or None.\".format(\n type(self).__name__\n )\n )\n if seed < 0:\n raise ValueError(\n \"({}) The random number generator seed value, seed, should be positive integer or None.\".format(\n type(self).__name__\n )\n )\n",
"# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2018 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Tests of EPGM class defined in epgm.py\n# Author: Yuriy Tyshetskiy\n\nimport pytest\nimport os\nimport numpy as np\nfrom stellargraph.data.epgm import EPGM\n\n\nclass Test_EPGM_IO_Homogeneous(object):\n \"\"\"Test IO operations on homogeneous EPGM graphs\"\"\"\n\n if os.getcwd().split(\"/\")[-1] == \"tests\":\n input_dir = os.path.expanduser(\"./resources/data/cora/cora.epgm\")\n else:\n input_dir = os.path.expanduser(\"./tests/resources/data/cora/cora.epgm\")\n\n dataset_name = \"cora\"\n node_type = \"paper\"\n target_attribute = \"subject\"\n epgm_input = True\n\n def test_load_epgm(self):\n \"\"\"Test that the EPGM is loaded correctly from epgm path\"\"\"\n G_epgm = EPGM(self.input_dir)\n print(self.input_dir)\n\n assert \"graphs\" in G_epgm.G.keys()\n assert \"vertices\" in G_epgm.G.keys()\n assert \"edges\" in G_epgm.G.keys()\n\n # check that G_epgm.G['graphs] has at least one graph head:\n assert len(G_epgm.G[\"graphs\"]) > 0\n\n # cora nodes should have a subject attribute\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n assert self.target_attribute in G_epgm.node_attributes(graph_id, self.node_type)\n\n # cora should have 2708 vertices\n n_nodes = 2708\n nodes = G_epgm.G[\"vertices\"]\n assert len(nodes) == n_nodes\n\n # cora nodes should have 7 unique values for subject attribute:\n assert sum([\"data\" in v for v in nodes]) == n_nodes\n subjects = np.unique([v[\"data\"][self.target_attribute] for v in nodes])\n assert len(subjects) == 7\n\n def test_node_types(self):\n \"\"\"Test the .node_types() method\"\"\"\n G_epgm = EPGM(self.input_dir)\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n\n # cora has a single 'paper' node type:\n node_types = G_epgm.node_types(graph_id)\n\n assert len(node_types) == 1\n assert self.node_type in node_types\n\n with pytest.raises(Exception):\n G_epgm.node_types(\"invalid_graph_id\")\n\n def test_node_attributes(self):\n \"\"\"Test the .node_attributes() method\"\"\"\n G_epgm = EPGM(self.input_dir)\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n\n # cora has 1433 unique node attributes, including 'subject'\n node_attributes = G_epgm.node_attributes(graph_id, self.node_type)\n\n assert self.target_attribute in node_attributes\n\n # after the predictions cora has 1434 attributes, including subject and subject_PREDICTED\n if self.epgm_input:\n assert (\n len(node_attributes) == 1433\n ), \"There should be 1433 unique node attributes; found {}\".format(\n len(node_attributes)\n )\n else:\n assert (\n len(node_attributes) == 1434\n ), \"There should be 1434 unique node attributes; found {}\".format(\n len(node_attributes)\n )\n\n # passing a non-existent node type should return an empty array of node attributes:\n assert len(G_epgm.node_attributes(graph_id, \"person\")) == 0\n\n # if node_type is not supplied, a TypeError should be raised:\n with pytest.raises(TypeError):\n G_epgm.node_attributes(graph_id)\n\n\nclass 
Test_EPGM_IO_Heterogeneous(object):\n \"\"\"Test IO operations on heterogeneous EPGM graphs\"\"\"\n\n if os.getcwd().split(\"/\")[-1] == \"tests\":\n input_dir = os.path.expanduser(\"./resources/data/hin_random/\")\n else:\n input_dir = os.path.expanduser(\"./tests/resources/data/hin_random\")\n\n dataset_name = \"hin\"\n node_type = \"person\"\n target_attribute = \"elite\"\n\n def test_load_epgm(self):\n \"\"\"Test that the EPGM is loaded correctly from epgm path\"\"\"\n G_epgm = EPGM(self.input_dir)\n\n assert \"graphs\" in G_epgm.G.keys()\n assert \"vertices\" in G_epgm.G.keys()\n assert \"edges\" in G_epgm.G.keys()\n\n # check that G_epgm.G['graphs] has at least one graph head:\n assert len(G_epgm.G[\"graphs\"]) > 0\n\n # graph nodes of self.node_type type should have a self.target_attribute attribute\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n assert self.target_attribute in G_epgm.node_attributes(graph_id, self.node_type)\n\n # graph should have 260 vertices\n n_nodes = 260\n nodes = G_epgm.G[\"vertices\"]\n assert len(nodes) == n_nodes\n\n # 'user' nodes should have 3 unique values for 'elite' attribute:\n # first make sure that all nodes have 'data' key\n assert sum([\"data\" in v for v in nodes]) == n_nodes\n labels_all = [v[\"data\"].get(self.target_attribute) for v in nodes]\n labels = list(filter(lambda l: l is not None, labels_all))\n assert len(np.unique(labels)) == 3\n\n def test_node_types(self):\n \"\"\"Test the .node_types() method\"\"\"\n G_epgm = EPGM(self.input_dir)\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n\n # dataset has multiple node types:\n node_types = G_epgm.node_types(graph_id)\n\n assert len(node_types) == 3\n assert \"person\" in node_types\n assert \"paper\" in node_types\n assert \"venue\" in node_types\n\n with pytest.raises(Exception):\n G_epgm.node_types(\"invalid_graph_id\")\n\n def test_node_attributes(self):\n \"\"\"Test the .node_attributes() method\"\"\"\n G_epgm = EPGM(self.input_dir)\n graph_id = G_epgm.G[\"graphs\"][0][\"id\"]\n\n # dataset has 1 unique 'user' node attribute, 'elite'\n node_attributes = G_epgm.node_attributes(graph_id, self.node_type)\n\n assert self.target_attribute in node_attributes\n assert (\n len(node_attributes) == 1\n ), \"There should be 1 unique node attribute; found {}\".format(\n len(node_attributes)\n )\n\n # passing a non-existent node type should return an empty array of node attributes:\n assert len(G_epgm.node_attributes(graph_id, \"business\")) == 0\n\n # if node_type is not supplied, a TypeError should be raised:\n with pytest.raises(TypeError):\n G_epgm.node_attributes(graph_id)\n\n\nclass Test_EPGMOutput(Test_EPGM_IO_Homogeneous):\n \"\"\"Tests for the epgm produced by epgm_writer\"\"\"\n\n if os.getcwd().split(\"/\")[-1] == \"tests\":\n input_dir = os.path.expanduser(\"./resources/data/cora/cora.out\")\n else:\n input_dir = os.path.expanduser(\"./tests/resources/data/cora/cora.out\")\n\n epgm_input = False\n"
] | [
[
"scipy.eye",
"numpy.linalg.matrix_power",
"numpy.array_equal",
"numpy.ones"
],
[
"numpy.isnan",
"numpy.random.RandomState"
],
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hofbi/vibromaf | [
"7678042d18fa3b4ab006283bdbd1b1cc6d84e822",
"7678042d18fa3b4ab006283bdbd1b1cc6d84e822",
"7678042d18fa3b4ab006283bdbd1b1cc6d84e822"
] | [
"examples/evaluate_wav.py",
"vibromaf/util/matlab.py",
"vibromaf/util/common.py"
] | [
"\"\"\"Example to evaluate WAV files\"\"\"\n\nimport sys\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, FileType\nfrom pathlib import Path\n\nfrom scipy.io import wavfile\n\n# This is to make the local vibromaf package available\ntry:\n sys.path.append(str(Path(__file__).absolute().parents[1]))\nexcept IndexError:\n pass\n\nfrom vibromaf.metrics.snr import snr\nfrom vibromaf.metrics.spqi import spqi\nfrom vibromaf.metrics.stsim import st_sim\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = ArgumentParser(\n description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"distorted\",\n type=FileType(\"r\"),\n help=\"Distorted .wav file\",\n )\n parser.add_argument(\n \"reference\",\n type=FileType(\"r\"),\n help=\"Undistorted reference .wav file\",\n )\n return parser.parse_args()\n\n\ndef main():\n \"\"\"main\"\"\"\n args = parse_arguments()\n\n distorted_signal = wavfile.read(args.distorted.name)[1]\n reference_signal = wavfile.read(args.reference.name)[1]\n\n # Calculate metric scores\n snr_score = snr(distorted_signal, reference_signal)\n st_sim_score = st_sim(distorted_signal, reference_signal)\n spqi_score = spqi(distorted_signal, reference_signal)\n\n # Print individual metric scores\n print(f\"SNR score: {snr_score}\")\n print(f\"ST-SIM score: {st_sim_score}\")\n print(f\"SPQI score: {spqi_score}\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"\"\"\"Utility functions for MATLAB files\"\"\"\n\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom scipy import io\n\nfrom vibromaf import config\n\n\ndef load_signal_from_mat(mat_file: Path, signal_name: str) -> np.array:\n \"\"\"Load .mat file and parse signal from it\"\"\"\n mat = io.loadmat(str(mat_file))\n try:\n return mat[signal_name]\n except KeyError as exc:\n raise KeyError(f\"Available keys: {mat.keys()}\") from exc\n\n\ndef load_data_for_metric(\n metric: str, test_indices: List[int]\n) -> Tuple[np.array, np.array]:\n \"\"\"Load and concatenate the training and test data\"\"\"\n vcpwq = load_signal_from_mat(\n config.DATA_PATH / f\"{metric}_VCPWQ.mat\", f\"{metric}_VCPWQ\"\n )\n pvcslp = load_signal_from_mat(\n config.DATA_PATH / f\"{metric}_PVCSLP.mat\", f\"{metric}_PVCSLP\"\n )\n vpcds = load_signal_from_mat(\n config.DATA_PATH / f\"{metric}_VPCDS.mat\", f\"{metric}_VPCDS\"\n )\n\n train_indices = [\n element for element in range(0, vcpwq.shape[1]) if element not in test_indices\n ]\n\n return np.concatenate(\n [\n vcpwq[:, train_indices].flatten(),\n pvcslp[:, train_indices].flatten(),\n vpcds[:, train_indices].flatten(),\n ]\n ), np.concatenate(\n [\n vcpwq[:, test_indices].flatten(),\n pvcslp[:, test_indices].flatten(),\n vpcds[:, test_indices].flatten(),\n ]\n )\n\n\ndef split_per_codec(data: np.array, number_of_codecs: int = 3) -> np.array:\n \"\"\"\n Split the data into equal pieces:\n As we concatenate them per codec this is a split per codec\n \"\"\"\n return np.split(data, number_of_codecs)\n\n\ndef reshape_per_compression_rate(\n data: np.array, number_of_compression_levels: int = 17\n) -> np.array:\n \"\"\"\n Reshape the data into same compression level per row\n \"\"\"\n number_of_columns = int(data.size / number_of_compression_levels)\n return data.reshape((number_of_compression_levels, number_of_columns))\n\n\nclass MatSignalLoader:\n \"\"\"Helper class to load test signals from mat files\"\"\"\n\n def __init__(self, metric: str, codec: str = \"VCPWQ\"):\n self.__reference = load_signal_from_mat(\n config.DATA_PATH / \"Signals.mat\", \"Signals\"\n )\n self.__distorted = load_signal_from_mat(\n config.DATA_PATH / f\"recsig_{codec}.mat\", f\"recsig_{codec}\"\n )\n self.__metric_scores = load_signal_from_mat(\n config.DATA_PATH / f\"{metric}_{codec}.mat\", f\"{metric}_{codec}\"\n )\n\n def signal_ids(self):\n return range(self.__reference.shape[1])\n\n def compression_levels(self):\n return range(self.__distorted.shape[0])\n\n def load_reference_signal(self, signal_id: int):\n return self.__reference[:, signal_id]\n\n def load_distorted_signal(self, signal_id: int, compression_level: int):\n return self.__distorted[compression_level, signal_id].reshape(\n -1,\n )\n\n def load_quality_score(self, signal_id: int, compression_level: int):\n return self.__metric_scores[compression_level, signal_id]\n",
"\"\"\"Common utilities\"\"\"\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n\n\ndef print_metirc(text: str, score: float):\n \"\"\"Print metric score in predefined format\"\"\"\n print(f\"{text:40s} {score:.3f}\")\n\n\ndef print_mse_and_pc(name: str, y_true, y_pred):\n \"\"\"Print MSE and Pearson Correlation for signal\"\"\"\n mse_test = mean_squared_error(y_true, y_pred)\n cor_test = np.corrcoef(y_true, y_pred)[0, 1]\n\n print_metirc(f\"{name} MSE\", mse_test)\n print_metirc(f\"{name} PC\", cor_test)\n"
] | [
[
"scipy.io.wavfile.read"
],
[
"numpy.split"
],
[
"numpy.corrcoef",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
agamemnonc/sklearn-ext | [
"1a3a55f36bf0398d175fb2660a0eeff032c6c522",
"1a3a55f36bf0398d175fb2660a0eeff032c6c522"
] | [
"sklearn_ext/discriminant_analysis.py",
"sklearn_ext/tests/test_discriminant_analysis.py"
] | [
"\"\"\"\nRegularized Discriminant Analysis\n\"\"\"\n\nimport warnings\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.covariance import ledoit_wolf, empirical_covariance\nfrom sklearn.covariance import shrunk_covariance\nfrom sklearn.utils import check_array, check_X_y\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom sklearn.preprocessing import StandardScaler\n\n\n__all__ = ['RegularizedDiscriminantAnalysis']\n\n\ndef _cov(X, shrinkage=None):\n \"\"\"Estimate covariance matrix (using optional shrinkage).\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data.\n\n shrinkage : string or float, optional\n Shrinkage parameter, possible values:\n - None or 'empirical': no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Returns\n -------\n s : array, shape (n_features, n_features)\n Estimated covariance matrix.\n \"\"\"\n shrinkage = \"empirical\" if shrinkage is None else shrinkage\n if isinstance(shrinkage, str):\n if shrinkage == 'auto':\n sc = StandardScaler() # standardize features\n X = sc.fit_transform(X)\n s = ledoit_wolf(X)[0]\n s = sc.scale_[:, np.newaxis] * s * \\\n sc.scale_[np.newaxis, :] # rescale\n elif shrinkage == 'empirical':\n s = empirical_covariance(X)\n else:\n raise ValueError('unknown shrinkage parameter')\n elif isinstance(shrinkage, float) or isinstance(shrinkage, int):\n if shrinkage < 0 or shrinkage > 1:\n raise ValueError('shrinkage parameter must be between 0 and 1')\n s = shrunk_covariance(empirical_covariance(X), shrinkage)\n else:\n raise TypeError('shrinkage must be of string or int type')\n return s\n\n\ndef _class_means(X, y):\n \"\"\"Compute class means.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n means : array-like, shape (n_features,)\n Class means.\n \"\"\"\n means = []\n classes = np.unique(y)\n for group in classes:\n Xg = X[y == group, :]\n means.append(Xg.mean(0))\n return np.asarray(means)\n\n\ndef _class_cov(X, y, priors=None, shrinkage=None):\n \"\"\"Compute class covariance matrix.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n priors : array-like, shape (n_classes,)\n Class priors.\n\n shrinkage : string or float, optional\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Returns\n -------\n cov : array-like, shape (n_features, n_features)\n Class covariance matrix.\n \"\"\"\n classes = np.unique(y)\n covs = []\n for group in classes:\n Xg = X[y == group, :]\n covs.append(np.atleast_2d(_cov(Xg, shrinkage)))\n return np.average(covs, axis=0, weights=priors)\n\n\nclass RegularizedDiscriminantAnalysis(BaseEstimator, ClassifierMixin):\n \"\"\"\n Regularized Discriminant Analysis\n\n A classifier with a quadratic decision boundary, generated\n by fitting class conditional densities to the data\n and using Bayes' rule.\n\n The model fits a Gaussian density to each class. 
The covariance matrix\n for each class is a compromise between the sample estimate for the\n particular class and the pooled covariance matrix.\n\n\n Parameters\n ----------\n priors : array, optional, shape = [n_classes]\n Priors on classes\n\n reg_param_alpha : float, optional\n Regularizes the covariance estimate of each class Sigma_k as\n ``reg_param_alpha*Sigma_k + (1-a)*Sigma\n\n reg_param_gamma : float, optional\n Regularizes the covariance estimate as\n ``(1-reg_param_gamma)*Sigma + reg_param_gamma*np.eye(n_features)``.\n Applies to both the pooled and class-specific covariance matrices.\n\n shrinkage : string or float, optional\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n\n Attributes\n ----------\n covariances_ : list of array-like, shape = [n_features, n_features]\n Covariance matrices of each class.\n\n pooled_covariance_ : pooled covariance matrix\n\n means_ : array-like, shape = [n_classes, n_features]\n Class means.\n\n priors_ : array-like, shape = [n_classes]\n Class priors (sum to 1).\n\n quad_coef_ : array, shape (n_features, n_features)\n Quadratic coefficients\n\n linear_coef_ : array, shape (n_features,)\n Linear coefficients .\n\n intercept_ : array, shape (n_features,)\n Intercept term.\n\n Examples\n --------\n >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n >>> import numpy as np\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = RegularizedDiscriminantAnalysis()\n >>> clf.fit(X, y)\n ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n RegularizedDiscriminantAnalysis(priors=None, reg_param_alpha=0.5,\n reg_param_gamma=0.0, shrinkage=None,\n tol=0.0001)\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n\n See also\n --------\n sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear\n Discriminant Analysis\n sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic\n Discriminant Analysis\n \"\"\"\n\n def __init__(self, priors=None, reg_param_alpha=0., reg_param_gamma=0.,\n shrinkage=None):\n self.priors = np.asarray(priors) if priors is not None else None\n self.reg_param_alpha = reg_param_alpha\n self.reg_param_gamma = reg_param_gamma\n self.shrinkage = shrinkage\n\n def fit(self, X, y, tol=None):\n \"\"\"Fit the model according to the given training data and parameters.\n\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n y : array, shape = [n_samples]\n Target values (integers)\n \"\"\"\n X, y = check_X_y(X, y)\n check_classification_targets(y)\n self.classes_, y = np.unique(y, return_inverse=True)\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n if n_classes < 2:\n raise ValueError('y has less than 2 classes')\n if self.priors is None:\n self.priors_ = np.bincount(y) / float(n_samples)\n else:\n self.priors_ = self.priors\n\n pooledcov = _class_cov(X, y, self.priors_, self.shrinkage)\n self.pooled_covariance_ = (1 - self.reg_param_gamma) * \\\n pooledcov + self.reg_param_gamma * np.diag(np.diag(pooledcov))\n cov = []\n means = []\n quad_coef = []\n linear_coef = []\n intercept = []\n for ind in range(n_classes):\n Xg = X[y == ind, :]\n meang = Xg.mean(0)\n if len(Xg) == 1:\n raise ValueError('y has only 1 sample in class %s, covariance '\n 'is 
ill defined.' % str(self.classes_[ind]))\n\n covg = np.atleast_2d(_cov(Xg, self.shrinkage))\n covg = (1 - self.reg_param_gamma) * covg + \\\n self.reg_param_gamma * np.diag(np.diag(covg))\n covg = self.reg_param_alpha * covg + \\\n (1 - self.reg_param_alpha) * self.pooled_covariance_\n\n U, S, V = np.linalg.svd(covg)\n temp1 = np.dot(U, np.dot(np.diag(1 / S), U.T))\n temp2 = np.dot(meang, temp1)\n\n intercept.append(-0.5 * (np.dot(temp2, meang) +\n np.linalg.slogdet(covg)[1]) +\n np.log(self.priors_[ind]))\n linear_coef.append(temp2)\n quad_coef.append(-0.5 * temp1)\n means.append(meang)\n cov.append(covg)\n\n self.means_ = np.asarray(means)\n self.covariances_ = np.asarray(cov)\n self.quad_coef_ = np.asarray(quad_coef)\n self.linear_coef_ = np.asarray(linear_coef)\n self.intercept_ = np.asarray(intercept)\n return self\n\n def _decision_function(self, X):\n check_is_fitted(self, 'classes_')\n\n X = check_array(X)\n norm2 = []\n for i in range(len(self.classes_)):\n norm2.append(self.intercept_[i] +\n np.dot(self.linear_coef_[i], X.T) +\n np.diag(np.dot(X, np.dot(self.quad_coef_[i], X.T))))\n norm2 = np.array(norm2).T # shape = [len(X), n_classes]\n return norm2\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples (test vectors).\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes] or [n_samples,]\n Decision function values related to each class, per sample.\n In the two-class case, the shape is [n_samples,], giving the\n log likelihood ratio of the positive class.\n \"\"\"\n dec_func = self._decision_function(X)\n # handle special case of two classes\n if len(self.classes_) == 2:\n return dec_func[:, 1] - dec_func[:, 0]\n return dec_func\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n The predicted class C for each sample in X is returned.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n C : array, shape = [n_samples]\n \"\"\"\n d = self._decision_function(X)\n y_pred = self.classes_.take(d.argmax(1))\n return y_pred\n\n def predict_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples/test vectors.\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes]\n Posterior probabilities of classification per class.\n \"\"\"\n values = self._decision_function(X)\n # compute the likelihood of the underlying gaussian models\n # up to a multiplicative constant.\n likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])\n # compute posterior probabilities\n return likelihood / likelihood.sum(axis=1)[:, np.newaxis]\n\n def predict_log_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples/test vectors.\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes]\n Posterior log-probabilities of classification per class.\n \"\"\"\n probas_ = self.predict_proba(X)\n return np.log(probas_)\n",
"from sklearn.utils.testing import assert_array_almost_equal\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom sklearn_ext.discriminant_analysis import RegularizedDiscriminantAnalysis\n\n\ndef test_rda():\n for n_classes in [2, 3]:\n n_features = 5\n n_samples = 1000\n X, y = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=n_classes, random_state=11)\n\n # 1. LDA/RDA\n lda_clf = LinearDiscriminantAnalysis(solver='lsqr')\n rda_clf = RegularizedDiscriminantAnalysis(\n reg_param_alpha=0., reg_param_gamma=0.)\n\n lda_clf.fit(X, y)\n rda_clf.fit(X, y)\n\n assert_array_almost_equal(\n lda_clf.predict_proba(X),\n rda_clf.predict_proba(X))\n\n # 2. RDA/QDA\n qda_clf = QuadraticDiscriminantAnalysis()\n rda_clf = RegularizedDiscriminantAnalysis(\n reg_param_alpha=1., reg_param_gamma=0.)\n\n qda_clf.fit(X, y)\n rda_clf.fit(X, y)\n\n assert_array_almost_equal(\n qda_clf.predict_proba(X),\n rda_clf.predict_proba(X))\n\n # 3. RDA/GNB\n gnb_clf = GaussianNB()\n rda_clf = RegularizedDiscriminantAnalysis(\n reg_param_alpha=1., reg_param_gamma=1.)\n\n gnb_clf.fit(X, y)\n rda_clf.fit(X, y)\n\n assert_array_almost_equal(\n gnb_clf.predict_proba(X),\n rda_clf.predict_proba(X))\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.log",
"numpy.linalg.svd",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.check_X_y",
"numpy.diag",
"numpy.unique",
"numpy.asarray",
"sklearn.utils.check_array",
"sklearn.utils.multiclass.check_classification_targets",
"numpy.linalg.slogdet",
"sklearn.covariance.ledoit_wolf",
"sklearn.covariance.empirical_covariance",
"numpy.bincount",
"sklearn.preprocessing.StandardScaler",
"numpy.average"
],
[
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"sklearn.naive_bayes.GaussianNB",
"sklearn.datasets.make_blobs",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danhey/exoplanet | [
"bc82756dfa1b084e82cbcfa6185800833415e847",
"bc82756dfa1b084e82cbcfa6185800833415e847"
] | [
"src/exoplanet/light_curves/interpolated.py",
"tests/light_curves_test.py"
] | [
"# -*- coding: utf-8 -*-\n\n__all__ = [\"InterpolatedLightCurve\"]\n\nimport aesara_theano_fallback.tensor as tt\nimport numpy as np\n\n\ndef interp(n, x, xmin, xmax, dx, func):\n \"\"\"One-dimensional regularly spaced cubic interpolation\n\n Args:\n n (int): The axis of the output that should be interpolated\n x (tensor): The x coordinates where the model should be evaluated\n xmin (scalar): The first coordinate in the grid\n xmax (scalar): The last coordinate in the grid\n dx (scalar): The grid spacing\n func (callable): The function that should be interpolated\n\n Returns:\n y: The function ``func`` interpolated to the coordinates ``x``\n \"\"\"\n xp = tt.arange(xmin - dx, xmax + 2.5 * dx, dx)\n yp = func(xp)\n\n y0 = yp[:-3, n]\n y1 = yp[1:-2, n]\n y2 = yp[2:-1, n]\n y3 = yp[3:, n]\n\n a0 = y1\n a1 = -y0 / 3.0 - 0.5 * y1 + y2 - y3 / 6.0\n a2 = 0.5 * (y0 + y2) - y1\n a3 = 0.5 * ((y1 - y2) + (y3 - y0) / 3.0)\n\n inds = tt.cast(tt.floor((x - xmin) / dx), \"int64\")\n x0 = (x - xp[inds + 1]) / dx\n return a0[inds] + a1[inds] * x0 + a2[inds] * x0 ** 2 + a3[inds] * x0 ** 3\n\n\nclass InterpolatedLightCurve:\n \"\"\"This light curve object is an EXPERIMENTAL and UNTESTED interface for\n pre-computing transit light curves on a grid and then interpolating this\n model onto the observed datapoints. This can improve the computational\n cost of a light curve model, especially when the dataset is large or the\n planet is short period. WARNING: You should only use this at your own risk\n if you know what you're doing!\n \"\"\"\n\n def __init__(\n self, base_light_curve, num_phase, num_planets=None, **kwargs\n ):\n self.base_light_curve = base_light_curve\n self.num_phase = int(num_phase)\n self.num_planets = num_planets\n\n def get_light_curve(\n self,\n orbit=None,\n r=None,\n t=None,\n texp=None,\n oversample=7,\n order=0,\n use_in_transit=None,\n light_delay=False,\n ):\n if self.num_planets is None:\n try:\n vec = orbit.period.tag.test_value\n except AttributeError:\n raise ValueError(\n \"Can't compute num_planets, please provide a value\"\n )\n num_planets = len(np.atleast_1d(vec))\n else:\n num_planets = int(self.num_planets)\n\n if num_planets <= 1:\n func = _wrapper(\n self.base_light_curve,\n orbit=orbit,\n r=r,\n texp=texp,\n oversample=oversample,\n order=order,\n use_in_transit=use_in_transit,\n light_delay=light_delay,\n )\n mn = orbit.t0\n mx = orbit.t0 + orbit.period\n return interp(\n 0,\n tt.mod(t - orbit.t0, orbit.period) + orbit.t0,\n mn,\n mx,\n (mx - mn) / (self.num_phase + 1),\n func,\n )[:, None]\n\n ys = []\n for n in range(num_planets):\n func = _wrapper(\n self.base_light_curve,\n orbit=orbit,\n r=r,\n texp=texp,\n oversample=oversample,\n order=order,\n use_in_transit=use_in_transit,\n light_delay=light_delay,\n )\n mn = orbit.t0[n]\n mx = orbit.t0[n] + orbit.period[n]\n ys.append(\n interp(\n n,\n tt.mod(t - orbit.t0[n], orbit.period[n]) + orbit.t0[n],\n mn,\n mx,\n (mx - mn) / (self.num_phase + 1),\n func,\n )\n )\n\n return tt.stack(ys, axis=-1)\n\n\nclass _wrapper:\n def __init__(self, base_light_curve, *args, **kwargs):\n self.base_light_curve = base_light_curve\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, x):\n kwargs = dict(t=x, **self.kwargs)\n return self.base_light_curve.get_light_curve(*self.args, **kwargs)\n",
"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport aesara_theano_fallback.tensor as tt\nimport numpy as np\nimport pytest\nfrom aesara_theano_fallback import aesara as theano\nfrom packaging import version\n\nfrom exoplanet.light_curves import (\n LimbDarkLightCurve,\n SecondaryEclipseLightCurve,\n)\nfrom exoplanet.orbits import KeplerianOrbit\n\ntry:\n import starry\nexcept ImportError:\n starry = None\n\n\[email protected](starry is None, reason=\"starry is not installed\")\ndef test_light_curve():\n u_val = np.array([0.2, 0.3])\n b_val = np.linspace(-1.5, 1.5, 100)\n r_val = 0.1 + np.zeros_like(b_val)\n lc = LimbDarkLightCurve(u_val[0], u_val[1])\n evaluated = lc._compute_light_curve(b_val, r_val).eval()\n\n if version.parse(starry.__version__) < version.parse(\"0.9.9\"):\n m = starry.Map(lmax=len(u_val))\n m[:] = u_val\n expect = m.flux(xo=b_val, ro=r_val) - 1\n else:\n m = starry.Map(udeg=len(u_val))\n m[1:] = u_val\n expect = m.flux(xo=b_val, ro=r_val[0]).eval() - 1\n\n assert np.allclose(expect, evaluated)\n\n\ndef test_light_curve_grad(caplog):\n u_val = np.array([0.2, 0.3])\n b_val = np.linspace(-1.5, 1.5, 20)\n r_val = 0.1 + np.zeros_like(b_val)\n\n lc = lambda u, b, r: LimbDarkLightCurve( # NOQA\n u[0], u[1]\n )._compute_light_curve(b, r)\n\n with theano.configparser.change_flags(compute_test_value=\"off\"):\n with caplog.at_level(logging.DEBUG, logger=\"theano.gof.cmodule\"):\n theano.gradient.verify_grad(\n lc, [u_val, b_val, r_val], rng=np.random\n )\n\n\ndef test_vector_params():\n u = tt.vector()\n u.tag.test_value = u_val = np.array([0.3, 0.2])\n b = np.linspace(-1.5, 1.5, 20)\n r = 0.1 + np.zeros_like(b)\n\n with pytest.warns(DeprecationWarning, match=r\"vector of limb darkening\"):\n lc1 = theano.function(\n [u], LimbDarkLightCurve(u)._compute_light_curve(b, r)\n )(u_val)\n lc2 = theano.function(\n [u], LimbDarkLightCurve(u[0], u[1])._compute_light_curve(b, r)\n )(u_val)\n np.testing.assert_allclose(lc1, lc2)\n\n with pytest.raises(AssertionError):\n theano.function(\n [], LimbDarkLightCurve([0.3])._compute_light_curve(b, r)\n )()\n\n\ndef test_in_transit():\n t = np.linspace(-20, 20, 1000)\n m_planet = np.array([0.3, 0.5])\n m_star = 1.45\n orbit = KeplerianOrbit(\n m_star=m_star,\n r_star=1.5,\n t0=np.array([0.5, 17.4]),\n period=np.array([10.0, 5.3]),\n ecc=np.array([0.1, 0.8]),\n omega=np.array([0.5, 1.3]),\n m_planet=m_planet,\n )\n u = np.array([0.2, 0.3])\n r = np.array([0.1, 0.01])\n\n lc = LimbDarkLightCurve(u[0], u[1])\n model1 = lc.get_light_curve(r=r, orbit=orbit, t=t)\n model2 = lc.get_light_curve(r=r, orbit=orbit, t=t, use_in_transit=False)\n vals = theano.function([], [model1, model2])()\n assert np.allclose(*vals)\n\n model1 = lc.get_light_curve(r=r, orbit=orbit, t=t, texp=0.1)\n model2 = lc.get_light_curve(\n r=r, orbit=orbit, t=t, texp=0.1, use_in_transit=False\n )\n vals = theano.function([], [model1, model2])()\n assert np.allclose(*vals)\n\n\ndef test_variable_texp():\n t = np.linspace(-20, 20, 1000)\n m_planet = np.array([0.3, 0.5])\n m_star = 1.45\n orbit = KeplerianOrbit(\n m_star=m_star,\n r_star=1.5,\n t0=np.array([0.5, 17.4]),\n period=np.array([10.0, 5.3]),\n ecc=np.array([0.1, 0.8]),\n omega=np.array([0.5, 1.3]),\n m_planet=m_planet,\n )\n u = np.array([0.2, 0.3])\n r = np.array([0.1, 0.01])\n texp0 = 0.1\n\n lc = LimbDarkLightCurve(u[0], u[1])\n model1 = lc.get_light_curve(\n r=r, orbit=orbit, t=t, texp=texp0, use_in_transit=False\n )\n model2 = lc.get_light_curve(\n r=r,\n orbit=orbit,\n t=t,\n use_in_transit=False,\n texp=texp0 + 
np.zeros_like(t),\n )\n vals = theano.function([], [model1, model2])()\n assert np.allclose(*vals)\n\n model1 = lc.get_light_curve(r=r, orbit=orbit, t=t, texp=texp0)\n model2 = lc.get_light_curve(\n r=r,\n orbit=orbit,\n t=t,\n texp=texp0 + np.zeros_like(t),\n use_in_transit=False,\n )\n vals = theano.function([], [model1, model2])()\n assert np.allclose(*vals)\n\n\ndef test_contact_bug():\n orbit = KeplerianOrbit(period=3.456, ecc=0.6, omega=-1.5)\n t = np.linspace(-0.1, 0.1, 1000)\n u = [0.3, 0.2]\n y1 = (\n LimbDarkLightCurve(u[0], u[1])\n .get_light_curve(orbit=orbit, r=0.1, t=t, texp=0.02)\n .eval()\n )\n y2 = (\n LimbDarkLightCurve(u[0], u[1])\n .get_light_curve(\n orbit=orbit, r=0.1, t=t, texp=0.02, use_in_transit=False\n )\n .eval()\n )\n assert np.allclose(y1, y2)\n\n\ndef test_small_star():\n from batman.transitmodel import TransitModel, TransitParams\n\n u_star = [0.2, 0.1]\n r = 0.04221468\n\n m_star = 0.151\n r_star = 0.189\n period = 0.4626413\n t0 = 0.2\n b = 0.5\n ecc = 0.1\n omega = 0.1\n t = np.linspace(0, period, 500)\n\n r_pl = r * r_star\n\n orbit = KeplerianOrbit(\n r_star=r_star,\n m_star=m_star,\n period=period,\n t0=t0,\n b=b,\n ecc=ecc,\n omega=omega,\n )\n a = orbit.a.eval()\n incl = orbit.incl.eval()\n\n lc = LimbDarkLightCurve(u_star[0], u_star[1])\n\n model1 = lc.get_light_curve(r=r_pl, orbit=orbit, t=t)\n model2 = lc.get_light_curve(r=r_pl, orbit=orbit, t=t, use_in_transit=False)\n vals = theano.function([], [model1, model2])()\n assert np.allclose(*vals)\n\n params = TransitParams()\n params.t0 = t0\n params.per = period\n params.rp = r\n params.a = a / r_star\n params.inc = np.degrees(incl)\n params.ecc = ecc\n params.w = np.degrees(omega)\n params.u = u_star\n params.limb_dark = \"quadratic\"\n\n model = TransitModel(params, t)\n flux = model.light_curve(params)\n assert np.allclose(vals[0][:, 0], flux - 1)\n\n\ndef test_singular_points():\n u = np.array([0.2, 0.3])\n b = tt.vector()\n b.tag.test_value = np.array([0.5])\n r = tt.vector()\n r.tag.test_value = np.array([0.1])\n lc = LimbDarkLightCurve(u[0], u[1])\n f = lc._compute_light_curve(b, r)\n func = theano.function([b, r], f)\n\n def compare(b_val, r_val, b_eps, r_eps):\n \"\"\"\n Compare the flux at a singular point\n to the flux at neighboring points.\n\n \"\"\"\n b_val = [b_val - b_eps, b_val + b_eps, b_val]\n r_val = [r_val - r_eps, r_val + r_eps, r_val]\n flux = func(b_val, r_val)\n assert np.allclose(np.mean(flux[:2]), flux[2])\n\n # Test the b = 1 - r singular point\n compare(0.1, 0.9, 1e-8, 0.0)\n\n # Test the b = r = 0.5 singular point\n compare(0.5, 0.5, 1e-8, 0.0)\n\n # Test the b = 0 singular point\n compare(0.0, 0.1, 1e-8, 0.0)\n\n # Test the b = 0, r = 1 singular point\n compare(0.0, 1.0, 0.0, 1e-8)\n\n # Test the b = 1 + r singular point\n compare(1.1, 0.1, 1e-8, 0.0)\n\n\ndef _check_quad(u, b, depth, ror):\n u1 = u[0]\n u2 = u[1]\n mu = np.sqrt(1 - b ** 2)\n expect = np.sqrt(\n depth\n * (1 - u1 / 3 - u2 / 6)\n / (1 - u1 * (1 - mu) - u2 * (1 - mu) ** 2)\n )\n assert np.shape(expect) == np.shape(ror)\n assert np.allclose(expect, ror)\n\n\ndef test_approx_transit_depth():\n u = np.array([0.3, 0.2])\n lc = LimbDarkLightCurve(u[0], u[1])\n\n for b, delta in [\n (np.float64(0.5), np.float64(0.01)),\n (np.array([0.1, 0.9]), np.array([0.1, 0.5])),\n (np.array([0.1, 0.9, 0.3]), np.array([0.1, 0.5, 0.0234])),\n ]:\n dv = tt.as_tensor_variable(delta)\n ror, jac = lc.get_ror_from_approx_transit_depth(dv, b, jac=True)\n _check_quad(u, b, delta, ror.eval())\n assert 
np.allclose(theano.grad(tt.sum(ror), dv).eval(), jac.eval())\n\n\ndef test_secondary_eclipse():\n u1 = np.array([0.3, 0.2])\n lc1 = LimbDarkLightCurve(u1[0], u1[1])\n\n u2 = np.array([0.4, 0.1])\n lc2 = LimbDarkLightCurve(u2[0], u2[1])\n\n s = 0.3\n ror = 0.08\n f = ror ** 2 * s\n lc = SecondaryEclipseLightCurve(u1, u2, s)\n\n t = np.linspace(-6.435, 10.4934, 5000)\n orbit1 = KeplerianOrbit(period=1.543, t0=-0.123)\n orbit2 = KeplerianOrbit(\n period=orbit1.period,\n t0=orbit1.t0 + 0.5 * orbit1.period,\n r_star=ror,\n m_star=1.0,\n )\n\n y1 = lc1.get_light_curve(orbit=orbit1, r=ror, t=t).eval()\n y2 = lc2.get_light_curve(orbit=orbit2, r=1.0, t=t).eval()\n y = lc.get_light_curve(orbit=orbit1, r=ror, t=t).eval()\n y_expect = (y1 + f * y2) / (1 + f)\n\n assert np.allclose(y_expect, y, atol=5e-6)\n"
] | [
[
"numpy.atleast_1d"
],
[
"numpy.allclose",
"numpy.linspace",
"numpy.sqrt",
"numpy.degrees",
"numpy.zeros_like",
"numpy.shape",
"numpy.mean",
"numpy.testing.assert_allclose",
"numpy.float64",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tknapen/pearl_3T | [
"5e199973002766349b7eb13a04dafe62827f34ec"
] | [
"pearl/stop/roi.py"
] | [
"from __future__ import division, print_function\n\n# def prepare_mcf_data(in_mcf_data):\n\n# np.hstack(np.vstack([np.zeros(in_mcf_data.shape[0]), np.diff(in_mcf_data)])\n\n# return \n\n\ndef fit_FIR_roi(experiment,\n h5_file,\n in_files,\n vol_regressor_list, \n behavior_file_list, \n mapper_file = 'zstat2_flirt',\n mask_threshold = 0.0,\n mask_direction = 'pos',\n fmri_data_type = 'psc',\n fir_frequency = 4,\n fir_interval = [-3.0,12.0],\n roi_list = ['maxSTN25exc','SThR25max','SST_GO_preSMA','SST_GO_rIFG', 'Caudate', 'PvmPFCNoventri'],\n TR = 2.0, \n output_pdf_dir = '', \n output_tsv_dir = ''):\n\n import nibabel as nib\n import numpy as np\n import numpy.linalg as LA\n import scipy as sp\n import os\n import os.path as op\n import pandas as pd\n import matplotlib.pyplot as pl\n import seaborn as sn\n from spynoza.nodes.utils import get_scaninfo\n from fir import FIRDeconvolution\n import tempfile\n from .behavior import process_tsv\n from ..utils.utils import roi_data_from_hdf\n from IPython import embed as shell\n\n run_durations = []\n for ifn in in_files:\n non_TR, dims, dyns, voxsize, affine = get_scaninfo(ifn)\n run_durations.append(TR*dyns)\n\n ################################################################################## \n # behavior data generalizes across ROIs of course\n ##################################################################################\n all_event_df = process_tsv(behavior_file_list, run_durations)\n\n # first, just run a FIR on the image pairs\n stim_event_names = ['correct', 'succesful_stop', 'Failed_stop']\n stim_event_list = []\n for en in stim_event_names:\n stim_event_list.append(np.array(all_event_df['onset'])[np.array(all_event_df['Response'] == en)])\n\n all_event_names = stim_event_names\n\n ################################################################################## \n # whole-brain nuisance data generalizes across ROIs of course\n ##################################################################################\n\n if vol_regressor_list != []:\n all_vol_regs = []\n for x in range(len(vol_regressor_list)):\n all_vol_regs.append(np.loadtxt(vol_regressor_list[x]))\n all_vol_regs = np.vstack(all_vol_regs)\n\n ################################################################################## \n # per-roi data\n ##################################################################################\n for roi in roi_list:\n # shell()\n contrast_data = roi_data_from_hdf(data_types_wildcards = [roi], roi_name_wildcard = roi, hdf5_file = h5_file, folder_alias = 'rois')\n time_course_data = [roi_data_from_hdf(data_types_wildcards = [os.path.split(in_f)[-1][:-7]], roi_name_wildcard = roi, hdf5_file = h5_file, folder_alias = fmri_data_type) for in_f in in_files]\n\n time_course_data = np.hstack(time_course_data)\n\n # if mask_threshold < 0:\n # mask_threshold = -mask_threshold\n # contrast_data = -contrast_data\n\n over_mask_threshold = (contrast_data[:,0]>mask_threshold)\n iceberg_tip = contrast_data[over_mask_threshold, 0]\n\n projected_time_course = np.dot(time_course_data[over_mask_threshold].T, iceberg_tip) / np.sum(iceberg_tip)\n av_time_course = time_course_data.mean(axis = 0)\n\n # nuisance_regressors = np.nan_to_num(all_vol_reg)\n fd = FIRDeconvolution(\n signal = projected_time_course, \n events = [stim_event_list[0], stim_event_list[1], stim_event_list[2]], # dictate order\n event_names = stim_event_names, \n # durations = {'AB':stim_duration_list[0], 'CD':stim_duration_list[1], 'EF':stim_duration_list[2], 'fb':fb_durations},\n # events = 
[stim_events], # dictate order\n # event_names = ['stim'], \n # durations = {'stim':stim_durations},\n # covariates = covariates,\n sample_frequency = 1.0/TR,\n deconvolution_frequency = fir_frequency,\n deconvolution_interval = fir_interval\n )\n\n fd.resampled_signal = np.nan_to_num(fd.resampled_signal)\n # we then tell it to create its design matrix\n fd.create_design_matrix()\n\n # resample mocos and so forth\n # all_nuisances = sp.signal.resample(nuisance_regressors, fd.resampled_signal_size, axis = -1)\n # fd.add_continuous_regressors_to_design_matrix(all_nuisances)\n\n # fit\n fd.regress(method = 'lstsq')\n # fd.ridge_regress(cv = 10)\n fd.calculate_rsq()\n\n # plot\n sn.set_style('ticks')\n f = pl.figure(figsize = (6,3))\n s = f.add_subplot(111)\n s.axhline(0, c='k', lw = 0.25)\n s.axvline(0, c='k', lw = 0.25)\n s.set_xlabel('Time [s]')\n s.set_ylabel('BOLD % signal change')\n for en in all_event_names:\n this_tc = np.squeeze(np.nan_to_num(fd.betas_for_cov(en).T))\n pl.plot(fd.deconvolution_interval_timepoints, this_tc, label = en)\n pl.legend()\n sn.despine(offset = 10, ax = s)\n pl.tight_layout()\n\n pl.savefig(op.join(output_pdf_dir, roi + '_deco.pdf'))\n\n f = pl.figure(figsize = (9,3))\n s = f.add_subplot(111)\n s.axhline(0, c='k', lw = 0.25)\n s.set_title('data and predictions, rsq %1.3f'%fd.rsq)\n s.set_xlabel('Time [s]')\n s.set_ylabel('BOLD % signal change')\n pl.plot(np.linspace(0,np.sum(run_durations), fd.resampled_signal.shape[1]), fd.resampled_signal.T, 'r', label = 'data')\n pl.plot(np.linspace(0,np.sum(run_durations), fd.resampled_signal.shape[1]), fd.predict_from_design_matrix(fd.design_matrix).T, 'k', label = 'model')\n pl.legend()\n sn.despine(offset = 10, ax = s)\n pl.tight_layout()\n pl.savefig(op.join(output_pdf_dir, roi + '_deco_tc.pdf'))\n\n op_df = pd.DataFrame(np.array([np.squeeze(np.nan_to_num(fd.betas_for_cov(en).T)) for en in all_event_names]), \n columns = fd.deconvolution_interval_timepoints, \n index = all_event_names)\n # np.savetxt(op.join(output_tsv_dir, roi + '_deco.tsv'), np.array(op_df), delimiter = '\\t')\n op_df.to_csv(op.join(output_tsv_dir, roi + '_deco_stop.tsv'), sep = '\\t')\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.hstack",
"matplotlib.pyplot.tight_layout",
"numpy.dot",
"numpy.nan_to_num",
"matplotlib.pyplot.plot",
"numpy.loadtxt",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ehsanasgari/1000langs | [
"6a6b745c1094581cd95e992e6be4e06967426570"
] | [
"run_crawler/lang1000.py"
] | [
"import sys\n\nsys.path.append('../')\nfrom utility.visualization_utility import methods2venn2\nimport pandas as pd\nimport numpy as np\nfrom metaAPI.metadata import getMassiveparallel_meta\nfrom bdpAPI.bdpAPI import BDPAPl\nfrom bibleCLOUDAPI.biblecloudAPI import BibleCloudAPl\nfrom biblePNGAPI.pngAPI import PNGAPl\nfrom bibleCOMAPI.biblecomAPI import BibleComAPl\nimport argparse\nimport warnings\nimport requests\n\ndef warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\ndef checkArgs(args):\n '''\n This function checks the input arguments and returns the extracted inputs and if there is an error False\n '''\n # Using the argument parser in case of -h or wrong usage the correct argument usage\n # will be prompted\n parser = argparse.ArgumentParser()\n #5120f8eb40005070f19e9\n # output directory #################################################################################################\n parser.add_argument('--outdir', action='store', dest='output_dir', default=False, type=str,\n help=\"directory for storing the output files, if doesn't exist will be created.\")\n #f03a423aad9\n # output directory #################################################################################################\n parser.add_argument('--apikey', action='store', dest='apikey', default=False, type=str,\n help=\"API key of the Bible Digital Platform\")\n\n # to override the previous files or to continue ####################################################################\n parser.add_argument('--override', action='store', dest='override', default=1, type=int,\n help='Override the existing files?')\n\n # to override the previous files or to continue ####################################################################\n parser.add_argument('--repeat', action='store', dest='repeat', default=4, type=int,\n help='Maximum trials on a source to retrieve more languages?')\n\n # to override the previous files or to continue ####################################################################\n parser.add_argument('--updatemeta', action='store', dest='updatemeta', default=1, type=int,\n help='Would you like to update metadata information from the sources? 
1 otherwise 0')\n\n # cores ################################################################################################\n parser.add_argument('--cores', action='store', dest='cores', default=4, type=int,\n help='Number of cores to be used, default is 4')\n\n try:\n parsedArgs = parser.parse_args()\n\n response = requests.get('https://dbt.io/api/apiversion?key=' + parsedArgs.apikey + '&v=2')\n if response.status_code != 200:\n print('Enter a correct API code')\n return False\n else:\n print('The API code is verified..')\n return parsedArgs.output_dir, parsedArgs.override, parsedArgs.updatemeta, parsedArgs.apikey, parsedArgs.cores, parsedArgs.repeat\n except:\n exit(0)\n return False\n\n\nif __name__ == '__main__':\n\n warnings.filterwarnings('ignore')\n out = checkArgs(sys.argv)\n if not out:\n exit()\n else:\n output_dir, override, updatemeta, apikey, cores, repeat = out\n\n # parameters\n out_path = output_dir\n nump = cores\n update_metadata = (override == 1)\n override = (updatemeta == 1)\n\n print('=====================================')\n print('>>>> The PBC files are being generated at '+out_path )\n print('=====================================')\n\n\n print('=====================================')\n print('>>>> Start retrieveing parallel bibles from the bible digital platform..')\n print('=====================================')\n # API call\n BDP_obj = BDPAPl(apikey, out_path)\n BDP_obj.create_BPC(nump=nump, update_meta_data=update_metadata, override=override, repeat=repeat)\n\n print('=====================================')\n print('<<<< ✓ Retrieveing parallel bibles from bible digital platform is completed..')\n print(' Report is generated at '+out_path+'/reports/'+'crawl_report_API.tsv')\n print(' Aggregated report '+out_path+'/reports/'+'final_rep.tsv')\n print('=====================================')\n print(\"\")\n print(\"\")\n\n\n print('=====================================')\n print('>>>> Start retrieveing parallel bibles from biblecloud..')\n print('=====================================')\n # BibleCloud call\n CL = BibleCloudAPl(out_path)\n CL.crawl_bible_cloud(nump=nump, override=override, repeat=repeat)\n print('=====================================')\n print('<<<< ✓ Retrieveing parallel bibles from bible cloud is completed..')\n print(' Report is generated at '+out_path+'/reports/'+'crawl_report_cloud.tsv')\n print(' Aggregated report '+out_path+'/reports/'+'final_rep.tsv')\n print('=====================================')\n print(\"\")\n print(\"\")\n\n print('=====================================')\n print('>>>> Start retrieveing parallel bibles from PNGscripture..')\n print('=====================================')\n # PNG call\n PNG = PNGAPl(out_path)\n PNG.crawl_bpc(nump=nump, override=override, repeat=repeat)\n print('=====================================')\n print('<<<< ✓ Retrieveing parallel bibles from PNGscripture is completed..')\n print(' Report is generated at '+out_path+'/reports/'+'crawl_report_png.tsv')\n print(' Aggregated report '+out_path+'/reports/'+'final_rep.tsv')\n print('=====================================')\n print(\"\")\n print(\"\")\n\n\n print('=====================================')\n print('>>>> Start retrieveing parallel bibles from biblecom..')\n print('=====================================')\n # BibleCom\n BCA = BibleComAPl(out_path)\n BCA.crawl_bpc(nump=nump, update_meta=update_metadata, override=override, repeat=repeat)\n print('=====================================')\n print('<<<< ✓ Retrieveing parallel bibles from biblecom is 
completed..')\n print(' Report is generated at '+out_path+'/reports/'+'crawl_report_biblecom.tsv')\n print(' Aggregated report '+out_path+'/reports/'+'final_rep.tsv')\n print('=====================================')\n print(\"\")\n print(\"\")\n\n\n print('>>>> Comparison with massively parallel bible corpora ')\n df_massivepar = getMassiveparallel_meta(update=False)\n\n out_path = out_path\n df_1000Langs = pd.read_table(out_path + '/reports/final_rep.tsv')\n df_1000Langs_stat = dict()\n for x, y in df_1000Langs.groupby('language_iso')['verses'].apply(list).to_dict().items():\n df_1000Langs_stat[x] = [len(y), max(y), np.mean(y)]\n\n rows = []\n for iso, scores in df_1000Langs_stat.items():\n rows.append([iso, scores[0], scores[1], scores[2]])\n df_1000Langs = pd.DataFrame(rows)\n df_1000Langs = df_1000Langs.rename(index=str,\n columns={0: 'language_iso', 1: '#trans-1000Langs', 2: 'max-verse-1000Langs',\n 3: 'mean-verse-1000Langs'})\n df_1000Langs = df_1000Langs.set_index('language_iso')\n\n lange_overlap = {'MassiveParallel': df_massivepar.language_iso.tolist(),\n '1000Langs': df_1000Langs.index.tolist()}\n\n l = methods2venn2(lange_overlap, name=out_path + '/reports/venn')\n\n comp_table = df_1000Langs.join(df_massivepar.set_index('language_iso'), on='language_iso')\n comp_table = comp_table.fillna(0)\n writer = pd.ExcelWriter(out_path+'/reports/comparison.xlsx')\n comp_table.to_excel(writer, 'Comparison with massively parallel corpora')\n writer.save()\n print('In ', comp_table[comp_table['max-verse-1000Langs'] >= comp_table['max-verse-massivepar']].shape[0],\n ' iso codes out of ', comp_table.shape[0],\n ' total, 1000Langs crawled more verses for that language!')\n print('In ', comp_table[(comp_table['max-verse-1000Langs'] >= comp_table['max-verse-massivepar']) & (\n comp_table['max-verse-massivepar'] > 0)].shape[0], ' iso codes out of ',\n comp_table[(comp_table['max-verse-1000Langs'] > 0) & (comp_table['max-verse-massivepar'] > 0)].shape[0],\n ' total intersections, 1000Langs crawled more verses in that language!')\n\n\n print('>>>> Comparison with massively parallel bible corpora ')\n print(' See the Venn diagram '+out_path+'/reports/'+'venn.pdf')\n print(' See the detailed report on the comparison of the crawled corpus with the massively parallel corpus here: '+out_path+'/reports/comparison.xlsx')\n"
] | [
[
"pandas.read_table",
"numpy.mean",
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
google/multi-task-architecture-search | [
"f75c0b4893b3c6ac897bc05c84096ee3eb93d79f"
] | [
"train/distill.py"
] | [
"# Copyright 2019 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom mtl.train import multiclass\nimport numpy as np\nimport torch\n\n\ndef setup_extra_args(parser):\n parser.add_argument('--dist_loss_wt', type=float, default=1)\n parser.add_argument('--class_loss_wt', type=float, default=0)\n\n\nclass Task(multiclass.Task):\n \"\"\"Distillation training manager.\"\"\"\n\n def __init__(self, opt, ds, dataloaders):\n super().__init__(opt, ds, dataloaders)\n self.ce_loss = torch.nn.CrossEntropyLoss()\n self.mse_loss = torch.nn.MSELoss()\n\n def run(self, split, step):\n opt, ds = self.opt, self.ds\n self.step = step\n self.split = split\n\n # Sample task\n task_idx = self.sample_task()\n self.task_idx = task_idx\n self.curr_task = ds['train'][task_idx].task_name\n\n # Get samples + model output\n inp, label, _ = self.get_next_sample(split, task_idx)\n ref_feats, pred_feats, pred = self.model(inp, task_idx, split, step)\n\n # Calculate loss\n _, class_preds = torch.max(pred, 1)\n t_min, t_max = self.model.task_low[task_idx], self.model.task_high[task_idx]\n accuracy = class_preds.eq(label).float().mean()\n accuracy = (accuracy - t_min) / (t_max - t_min)\n\n class_loss = self.ce_loss(pred, label)\n distill_loss = self.mse_loss(pred_feats, ref_feats.detach())\n\n self.net_loss = 0\n if opt.dist_loss_wt:\n self.net_loss += opt.dist_loss_wt * distill_loss\n if opt.class_loss_wt:\n self.net_loss += opt.class_loss_wt * class_loss\n\n if split == 'valid':\n self.valid_accuracy_track[task_idx] += [accuracy.data.item()]\n self.update_log('accuracy', accuracy.data.item())\n self.update_log('network_loss', self.net_loss.data.item())\n self.score = np.array([d['valid'] for d in self.log['accuracy']]).mean()\n\n self.global_trained_steps += 1\n self.task_trained_steps[task_idx] += 1\n"
] | [
[
"numpy.array",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CU-NESS/pylinex | [
"b6f342595b6a154e129eb303782e5268088f34d5",
"b6f342595b6a154e129eb303782e5268088f34d5",
"b6f342595b6a154e129eb303782e5268088f34d5",
"b6f342595b6a154e129eb303782e5268088f34d5"
] | [
"pylinex/basis/TrainingSetPlot.py",
"pylinex/model/OutputInterpolatedModel.py",
"pylinex/model/ExpandedModel.py",
"pylinex/model/DirectSumModel.py"
] | [
"\"\"\"\nFile: pylinex/basis/TrainingSetPlot.py\nAuthor: Keith Tauscher\nDate: 22 Feb 2019\n\nDescription: File containing a function which plots a three panel figure\n summarizing a given training set.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom .TrainedBasis import TrainedBasis\n\ndef plot_training_set_with_modes(training_set, num_modes, error=None,\\\n mean_translation=False, x_values=None, curve_slice=slice(None),\\\n subtract_mean=False, alpha=1., fontsize=24, xlabel='',\\\n extra_ylabel_string='', title='', figsize=(12,20), show=False):\n \"\"\"\n Plots a three panel figure summarizing the given training set. The top\n panel shows the training set itself. The middle panel shows the basis\n coming from the training set (with the given number of modes) assuming the\n given error curve. The bottom panel shows the residuals when the basis\n shown in the second panel is used to fit the training set.\n \n training_set: 2D numpy.ndarray of shape (ncurves, nchannels) containing set\n of training curves\n num_modes: the number of eigenmodes to use in the basis to plot\n error: the error distribution expected in data for which the training set\n will be used to fit\n mean_translation: if True (default False), the mean of the training set is\n subtracted before taking SVD.\n x_values: np.ndarray of x values with which to plot training set, basis,\n and residuals. If None, set to np.arange(training_set.shape[1])\n curve_slice: slice to apply to the first axis of training_set and residuals\n when they are plotted in the top and bottom panels\n subtract_mean: if True (default: False), mean of training set is subtracted\n in top panel\n alpha: opacity of curves plotted in training set and residuals panels\n fontsize: size of fonts for labels and title\n xlabel: string label describing x_values\n extra_ylabel_string: string to add to end of ylabel of each panel (usually\n a space and a units string)\n title: title to put on top of Figure\n figsize: size of figure on which to plot 3 panels\n show: if True, matplotlib.pyplot.show() is called before this function\n returns\n \n returns: if show is False, returns Figure object, otherwise None\n \"\"\"\n if type(x_values) is type(None):\n x_values = np.arange(training_set.shape[1])\n xlim = (x_values[0], x_values[-1])\n basis = TrainedBasis(training_set, num_modes, error=error,\\\n mean_translation=mean_translation)\n residuals = training_set - basis(basis.training_set_fit_coefficients)\n if mean_translation:\n residuals = residuals - np.mean(training_set, axis=0)[np.newaxis,:]\n fig = pl.figure(figsize=figsize)\n ax = fig.add_subplot(311)\n ax.plot(x_values, (training_set[curve_slice,:] -\\\n (np.mean(training_set[curve_slice,:], axis=0, keepdims=True)\\\n if subtract_mean else 0)).T, alpha=alpha)\n ax.set_xlim(xlim)\n ax.set_title(title, size=fontsize)\n ax.set_ylabel('Training set{0!s}{1!s}'.format(\\\n ' - mean' if subtract_mean else '', extra_ylabel_string),\\\n size=fontsize)\n ax.tick_params(labelsize=fontsize, length=7.5, width=2.5, which='major',\\\n labelbottom=False, direction='inout')\n ax.tick_params(labelsize=fontsize, length=4.5, width=1.5, which='minor',\\\n labelbottom=False, direction='inout')\n ax = fig.add_subplot(312)\n ax.plot(x_values, basis.basis.T)\n ax.set_xlim(xlim)\n ax.set_ylabel('Modes{!s}'.format(extra_ylabel_string), size=fontsize)\n ax.tick_params(labelsize=fontsize, length=7.5, width=2.5, which='major',\\\n top=True, labelbottom=False, direction='inout')\n ax.tick_params(labelsize=fontsize, 
length=4.5, width=1.5, which='minor',\\\n top=True, labelbottom=False, direction='inout')\n ax = fig.add_subplot(313)\n ax.plot(x_values, residuals[curve_slice,:].T, alpha=alpha)\n ax.set_xlim(xlim)\n ax.set_xlabel(xlabel, size=fontsize)\n ax.set_ylabel('Residuals{!s}'.format(extra_ylabel_string), size=fontsize)\n ax.tick_params(labelsize=fontsize, length=7.5, width=2.5, which='major',\\\n top=True, direction='inout')\n ax.tick_params(labelsize=fontsize, length=4.5, width=1.5, which='minor',\\\n top=True, direction='inout')\n fig.subplots_adjust(top=0.95, bottom=0.1, left=0.15, right=0.95, hspace=0)\n if show:\n pl.show()\n else:\n return fig\n\n",
"\"\"\"\nFile: pylinex/model/OutputInterpolatedModel.py\nAuthor: Keith Tauscher\nDate: 30 Jul 2019\n\nDescription: File containing a class representing a model which is an\n interpolated (in the output space) version of an input model.\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nfrom scipy.interpolate import make_interp_spline as make_spline\nfrom ..util import int_types, sequence_types, create_hdf5_dataset\nfrom .Model import Model\n\nclass OutputInterpolatedModel(Model):\n \"\"\"\n Class representing a model which is an interpolation (in the output space)\n version of an input model.\n \"\"\"\n def __init__(self, model, old_xs, new_xs, order=1):\n \"\"\"\n Initializes a TransformedModel based around the given underlying model\n and the binner which will Bin it.\n \n model: a Model object\n old_xs: the x values at which underlying model returns values\n new_xs: the x values at which this model should return values\n order: order of spline to use in interpolation. Default is 1 (linear\n interpolation). odd positive integer, usually in {1,3,5}\n \"\"\"\n self.model = model\n self.old_xs = old_xs\n self.new_xs = new_xs\n self.order = order\n \n @property\n def num_channels(self):\n \"\"\"\n Property storing the number of channels in the output of this model.\n \"\"\"\n if not hasattr(self, '_num_channels'):\n self._num_channels = len(self.new_xs)\n return self._num_channels\n \n @property\n def old_xs(self):\n \"\"\"\n Property storing the x values at which underlying model returns\n values.\n \"\"\"\n if not hasattr(self, '_old_xs'):\n raise AttributeError(\"old_xs was referenced before it was set.\")\n return self._old_xs\n \n @old_xs.setter\n def old_xs(self, value):\n \"\"\"\n Setter for the x values at which underlying model returns values.\n \n value: 1D array with length given by num_channels property of\n underlying model\n \"\"\"\n if type(value) in sequence_types:\n value = np.array(value)\n if value.shape == (self.model.num_channels,):\n self._old_xs = value\n else:\n raise ValueError(\"old_xs did not have the same length as \" +\\\n \"the outputs of the underlying model.\")\n else:\n raise TypeError(\"old_xs was set to a non-array.\")\n \n @property\n def new_xs(self):\n \"\"\"\n Property storing the x values at which this model returns values.\n \"\"\"\n if not hasattr(self, '_new_xs'):\n raise AttributeError(\"new_xs was referenced before it was set.\")\n return self._new_xs\n \n @new_xs.setter\n def new_xs(self, value):\n \"\"\"\n Setter for the x values at which this model returns values.\n \n value: 1D array\n \"\"\"\n if type(value) in sequence_types:\n self._new_xs = np.array(value)\n else:\n raise TypeError(\"new_xs was set to a non-array.\")\n \n @property\n def order(self):\n \"\"\"\n Property storing the order of the spline interpolation used.\n \"\"\"\n if not hasattr(self, '_order'):\n raise AttributeError(\"order was referenced before it was set.\")\n return self._order\n \n @order.setter\n def order(self, value):\n \"\"\"\n Setter for the order of the spline interpolation to use.\n \n value: odd positive integer, usually one of {1,3,5}\n \"\"\"\n if type(value) in int_types:\n if value > 0:\n if (value % 2) == 0:\n print(\"WARNING: order of spline interpolation is being \" +\\\n \"set to an even integer, which may produce strange \" +\\\n \"results. 
Is this definitely what you want?\")\n self._order = value\n else:\n raise ValueError(\"order was set to a non-positive integer.\")\n else:\n raise TypeError(\"order was set to a non-int.\")\n \n @property\n def model(self):\n \"\"\"\n Property storing the inner model (as a Model object) which is being\n interpolated (in output space).\n \"\"\"\n if not hasattr(self, '_model'):\n raise AttributeError(\"model referenced before it was set.\")\n return self._model\n \n @model.setter\n def model(self, value):\n \"\"\"\n Setter for the inner model which is being interpolated (in output\n space).\n \n value: a Model object\n \"\"\"\n if isinstance(value, Model):\n self._model = value\n else:\n raise TypeError(\"model was set to a non-Model object.\")\n \n @property\n def parameters(self):\n \"\"\"\n Property storing a list of strings associated with the parameters\n necessitated by this model.\n \"\"\"\n return self.model.parameters\n \n def __call__(self, parameters):\n \"\"\"\n Evaluates the model at the given parameters.\n \n parameters: 1D numpy.ndarray of parameter values\n \n returns: array of size (num_channels,)\n \"\"\"\n return make_spline(self.old_xs, self.model(parameters),\\\n k=self.order)(self.new_xs)\n \n @property\n def gradient_computable(self):\n \"\"\"\n Property storing a boolean describing whether the gradient of this\n model is computable.\n \"\"\"\n return False\n \n @property\n def hessian_computable(self):\n \"\"\"\n Property storing a boolean describing whether the hessian of this model\n is computable.\n \"\"\"\n return False\n \n def fill_hdf5_group(self, group):\n \"\"\"\n Fills the given hdf5 file group with information about this model.\n \n group: hdf5 file group to fill with information about this model\n \"\"\"\n group.attrs['class'] = 'OutputInterpolatedModel'\n group.attrs['order'] = self.order\n self.model.fill_hdf5_group(group.create_group('model'))\n create_hdf5_dataset(group, 'old_xs', data=self.old_xs)\n create_hdf5_dataset(group, 'new_xs', data=self.new_xs)\n \n def __eq__(self, other):\n \"\"\"\n Checks for equality with other.\n \n other: object to check for equality\n \n returns: True if other is equal to this model, False otherwise\n \"\"\"\n if isinstance(other, OutputInterpolatedModel):\n if self.model == other.model:\n if self.order == other.order:\n if np.all(self.old_xs == other.old_xs):\n if self.new_xs.shape == other.new_xs.shape:\n return np.all(self.new_xs == other.new_xs)\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n \n def quick_fit(self, data, error):\n \"\"\"\n Performs a quick fit to the given data.\n \n data: curve to fit with the model\n error: noise level in the data\n \n returns: (parameter_mean, parameter_covariance)\n \"\"\"\n raise NotImplementedError(\"quick_fit not implemented for \" +\\\n \"OutputInterpolatedModel objects.\")\n \n @property\n def bounds(self):\n \"\"\"\n Property storing the natural bounds of the parameters of this model.\n Since this is just a rebranding of the underlying model, the bounds are\n passed through with no changes.\n \"\"\"\n return self.model.bounds\n\n",
"\"\"\"\nFile: pylinex/model/ExpandedModel.py\nAuthor: Keith Tauscher\nDate: 15 Jan 2018\n\nDescription: File containing a class which represents a model which simply\n expands the output of another model using an Expander from the\n pylinex.expander module.\n\"\"\"\nimport numpy as np\nfrom ..expander import Expander\nfrom .Model import Model\n\nclass ExpandedModel(Model):\n \"\"\"\n Class which represents a model which simply expands the output of another\n model using an Expander from the pylinex.expander module.\n \"\"\"\n def __init__(self, model, expander):\n \"\"\"\n Creates an ExpandedModel with the given model and expander.\n \n model: Model object to build this model around\n expander: Expander object with which to expand the output of model\n \"\"\"\n self.model = model\n self.expander = expander\n \n def expanderless(self):\n \"\"\"\n Finds and returns a version of this model that exists in the unexpanded\n space.\n \"\"\"\n return self.model\n \n @property\n def model(self):\n \"\"\"\n Property storing the Model object at the core of this model.\n \"\"\"\n if not hasattr(self, '_model'):\n raise AttributeError(\"model referenced before it was set.\")\n return self._model\n \n @model.setter\n def model(self, value):\n \"\"\"\n Setter for the Model object at the core of this model.\n \n value: must be a Model object\n \"\"\"\n if isinstance(value, Model):\n self._model = value\n else:\n raise TypeError(\"model was not a Model object.\")\n \n @property\n def expander(self):\n \"\"\"\n Property storing the Expander object which expands the output of the\n core model to the output of this model.\n \"\"\"\n if not hasattr(self, '_expander'):\n raise AttributeError(\"expander referenced before it was set.\")\n return self._expander\n \n @expander.setter\n def expander(self, value):\n \"\"\"\n Setter for the expander object which expands the output of the core\n model to the output of this model.\n \n value: must be an Expander object\n \"\"\"\n if isinstance(value, Expander):\n self._expander = value\n else:\n raise TypeError(\"expander was not an Expander object.\")\n \n @property\n def parameters(self):\n \"\"\"\n Property storing a list of strings associated with the parameters\n necessitated by this model. These are the same as the parameters\n necessitated by the parameters of the core model.\n \"\"\"\n return self.model.parameters\n \n @property\n def num_channels(self):\n \"\"\"\n Property storing the number of channels in the outputs of this model.\n \"\"\"\n if not hasattr(self, '_num_channels'):\n self._num_channels =\\\n self.expander.expanded_space_size(self.model.num_channels)\n return self._num_channels\n \n def __call__(self, parameters):\n \"\"\"\n Gets the expanded curve associated with the given parameters.\n \n returns: array of size (num_channels,)\n \"\"\"\n return self.expander(self.model(parameters))\n \n @property\n def gradient_computable(self):\n \"\"\"\n Property storing whether the gradient of this model is computable. This\n is true as long as the gradient of the core model is computable.\n \"\"\"\n return self.model.gradient_computable\n \n def gradient(self, parameters):\n \"\"\"\n Function which computes the gradient of this model at the given\n parameters.\n \n parameters: numpy.ndarray of parameter values. 
shape: (num_parameters,)\n \n returns: numpy.ndarray of gradient values of this model of shape\n (num_channels, num_parameters)\n \"\"\"\n return self.expander(self.model.gradient(parameters).T).T\n \n @property\n def hessian_computable(self):\n \"\"\"\n Property storing whether the hessian of this model is computable. This\n is true as long as the hessian of the core model is computable.\n \"\"\"\n return self.model.hessian_computable\n \n def hessian(self, parameters):\n \"\"\"\n Function which computes the hessian of this model at the given\n parameters.\n \n parameters: numpy.ndarray of parameter values. shape: (num_parameters,)\n \n returns: numpy.ndarray of hessian values of this model of shape\n (num_channels, num_parameters, num_parameters)\n \"\"\"\n return self.expander(self.model.hessian(parameters).T).T\n \n def fill_hdf5_group(self, group):\n \"\"\"\n Fills the given hdf5 file group with information necessary to reload\n it at a later time.\n \n group: the hdf5 file group to fill with information about this model\n \"\"\"\n group.attrs['class'] = 'ExpandedModel'\n self.model.fill_hdf5_group(group.create_group('model'))\n self.expander.fill_hdf5_group(group.create_group('expander'))\n \n def __eq__(self, other):\n \"\"\"\n Checks if other is equivalent to this model.\n \n other: object to check for equality\n \n returns: False unless other is an ExpandedModel with the same core\n model and expander.\n \"\"\"\n if isinstance(other, ExpandedModel):\n return ((self.model == other.model) and\\\n (self.expander == other.expander))\n else:\n return False\n \n def quick_fit(self, data, error, quick_fit_parameters=[], prior=None):\n \"\"\"\n Performs a quick fit of this model to the given data with (or without)\n a given noise level.\n \n data: 1D array to fit with this expanded model.\n error: if None, the unweighted least square fit is given for\n parameter_mean and parameter_covariance will be\n nonsense\n otherwise, error should a 1D array of same length as data\n quick_fit_parameters: quick fit parameters to pass to underlying model\n prior: either None or a GaussianDistribution object containing priors\n (in space of underlying model)\n \n returns: (parameter_mean, parameter_covariance) which are 1D and 2D\n arrays respectively\n \"\"\"\n if type(error) is type(None):\n error = np.ones_like(data)\n try:\n smaller_data = self.expander.invert(data, error)\n except:\n raise NotImplementedError(\"This ExpandedModel does not have a \" +\\\n \"quick_fit function because the Expander it was made with \" +\\\n \"does not implement the invert method.\")\n smaller_error = self.expander.contract_error(error)\n return self.model.quick_fit(smaller_data, smaller_error,\\\n quick_fit_parameters=quick_fit_parameters, prior=prior)\n \n @property\n def quick_fit_parameters(self):\n \"\"\"\n Property storing the parameters necessary to call quick_fit.\n \"\"\"\n if not hasattr(self, '_quick_fit_parameters'):\n self._quick_fit_parameters = self.model.quick_fit_parameters\n return self._quick_fit_parameters\n \n @property\n def bounds(self):\n \"\"\"\n Property storing the natural bounds of the parameters of this model.\n Since this is just a rebranding of he underlying model, the bounds are\n passed through with no changes.\n \"\"\"\n return self.model.bounds\n\n",
"\"\"\"\nFile: pylinex/model/DirectSumModel.py\nAuthor: Keith Tauscher\nDate: 30 Jun 2018\n\nDescription: File containing a class representing a special SumModel: one whose\n submodels do not overlap in channel space.\n\"\"\"\nimport numpy as np\nimport scipy.linalg as scila\nimport matplotlib.pyplot as pl\nfrom distpy import GaussianDistribution\nfrom ..expander import ExpanderSet\nfrom .BasisModel import BasisModel\nfrom .TruncatedBasisHyperModel import TruncatedBasisHyperModel\nfrom .ExpandedModel import ExpandedModel\nfrom .SumModel import SumModel\n\nclass DirectSumModel(SumModel):\n \"\"\"\n Class representing a special SumModel: one whose submodels do not overlap\n in channel space.\n \"\"\"\n def __init__(self, names, models):\n \"\"\"\n Initializes a new DirectSumModel with the given names and submodels.\n \n names: sequence of string names of submodels\n models: sequence of models corresponding to given names which are\n either ExpandedModel objects or BasisModel objects which\n include an Expander\n \"\"\"\n self.names = names\n self.models = models\n if not ExpanderSet(np.zeros(self.num_channels),\\\n np.ones(self.num_channels),\\\n **dict(zip(self.names, self.expanders))).separable:\n raise ValueError(\"The expanders of the given model were not \" +\\\n \"separable, so you might as well use the SumModel class \" +\\\n \"instead of the DirectSumModel class.\")\n \n @property\n def expanders(self):\n \"\"\"\n Property storing the expanders of the models of this DirectSumModel.\n \"\"\"\n if not hasattr(self, '_expanders'):\n expanders = []\n for model in self.models:\n try:\n expanders.append(model.expander)\n except:\n raise TypeError((\"At least one model (type: {!s}) did \" +\\\n \"not have an expander property.\").format(type(model)))\n self._expanders = expanders\n return self._expanders\n \n def quick_fit(self, data, error, quick_fit_parameters=[], prior=None):\n \"\"\"\n Performs a quick fit to the given data with the error.\n \n data: 1D vector in output space of all expanders\n error: non-negative 1D vector of errors on each data point\n quick_fit_parameters: quick_fit_parameters to use for underlying models\n if necessary\n \n returns: (mean, covariance) where mean and covariance are those of the\n parameter distribution\n \"\"\"\n if type(error) is type(None):\n error = np.ones_like(data)\n if len(quick_fit_parameters) != self.num_quick_fit_parameters:\n raise ValueError(\"quick_fit_parameters length was not equal to \" +\\\n \"the number of quick_fit_parameters of this model.\")\n if type(prior) is type(None):\n priors = [None] * self.num_models\n elif isinstance(prior, GaussianDistribution):\n priors = []\n pars_used = 0\n for (imodel, model) in enumerate(self.models):\n priors.append(prior.marginalize(\\\n slice(pars_used, pars_used + model.num_parameters)))\n pars_used += model.num_parameters\n else:\n raise TypeError(\"prior must either be None or a \" +\\\n \"GaussianDistribution object.\")\n fits = []\n pars_used = 0\n for (imodel, model) in enumerate(self.models):\n these_quick_fit_parameters = quick_fit_parameters[\\\n pars_used:pars_used+model.num_quick_fit_parameters]\n fits.append(model.quick_fit(data, error,\\\n quick_fit_parameters=these_quick_fit_parameters,\\\n prior=priors[imodel]))\n pars_used = pars_used + model.num_quick_fit_parameters\n means = [fit[0] for fit in fits]\n covariances = [fit[1] for fit in fits]\n mean = np.concatenate(means)\n covariance = scila.block_diag(*covariances)\n return (mean, covariance)\n \n @property\n def 
quick_fit_parameters(self):\n \"\"\"\n Property storing the quick_fit parameters\n \"\"\"\n if not hasattr(self, '_quick_fit_parameters'):\n self._quick_fit_parameters = []\n for (iname, name) in enumerate(self.names):\n self._quick_fit_parameters = self._quick_fit_parameters +\\\n ['{0!s}_{1!s}'.format(name, parameter)\\\n for parameter in self.models[iname].quick_fit_parameters]\n return self._quick_fit_parameters\n \n def fill_hdf5_group(self, group):\n \"\"\"\n Fills an hdf5 file group with information about this SumModel.\n \n group: hdf5 file group to fill with information about this SumModel\n \"\"\"\n group.attrs['class'] = 'DirectSumModel'\n subgroup = group.create_group('models')\n for (iname, name) in enumerate(self.names):\n subsubgroup = subgroup.create_group('{:d}'.format(iname))\n subsubgroup.attrs['name'] = name\n self.models[iname].fill_hdf5_group(subsubgroup)\n \n def __eq__(self, other):\n \"\"\"\n Checks for equality between this DirectSumModel and other.\n \n other: object to check for equality\n \n returns: False unless other is a DirectSumModel with the same names and\n submodels\n \"\"\"\n if not isinstance(other, DirectSumModel):\n return False\n if self.names != other.names:\n return False\n return all([(smodel == omodel)\\\n for (smodel, omodel) in zip(self.models, other.models)])\n\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.mean",
"matplotlib.pyplot.figure"
],
[
"numpy.all",
"numpy.array"
],
[
"numpy.ones_like"
],
[
"numpy.ones_like",
"scipy.linalg.block_diag",
"numpy.ones",
"numpy.concatenate",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
AlexSath/prodivis | [
"a0061ee54e09721da4e3f4fe0af4a2b6dec81383"
] | [
"scripts/norm_heatmap.py"
] | [
"import os\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport tools\r\nimport normalize\r\nimport numpy as np\r\nimport argparse\r\nimport sys\r\nimport warnings\r\n\r\n\"\"\"\r\nFilename: norm_heatmap.py\r\nAuthor: Alexandre R. Sathler\r\nDate: 05/04/2022\r\nDescription: Primary interface file users interact with for heatmap production.\r\nMain function handles command line arguments, normalization, and production /\r\noutput of heatmaps.\r\n\"\"\"\r\n\r\ndef create_matrix(tiff_list):\r\n l = []\r\n for tiff in tiff_list:\r\n l.append(cv2.cvtColor(cv2.imread(tiff), cv2.COLOR_BGR2GRAY))\r\n l = np.asarray(l, float)\r\n return l\r\n\r\ndef mult_view(img, z_mult):\r\n out = []\r\n for row in img:\r\n out.extend(np.tile(row, (z_mult,1)))\r\n out = np.asarray(out)\r\n return out\r\n\r\ndef matrix_stack(tiff_list, viewpoints, z_mult):\r\n print(f\"Loading data into matrix...\")\r\n A = create_matrix(tiff_list)\r\n img_list = []\r\n for viewpoint in viewpoints:\r\n print(f\"Generating heatmap for viewpoint {viewpoint}\")\r\n if viewpoint == 'z':\r\n img_list.append(tools.min_max_scale(np.nanmean(A, axis = 0)))\r\n elif viewpoint == 'y':\r\n img_list.append(tools.min_max_scale(mult_view(np.nanmean(A, axis = 1), z_mult)))\r\n elif viewpoint == 'x':\r\n img_list.append(tools.min_max_scale(mult_view(np.nanmean(A, axis = 2), z_mult)))\r\n return img_list\r\n\r\n\r\n\r\n# Function: stack()\r\n# Description: Stacks provided tiffs into composite image from each of the views\r\n# given. 'x', 'y', and 'z' are valid views, representing the 3D axes\r\n# of the image composite.\r\n# Pre-Conditions: Provided list of path-like strings to tiff files, list of desired\r\n# viewpoints, integer multiplier for widening of z-axis on x and y\r\n# views, and boolean 'norm' (currently non-functional)\r\n# Post-Conditions: List of composite image objects generated from each view.\r\ndef stack(tiff_list, viewpoints, z_mult, norm = False):\r\n shape = cv2.cvtColor(cv2.imread(tiff_list[0]), cv2.COLOR_BGR2GRAY).shape\r\n stack_size = len(tiff_list)\r\n img_list = []\r\n for viewpoint in viewpoints:\r\n if viewpoint == 'z':\r\n img_list.append(np.zeros(shape))\r\n elif viewpoint == 'y':\r\n img_list.append(np.zeros((stack_size, shape[0])))\r\n elif viewpoint == 'x':\r\n img_list.append(np.zeros((stack_size, shape[1])))\r\n else:\r\n raise ValueError(f\"Viewpoint {viewpoint} not understood. Please choose from x, y, or z\")\r\n\r\n print(f\"Generating composites for {', '.join(viewpoints[:-1])} and {viewpoints[-1] if len(viewpoints) > 1 else viewpoints[0]} view(s) from {os.path.dirname(tiff_list[0])}...\")\r\n for idx, tiff in enumerate(tiff_list):\r\n img = cv2.imread(tiff)\r\n bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n if bw.shape != shape:\r\n raise ValueError(\"Tiffs in the provided folder not all the same size. 
Cannot compile composite heatmap.\")\r\n\r\n if 'x' in viewpoints or 'y' in viewpoints:\r\n transposed = cv2.transpose(bw)\r\n\r\n for i in range(len(viewpoints)):\r\n if viewpoints[i] == 'z':\r\n img_list[i] += bw\r\n elif viewpoints[i] == 'y':\r\n for jdx, row in enumerate(bw):\r\n img_list[i][idx][jdx] = np.mean(row)\r\n elif viewpoints[i] == 'x':\r\n for jdx, row in enumerate(transposed):\r\n img_list[i][idx][jdx] = np.mean(row)\r\n\r\n print(f\"Completing post-processing for {', '.join(viewpoints[:-1])} and {viewpoints[-1] if len(viewpoints) > 1 else viewpoints[0]} composite(s) from {os.path.dirname(tiff_list[0])}...\")\r\n for idx, viewpoint in enumerate(viewpoints):\r\n if viewpoint == 'z':\r\n img_list[idx] /= stack_size\r\n else:\r\n out = np.zeros((img_list[idx].shape[0] * z_mult, img_list[idx].shape[1]))\r\n for rdx, rows in enumerate(out):\r\n for cdx, cols in enumerate(rows):\r\n og_row = int(np.floor(rdx / z_mult))\r\n out[rdx][cdx] = img_list[idx][og_row][cdx]\r\n img_list[idx] = out\r\n img_list[idx] = tools.min_max_scale(img_list[idx])\r\n\r\n return img_list\r\n\r\n\r\n\r\ndef save_heatmaps(out_dir, prefix, viewpoints, out):\r\n # Using matplotlib to generate a heatmap for every image object generated.\r\n for v, o in zip(viewpoints, out):\r\n output_file = os.path.join(out_dir, f'{prefix}_{v}.tif')\r\n print(f\"Saving heatmap '{output_file}'...\")\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.imshow(o, cmap = 'magma', interpolation = 'nearest')\r\n ax.axis('off')\r\n fig.tight_layout()\r\n fig.savefig(output_file, format='tif', dpi = 1200, bbox_inches = 'tight')\r\n\r\n\r\ndef main():\r\n # Creating command-line parser\r\n parser = argparse.ArgumentParser(description = 'Create a Basic Heatmap from an Image Stack')\r\n parser.add_argument('stack_dir', help = 'Path to directory with image stacks')\r\n parser.add_argument('-o', '--out', help = 'The directory where the heatmaps should be outputted', default = os.path.join(os.path.dirname(__file__), 'heatmaps'))\r\n parser.add_argument('-M', '--multiplier', help = 'The y-multiplier to thicken each slice in front and side views', default = 1)\r\n parser.add_argument('-rn', '--raw-normalization', nargs = '?', help = 'Added when normalization stack should not have threshold / stddev filters', default = False, const = True)\r\n parser.add_argument('-n', '--norm', help = 'The directory where the normalization stack can be found', default = 0)\r\n parser.add_argument('-t', '--threshold', help = 'Pixels under intensity threshold are not considered for normalization', default = 0)\r\n parser.add_argument('-O', '--outlierHandling', help = \"Pixels with intensity values greater than 'O' standard deviations from the mean are ignored\", default = -1)\r\n parser.add_argument('-Zs', '--zStart', help = 'The smallest z value to be used (counts up from 0)', default = 0)\r\n parser.add_argument('-Ze', '--zEnd', help = 'The largest z value to be used (cannot be higher than stack size)', default = -1)\r\n parser.add_argument('-v', '--view', nargs = '*', help = 'Which heatmaps to generate; choose from x, y, and z', default = ['z'])\r\n\r\n '''Deprecated command-line arguments (some may make a return)'''\r\n # parser.add_argument('-a', '--algorithm', help = 'How heatmaps will be generated - 0 indicates stacking (for normal computers), 1 indicates matrices (10gb+ may be needed)', default = 0)\r\n # parser.add_argument('-V', '--visualization', nargs = '*', help = 'Indicates what type of visualization should be generated. 
\"m\" for mean heatmap. \"c\" for cell boundary heatmap. \"v\" for 3D visualization', default = [])\r\n # parser.add_argument('-b', '--cellBoundary', help = \"Directory with cell boundary stack, typically a Phalloidin stain. REQUIRED with '-V'\", default = 0)\r\n # parser.add_argument('-p', '--prototxt', help = \"Path to '.prototxt' file for use with edge detection CNN. REQUIRED with '-V'\", default = '')\r\n # parser.add_argument('-d', '--model', help = \"Path to CNN model file for use with edge detection. REQUIRED with '-V'\", default = '')\r\n\r\n args = parser.parse_args()\r\n\r\n # Ensuring provided directories are valid\r\n stack_dir = tools.norm_dirname(args.stack_dir, 'tiff stack for heatmap', False)\r\n out_dir = tools.norm_dirname(args.out, 'output', True)\r\n norm_dir = tools.norm_dirname(args.norm, 'tiff stack for normalization', False)\r\n # bound_dir = tools.norm_dirname(args.cellBoundary, 'tiff stack with cell boundary stain', False)\r\n\r\n # Extracting other variables\r\n viewpoints = args.view\r\n threshold = args.threshold\r\n z_multiplier = tools.smart_check_int_param(args.multiplier, 'multiplier', 1, 100)\r\n raw_norm = args.raw_normalization\r\n threshold = tools.smart_check_int_param(args.threshold, 'threshold', 0, 50)\r\n n_stddevs = -1 if args.outlierHandling == -1 else tools.smart_check_int_param(args.outlierHandling, 'number of standard deviations', 1, 7)\r\n # algorithm = tools.smart_check_int_param(args.algorithm, 'algorithm', 0, 1)\r\n\r\n # Getting all tiffs from the stack directory\r\n tiffs = tools.get_files(stack_dir)\r\n # Get minimum and maximum z values to be processed\r\n z_min = tools.smart_check_int_param(args.zStart, 'start of z stack bounds', 0, len(tiffs) - 3)\r\n z_max = tools.smart_check_int_param(args.zEnd if args.zEnd != -1 else str(len(tiffs)), 'end of the z stack bounds', z_min + 1, len(tiffs))\r\n\r\n # Processing normalization information\r\n if norm_dir == 0:\r\n raise ValueError('Program call must include \"-n\" with directory that contains the normalization stack')\r\n norms = tools.get_files(norm_dir)\r\n\r\n tiffsM = normalize.mean_normalizer(tiffs, norms, threshold, n_stddevs, args.raw_normalization)\r\n # Prefix represents file prefix for generated heatmaps\r\n prefix = f'{stack_dir.split(os.path.sep)[-2]}_{stack_dir.split(os.path.sep)[-1]}' + \\\r\n f\"_{'n_' if not raw_norm else 'rn_'}{norm_dir.split(os.path.sep)[-1]}{'' if threshold == 0 else f'_t{threshold}'}\" + \\\r\n f\"{'' if n_stddevs == -1 else f'_{n_stddevs}std'}_z{z_min}-{z_max}\"\r\n\r\n\r\n # Creating new directory within the output directory for the heatmaps with\r\n # the specific parameters provided in this run\r\n out_dir = os.path.join(out_dir, f'{stack_dir.split(os.path.sep)[-2]}')\r\n if not os.path.isdir(out_dir):\r\n os.mkdir(out_dir)\r\n out_dir = os.path.join(out_dir, prefix)\r\n tools.smart_make_dir(out_dir)\r\n # Getting list of output heatmap image objects (pixel arrays)\r\n # if algorithm == 1:\r\n # out = matrix_stack(tiffsM[z_min:z_max], viewpoints, z_multiplier)\r\n # else:\r\n out = stack(tiffsM[z_min:z_max], viewpoints, z_multiplier)\r\n save_heatmaps(out_dir, prefix, viewpoints, out)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] | [
[
"numpy.asarray",
"numpy.tile",
"numpy.mean",
"numpy.nanmean",
"numpy.floor",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |