repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---
rajgiriUW/pyUSID | [
"064dcd81d9c42f4eb4782f0a41fd437b3f56f50c",
"064dcd81d9c42f4eb4782f0a41fd437b3f56f50c",
"064dcd81d9c42f4eb4782f0a41fd437b3f56f50c"
] | [
"pyUSID/io/hdf_utils/simple.py",
"tests/io/hdf_utils/test_model.py",
"tests/io/simple_process.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nLower-level and simpler USID-specific HDF5 utilities that facilitate higher-level data operations\n\nCreated on Tue Nov 3 21:14:25 2015\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nimport collections\nfrom warnings import warn\nimport sys\nimport h5py\nimport numpy as np\nimport dask.array as da\nfrom sidpy.hdf.hdf_utils import get_auxiliary_datasets, link_h5_obj_as_alias, \\\n write_simple_attrs, is_editable_h5, validate_h5_objs_in_same_h5_file, \\\n get_attr\nfrom sidpy.hdf.dtype_utils import validate_dtype\nfrom sidpy.hdf import hdf_utils as hut\nfrom sidpy.base.string_utils import validate_single_string_arg, validate_list_of_strings\nfrom sidpy.base.num_utils import contains_integers\nfrom sidpy.base.string_utils import clean_string_att\n\nfrom ..anc_build_utils import build_ind_val_matrices, INDICES_DTYPE, VALUES_DTYPE\nfrom ..dimension import DimType, Dimension\nfrom .base import write_book_keeping_attrs\n\nif sys.version_info.major == 3:\n unicode = str\n\"\"\"\n__all__ = ['assign_group_index', 'check_and_link_ancillary', 'check_for_matching_attrs', 'check_for_old',\n 'check_if_main', 'copy_attributes', 'copy_main_attributes']\n\"\"\"\n\n\ndef get_all_main(parent, verbose=False):\n \"\"\"\n Simple function to recursively print the contents of an hdf5 group\n\n Parameters\n ----------\n parent : :class:`h5py.Group`\n HDF5 Group to search within\n verbose : bool, optional. Default = False\n If true, extra print statements (usually for debugging) are enabled\n\n Returns\n -------\n main_list : list of h5py.Dataset\n The datasets found in the file that meet the 'Main Data' criteria.\n\n \"\"\"\n if not isinstance(parent, (h5py.Group, h5py.File)):\n raise TypeError('parent should be a h5py.File or h5py.Group object')\n\n from ..usi_data import USIDataset\n\n main_list = list()\n\n def __check(name, obj):\n if verbose:\n print(name, obj)\n if isinstance(obj, h5py.Dataset):\n if verbose:\n print(name, 'is an HDF5 Dataset.')\n ismain = check_if_main(obj)\n if ismain:\n if verbose:\n print(name, 'is a `Main` dataset.')\n main_list.append(USIDataset(obj))\n\n if verbose:\n print('Checking the group {} for `Main` datasets.'.format(parent.name))\n parent.visititems(__check)\n\n return main_list\n\n\ndef find_dataset(h5_group, dset_name):\n \"\"\"\n Uses visit() to find all datasets with the desired name\n\n Parameters\n ----------\n h5_group : :class:`h5py.Group`\n Group to search within for the Dataset\n dset_name : str\n Name of the dataset to search for\n\n Returns\n -------\n datasets : list\n List of [Name, object] pairs corresponding to datasets that match `ds_name`.\n\n \"\"\"\n from ..usi_data import USIDataset\n\n datasets = list()\n for obj in hut.find_dataset(h5_group, dset_name):\n try:\n datasets.append(USIDataset(obj))\n except TypeError:\n datasets.append(obj)\n\n return datasets\n\n\ndef find_results_groups(h5_main, tool_name, h5_parent_group=None):\n \"\"\"\n Finds a list of all groups containing results of the process of name\n `tool_name` being applied to the dataset\n\n Parameters\n ----------\n h5_main : h5 dataset reference\n Reference to the target dataset to which the tool was applied\n tool_name : String / unicode\n Name of the tool applied to the target dataset\n h5_parent_group : h5py.Group, optional. Default = None\n Parent group under which the results group will be searched for. 
Use\n this option when the results groups are contained in different HDF5\n file compared to `h5_main`. BY default, this function will search\n within the same group that contains `h5_main`\n\n Returns\n -------\n groups : list of references to :class:`h5py.Group` objects\n groups whose name contains the tool name and the dataset name\n\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if h5_parent_group is not None:\n if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_group = h5_main.parent\n\n dset_name = h5_main.name.split('/')[-1]\n groups = []\n for key in h5_parent_group.keys():\n if dset_name in key and tool_name in key and isinstance(h5_parent_group[key], h5py.Group):\n groups.append(h5_parent_group[key])\n return groups\n\n\ndef check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):\n \"\"\"\n This function will add references to auxilliary datasets as attributes\n of an input dataset.\n If the entries in anc_refs are valid references, they will be added\n as attributes with the name taken from the corresponding entry in\n anc_names.\n If an entry in anc_refs is not a valid reference, the function will\n attempt to get the attribute with the same name from the h5_main\n dataset\n\n Parameters\n ----------\n h5_dset : HDF5 Dataset\n dataset to which the attributes will be written\n anc_names : list of str\n the attribute names to be used\n h5_main : HDF5 Dataset, optional\n dataset from which attributes will be copied if `anc_refs` is None\n anc_refs : list of HDF5 Object References, optional\n references that correspond to the strings in `anc_names`\n\n Returns\n -------\n None\n\n Notes\n -----\n Either `h5_main` or `anc_refs` MUST be provided and `anc_refs` has the\n higher priority if both are present.\n\n \"\"\"\n if not isinstance(h5_dset, h5py.Dataset):\n raise TypeError('h5_dset should be a h5py.Dataset object')\n\n if isinstance(anc_names, (str, unicode)):\n anc_names = [anc_names]\n if isinstance(anc_refs, (h5py.Dataset, h5py.Group, h5py.File,\n h5py.Reference)):\n anc_refs = [anc_refs]\n\n if not isinstance(anc_names, (list, tuple)):\n raise TypeError('anc_names should be a list / tuple')\n if h5_main is not None:\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n validate_h5_objs_in_same_h5_file(h5_dset, h5_main)\n if anc_refs is not None:\n if not isinstance(anc_refs, (list, tuple)):\n raise TypeError('anc_refs should be a list / tuple')\n\n if anc_refs is None and h5_main is None:\n raise ValueError('No objected provided to link as ancillary')\n\n def __check_and_link_single(h5_obj_ref, target_ref_name):\n if isinstance(h5_obj_ref, h5py.Reference):\n # TODO: Same HDF5 file?\n h5_dset.attrs[target_ref_name] = h5_obj_ref\n elif isinstance(h5_obj_ref, (h5py.Dataset, h5py.Group, h5py.File)):\n validate_h5_objs_in_same_h5_file(h5_obj_ref, h5_dset)\n h5_dset.attrs[target_ref_name] = h5_obj_ref.ref\n elif h5_main is not None:\n h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])\n if len(h5_anc) == 1:\n link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)\n else:\n warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))\n warn(warnstring)\n\n if bool(np.iterable(anc_refs) and not 
isinstance(anc_refs, h5py.Dataset)):\n \"\"\"\n anc_refs can be iterated over\n \"\"\"\n for ref_name, h5_ref in zip(anc_names, anc_refs):\n __check_and_link_single(h5_ref, ref_name)\n elif anc_refs is not None:\n \"\"\"\n anc_refs is just a single value\n \"\"\"\n __check_and_link_single(anc_refs, anc_names)\n elif isinstance(anc_names, str) or isinstance(anc_names, unicode):\n \"\"\"\n Single name provided\n \"\"\"\n __check_and_link_single(None, anc_names)\n else:\n \"\"\"\n Iterable of names provided\n \"\"\"\n for name in anc_names:\n __check_and_link_single(None, name)\n\n h5_dset.file.flush()\n\n\ndef validate_main_dset(h5_main, must_be_h5):\n \"\"\"\n Checks to make sure that the provided object is a USID main dataset\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n h5_main : h5py.Dataset or numpy.ndarray or Dask.array.core.array\n object that represents the USID main data\n must_be_h5 : bool\n Set to True if the expecting an h5py.Dataset object.\n Set to False if expecting a numpy.ndarray or Dask.array.core.array\n\n Returns\n -------\n\n \"\"\"\n # Check that h5_main is a dataset\n if must_be_h5:\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))\n else:\n if not isinstance(h5_main, (np.ndarray, da.core.Array)):\n raise TypeError('raw_data should either be a np.ndarray or a da.core.Array')\n\n # Check dimensionality\n if len(h5_main.shape) != 2:\n raise ValueError('Main data is not 2D. Provided object has shape: {}'.format(h5_main.shape))\n\n\ndef validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):\n \"\"\"\n Checks ancillary HDF5 datasets against shape of a main dataset.\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n h5_inds : h5py.Dataset\n HDF5 dataset corresponding to the ancillary Indices dataset\n h5_vals : h5py.Dataset\n HDF5 dataset corresponding to the ancillary Values dataset\n main_shape : array-like\n Shape of the main dataset expressed as a tuple or similar\n is_spectroscopic : bool, Optional. Default = True\n set to True if ``dims`` correspond to Spectroscopic Dimensions.\n False otherwise.\n \"\"\"\n if not isinstance(h5_inds, h5py.Dataset):\n raise TypeError('h5_inds must be a h5py.Dataset object')\n if not isinstance(h5_vals, h5py.Dataset):\n raise TypeError('h5_vals must be a h5py.Dataset object')\n if h5_inds.shape != h5_vals.shape:\n raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '\n 'shape'.format(h5_inds.shape, h5_vals.shape))\n if isinstance(main_shape, (list, tuple)):\n if not contains_integers(main_shape, min_val=1) or \\\n len(main_shape) != 2:\n raise ValueError(\"'main_shape' must be a valid HDF5 dataset shape\")\n else:\n raise TypeError('main_shape should be of the following types:'\n 'h5py.Dataset, tuple, or list. {} provided'\n ''.format(type(main_shape)))\n\n if h5_inds.shape[is_spectroscopic] != main_shape[is_spectroscopic]:\n raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '\n 'should be equal'.format(int(is_spectroscopic),\n h5_inds.shape, main_shape))\n\n\ndef validate_dims_against_main(main_shape, dims, is_spectroscopic=True):\n \"\"\"\n Checks Dimension objects against a given shape for main datasets.\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n main_shape : array-like\n Tuple or list with the shape of the main data\n dims : iterable\n List of Dimension objects\n is_spectroscopic : bool, Optional. 
Default = True\n set to True if ``dims`` correspond to Spectroscopic Dimensions.\n False otherwise.\n \"\"\"\n if not isinstance(main_shape, (list, tuple)):\n raise TypeError('main_shape should be a list or tuple. Provided object'\n ' was of type: {}'.format(type(main_shape)))\n if len(main_shape) != 2:\n raise ValueError('\"main_shape\" should be of length 2')\n contains_integers(main_shape, min_val=1)\n\n if isinstance(dims, Dimension):\n dims = [dims]\n elif not isinstance(dims, (list, tuple)):\n raise TypeError('\"dims\" must be a list or tuple of usid.Dimension '\n 'objects. Provided object was of type: {}'\n ''.format(type(dims)))\n if not all([isinstance(obj, Dimension) for obj in dims]):\n raise TypeError('One or more objects in \"dims\" was not usid.Dimension')\n\n if is_spectroscopic:\n main_dim = 1\n dim_category = 'Spectroscopic'\n else:\n main_dim = 0\n dim_category = 'Position'\n\n # TODO: This is where the dimension type will need to be taken into account\n lhs = main_shape[main_dim]\n rhs = np.product([len(x.values) for x in dims])\n if lhs != rhs:\n raise ValueError(dim_category +\n ' dimensions in main data of size: {} do not match '\n 'with product of values in provided Dimension objects'\n ': {}'.format(lhs, rhs))\n\n\ndef check_if_main(h5_main, verbose=False):\n \"\"\"\n Checks the input dataset to see if it has all the necessary\n features to be considered a Main dataset. This means it is\n 2D and has the following attributes:\n\n * Position_Indices\n * Position_Values\n * Spectroscopic_Indices\n * Spectroscopic_Values\n * quantity\n * units\n\n In addition, the shapes of the ancillary matrices should match with that of\n h5_main\n\n Parameters\n ----------\n h5_main : HDF5 Dataset\n Dataset of interest\n verbose : Boolean (Optional. Default = False)\n Whether or not to print statements\n\n Returns\n -------\n success : Boolean\n True if all tests pass\n\n \"\"\"\n try:\n validate_main_dset(h5_main, True)\n except Exception as exep:\n if verbose:\n print(exep)\n return False\n\n h5_name = h5_main.name.split('/')[-1]\n\n success = True\n\n # Check for Datasets\n dset_names = ['Position_Indices', 'Position_Values',\n 'Spectroscopic_Indices', 'Spectroscopic_Values']\n for name in dset_names:\n try:\n h5_anc_dset = h5_main.file[h5_main.attrs[name]]\n success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])\n except:\n if verbose:\n print('{} not found as an attribute of {}.'.format(name, h5_name))\n return False\n\n attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])\n if not attr_success:\n if verbose:\n print('{} does not have the mandatory \"quantity\" and \"units\" attributes'.format(h5_main.name))\n return False\n\n for attr_name in ['quantity', 'units']:\n val = get_attr(h5_main, attr_name)\n if not isinstance(val, (str, unicode)):\n if verbose:\n print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))\n return False\n\n # Blindly linking four datasets is still not sufficient. 
The sizes need to match:\n anc_shape_match = list()\n h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]\n h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]\n anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))\n for anc_dset in [h5_pos_vals, h5_pos_inds]:\n anc_shape_match.append(np.all(h5_main.shape[0] == anc_dset.shape[0]))\n if not np.all(anc_shape_match):\n if verbose:\n print('The shapes of the Position indices:{}, values:{} datasets did not match with that of the main '\n 'dataset: {}'.format(h5_pos_inds.shape, h5_pos_vals.shape, h5_main.shape))\n return False\n\n anc_shape_match = list()\n h5_spec_inds = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]\n h5_spec_vals = h5_main.file[h5_main.attrs['Spectroscopic_Values']]\n anc_shape_match.append(np.all(h5_spec_inds.shape == h5_spec_vals.shape))\n for anc_dset in [h5_spec_inds, h5_spec_vals]:\n anc_shape_match.append(np.all(h5_main.shape[1] == anc_dset.shape[1]))\n if not np.all(anc_shape_match):\n if verbose:\n print('The shapes of the Spectroscopic indices:{}, values:{} datasets did not match with that of the main '\n 'dataset: {}'.format(h5_spec_inds.shape, h5_spec_vals.shape, h5_main.shape))\n return False\n\n try:\n validate_anc_dset_attrs(h5_pos_inds, h5_pos_vals, is_spec=False)\n except ValueError:\n if verbose:\n print('Attributes of Position datasets did not match')\n return False\n try:\n validate_anc_dset_attrs(h5_spec_inds, h5_spec_vals, is_spec=True)\n except ValueError:\n if verbose:\n print('Attributes of Spectroscopic datasets did not match')\n return False\n\n return success\n\n\ndef validate_anc_dset_attrs(h5_inds, h5_vals, is_spec=True):\n \"\"\"\n Validates the attributes of a pair of indices and values datasets.\n Throws ValueErrors if any rule is not satisfied\n\n Parameters\n ----------\n h5_inds : h5py.Dataset\n Indices dataset\n h5_vals : h5py.Dataset\n Values Dataset\n is_spec : bool, optional. Default = True\n Set to True if spectroscopic. 
Else - Position datasets\n \"\"\"\n def lists_match(left, right):\n if len(left) != len(right):\n return False\n return all([l_it == r_it for l_it, r_it in zip(left, right)])\n\n v_names = get_attr(h5_vals, 'labels')\n v_units = get_attr(h5_vals, 'units')\n i_names = get_attr(h5_inds, 'labels')\n i_units = get_attr(h5_inds, 'units')\n\n for names, units, dset_type in zip([v_names, i_names], [v_units, i_units],\n ['Values', 'Indices']):\n if len(names) != len(units):\n raise ValueError('Length of labels: {} and units: {} for the {} '\n 'dataset do not match'\n ''.format(len(names), len(units), dset_type))\n for i_item, v_item, prop in zip([i_names, i_units], [v_names, v_units],\n ['labels', 'units']):\n if not lists_match(i_item, v_item):\n raise ValueError('The \"{}\" values of the Indices: {} and Values: '\n '{} datasets do not match'.format(prop, i_item,\n v_item))\n\n # Now check the rows / cols nums against size of any attr:\n if h5_inds.shape != h5_vals.shape:\n raise ValueError('Shape of Indices: {} and Values: {} datasets do '\n 'not match'.format(h5_inds.shape, h5_vals.shape))\n dim_ind = 1\n if is_spec:\n dim_ind = 0\n if h5_inds.shape[dim_ind] != len(v_names):\n raise ValueError('Length of mandatory attributes: {} did not match '\n 'dimension: {} of the ancillary dataset of shape: {}'\n ''.format(len(v_names), dim_ind, h5_inds.shape))\n\ndef link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals):\n \"\"\"\n Links the object references to the four position and spectroscopic datasets as\n attributes of `h5_main`\n\n Parameters\n ----------\n h5_main : h5py.Dataset\n 2D Dataset which will have the references added as attributes\n h5_pos_inds : h5py.Dataset\n Dataset that will be linked with the name 'Position_Indices'\n h5_pos_vals : h5py.Dataset\n Dataset that will be linked with the name 'Position_Values'\n h5_spec_inds : h5py.Dataset\n Dataset that will be linked with the name 'Spectroscopic_Indices'\n h5_spec_vals : h5py.Dataset\n Dataset that will be linked with the name 'Spectroscopic_Values'\n\n Returns\n -------\n pyUSID.USIDataset\n USIDataset version of h5_main now that it is a USID Main dataset\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n\n validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, h5_main.shape,\n is_spectroscopic=False)\n validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, h5_main.shape,\n is_spectroscopic=True)\n\n link_h5_obj_as_alias(h5_main, h5_pos_inds, 'Position_Indices')\n link_h5_obj_as_alias(h5_main, h5_pos_vals, 'Position_Values')\n link_h5_obj_as_alias(h5_main, h5_spec_inds, 'Spectroscopic_Indices')\n link_h5_obj_as_alias(h5_main, h5_spec_vals, 'Spectroscopic_Values')\n\n from ..usi_data import USIDataset\n try:\n # If all other conditions are satisfied\n return USIDataset(h5_main)\n except TypeError:\n # If some other conditions are yet to be satisfied\n return h5_main\n\n\ndef check_for_old(h5_base, tool_name, new_parms=None, target_dset=None,\n h5_parent_goup=None, verbose=False):\n \"\"\"\n Check to see if the results of a tool already exist and if they\n were performed with the same parameters.\n\n Parameters\n ----------\n h5_base : h5py.Dataset object\n Dataset on which the tool is being applied to\n tool_name : str\n process or analysis name\n new_parms : dict, optional\n Parameters with which this tool will be performed.\n target_dset : str, optional, default = None\n Name of the dataset whose attributes will be compared against new_parms.\n Default - 
checking against the group\n h5_parent_goup : h5py.Group, optional. Default = None\n The group to search under. Use this option when `h5_base` and\n the potential results groups (within `h5_parent_goup` are located\n in different HDF5 files. Default - search within h5_base.parent\n verbose : bool, optional, default = False\n Whether or not to print debugging statements\n\n Returns\n -------\n group : list\n List of all :class:`h5py.Group` objects with parameters matching those in `new_parms`\n \"\"\"\n if not isinstance(h5_base, h5py.Dataset):\n raise TypeError('h5_base should be a h5py.Dataset object')\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if h5_parent_goup is not None:\n if not isinstance(h5_parent_goup, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_goup = h5_base.parent\n\n if new_parms is None:\n new_parms = dict()\n else:\n if not isinstance(new_parms, dict):\n raise TypeError('new_parms should be a dict')\n if target_dset is not None:\n target_dset = validate_single_string_arg(target_dset, 'target_dset')\n\n matching_groups = []\n groups = find_results_groups(h5_base, tool_name,\n h5_parent_group=h5_parent_goup)\n\n for group in groups:\n if verbose:\n print('Looking at group - {}'.format(group.name.split('/')[-1]))\n\n h5_obj = group\n if target_dset is not None:\n if target_dset in group.keys():\n h5_obj = group[target_dset]\n else:\n if verbose:\n print('{} did not contain the target dataset: {}'.format(group.name.split('/')[-1],\n target_dset))\n continue\n\n if check_for_matching_attrs(h5_obj, new_parms=new_parms, verbose=verbose):\n # return group\n matching_groups.append(group)\n\n return matching_groups\n\n\ndef get_source_dataset(h5_group):\n \"\"\"\n Find the name of the source dataset used to create the input `h5_group`,\n so long as the source dataset is in the same HDF5 file\n\n Parameters\n ----------\n h5_group : :class:`h5py.Group`\n Child group whose source dataset will be returned\n\n Returns\n -------\n h5_source : USIDataset object\n Main dataset from which this group was generated\n\n \"\"\"\n if not isinstance(h5_group, h5py.Group):\n raise TypeError('h5_group should be a h5py.Group object')\n\n h5_parent_group = h5_group.parent\n group_name = h5_group.name.split('/')[-1]\n # What if the group name was not formatted according to Pycroscopy rules?\n name_split = group_name.split('-')\n if len(name_split) != 2:\n raise ValueError(\"The provided group's name could not be split by '-' as expected in \"\n \"SourceDataset-ProcessName_000\")\n h5_source = h5_parent_group[name_split[0]]\n\n if not isinstance(h5_source, h5py.Dataset):\n raise ValueError('Source object was not a dataset!')\n\n from ..usi_data import USIDataset\n\n return USIDataset(h5_source)\n\n\ndef assign_group_index(h5_parent_group, base_name, verbose=False):\n \"\"\"\n Searches the parent h5 group to find the next available index for the group\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` object\n Parent group under which the new group object will be created\n base_name : str or unicode\n Base name of the new group without index\n verbose : bool, optional. 
Default=False\n Whether or not to print debugging statements\n\n Returns\n -------\n base_name : str or unicode\n Base name of the new group with the next available index as a suffix\n\n \"\"\"\n if not isinstance(h5_parent_group, h5py.Group):\n raise TypeError('h5_parent_group should be a h5py.Group object')\n base_name = validate_single_string_arg(base_name, 'base_name')\n\n if len(base_name) == 0:\n raise ValueError('base_name should not be an empty string')\n\n if not base_name.endswith('_'):\n base_name += '_'\n\n temp = [key for key in h5_parent_group.keys()]\n if verbose:\n print('Looking for group names starting with {} in parent containing items: '\n '{}'.format(base_name, temp))\n previous_indices = []\n for item_name in temp:\n if isinstance(h5_parent_group[item_name], h5py.Group) and item_name.startswith(base_name):\n previous_indices.append(int(item_name.replace(base_name, '')))\n previous_indices = np.sort(previous_indices)\n if verbose:\n print('indices of existing groups with the same prefix: {}'.format(previous_indices))\n if len(previous_indices) == 0:\n index = 0\n else:\n index = previous_indices[-1] + 1\n return base_name + '{:03d}'.format(index)\n\n\ndef create_indexed_group(h5_parent_group, base_name):\n \"\"\"\n Creates a group with an indexed name (eg - 'Measurement_012') under h5_parent_group using the provided base_name\n as a prefix for the group's name\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`\n File or group within which the new group will be created\n base_name : str or unicode\n Prefix for the group name. This need not end with a '_'. It will be added automatically\n\n Returns\n -------\n\n \"\"\"\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should be a h5py.File or Group object')\n base_name = validate_single_string_arg(base_name, 'base_name')\n\n group_name = assign_group_index(h5_parent_group, base_name)\n h5_new_group = h5_parent_group.create_group(group_name)\n write_book_keeping_attrs(h5_new_group)\n return h5_new_group\n\n\ndef create_results_group(h5_main, tool_name, h5_parent_group=None):\n \"\"\"\n Creates a h5py.Group object autoindexed and named as 'DatasetName-ToolName_00x'\n\n Parameters\n ----------\n h5_main : h5py.Dataset object\n Reference to the dataset based on which the process / analysis is being performed\n tool_name : string / unicode\n Name of the Process / Analysis applied to h5_main\n h5_parent_group : h5py.Group, optional. Default = None\n Parent group under which the results group will be created. Use this\n option to write results into a new HDF5 file. By default, results will\n be written into the same group containing `h5_main`\n\n Returns\n -------\n h5_group : :class:`h5py.Group`\n Results group which can now house the results datasets\n\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n if h5_parent_group is not None:\n if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_group = h5_main.parent\n\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if '-' in tool_name:\n warn('tool_name should not contain the \"-\" character. 
Reformatted name from:{} to '\n '{}'.format(tool_name, tool_name.replace('-', '_')))\n tool_name = tool_name.replace('-', '_')\n\n group_name = h5_main.name.split('/')[-1] + '-' + tool_name + '_'\n group_name = assign_group_index(h5_parent_group, group_name)\n\n h5_group = h5_parent_group.create_group(group_name)\n\n write_book_keeping_attrs(h5_group)\n\n # Also add some basic attributes like source and tool name. This will allow relaxation of nomenclature restrictions:\n # this are NOT being used right now but will be in the subsequent versions of pyUSID\n write_simple_attrs(h5_group, {'tool': tool_name, 'num_source_dsets': 1})\n # in this case, there is only one source\n if h5_parent_group.file == h5_main.file:\n for dset_ind, dset in enumerate([h5_main]):\n h5_group.attrs['source_' + '{:03d}'.format(dset_ind)] = dset.ref\n\n return h5_group\n\n\ndef copy_main_attributes(h5_main, h5_new):\n \"\"\"\n Copies the units and quantity name from one dataset to another\n\n Parameters\n ----------\n h5_main : h5py.Dataset\n Dataset containing the target attributes\n h5_new : h5py.Dataset\n Dataset to which the target attributes are to be copied\n\n \"\"\"\n for param, param_name in zip([h5_main, h5_new], ['h5_main', 'h5_new']):\n if not isinstance(param, h5py.Dataset):\n raise TypeError(param_name + ' should be a h5py.Dataset object')\n\n for att_name in ['quantity', 'units']:\n if att_name not in h5_main.attrs:\n raise KeyError('Attribute: {} does not exist in {}'.format(att_name, h5_main))\n val = get_attr(h5_main, att_name)\n h5_new.attrs[att_name] = clean_string_att(val)\n\n\ndef create_empty_dataset(source_dset, dtype, dset_name, h5_group=None,\n new_attrs=None, skip_refs=False):\n \"\"\"\n Creates an empty dataset in the h5 file based on the provided dataset in\n the same or specified group\n\n Parameters\n ----------\n source_dset : h5py.Dataset object\n Source object that provides information on the group and shape of the dataset\n dtype : dtype\n Data type of the fit / guess datasets\n dset_name : String / Unicode\n Name of the dataset\n h5_group : :class:`h5py.Group`, optional. Default = None\n Group within which this dataset will be created\n new_attrs : dictionary (Optional)\n Any new attributes that need to be written to the dataset\n skip_refs : boolean, optional\n Should ObjectReferences be skipped when copying attributes from the\n `source_dset`\n\n Returns\n -------\n h5_new_dset : h5py.Dataset object\n Newly created dataset\n\n \"\"\"\n if not isinstance(source_dset, h5py.Dataset):\n raise TypeError('source_deset should be a h5py.Dataset object')\n _ = validate_dtype(dtype)\n if new_attrs is not None:\n if not isinstance(new_attrs, dict):\n raise TypeError('new_attrs should be a dictionary')\n else:\n new_attrs = dict()\n\n if h5_group is None:\n h5_group = source_dset.parent\n else:\n if not isinstance(h5_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_group should be a h5py.Group or h5py.File object')\n\n if source_dset.file != h5_group.file and not skip_refs:\n # Cannot carry over references\n warn('H5 object references will not be copied over since {} is in '\n 'a different HDF5 file as {}'.format(h5_group, source_dset))\n skip_refs = True\n\n dset_name = validate_single_string_arg(dset_name, 'dset_name')\n if '-' in dset_name:\n warn('dset_name should not contain the \"-\" character. 
Reformatted name from:{} to '\n '{}'.format(dset_name, dset_name.replace('-', '_')))\n dset_name = dset_name.replace('-', '_')\n\n kwargs = {'shape': source_dset.shape, 'dtype': dtype, 'compression': source_dset.compression,\n 'chunks': source_dset.chunks}\n\n if source_dset.file.driver == 'mpio':\n if kwargs.pop('compression', None) is not None:\n warn('This HDF5 file has been opened wth the \"mpio\" communicator. '\n 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')\n\n if dset_name in h5_group.keys():\n if isinstance(h5_group[dset_name], h5py.Dataset):\n warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))\n h5_new_dset = h5_group[dset_name]\n # Make sure it has the correct shape and dtype\n if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):\n warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '\n 'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,\n source_dset.shape,\n h5_new_dset.dtype,\n dtype))\n del h5_new_dset, h5_group[dset_name]\n h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)\n else:\n raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),\n h5_group.name))\n\n else:\n h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)\n\n # This should link the ancillary datasets correctly\n h5_new_dset = hut.copy_attributes(source_dset, h5_new_dset,\n skip_refs=skip_refs)\n if source_dset.file != h5_group.file:\n hut.copy_linked_objects(source_dset, h5_new_dset)\n h5_new_dset.attrs.update(new_attrs)\n\n if check_if_main(h5_new_dset):\n from ..usi_data import USIDataset\n\n h5_new_dset = USIDataset(h5_new_dset)\n # update book keeping attributes\n write_book_keeping_attrs(h5_new_dset)\n\n return h5_new_dset\n\n\ndef check_for_matching_attrs(h5_obj, new_parms=None, verbose=False):\n \"\"\"\n Compares attributes in the given H5 object against those in the provided dictionary and returns True if\n the parameters match, and False otherwise\n\n Parameters\n ----------\n h5_obj : h5py object (Dataset or :class:`h5py.Group`)\n Object whose attributes will be compared against new_parms\n new_parms : dict, optional. default = empty dictionary\n Parameters to compare against the attributes present in h5_obj\n verbose : bool, optional, default = False\n Whether or not to print debugging statements\n\n Returns\n -------\n tests: bool\n Whether or not all paramters in new_parms matched with those in h5_obj's attributes\n\n \"\"\"\n if not isinstance(h5_obj, (h5py.Dataset, h5py.Group, h5py.File)):\n raise TypeError('h5_obj should be a h5py.Dataset, h5py.Group, or h5py.File object')\n if new_parms is None:\n new_parms = dict()\n else:\n if not isinstance(new_parms, dict):\n raise TypeError('new_parms should be a dictionary')\n\n tests = []\n for key in new_parms.keys():\n\n if verbose:\n print('Looking for new attribute named: {}'.format(key))\n\n # HDF5 cannot store None as an attribute anyway. 
ignore\n if new_parms[key] is None:\n continue\n\n try:\n old_value = get_attr(h5_obj, key)\n except KeyError:\n # if parameter was not found assume that something has changed\n if verbose:\n print('New parm: {} \\t- new parm not in group *****'.format(key))\n tests.append(False)\n break\n\n if isinstance(old_value, np.ndarray):\n if not isinstance(new_parms[key], collections.Iterable):\n if verbose:\n print('New parm: {} \\t- new parm not iterable unlike old parm *****'.format(key))\n tests.append(False)\n break\n new_array = np.array(new_parms[key])\n if old_value.size != new_array.size:\n if verbose:\n print('New parm: {} \\t- are of different sizes ****'.format(key))\n tests.append(False)\n else:\n try:\n answer = np.allclose(old_value, new_array)\n except TypeError:\n # comes here when comparing string arrays\n # Not sure of a better way\n answer = []\n for old_val, new_val in zip(old_value, new_array):\n answer.append(old_val == new_val)\n answer = np.all(answer)\n if verbose:\n print('New parm: {} \\t- match: {}'.format(key, answer))\n tests.append(answer)\n else:\n \"\"\"if isinstance(new_parms[key], collections.Iterable):\n if verbose:\n print('New parm: {} \\t- new parm is iterable unlike old parm *****'.format(key))\n tests.append(False)\n break\"\"\"\n answer = np.all(new_parms[key] == old_value)\n if verbose:\n print('New parm: {} \\t- match: {}'.format(key, answer))\n tests.append(answer)\n if verbose:\n print('')\n\n return all(tests)\n\n\ndef write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=False, base_name=None,\n slow_to_fast=False):\n \"\"\"\n Creates h5py.Datasets for the position OR spectroscopic indices and values of the data.\n Remember that the contents of the dataset can be changed if need be after the creation of the datasets.\n For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific\n dimension in the Spectroscopic_Values dataset can be manually overwritten.\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`\n Group under which the indices and values datasets will be created\n dimensions : Dimension or array-like of Dimension objects\n Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values\n datasets\n is_spectral : bool, optional. default = True\n Spectroscopic (True) or Position (False)\n verbose : Boolean, optional\n Whether or not to print statements for debugging purposes\n base_name : str or unicode, optional\n Prefix for the datasets. Default: 'Position' when is_spectral is False, 'Spectroscopic' otherwise\n slow_to_fast : bool, Optional. 
Default=False\n Set to True if the dimensions are arranged from slowest varying to fastest varying.\n Set to False otherwise.\n\n Returns\n -------\n h5_spec_inds : h5py.Dataset\n Dataset containing the position indices\n h5_spec_vals : h5py.Dataset\n Dataset containing the value at each position\n\n Notes\n -----\n `steps`, `initial_values`, `labels`, and 'units' must be the same length as\n `dimensions` when they are specified.\n\n Dimensions should be in the order from fastest varying to slowest.\n\n \"\"\"\n if isinstance(dimensions, Dimension):\n dimensions = [dimensions]\n if not isinstance(dimensions, (list, np.ndarray, tuple)):\n raise TypeError('dimensions should be array-like ')\n if not np.all([isinstance(x, Dimension) for x in dimensions]):\n raise TypeError('dimensions should be a sequence of Dimension objects')\n\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should be a h5py.File or Group object')\n if not is_editable_h5(h5_parent_group):\n raise ValueError('The provided h5 object is not valid / open')\n\n if base_name is not None:\n base_name = validate_single_string_arg(base_name, 'base_name')\n if not base_name.endswith('_'):\n base_name += '_'\n else:\n base_name = 'Position_'\n if is_spectral:\n base_name = 'Spectroscopic_'\n\n if not slow_to_fast:\n warn('In the future write_ind_val_dsets will default to requiring dimensions to be arranged from slowest to fastest varying')\n\n # check if the datasets already exist. If they do, there's no point in going any further\n for sub_name in ['Indices', 'Values']:\n if base_name + sub_name in h5_parent_group.keys():\n raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,\n h5_parent_group.name))\n modes = [dim.mode for dim in dimensions]\n sing_mode = np.unique(modes)\n\n if sing_mode.size > 1:\n raise NotImplementedError('Cannot yet work on combinations of modes for Dimensions. 
Consider doing manually')\n\n sing_mode = sing_mode[0]\n\n if sing_mode == DimType.DEFAULT:\n if slow_to_fast:\n # Ensure that the dimensions are arranged from fast to slow instead\n dimensions = dimensions[::-1]\n indices, values = build_ind_val_matrices([dim.values for dim in dimensions],\n is_spectral=is_spectral)\n\n # At this point, dimensions and unit values are arranged from fastest to slowest\n # We want dimensions to be arranged from slowest to fastest:\n rev_func = np.flipud if is_spectral else np.fliplr\n dimensions = dimensions[::-1]\n indices = rev_func(indices)\n values = rev_func(values)\n\n elif sing_mode == DimType.INCOMPLETE:\n lengths = np.unique([len(dim.values) for dim in dimensions])\n if len(lengths) > 1:\n raise ValueError('Values for dimensions not of same length')\n single_dim = np.arange(lengths[0], dtype=INDICES_DTYPE)\n indices = np.tile(single_dim, (2, 1)).T\n values = np.dstack(tuple([dim.values for dim in dimensions])).squeeze()\n\n if is_spectral:\n indices = indices.T\n values = values.T\n else:\n raise NotImplementedError('Cannot yet work on Dependent dimensions')\n\n if verbose:\n print('Indices:')\n print(indices)\n print('Values:')\n print(values)\n\n # Create the Datasets for both Indices and Values\n h5_indices = h5_parent_group.create_dataset(base_name + 'Indices', data=INDICES_DTYPE(indices), dtype=INDICES_DTYPE)\n h5_values = h5_parent_group.create_dataset(base_name + 'Values', data=VALUES_DTYPE(values), dtype=VALUES_DTYPE)\n\n for h5_dset in [h5_indices, h5_values]:\n write_simple_attrs(h5_dset, {'units': [x.units for x in dimensions], 'labels': [x.name for x in dimensions],\n 'type': [dim.mode.value for dim in dimensions]})\n\n warn('pyUSID.io.hdf_utils.simple.write_ind_val_dsets no longer creates'\n 'region references for each dimension. Please use '\n 'pyUSID.io.reg_ref.write_region_references to manually create region '\n 'references')\n\n return h5_indices, h5_values\n\n\ndef write_reduced_anc_dsets(h5_parent_group, h5_inds, h5_vals, dim_name, basename=None, is_spec=None,\n verbose=False):\n \"\"\"\n Creates new Ancillary Indices and Values datasets from the input datasets by dropping the specified dimensions\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or h5py.File\n Group under which the indices and values datasets will be created\n h5_inds : HDF5 Dataset\n Spectroscopic or Positions indices dataset\n h5_vals : HDF5 Dataset\n Spectroscopic or Positions values dataset\n dim_name : str or unicode or list of strings\n Names of the dimension(s) to remove\n basename : str or unicode, Optional\n String to which '_Indices' and '_Values' will be appended to get the names of the new datasets.\n Default = 'Position' or 'Spectroscopic'\n is_spec : bool, optional\n Whether or not the provided ancillary datasets are position or spectroscopic\n The user is recommended to supply this parameter whenever it is known or possible.\n By default, this function will attempt to recognize the answer based on the shape of the datasets.\n verbose : bool, optional. Default = False\n Whether or not to print debugging print statements\n\n Returns\n -------\n h5_inds_new : h5py.Dataset\n Reduced indices dataset\n h5_vals_new : h5py.Dataset\n Reduces values dataset\n\n \"\"\"\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should either be a h5py. 
Group or File object')\n\n for param, param_name in zip([h5_inds, h5_vals], ['h5_inds', 'h5_vals']):\n if not isinstance(param, h5py.Dataset):\n raise TypeError(param_name + ' should be a h5py.Dataset object')\n if dim_name is not None:\n dim_name = validate_list_of_strings(dim_name, 'dim_name')\n\n all_dim_names = list(get_attr(h5_inds, 'labels'))\n for item in dim_name:\n if item not in all_dim_names:\n raise KeyError('Requested dimension: {} not in the list of labels: {}'.format(item, all_dim_names))\n\n ind_mat = h5_inds[()]\n val_mat = h5_vals[()]\n\n if is_spec is None:\n # Attempt to recognize the type automatically\n is_spec = False\n if ind_mat.shape[0] == ind_mat.shape[1]:\n raise ValueError('Unable automatically guess whether the provided datasets are position or '\n 'spectroscopic. Please explicitely specify via the \"is_spec\" boolean kwarg')\n if ind_mat.shape[0] < ind_mat.shape[1]:\n is_spec = True\n else:\n if not isinstance(is_spec, bool):\n raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))\n\n if basename is not None:\n basename = validate_single_string_arg(basename, 'basename')\n if basename.endswith('_'):\n basename = basename[:-1]\n else:\n if is_spec:\n basename = 'Spectroscopic'\n else:\n basename = 'Position'\n\n for sub_name in ['_Indices', '_Values']:\n if basename + sub_name in h5_parent_group.keys():\n raise KeyError('Dataset: {} already exists in provided group: {}'.format(basename + sub_name,\n h5_parent_group.name))\n\n if set(dim_name) != set(all_dim_names):\n # At least one dimension will remain\n\n if verbose:\n print('All Dimensions: {}. Dimensions to be removed: {}'.format(all_dim_names, dim_name))\n\n if not is_spec:\n # Convert to spectral shape\n ind_mat = np.transpose(ind_mat)\n val_mat = np.transpose(val_mat)\n\n # For all dimensions, find where the index = 0\n # basically, we are indexing all dimensions to 0\n first_indices = []\n keep_dim = np.ones(len(all_dim_names), dtype=bool)\n for cur_dim in dim_name:\n dim_ind = all_dim_names.index(cur_dim)\n keep_dim[dim_ind] = False\n # check equality against the minimum value instead of 0 to account for cases when a dimension does not start\n # from 0 (already been sliced) - think of multi-dimensional slicing!\n first_indices.append(ind_mat[dim_ind] == np.min(ind_mat[dim_ind]))\n first_indices = np.vstack(first_indices)\n\n if verbose:\n print('Raw first_indices:')\n print(first_indices)\n print('Dimensions to keep: {}'.format(keep_dim))\n\n step_starts = np.all(first_indices, axis=0)\n\n if verbose:\n print('Columns in dataset to keep:')\n print(step_starts)\n\n '''\n Extract all rows that we want to keep from input indices and values\n '''\n # TODO: handle TypeError: Indexing elements must be in increasing order\n ind_mat = ind_mat[keep_dim, :][:, step_starts]\n val_mat = val_mat[keep_dim, :][:, step_starts]\n\n if not is_spec:\n # Convert back to position shape\n ind_mat = np.transpose(ind_mat)\n val_mat = np.transpose(val_mat)\n\n '''\n Create new Datasets to hold the data\n Name them based on basename\n '''\n h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=ind_mat, dtype=h5_inds.dtype)\n h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=val_mat, dtype=h5_vals.dtype)\n # Extracting the labels from the original spectroscopic data sets\n labels = h5_inds.attrs['labels'][keep_dim]\n # Creating the dimension slices for the new spectroscopic data sets\n\n # Adding the labels and units to the new spectroscopic 
data sets\n for dset in [h5_inds_new, h5_vals_new]:\n write_simple_attrs(dset, {'labels': labels, 'units': h5_inds.attrs['units'][keep_dim]})\n\n else:\n # Remove all dimensions:\n h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=np.array([[0]]), dtype=INDICES_DTYPE)\n h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=np.array([[0]]), dtype=VALUES_DTYPE)\n\n for dset in [h5_inds_new, h5_vals_new]:\n write_simple_attrs(dset, {'labels': ['Single_Step'], 'units': ['a. u.']})\n\n return h5_inds_new, h5_vals_new\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 15:07:16 2017\n\n@author: Suhas Somnath\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals, absolute_import\nimport unittest\nimport os\nimport sys\nimport h5py\nimport numpy as np\nimport dask.array as da\nimport shutil\n\nsys.path.append(\"../../pyUSID/\")\nfrom pyUSID.io import hdf_utils, Dimension, USIDataset\n\nfrom tests.io import data_utils\n\n\nif sys.version_info.major == 3:\n unicode = str\n\n\nclass TestModel(unittest.TestCase):\n\n def setUp(self):\n data_utils.make_beps_file()\n data_utils.make_sparse_sampling_file()\n data_utils.make_incomplete_measurement_file()\n data_utils.make_relaxation_file()\n\n def tearDown(self):\n for file_path in [data_utils.std_beps_path, \n data_utils.sparse_sampling_path,\n data_utils.incomplete_measurement_path,\n data_utils.relaxation_path]:\n data_utils.delete_existing_file(file_path)\n\n\nclass TestGetDimensionality(TestModel):\n\n def test_legal_no_sort(self):\n self.__helper_no_sort(hdf_dsets=True)\n self.__helper_no_sort(hdf_dsets=False)\n\n def __helper_no_sort(self, hdf_dsets=True):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/Position_Indices']]\n expected_shapes = [[7, 2],\n [7],\n [5, 3]]\n for h5_dset, exp_shape in zip(h5_dsets, expected_shapes):\n if not hdf_dsets:\n h5_dset = h5_dset[()]\n self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset)))\n\n def test_legal_w_sort(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/Position_Indices']]\n expected_shapes = [[2, 7],\n [7],\n [3, 5]]\n sort_orders = [[1, 0],\n [0],\n [1, 0]]\n for h5_dset, s_oder, exp_shape in zip(h5_dsets, sort_orders, expected_shapes):\n self.assertTrue(np.all(exp_shape == hdf_utils.get_dimensionality(h5_dset, index_sort=s_oder)))\n\n def test_not_hdf_dset(self):\n for obj in [15, 'srds']:\n with self.assertRaises(TypeError):\n _ = hdf_utils.get_dimensionality(obj)\n\n def test_invalid_sort(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_dset = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_dimensionality(h5_dset, index_sort=[3, 4])\n _ = hdf_utils.get_dimensionality(h5_dset, index_sort=['a', np.arange(5)])\n\n\nclass TestGetSortOrder(TestModel):\n\n def test_invalid_types(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n for obj in ['fdfdfd', h5_f]:\n with self.assertRaises(TypeError):\n _ = hdf_utils.get_sort_order(obj)\n\n def test_simple(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_dsets = [h5_f['/Raw_Measurement/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],\n h5_f['/Raw_Measurement/Position_Indices']]\n expected_order = [[0, 1], [0], [0, 1]]\n for h5_dset, exp_order in zip(h5_dsets, expected_order):\n self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(h5_dset)))\n\n def test_reversed(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_dsets = [np.flipud(h5_f['/Raw_Measurement/Spectroscopic_Indices']),\n h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],\n 
np.fliplr(h5_f['/Raw_Measurement/Position_Indices'])]\n expected_order = [[1, 0], [0], [1, 0]]\n for h5_dset, exp_order in zip(h5_dsets, expected_order):\n self.assertTrue(np.all(exp_order == hdf_utils.get_sort_order(h5_dset)))\n\n\nclass TestGetUnitValues(TestModel):\n\n def test_source_spec_all(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']\n expected = {}\n for dim_name in ['Bias', 'Cycle']:\n expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]\n ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)\n self.assertEqual(len(expected), len(ret_val))\n for key, exp in expected.items():\n self.assertTrue(np.allclose(exp, ret_val[key]))\n\n def test_source_spec_all_explicit(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']\n expected = {}\n for dim_name in ['Bias', 'Cycle']:\n expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]\n ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])\n self.assertEqual(len(expected), len(ret_val))\n for key, exp in expected.items():\n self.assertTrue(np.allclose(exp, ret_val[key]))\n\n def test_illegal_key(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']\n with self.assertRaises(KeyError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Does not exist'])\n\n def test_illegal_dset(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n h5_vals = h5_f['/Raw_Measurement/Ancillary']\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Cycle', 'Bias'])\n\n def test_source_spec_single(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n h5_vals = h5_f['/Raw_Measurement/Spectroscopic_Values']\n expected = {'Bias': h5_f['/Raw_Measurement/Bias'][()]}\n ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Bias')\n self.assertEqual(len(expected), len(ret_val))\n for key, exp in expected.items():\n self.assertTrue(np.allclose(exp, ret_val[key]))\n\n def test_source_pos_all(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Position_Indices']\n h5_vals = h5_f['/Raw_Measurement/Position_Values']\n expected = {}\n for dim_name in ['X', 'Y']:\n expected[dim_name] = h5_f['/Raw_Measurement/' + dim_name][()]\n ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals)\n self.assertEqual(len(expected), len(ret_val))\n for key, exp in expected.items():\n self.assertTrue(np.allclose(exp, ret_val[key]))\n\n def test_source_pos_single(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Position_Indices']\n h5_vals = h5_f['/Raw_Measurement/Position_Values']\n expected = {'Y': h5_f['/Raw_Measurement/Y'][()]}\n ret_val = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names='Y')\n self.assertEqual(len(expected), len(ret_val))\n for key, exp in expected.items():\n self.assertTrue(np.allclose(exp, ret_val[key]))\n\n def test_all_dim_names_not_provided(self):\n with h5py.File(data_utils.std_beps_path, mode='r') 
as h5_f:\n h5_inds = h5_f['/Raw_Measurement/Position_Indices'][()]\n h5_vals = h5_f['/Raw_Measurement/Position_Values'][()]\n\n with self.assertRaises(TypeError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])\n\n def test_dependent_dim(self):\n with h5py.File(data_utils.relaxation_path, mode='r') as h5_f:\n h5_inds = h5_f['/Measurement_000/Channel_000/Spectroscopic_Indices']\n h5_vals = h5_f['/Measurement_000/Channel_000/Spectroscopic_Values']\n spec_dim_names = hdf_utils.get_attr(h5_inds, 'labels')\n ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)\n for dim_ind, dim_name in enumerate(spec_dim_names):\n exp_val = hdf_utils.get_attr(h5_inds, 'unit_vals_dim_' + str(dim_ind))\n act_val = ret_dict[dim_name]\n self.assertTrue(np.allclose(exp_val, act_val))\n\n def test_sparse_samp_no_attr(self):\n # What should the user expect this function to do? throw an error.\n # Without the attribute, this function will have no idea that it is looking at a sparse sampling case\n # it will return the first and second columns of vals blindly\n with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:\n h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']\n h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']\n dim_names = hdf_utils.get_attr(h5_inds, 'labels')\n ret_dict = hdf_utils.get_unit_values(h5_inds, h5_vals)\n for dim_ind, dim_name in enumerate(dim_names):\n exp_val = h5_vals[:, dim_ind]\n act_val = ret_dict[dim_name]\n self.assertTrue(np.allclose(exp_val, act_val))\n\n def test_sparse_samp_w_attr(self):\n # What should the user expect this function to do? throw an error.\n with h5py.File(data_utils.sparse_sampling_path, mode='r') as h5_f:\n h5_inds = h5_f['/Measurement_000/Channel_001/Position_Indices']\n h5_vals = h5_f['/Measurement_000/Channel_001/Position_Values']\n\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])\n\n def test_incomp_dim_no_attr(self):\n # What should the user expect this function to do? 
throw an error.\n # Given that the unit values for each tile are different, it should throw a ValueError for X.\n # Even though we know Y is incomplete, it won't know since it wasn't looking at X.\n # However, now this function will automatically find unit values for ALL dimensions just to catch such scenarios\n with h5py.File(data_utils.incomplete_measurement_path, mode='r') as h5_f:\n h5_inds = h5_f['/Measurement_000/Channel_000/Position_Indices']\n h5_vals = h5_f['/Measurement_000/Channel_000/Position_Values']\n\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals)\n\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['X'])\n\n with self.assertRaises(ValueError):\n _ = hdf_utils.get_unit_values(h5_inds, h5_vals, dim_names=['Y'])\n\n\nclass TestReshapeToNDims(TestModel):\n\n def test_h5_already_sorted(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]\n\n h5_main = h5_f['/Raw_Measurement/source_main']\n # Data is always slowest to fastest\n # Anc dims arranged from fastest to slowest\n # Expecting data dims to be arranged according to anc dims order\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=False,\n lazy=False, verbose=True)\n self.assertTrue(np.all([x == y for x, y in zip(labels, ['X', 'Y', 'Bias', 'Cycle'])]))\n self.assertTrue(success)\n nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)\n self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))\n\n # Anc dims arranged from fastest to slowest\n # Expecting data dims to be arranged according to slow to fast\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main, get_labels=True, sort_dims=True,\n lazy=False, verbose=True)\n self.assertTrue(success)\n self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))\n self.assertTrue(np.allclose(nd_slow_to_fast, n_dim))\n\n def test_h5_manually_provided_anc_dsets_h5(self):\n with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:\n nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]\n nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)\n exp_labs = ['X', 'Y', 'Bias', 'Cycle']\n\n\n h5_main = h5_f['/Raw_Measurement/source_main']\n h5_pos_inds = h5_f['/Raw_Measurement/Position_Indices']\n h5_spec_inds = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n\n # BOTH POS AND SPEC\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,\n h5_pos=h5_pos_inds,\n h5_spec=h5_spec_inds,\n get_labels=True,\n sort_dims=False,\n lazy=False, verbose=True)\n self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))\n self.assertTrue(success)\n self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))\n\n # ONLY POS:\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,\n h5_pos=h5_pos_inds,\n h5_spec=None,\n get_labels=True,\n sort_dims=False,\n lazy=False,\n verbose=True)\n self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))\n self.assertTrue(success)\n self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))\n\n # ONLY SPEC\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_main,\n h5_pos=None,\n h5_spec=h5_spec_inds,\n get_labels=True,\n sort_dims=False,\n lazy=False,\n verbose=True)\n self.assertTrue(np.all([x == y for x, y in zip(labels, exp_labs)]))\n self.assertTrue(success)\n self.assertTrue(np.allclose(nd_fast_to_slow, n_dim))\n\n def test_h5_not_main_dset(self):\n with h5py.File(data_utils.std_beps_path, 
mode='r') as h5_f:\n h5_main = h5_f['/Raw_Measurement/Ancillary']\n h5_pos = h5_f['/Raw_Measurement/Position_Indices']\n h5_spec = h5_f['/Raw_Measurement/Spectroscopic_Indices']\n\n # Not main\n with self.assertRaises(ValueError):\n _ = hdf_utils.reshape_to_n_dims(h5_main)\n\n # Not main and not helping that we are supplign incompatible ancillary datasets\n with self.assertRaises(ValueError):\n _ = hdf_utils.reshape_to_n_dims(h5_main, h5_pos=h5_pos, h5_spec=h5_spec)\n\n # main but we are supplign incompatible ancillary datasets\n h5_main = h5_f['/Raw_Measurement/source_main-Fitter_000/results_main']\n with self.assertRaises(ValueError):\n _ = hdf_utils.reshape_to_n_dims(h5_main, h5_pos=h5_pos, h5_spec=h5_spec)\n\n def build_main_anc_4d(self):\n num_rows = 3\n num_cols = 5\n num_cycles = 2\n num_cycle_pts = 7\n # arrange as fast, slow\n pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),\n np.repeat(np.arange(num_rows), num_cols))).T\n # arrange as fast, slow\n spec_inds = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),\n np.repeat(np.arange(num_cycles), num_cycle_pts)))\n\n # Data is arranged from slowest to fastest\n main_nd = np.zeros(shape=(num_rows, num_cols, num_cycles,\n num_cycle_pts), dtype=np.uint8)\n for row_ind in range(num_rows):\n for col_ind in range(num_cols):\n for cycle_ind in range(num_cycles):\n # for bias_ind in range(num_cycle_pts):\n val = 1E+3*row_ind + 1E+2*col_ind + 1E+1*cycle_ind + np.arange(num_cycle_pts)\n main_nd[row_ind, col_ind, cycle_ind] = val\n\n return main_nd, pos_inds, spec_inds\n\n def base_comparison_4d(self, flip_pos_inds, flip_spec_inds, lazy_in=False,\n lazy_out=False, verbose=False):\n # Generated Data dims from slowest to fastest\n exp_nd_s2f, pos_inds, spec_inds = self.build_main_anc_4d()\n # nd (Y, X, Cycle, Bias)\n main_2d = exp_nd_s2f.reshape(np.prod(exp_nd_s2f.shape[:2]),\n np.prod(exp_nd_s2f.shape[2:]))\n\n # Dimension names arranged from slowest to fastest\n labs_s2f = ['Position Dimension 1', 'Position Dimension 0',\n 'Spectral Dimension 1', 'Spectral Dimension 0']\n\n # Generated ancillary dimensions are arranged from fastest to slowest\n # Unless any flipping is requested, as-is order should be fast to slow\n as_is_nd_order = [1, 0, 3, 2]\n # Unless any flipping is requested, s2f order is already in place\n s2f_lab_order = [0, 1, 2, 3]\n if flip_pos_inds:\n # arranged as slow to fast\n pos_inds = np.fliplr(pos_inds)\n as_is_nd_order = as_is_nd_order[:2][::-1] + as_is_nd_order[2:]\n s2f_lab_order = [1, 0] + s2f_lab_order[2:]\n if flip_spec_inds:\n # arranged as slow to fast\n as_is_nd_order = as_is_nd_order[:2] + as_is_nd_order[2:][::-1]\n s2f_lab_order = s2f_lab_order[:2] + [3, 2]\n spec_inds = np.flipud(spec_inds)\n\n if lazy_in:\n main_2d = da.from_array(main_2d, chunks=main_2d.shape)\n pos_inds = da.from_array(pos_inds, chunks=pos_inds.shape)\n spec_inds = da.from_array(spec_inds, chunks=spec_inds.shape)\n\n n_dim, suc, labs = hdf_utils.reshape_to_n_dims(main_2d,\n h5_pos=pos_inds,\n h5_spec=spec_inds, sort_dims=True,\n get_labels=True,\n lazy=lazy_out,\n verbose=verbose)\n if lazy_out:\n self.assertIsInstance(n_dim, da.core.Array)\n self.assertTrue(np.allclose(exp_nd_s2f, n_dim))\n self.assertTrue(suc)\n # labels were auto-generated and these will be flipped blindly\n exp_labs = np.array(labs_s2f)[s2f_lab_order]\n self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))\n\n if verbose:\n print('~~~~~~~~~~~~~~~~~~~~~~ UNSORTED ~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n n_dim, suc, labs = 
hdf_utils.reshape_to_n_dims(main_2d,\n h5_pos=pos_inds,\n h5_spec=spec_inds,\n sort_dims=False,\n get_labels=True,\n lazy=lazy_out,\n verbose=verbose)\n if lazy_out:\n self.assertIsInstance(n_dim, da.core.Array)\n\n # Rearrange the dim labels and N-dim form from slow-to-fast to:\n if verbose:\n print('N-dim order will be permuted as: {}'.format(as_is_nd_order))\n print('Labels will be permuted as: {}'.format([1, 0, 3, 2]))\n exp_nd = exp_nd_s2f.transpose(tuple(as_is_nd_order))\n \"\"\"\n This is sort of confusing:\n No matter how the pos / spec dims are ordered, the names will always\n start as P0, P1, S0, S1\n \"\"\"\n exp_labs = np.array(labs_s2f)[[1, 0, 3, 2]]\n if verbose:\n print('Expected N-dim shape: {} and labels: {}'\n ''.format(exp_nd.shape, exp_labs))\n\n self.assertTrue(np.allclose(exp_nd, n_dim))\n self.assertTrue(suc)\n self.assertTrue(np.all([x == y for x, y in zip(labs, exp_labs)]))\n\n def test_numpy_ordinary(self):\n self.base_comparison_4d(False, False)\n\n def test_dask_input(self):\n self.base_comparison_4d(False, False, lazy_in=True, lazy_out=False)\n\n def test_dask_output(self):\n self.base_comparison_4d(False, False, lazy_in=False, lazy_out=True)\n\n def test_dask_all(self):\n self.base_comparison_4d(False, False, lazy_in=True, lazy_out=True)\n\n def test_numpy_pos_inds_order_flipped(self):\n self.base_comparison_4d(True, False)\n\n def test_numpy_spec_inds_order_flipped(self):\n # This is the same situation as in BEPS\n self.base_comparison_4d(False, True)\n\n def test_numpy_both_inds_order_flipped(self):\n self.base_comparison_4d(True, True)\n\n def test_dask_all_both_inds_order_flipped(self):\n self.base_comparison_4d(True, True, lazy_in=True, lazy_out=True)\n\n def build_main_anc_1_2d(self, is_2d=True, is_spec=False):\n num_rows = 2\n num_cols = 3\n # arrange as fast, slow\n pos_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),\n np.repeat(np.arange(num_rows), num_cols))).T\n\n # Data is arranged from slowest to fastest\n main_nd = np.random.randint(0, high=255, size=(num_rows, num_cols),\n dtype=np.uint8)\n if not is_2d:\n pos_inds = np.expand_dims(np.arange(num_rows), axis=1)\n main_nd = np.random.randint(0, high=255, size=num_rows,\n dtype=np.uint8)\n\n spec_inds= np.expand_dims([0], axis=0)\n\n if is_spec:\n return main_nd, spec_inds, pos_inds.T\n\n return main_nd, pos_inds, spec_inds\n\n def base_comparison_1_2d(self, is_2d, is_spec, flip_inds,\n lazy_in=False, lazy_out=False):\n # Data is always stored from fastest to slowest\n # By default the ancillary dimensions are arranged from fastest to slowest\n main_nd, pos_inds, spec_inds = self.build_main_anc_1_2d(is_2d=is_2d,\n is_spec=is_spec)\n\n main_2d = main_nd.reshape(-1, 1)\n main_nd_w_sing = np.expand_dims(main_nd, axis=-1)\n if is_spec:\n main_2d = main_2d.T\n main_nd_w_sing = np.expand_dims(main_nd, axis=0)\n\n # nd (Y, X)\n order = [1, 0, 2]\n if is_spec:\n order = [0, 2, 1]\n if flip_inds:\n # arranged as slow to fast\n if is_spec:\n spec_inds = np.flipud(spec_inds)\n order = [0] + order[1:][::-1]\n else:\n pos_inds = np.fliplr(pos_inds)\n order = order[:2][::-1] + [2]\n\n print('2D: {}, Spec: {}, Flip: {}'.format(is_2d, is_spec, flip_inds))\n print('Main data shapes ND: {}, 2D: {}'.format(main_nd.shape, main_2d.shape))\n\n print(main_nd)\n print(main_2d)\n\n if lazy_in:\n main_2d = da.from_array(main_2d, chunks=main_2d.shape)\n\n n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,\n h5_spec=spec_inds,\n sort_dims=True,\n get_labels=False,\n lazy=lazy_out,\n 
verbose=True)\n if lazy_out:\n self.assertIsInstance(n_dim, da.core.Array)\n self.assertTrue(np.allclose(main_nd_w_sing, n_dim))\n\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n n_dim, success = hdf_utils.reshape_to_n_dims(main_2d, h5_pos=pos_inds,\n h5_spec=spec_inds,\n sort_dims=False,\n get_labels=False,\n lazy=lazy_out,\n verbose=True)\n if lazy_out:\n self.assertIsInstance(n_dim, da.core.Array)\n\n if is_2d:\n main_nd_w_sing = main_nd_w_sing.transpose(order)\n\n self.assertTrue(np.allclose(main_nd_w_sing, n_dim))\n\n def test_numpy_ordinary_1d_pos(self):\n self.base_comparison_1_2d(False, False, False)\n\n def test_dask_in_ordinary_1d_pos(self):\n self.base_comparison_1_2d(False, False, False,\n lazy_in=True, lazy_out=False)\n\n def test_dask_out_ordinary_1d_pos(self):\n self.base_comparison_1_2d(False, False, False,\n lazy_in=False, lazy_out=True)\n\n def test_dask_all_ordinary_1d_pos(self):\n self.base_comparison_1_2d(False, False, False,\n lazy_in=True, lazy_out=True)\n\n def test_numpy_ordinary_1d_spec(self):\n self.base_comparison_1_2d(False, True, False)\n\n def test_dask_in_ordinary_1d_spec(self):\n self.base_comparison_1_2d(False, True, False,\n lazy_in=True, lazy_out=False)\n\n def test_dask_out_ordinary_1d_spec(self):\n self.base_comparison_1_2d(False, True, False,\n lazy_in=False, lazy_out=True)\n\n def test_dask_all_ordinary_1d_spec(self):\n self.base_comparison_1_2d(False, True, False,\n lazy_in=True, lazy_out=True)\n\n def test_numpy_ordinary_2d_pos(self):\n self.base_comparison_1_2d(True, False, False)\n\n def test_numpy_ordinary_2d_spec(self):\n self.base_comparison_1_2d(True, True, False)\n\n\n\n def test_h5_both_inds_flipped(self):\n # Flipping both the spec and pos dimensions means that the order in which\n # the data is stored is the same order in which dimensions are arranged\n # In other words, sort should make no difference at all!\n file_path = 'reshape_to_n_dim_sort_required.h5'\n data_utils.delete_existing_file(file_path)\n with h5py.File(file_path, mode='w') as h5_f:\n h5_raw_grp = h5_f.create_group('Raw_Measurement')\n\n main_nd, source_pos_data, source_spec_data = self.build_main_anc_4d()\n\n # arrange as slow, fast instead of fast, slow\n source_pos_data = np.fliplr(source_pos_data)\n # make spectroscopic slow, fast instead of fast, slow\n source_spec_data = np.flipud(source_spec_data)\n\n source_dset_name = 'source_main'\n\n # Arrange from slow to fast\n pos_attrs = {'units': ['nm', 'um'], 'labels': ['Y', 'X']}\n\n #def build_ind_val_dsets(name, inds, attrs, is_spec):\n\n h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices', data=source_pos_data, dtype=np.uint16)\n data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'], is_spec=False)\n data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)\n\n h5_pos_vals = h5_raw_grp.create_dataset('Position_Values', data=source_pos_data, dtype=np.float32)\n data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'], is_spec=False)\n data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)\n\n source_main_data = main_nd.reshape(np.prod(main_nd.shape[:2]),\n np.prod(main_nd.shape[2:]))\n h5_source_main = h5_raw_grp.create_dataset(source_dset_name, data=source_main_data)\n data_utils.write_safe_attrs(h5_source_main, {'units': 'A', 'quantity': 'Current'})\n\n # Remember to set from slow to faset\n source_spec_attrs = {'units': ['', 'V'], 'labels': ['Cycle', 'Bias']}\n\n h5_source_spec_inds = h5_raw_grp.create_dataset('Spectroscopic_Indices', data=source_spec_data,\n 
dtype=np.uint16)\n data_utils.write_aux_reg_ref(h5_source_spec_inds, source_spec_attrs['labels'], is_spec=True)\n data_utils.write_string_list_as_attr(h5_source_spec_inds, source_spec_attrs)\n\n h5_source_spec_vals = h5_raw_grp.create_dataset('Spectroscopic_Values', data=source_spec_data,\n dtype=np.float32)\n data_utils.write_aux_reg_ref(h5_source_spec_vals, source_spec_attrs['labels'], is_spec=True)\n data_utils.write_string_list_as_attr(h5_source_spec_vals, source_spec_attrs)\n\n # Now need to link as main!\n for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds, h5_source_spec_vals]:\n h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref\n\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(h5_source_main, get_labels=True, sort_dims=True,\n lazy=False, verbose=False)\n self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))\n self.assertTrue(np.allclose(main_nd, n_dim))\n\n expected_n_dim = main_nd # np.transpose(main_nd, [1, 0, 3, 2])\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(\n h5_source_main, get_labels=True, sort_dims=False,\n lazy=False, verbose=False)\n self.assertTrue(np.all([x == y for x, y in zip(labels, ['Y', 'X', 'Cycle', 'Bias'])]))\n self.assertTrue(np.allclose(expected_n_dim, n_dim))\n\n os.remove(file_path)\n\n def test_h5_beps_field(self):\n # Flipping both the spec and pos dimensions means that the order in which\n # the data is stored is the same order in which dimensions are arranged\n # In other words, sort should make no difference at all!\n file_path = 'reshape_to_n_dim_sort_required.h5'\n data_utils.delete_existing_file(file_path)\n with h5py.File(file_path, mode='w') as h5_f:\n h5_raw_grp = h5_f.create_group('Raw_Measurement')\n\n num_rows = 3\n num_cols = 5\n num_fields = 2\n num_cycle_pts = 7\n # arrange as fast, slow\n source_pos_data = np.vstack(\n (np.tile(np.arange(num_cols), num_rows),\n np.repeat(np.arange(num_rows), num_cols))).T\n # arrange as fast, slow\n source_spec_data = np.vstack(\n (np.tile(np.arange(num_fields), num_cycle_pts),\n np.repeat(np.arange(num_cycle_pts), num_fields),))\n\n # Data is arranged from slowest to fastest\n\n test = np.vstack((np.arange(num_cycle_pts) * -1 - 1,\n np.arange(num_cycle_pts) + 1))\n\n main_nd = np.zeros(\n shape=(num_rows, num_cols, num_fields, num_cycle_pts),\n dtype=np.float16)\n for row_ind in range(num_rows):\n for col_ind in range(num_cols):\n main_nd[\n row_ind, col_ind] = 1E+3 * row_ind + 1E+2 * col_ind + test\n\n main_nd = main_nd.transpose(0, 1, 3, 2)\n\n source_dset_name = 'source_main'\n\n # Arrange from fast to slow\n pos_attrs = {'units': ['nm', 'um'], 'labels': ['X', 'Y']}\n\n h5_pos_inds = h5_raw_grp.create_dataset('Position_Indices',\n data=source_pos_data,\n dtype=np.uint16)\n data_utils.write_aux_reg_ref(h5_pos_inds, pos_attrs['labels'],\n is_spec=False)\n data_utils.write_string_list_as_attr(h5_pos_inds, pos_attrs)\n\n h5_pos_vals = h5_raw_grp.create_dataset('Position_Values',\n data=source_pos_data,\n dtype=np.float32)\n data_utils.write_aux_reg_ref(h5_pos_vals, pos_attrs['labels'],\n is_spec=False)\n data_utils.write_string_list_as_attr(h5_pos_vals, pos_attrs)\n\n source_main_data = main_nd.reshape(np.prod(main_nd.shape[:2]),\n np.prod(main_nd.shape[2:]))\n h5_source_main = h5_raw_grp.create_dataset(source_dset_name,\n data=source_main_data)\n data_utils.write_safe_attrs(h5_source_main,\n {'units': 'A', 'quantity': 'Current'})\n\n # Remember to set from fast to slow\n source_spec_attrs = {'units': ['', 'V'],\n 'labels': ['Field', 
'Bias']}\n\n h5_source_spec_inds = h5_raw_grp.create_dataset(\n 'Spectroscopic_Indices', data=source_spec_data,\n dtype=np.uint16)\n data_utils.write_aux_reg_ref(h5_source_spec_inds,\n source_spec_attrs['labels'],\n is_spec=True)\n data_utils.write_string_list_as_attr(h5_source_spec_inds,\n source_spec_attrs)\n\n h5_source_spec_vals = h5_raw_grp.create_dataset(\n 'Spectroscopic_Values', data=source_spec_data,\n dtype=np.float32)\n data_utils.write_aux_reg_ref(h5_source_spec_vals,\n source_spec_attrs['labels'],\n is_spec=True)\n data_utils.write_string_list_as_attr(h5_source_spec_vals,\n source_spec_attrs)\n\n # Now need to link as main!\n for dset in [h5_pos_inds, h5_pos_vals, h5_source_spec_inds,\n h5_source_spec_vals]:\n h5_source_main.attrs[dset.name.split('/')[-1]] = dset.ref\n\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(\n h5_source_main, get_labels=True, sort_dims=True,\n lazy=False, verbose=False)\n self.assertTrue(np.all(\n [x == y for x, y in zip(labels, ['Y', 'X', 'Bias', 'Field'])]))\n self.assertTrue(np.allclose(main_nd, n_dim))\n\n expected_n_dim = np.transpose(main_nd, [1, 0, 3, 2])\n n_dim, success, labels = hdf_utils.reshape_to_n_dims(\n h5_source_main, get_labels=True, sort_dims=False,\n lazy=False, verbose=False)\n self.assertTrue(np.all(\n [x == y for x, y in zip(labels, ['X', 'Y', 'Field', 'Bias'])]))\n self.assertTrue(np.allclose(expected_n_dim, n_dim))\n\n os.remove(file_path)\n\n\nclass TestReshapeFromNDims(TestModel):\n\n def test_pos_and_spec_provided(self):\n num_rows = 3\n num_cols = 5\n num_cycles = 2\n num_cycle_pts = 7\n\n # the N dimensional dataset should be arranged in the following order:\n # [positions slowest to fastest, spectroscopic slowest to fastest]\n source_nd = np.zeros(shape=(num_rows, num_cols, num_cycles, num_cycle_pts), dtype=np.float16)\n expected_2d = np.zeros(shape=(num_rows * num_cols, num_cycle_pts * num_cycles), dtype=np.float16)\n for row_ind in range(num_rows):\n for col_ind in range(num_cols):\n for cycle_ind in range(num_cycles):\n for bias_ind in range(num_cycle_pts):\n val = 1E+3 * row_ind + 1E+2 * col_ind + 1E+1 * cycle_ind + bias_ind\n expected_2d[row_ind * num_cols + col_ind, cycle_ind * num_cycle_pts + bias_ind] = val\n source_nd[row_ind, col_ind, cycle_ind, bias_ind] = val\n\n # case 1: Pos and Spec both arranged as slow to fast:\n source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),\n np.tile(np.arange(num_cols), num_rows))).T\n source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),\n np.tile(np.arange(num_cycle_pts), num_cycles)))\n\n ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_pos=source_pos_data, h5_spec=source_spec_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n # case 2: Only Pos arranged as slow to fast:\n main_pos_sorted = np.transpose(source_nd, (0, 1, 3, 2))\n source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),\n np.tile(np.arange(num_cols), num_rows))).T\n source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),\n np.repeat(np.arange(num_cycles), num_cycle_pts),))\n\n ret_2d, success = hdf_utils.reshape_from_n_dims(main_pos_sorted, h5_pos=source_pos_data,\n h5_spec=source_spec_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n # case 3: only Spec arranged as slow to fast:\n main_spec_sorted = np.transpose(source_nd, (1, 0, 2, 3))\n source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),\n 
np.repeat(np.arange(num_rows), num_cols))).T\n source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),\n np.tile(np.arange(num_cycle_pts), num_cycles)))\n\n ret_2d, success = hdf_utils.reshape_from_n_dims(main_spec_sorted, h5_pos=source_pos_data,\n h5_spec=source_spec_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n # case 4: neither pos nor spec arranged as slow to fast:\n main_not_sorted = np.transpose(source_nd, (1, 0, 3, 2))\n source_pos_data = np.vstack((np.tile(np.arange(num_cols), num_rows),\n np.repeat(np.arange(num_rows), num_cols))).T\n source_spec_data = np.vstack((np.tile(np.arange(num_cycle_pts), num_cycles),\n np.repeat(np.arange(num_cycles), num_cycle_pts),))\n\n ret_2d, success = hdf_utils.reshape_from_n_dims(main_not_sorted, h5_pos=source_pos_data,\n h5_spec=source_spec_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n def test_pos_and_spec_may_may_not_be_provided(self):\n num_rows = 3\n num_cols = 5\n num_cycles = 2\n num_cycle_pts = 7\n\n # the N dimensional dataset should be arranged in the following order:\n # [positions slowest to fastest, spectroscopic slowest to fastest]\n source_nd = np.zeros(shape=(num_rows, num_cols, num_cycles, num_cycle_pts), dtype=np.float16)\n expected_2d = np.zeros(shape=(num_rows * num_cols, num_cycle_pts * num_cycles), dtype=np.float16)\n for row_ind in range(num_rows):\n for col_ind in range(num_cols):\n for cycle_ind in range(num_cycles):\n for bias_ind in range(num_cycle_pts):\n val = 1E+3 * row_ind + 1E+2 * col_ind + 1E+1 * cycle_ind + bias_ind\n expected_2d[row_ind * num_cols + col_ind, cycle_ind * num_cycle_pts + bias_ind] = val\n source_nd[row_ind, col_ind, cycle_ind, bias_ind] = val\n\n source_pos_data = np.vstack((np.repeat(np.arange(num_rows), num_cols),\n np.tile(np.arange(num_cols), num_rows))).T\n source_spec_data = np.vstack((np.repeat(np.arange(num_cycles), num_cycle_pts),\n np.tile(np.arange(num_cycle_pts), num_cycles)))\n\n # case 1: only pos provided:\n ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_pos=source_pos_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n # case 2: only spec provided:\n ret_2d, success = hdf_utils.reshape_from_n_dims(source_nd, h5_spec=source_spec_data)\n self.assertTrue(success)\n self.assertTrue(np.allclose(ret_2d, expected_2d))\n\n # case 3: neither pos nor spec provided:\n with self.assertRaises(ValueError):\n _ = hdf_utils.reshape_from_n_dims(source_nd)\n\n\nclass TestWriteMainDataset(TestModel):\n\n def base_write(self, lazy_main=False, empty_main=False, pre_pos=False,\n pre_spec=False, to_new_file=False):\n file_path = 'test.h5'\n new_file_path = 'new.h5'\n data_utils.delete_existing_file(file_path)\n main_data = np.random.rand(15, 14)\n main_data_name = 'Test_Main'\n quantity = 'Current'\n dset_units = 'nA'\n\n pos_sizes = [5, 3]\n pos_names = ['X', 'Y']\n pos_units = ['nm', 'um']\n\n pos_dims = []\n for length, name, units in zip(pos_sizes, pos_names, pos_units):\n pos_dims.append(Dimension(name, units, np.arange(length)))\n pos_data = np.vstack((np.tile(np.arange(5), 3),\n np.repeat(np.arange(3), 5))).T\n\n spec_sizes = [7, 2]\n spec_names = ['Bias', 'Cycle']\n spec_units = ['V', '']\n spec_dims = []\n for length, name, units in zip(spec_sizes, spec_names, spec_units):\n spec_dims.append(Dimension(name, units, np.arange(length)))\n spec_data = np.vstack((np.tile(np.arange(7), 2),\n np.repeat(np.arange(2), 7)))\n\n input_data = 
main_data\n kwargs = {}\n if lazy_main:\n input_data = da.from_array(main_data, chunks=main_data.shape)\n if empty_main:\n input_data = main_data.shape\n kwargs.update({'dtype': np.float16})\n\n with h5py.File(file_path, mode='w') as h5_f:\n if pre_spec:\n h5_spec_inds, h5_spec_vals = hdf_utils.write_ind_val_dsets(\n h5_f, spec_dims, is_spectral=True)\n spec_dims = None\n kwargs.update({'h5_spec_inds': h5_spec_inds,\n 'h5_spec_vals': h5_spec_vals})\n\n if pre_pos:\n h5_pos_inds, h5_pos_vals = hdf_utils.write_ind_val_dsets(h5_f,\n pos_dims,\n is_spectral=False)\n pos_dims = None\n kwargs.update({'h5_pos_inds': h5_pos_inds,\n 'h5_pos_vals': h5_pos_vals})\n\n targ_loc = h5_f\n if to_new_file:\n h5_f_2 = h5py.File(new_file_path, mode='w')\n targ_loc = h5_f_2\n\n usid_main = hdf_utils.write_main_dataset(targ_loc, input_data, main_data_name, quantity, dset_units, pos_dims,\n spec_dims, main_dset_attrs=None, slow_to_fast=False, verbose=True, **kwargs)\n self.assertIsInstance(usid_main, USIDataset)\n self.assertEqual(usid_main.name.split('/')[-1], main_data_name)\n self.assertEqual(usid_main.parent, targ_loc)\n if not empty_main:\n self.assertTrue(np.allclose(main_data, usid_main[()]))\n\n data_utils.validate_aux_dset_pair(self, targ_loc, usid_main.h5_pos_inds, usid_main.h5_pos_vals, pos_names, pos_units,\n pos_data, h5_main=usid_main, is_spectral=False, slow_to_fast=False)\n\n data_utils.validate_aux_dset_pair(self, targ_loc, usid_main.h5_spec_inds, usid_main.h5_spec_vals, spec_names, spec_units,\n spec_data, h5_main=usid_main, is_spectral=True, slow_to_fast=False)\n\n if to_new_file:\n os.remove(new_file_path)\n os.remove(file_path)\n\n def test_numpy_small(self):\n self.base_write()\n\n def test_dask_small(self):\n self.base_write(lazy_main=True)\n\n def test_empty_main(self):\n self.base_write(empty_main=True)\n\n def test_write_main_existing_pos_aux(self):\n self.base_write(pre_pos=True, pre_spec=False)\n\n def test_write_main_existing_pos_aux_diff_file(self):\n self.base_write(pre_pos=True, pre_spec=False, to_new_file=True)\n\n def test_write_main_existing_spec_aux(self):\n self.base_write(pre_pos=False, pre_spec=True)\n\n def test_write_main_existing_spec_aux_diff_file(self):\n self.base_write(pre_pos=False, pre_spec=True, to_new_file=True)\n\n def test_write_main_both_existing_aux(self):\n self.base_write(pre_pos=True, pre_spec=True)\n\n def test_write_main_both_existing_aux_diff_file(self):\n self.base_write(pre_pos=True, pre_spec=True, to_new_file=True)\n\n def test_prod_sizes_mismatch(self):\n file_path = 'test.h5'\n data_utils.delete_existing_file(file_path)\n main_data = np.random.rand(15, 14)\n main_data_name = 'Test_Main'\n quantity = 'Current'\n dset_units = 'nA'\n\n pos_sizes = [5, 15] # too many steps in the Y direction\n pos_names = ['X', 'Y']\n pos_units = ['nm', 'um']\n pos_dims = []\n for length, name, units in zip(pos_sizes, pos_names, pos_units):\n pos_dims.append(Dimension(name, units, np.arange(length)))\n\n spec_sizes = [7, 2]\n spec_names = ['Bias', 'Cycle']\n spec_units = ['V', '']\n spec_dims = []\n for length, name, units in zip(spec_sizes, spec_names, spec_units):\n spec_dims.append(Dimension(name, units, np.arange(length)))\n\n with h5py.File(file_path, mode='w') as h5_f:\n with self.assertRaises(ValueError):\n _ = hdf_utils.write_main_dataset(h5_f, main_data, main_data_name, quantity, dset_units, pos_dims,\n spec_dims)\n os.remove(file_path)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nSimple process class for purpose of testing.\nCreated on: Jul 19, 2019\nAuthor: Emily Costa\n\"\"\"\n\nimport h5py\nfrom pyUSID.processing.process import Process\nimport numpy as np\nfrom pyUSID import hdf_utils\nimport matplotlib.pyplot as plt\n\nclass SimpleProcess(Process):\n def __init__(self, h5_main, verbose=True, **kwargs):\n super(SimpleProcess, self).__init__(h5_main, verbose, **kwargs)\n self.data = None\n self.test_data = None\n self.results = None\n self.chunk_amount = 0\n self.process_name = 'Simple_Process'\n if self.verbose: print('Done with initializing book-keepings')\n\n def test(self):\n if self.mpi_rank > 0:\n return\n ran_ind = np.random.randint(0, high=self.h5_main.shape[0])\n self.test_data = np.fft.fftshift(np.fft.fft(self.h5_main[ran_ind]))\n \n def _create_results_datasets(self):\n self.h5_results_grp = hdf_utils.create_results_group(self.h5_main, self.process_name)\n assert isinstance(self.h5_results_grp, h5py.Group)\n if self.verbose: print('Results group created.')\n self.results = hdf_utils.create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',\n h5_group=self.h5_results_grp)\n #self.results = hdf_utils.write_main_dataset(self.h5_results_grp, (self.h5_main.shape[0], 1), \"Results\", \"Results\", \"Units\", None,\n #usid.io.write_utils.Dimension('arb', '', [1]), h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals, dtype=np.float32)\n if self.verbose: print('Empty main dataset for results written')\n\n def _write_results_chunk(self):\n pos_in_batch = self._get_pixels_in_current_batch()\n print(type(self.data))\n print(type(self.results))\n self.results[pos_in_batch, :] = self.data\n #self.results = self.h5_results_grp['Simple_Data']\n self.chunk_amount = self.chunk_amount + 1\n if self.verbose: print('Chunk {} written.'.format(self.chunk_amount))\n \n def _unit_computation(self):\n self.data = np.fft.fftshift(np.fft.fft(self.data, axis=1), axes=1)\n \n def plot_test(self):\n fig, axis = plt.subplots()\n axis.plot(self.test_data)\n plt.savefig('test_partial.png')\n if self.verbose: print('Test image created.')\n\n\n\n"
] | [
[
"numpy.allclose",
"numpy.unique",
"numpy.min",
"numpy.arange",
"numpy.tile",
"numpy.sort",
"numpy.all",
"numpy.transpose",
"numpy.iterable",
"numpy.array",
"numpy.vstack"
],
[
"numpy.expand_dims",
"numpy.allclose",
"numpy.fliplr",
"numpy.arange",
"numpy.flipud",
"numpy.random.rand",
"numpy.prod",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.fft.fft",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jovian-Dsouza/Avenger_FaceNet | [
"e8bdffd017c9c27d4dc0f347f6992f760f1af5db"
] | [
"test.py"
] | [
"import os\nimport torch\nimport numpy as np\nfrom torchvision import transforms \nfrom torch import nn\nfrom torch.nn import Softmax\nfrom facenet_pytorch import MTCNN\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom loadOpenFace import prepareOpenFace\nfrom collections import OrderedDict\nimport argparse\n\n# Check if CUDA GPU is available\nuseCuda = torch.cuda.is_available()\nif useCuda:\n print('CUDA is avialable')\n device = torch.device('cuda:0')\nelse:\n print('CUDA is not avialable')\n device = torch.device('cpu')\n\ndef load_model_from_chk(chk_path):\n '''Returns model and idx_to_class dictionary'''\n try:\n # Load checkpoint \n checkpoint = torch.load(chk_path, map_location=torch.device('cpu'))\n idx_to_class = checkpoint['idx_to_class']\n\n # Load the inception model\n model = prepareOpenFace(useCuda)\n model.eval()\n n_classes = len(idx_to_class)\n\n # Initialize the classifier model\n classifier_model = nn.Sequential(OrderedDict([\n (\"nn4_small_v2\", model),\n (\"fc\", nn.Linear(736, n_classes))\n ]))\n\n # load the trained parameters\n classifier_model.load_state_dict(checkpoint['model_state_dict'])\n print(\"Model Loaded from %s\" % chk_path)\n return classifier_model, idx_to_class\n\n except FileNotFoundError:\n print(\"Model checkpoint not found %s\" % chk_path)\n return None\n\n# Load mtcnn to align and crop images\nmtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,\n device=device\n)\n\n# tranfomation applied to croped image\nface_transform = transforms.Compose([transforms.Resize(96),\n transforms.ToTensor()])\n\nsoftmax = Softmax(dim=1)\n\n# Load the model \nchk_path = 'models/AvengersClassifier.pth'\nclassifier_model, idx_to_class = load_model_from_chk(chk_path)\nclassifier_model = classifier_model.to(device)\nclassifier_model.eval()\n\n\ndef predict(img_path, prob_theshold = 0.9):\n try:\n img = Image.open(img_path)\n except FileNotFoundError:\n return \n\n # Crop, Align and standardize the Image \n mtcnn_img = mtcnn(img.convert('RGB'))\n\n # If no face then return\n if mtcnn_img is None:\n plt.show()\n print(\"ERROR, Could not detect a face in image\")\n return\n \n # Convert to PIL image\n mtcnn_img = Image.fromarray(np.array(mtcnn_img.permute(1, 2, 0).numpy(), dtype=np.uint8))\n\n # Do the Prediction\n mtcnn_img = face_transform(mtcnn_img).unsqueeze(0)\n mtcnn_img = mtcnn_img.to(device)\n\n with torch.no_grad():\n label = classifier_model(mtcnn_img)\n label = softmax(label) # To Convert the logit to probabilities\n\n prob, pred = label.data.max(1, keepdim=True)\n prob, pred = float(prob), int(pred)\n\n if prob < prob_theshold:\n print(\"UNKNOWN FACE, but similar to %s with %0.2f%% probability\" %\n (idx_to_class[pred], 100 * prob))\n else:\n print(\"%s with %0.2f%% probability\" %\n (idx_to_class[pred], 100 * prob))\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Takes in image path and does prediction')\n parser.add_argument('-p', '--path', help='Image path')\n\n args = parser.parse_args()\n img_path = args.path\n\n print()\n predict(img_path)"
] | [
[
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajinkyakhoche/Object-Detection-Project | [
"3964fd5b445957581205478bb46db58fba3a9fc3",
"3964fd5b445957581205478bb46db58fba3a9fc3"
] | [
"ssd_keras/ssd7_training_inferenceonvideo.py",
"ssd_keras/ssd7_training.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"ssd7_training_inferenceonvideo.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1gMZm_sCuKq7g_cZIGfEcYyKoVw-U7jTX\n\"\"\"\n\n# from IPython.display import clear_output\n\n# ! rm -rf *\n# ! wget -O repo.zip https://github.com/pierluigiferrari/ssd_keras/archive/master.zip\n# ! unzip -o repo.zip\n# ! mv ssd_keras-master/* .\n# ! pip install tqdm\n# ! rm -rf ssd_keras-master\n# clear_output()\n# ! wget https://drive.google.com/uc?export=download&confirm=m0XG&id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D\n# ! rm *.md *.ipynb *.txt *.zip\n# ! ls\n\n\"\"\"# SSD7 Training Tutorial\n\nThis tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.\n\nDisclaimer about SSD7:\nAs you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.\n\"\"\"\n\nfrom keras.optimizers import Adam , SGD\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd7 import build_model\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\nfrom data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\nfrom data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n\n# %matplotlib inline\n\n\"\"\"## 1. Set the model configuration parameters\n\nThe cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to to match ground truth and anchor boxes during the training.\n\nHere are just some comments on a few of the parameters, read the documentation for more details:\n\n* Set the height, width, and number of color channels to whatever you want the model to accept as image input. 
If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. to convert your images to the model input size during training. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.\n* The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.\n* The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. The difference between latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.\n* The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in tSSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.\n* `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer. This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.\n* If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.\n* If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries. It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.\n* In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. 
Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.\n* `normalize_coords` converts all coordinates from absolute coordinate to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.\n\"\"\"\n\nimg_height = 300 # Height of the input images\nimg_width = 480 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nintensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nn_classes = 5 # Number of positive classes\nscales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\naspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\ntwo_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\nsteps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\noffsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled\nnormalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size\n\n\"\"\"## 2. Build or load the model\n\nYou will want to execute either of the two code cells in the subsequent two sub-sections, not both.\n\n### 2.1 Create a new model\n\nIf you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.\n\nThe code cell below does the following things:\n1. It calls the function `build_model()` to build the model.\n2. It optionally loads some weights into the model.\n3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n\n`SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. 
`neg_pos_ratio` and `alpha` are set as in the paper.\n\"\"\"\n\n# 1: Build the Keras model\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = build_model(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n aspect_ratios_per_layer=None,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=intensity_mean,\n divide_by_stddev=intensity_range)\n\n# 2: Optional: Load some weights\n\n#model.load_weights('./ssd7_weights.h5', by_name=True)\n\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model\n\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nsgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False) # Recommed to fix bug [https://github.com/pierluigiferrari/ssd_keras/issues/84]\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)\n\n\"\"\"### 2.2 Load a saved model\n\nIf you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.\n\nThe SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.\n\nThis next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.\n\"\"\"\n\nLOAD_MODEL = True\n\nif LOAD_MODEL:\n # TODO: Set the path to the `.h5` file of the model to be loaded.\n model_path = '../udacity_data/SavedModels/training1/ssd7_epoch-05_loss-2.5061_val_loss-2.5454.h5'\n\n # We need to create an SSDLoss object in order to pass that to the model loader.\n ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\n K.clear_session() # Clear previous models from memory.\n\n model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'compute_loss': ssd_loss.compute_loss})\n\n\"\"\"## 3. Set up the data generators for the training\n\nThe code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.\n\nNote that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. 
Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.\n\nSet the batch size to to your preference and to what your GPU memory allows, it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.\n\nThe `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n\nThe image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for unform-size images could look like. Feel free to put together other image processing chains, you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiting an `SSDDataAugmentation` object and passing that to the generator instead. This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.\n\nAn `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.\n\n### Note:\n\nThe example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).\n\"\"\"\n\n# ! 
wget --header 'Host: doc-08-64-docs.googleusercontent.com' --user-agent 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-GB,en;q=0.5' --referer 'https://drive.google.com/uc?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D&export=download' --header 'Cookie: AUTH_jnah6s13kkbb9peqjnhhrvs24bcqfb6v=06338804252926118732|1535551200000|ag6qrtoegj3b578klq9mv59em3e2u2ll' --header 'Upgrade-Insecure-Requests: 1' 'https://doc-08-64-docs.googleusercontent.com/docs/securesc/dbqrqv6dp9ts3hf02kejajr0k5nf0854/g19v9tjp4on3gskf6gjiibmlmfk52r5q/1535551200000/01021765827329596762/06338804252926118732/1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D?e=download' --output-document 'udacity_driving_datasets.zip'\n# ! unzip udacity_driving_datasets.zip\n# #clear_output()\n# ! rm *.zip\n# ! ls\n\n# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets.\n\n# TODO: Set the paths to your dataset here.\n\n# Images\nimages_dir = '../udacity_data/udacity_driving_datasets/'\n\n# Ground truth\ntrain_labels_filename = '../udacity_data/udacity_driving_datasets/labels_train.csv'\nval_labels_filename = '../udacity_data/udacity_driving_datasets/labels_val.csv'\n\ntrain_dataset.parse_csv(images_dir=images_dir,\n labels_filename=train_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.\n include_classes='all')\n\nval_dataset.parse_csv(images_dir=images_dir,\n labels_filename=val_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],\n include_classes='all')\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. 
If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\n#train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n#val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n\n# 3: Set the batch size.\n\nbatch_size = 16\n\n# 4: Define the image processing chain.\n\ndata_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),\n random_contrast=(0.5, 1.8, 0.5),\n random_saturation=(0.5, 1.8, 0.5),\n random_hue=(18, 0.5),\n random_flip=0.5,\n random_translate=((0.03,0.5), (0.03,0.5), 0.5),\n random_scale=(0.5, 2.0, 0.5),\n n_trials_max=3,\n clip_boxes=True,\n overlap_criterion='area',\n bounds_box_filter=(0.3, 1.0),\n bounds_validator=(0.5, 1.0),\n n_boxes_min=1,\n background=(0,0,0))\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n model.get_layer('classes5').output_shape[1:3],\n model.get_layer('classes6').output_shape[1:3],\n model.get_layer('classes7').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.3,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[data_augmentation_chain],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n\"\"\"## 4. Set the remaining training parameters and train the model\n\nWe've already chosen an optimizer and a learning rate and set the batch size above, now let's set the remaining training parameters.\n\nI'll set a few Keras callbacks below, one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. 
Feel free to add more callbacks if you want TensorBoard summaries or whatever.\n\"\"\"\n\n# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the weights.\nmodel_checkpoint = ModelCheckpoint(filepath='../udacity_data/SavedModels/training2/ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n\ncsv_logger = CSVLogger(filename='ssd7_training_log.csv',\n separator=',',\n append=True)\n\nearly_stopping = EarlyStopping(monitor='val_loss',\n min_delta=0.0,\n patience=10,\n verbose=1)\n\nreduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',\n factor=0.2,\n patience=8,\n verbose=1,\n epsilon=0.001,\n cooldown=0,\n min_lr=0.00001)\n\ncallbacks = [model_checkpoint,\n csv_logger,\n early_stopping,\n reduce_learning_rate]\n\n\"\"\"I'll set one epoch to consist of 1,000 training steps I'll arbitrarily set the number of epochs to 20 here. This does not imply that 20,000 training steps is the right number. Depending on the model, the dataset, the learning rate, etc. you might have to train much longer to achieve convergence, or maybe less.\n\nInstead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.\n\nIn order to only run a partial training and resume smoothly later on, there are a few things you should note:\n1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n3. 
Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want ot save their state somehow if you want to pick up a training exactly where you left off.\n\"\"\"\n\n# TODO: Set the epochs to train for.\n# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 25\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)\n\n\"\"\"Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:\"\"\"\n\nplt.figure(figsize=(20,12))\nplt.plot(history.history['loss'], label='loss')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.legend(loc='upper right', prop={'size': 24});\n\n\"\"\"The validation loss has been decreasing at a similar pace as the training loss, indicating that our model has been learning effectively over the last 30 epochs. We could try to train longer and see if the validation loss can be decreased further. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we will want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss.\n\n### 5. Make predictions\n\nNow let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator which we've already set up above. Feel free to change the batch size.\n\nYou can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.\n\"\"\"\n\n# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images',\n 'processed_labels',\n 'filenames'},\n keep_images_without_gt=False)\n\n# 2: Generate samples\n\nbatch_images, batch_labels, batch_filenames = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(batch_labels[i])\n\n# 3: Make a prediction\n\ny_pred = model.predict(batch_images)\n\n\"\"\"Now let's decode the raw predictions in `y_pred`.\n\nHad we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).\n\n`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. 
In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.\n\"\"\"\n\n# 4: Decode the raw prediction `y_pred`\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded[i])\n\n\"\"\"Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison.\"\"\"\n\n# 5: Draw the predicted boxes onto the image\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_images[i])\n\ncurrent_axis = plt.gca()\n\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\nclasses = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\n\n# Draw the ground truth boxes in green (omit the label for more clarity)\nfor box in batch_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\n# Draw the predicted boxes in blue\nfor box in y_pred_decoded[i]:\n xmin = box[-4]\n ymin = box[-3]\n xmax = box[-2]\n ymax = box[-1]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n\n# !pip install pytube==9.1.0\n\n# from pytube import YouTube\n# YouTube('https://www.youtube.com/watch?v=_Ad7Co55alM').streams.first().download()\n\n# ! ls\n\n# ! 
mkdir output_frames\n\n# Offline video processing\n\n# i = 0\n\n# cap = cv2.VideoCapture('test_videos/Driving in Russia 4K video Car driving in winter.mp4')\n# width = int(cap.get(3))\n# height = int(cap.get(4))\n# property_id = int(cv2.CAP_PROP_FRAME_COUNT) \n# fps = cap.get(cv2.CAP_PROP_FPS)\n# total_frames = int(cv2.VideoCapture.get(cap, property_id))\n\n# # Define the codec and create VideoWriter object\n# fourcc = cv2.VideoWriter_fourcc(*'XVID')\n# out = cv2.VideoWriter('output.avi',fourcc, fps, (width,height))\n\n# # Read until video is completed\n# for j in range(total_frames):\n# print(str(j)+'/'+str(total_frames))\n# # Capture frame-by-frame\n# ret, frame = cap.read()\n# if ret == True:\n \n# frame = frame[...,::-1]\n# frame_resized = cv2.resize(frame, (480, 300)) \n# frame_tensor = np.expand_dims(frame_resized, axis=0)\n# y_pred = model.predict(frame_tensor)\n# y_pred_decoded = decode_detections(y_pred,\n# confidence_thresh=0.5,\n# iou_threshold=0.45,\n# top_k=200,\n# normalize_coords=normalize_coords,\n# img_height=img_height,\n# img_width=img_width)\n \n# plt.figure(figsize=(20,12))\n# plt.imshow(frame_resized)\n\n# current_axis = plt.gca()\n\n# colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\n# classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\n\n# # Draw the predicted boxes in blue\n# for box in y_pred_decoded[i]:\n# xmin = box[-4]\n# ymin = box[-3]\n# xmax = box[-2]\n# ymax = box[-1]\n# color = colors[int(box[0])]\n# label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n \n \n# plt.savefig('output_frames/video_frame'+str(j)+'.png')\n# plt.close('all')\n \n# if j % 10 == 0:\n# clear_output()\n \n \n# # Break the loop\n# else: \n# break\n\n# out.release()\n# cap.release()",
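The resume-training advice in the notebook above (load the full model so the optimizer state survives, then set `initial_epoch`) can be made concrete with a short sketch. This is not part of the original notebook: the checkpoint filename is hypothetical, the `AnchorBoxes` import path assumes the usual ssd_keras repository layout, and `train_generator`, `val_generator`, `callbacks`, `val_dataset_size` and `batch_size` are the objects defined in the cells above.

```python
from math import ceil
from keras.models import load_model
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes  # assumed ssd_keras layout
from keras_loss_function.keras_ssd_loss import SSDLoss

# Loading the *full* model preserves the Adam optimizer state (point 1 above).
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model = load_model('../udacity_data/SavedModels/training2/ssd7_epoch-10.h5',  # hypothetical checkpoint
                   custom_objects={'AnchorBoxes': AnchorBoxes,
                                   'compute_loss': ssd_loss.compute_loss})

# Epochs 0-9 are already done, so resume at epoch 10 and stop after epoch 19
# (point 2 above: `initial_epoch` is zero-based).
history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=1000,
                              epochs=20,
                              initial_epoch=10,
                              callbacks=callbacks,
                              validation_data=val_generator,
                              validation_steps=ceil(val_dataset_size / batch_size))
```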
"\n# coding: utf-8\n\n# # SSD7 Training Tutorial\n# \n# This tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.\n# \n# Disclaimer about SSD7:\n# As you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.\n\n# In[1]:\n\n\nfrom math import ceil\n\nimport numpy as np\nfrom data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger\nfrom keras.optimizers import Adam\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom matplotlib import pyplot as plt\nfrom models.keras_ssd7 import build_model\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections\n\n#get_ipython().magic(u'matplotlib inline')\n\n\n# ## 1. Set the model configuration parameters\n# \n# The cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to to match ground truth and anchor boxes during the training.\n# \n# Here are just some comments on a few of the parameters, read the documentation for more details:\n# \n# * Set the height, width, and number of color channels to whatever you want the model to accept as image input. If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. to convert your images to the model input size during training. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.\n# * The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.\n# * The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. 
The difference between latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.\n# * The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in tSSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.\n# * `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer. This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.\n# * If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.\n# * If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries. It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.\n# * In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.\n# * `normalize_coords` converts all coordinates from absolute coordinate to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.\n\n# In[4]:\n\n\nimg_height = 720 # 720 # Height of the input images\nimg_width = 1280 # 1280 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nintensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nn_classes = 20 # Conecase: 5 # Number of positive classes\nscales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. 
If this is passed, it will override `min_scale` and `max_scale`.\naspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\ntwo_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\nsteps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\noffsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled\nnormalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size\n\n\n# ## 2. Build or load the model\n#\n# You will want to execute either of the two code cells in the subsequent two sub-sections, not both.\n\n# ### 2.1 Create a new model\n#\n# If you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.\n#\n# The code cell below does the following things:\n# 1. It calls the function `build_model()` to build the model.\n# 2. It optionally loads some weights into the model.\n# 3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n#\n# `SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper.\n\n# In[5]:\n\n\n# 1: Build the Keras model\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = build_model(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n aspect_ratios_per_layer=None,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=intensity_mean,\n divide_by_stddev=intensity_range)\n\n# 2: Optional: Load some weights\n\n#model.load_weights('./ssd7_weights.h5', by_name=True)\n\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model\n\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)\n\n\n\"\"\"\n# ### 2.2 Load a saved model\n# \n# If you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.\n# \n# The SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.\n# \n# This next code cell assumes that you want to load a model that was created in 'training' mode. 
If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.\n\n# In[ ]:\n\n\n# # TODO: Set the path to the `.h5` file of the model to be loaded.\n# model_path = 'ssd7.h5'\n\n# # We need to create an SSDLoss object in order to pass that to the model loader.\n# ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\n# K.clear_session() # Clear previous models from memory.\n\n# model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n# 'compute_loss': ssd_loss.compute_loss})\n\n\"\"\"\n\n# ## 3. Set up the data generators for the training\n#\n# The code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.\n#\n# Note that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.\n#\n# Set the batch size to to your preference and to what your GPU memory allows, it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.\n#\n# The `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. 
Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n#\n# The image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for unform-size images could look like. Feel free to put together other image processing chains, you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiting an `SSDDataAugmentation` object and passing that to the generator instead. This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.\n#\n# An `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.\n\n# ### Note:\n#\n# The example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).\n\n# In[14]:\n\n\n# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets.\n\n# TODO: Set the paths to your dataset here.\n\n# For Cone data set\n\n# The directories that contain the images.\ntrain_images_path = '../ConeData/resized_images/train'\nvalidation_images_path = '../ConeData/resized_images/validation'\ntest_images_path = '../ConeData/resized_images/test'\n\n# The paths to the image sets.\ntrain_setFileName = '../ConeData/train_set_filename.txt'\nvalidation_setFileName = '../ConeData/validation_set_filename.txt'\ntest_setFileName = '../ConeData/test_set_filename.txt'\n\n# The directories that contain the annotations.\ntrain_annotations_path = '../ConeData/annotations/train'\nvalidation_annotations_path = '../ConeData/annotations/validation'\ntest_annotations_path = '../ConeData/annotations/test'\n\n\n'''\nclasses = ['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n'''\nclasses = ['background', 'Cone']\n\ntrain_dataset.parse_xml(images_dirs=[train_images_path],\n image_set_filenames=[train_setFileName],\n 
annotations_dirs=[train_annotations_path],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False)\n\nval_dataset.parse_xml(images_dirs=[validation_images_path],\n image_set_filenames=[validation_setFileName],\n annotations_dirs=[validation_annotations_path],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=True,\n ret=False)\n\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\n# train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n# val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n\n\n# In[ ]:\n\n\n# 3: Set the batch size.\n\nbatch_size = 2\n\n# 4: Define the image processing chain.\n\ndata_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),\n random_contrast=(0.5, 1.8, 0.5),\n random_saturation=(0.5, 1.8, 0.5),\n random_hue=(18, 0.5),\n random_flip=0.5,\n random_translate=((0.03,0.5), (0.03,0.5), 0.5),\n random_scale=(0.5, 2.0, 0.5),\n n_trials_max=3,\n clip_boxes=True,\n overlap_criterion='area',\n bounds_box_filter=(0.3, 1.0),\n bounds_validator=(0.5, 1.0),\n n_boxes_min=1,\n background=(0,0,0))\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n model.get_layer('classes5').output_shape[1:3],\n model.get_layer('classes6').output_shape[1:3],\n model.get_layer('classes7').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.3,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[data_augmentation_chain],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n\n# ## 4. 
Set the remaining training parameters and train the model\n# \n# We've already chosen an optimizer and a learning rate and set the batch size above, now let's set the remaining training parameters.\n# \n# I'll set a few Keras callbacks below, one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Feel free to add more callbacks if you want TensorBoard summaries or whatever.\n\n# In[ ]:\n\n\n# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the weights.\nmodel_checkpoint = ModelCheckpoint(filepath='../ConeData/SavedModels/ssd7_cone_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n\ncsv_logger = CSVLogger(filename='../ConeData/ss7_cone_training_log.csv',\n separator=',',\n append=True)\n\n\nearly_stopping = EarlyStopping(monitor='val_loss',\n min_delta=0.0,\n patience=10,\n verbose=1)\n\nreduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',\n factor=0.2,\n patience=8,\n verbose=1,\n epsilon=0.001,\n cooldown=0,\n min_lr=0.00001)\n\ncallbacks = [model_checkpoint,\n csv_logger,\n early_stopping,\n reduce_learning_rate]\n\n\n# I'll set one epoch to consist of 1,000 training steps I'll arbitrarily set the number of epochs to 20 here. This does not imply that 20,000 training steps is the right number. Depending on the model, the dataset, the learning rate, etc. you might have to train much longer to achieve convergence, or maybe less.\n# \n# Instead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.\n# \n# In order to only run a partial training and resume smoothly later on, there are a few things you should note:\n# 1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n# 2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n# 3. 
Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want ot save their state somehow if you want to pick up a training exactly where you left off.\n\n# In[ ]:\n\n\n# TODO: Set the epochs to train for.\n# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 5\nsteps_per_epoch = 4\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)\n\n\n# Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:\n\n# In[ ]:\n\n\nplt.figure(figsize=(20,12))\nplt.plot(history.history['loss'], label='loss')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.legend(loc='upper right', prop={'size': 24});\n\n\n# The validation loss has been decreasing at a similar pace as the training loss, indicating that our model has been learning effectively over the last 30 epochs. We could try to train longer and see if the validation loss can be decreased further. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we will want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss.\n\n# ### 5. Make predictions\n# \n# Now let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator which we've already set up above. Feel free to change the batch size.\n# \n# You can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.\n\n# In[ ]:\n\n\n# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images',\n 'processed_labels',\n 'filenames'},\n keep_images_without_gt=False)\n\n\n# In[ ]:\n\n\n# 2: Generate samples\n\nbatch_images, batch_labels, batch_filenames = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(batch_labels[i])\n\n\n# In[ ]:\n\n\n# 3: Make a prediction\n\ny_pred = model.predict(batch_images)\n\n\n# Now let's decode the raw predictions in `y_pred`.\n# \n# Had we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).\n# \n# `decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. 
In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.\n\n# In[ ]:\n\n\n# 4: Decode the raw prediction `y_pred`\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded[i])\n\n\n# Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison.\n\n# In[ ]:\n\n\n# 5: Draw the predicted boxes onto the image\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_images[i])\n\ncurrent_axis = plt.gca()\n\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\n#classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\nclasses = ['background', 'Cone']\n# Draw the ground truth boxes in green (omit the label for more clarity)\nfor box in batch_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\n# Draw the predicted boxes in blue\nfor box in y_pred_decoded[i]:\n xmin = box[-4]\n ymin = box[-3]\n xmax = box[-2]\n ymax = box[-1]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n\n"
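As a standalone illustration of the decoding pipeline described above (confidence thresholding, greedy per-class non-maximum suppression, top-k filtering), here is a small NumPy sketch. It is not the library's `decode_detections()`; it assumes boxes already converted to rows of `[class_id, conf, xmin, ymin, xmax, ymax]` and only mirrors the filtering steps.

```python
import numpy as np

def iou(box, boxes):
    """IoU between one box and an array of boxes; coordinates are [xmin, ymin, xmax, ymax]."""
    xmin = np.maximum(box[0], boxes[:, 0])
    ymin = np.maximum(box[1], boxes[:, 1])
    xmax = np.minimum(box[2], boxes[:, 2])
    ymax = np.minimum(box[3], boxes[:, 3])
    intersection = np.clip(xmax - xmin, 0, None) * np.clip(ymax - ymin, 0, None)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return intersection / (area_a + area_b - intersection)

def greedy_nms(dets, iou_threshold=0.45):
    """dets: rows of [conf, xmin, ymin, xmax, ymax]. Keep the most confident box,
    drop everything overlapping it above the IoU threshold, repeat."""
    dets = dets[dets[:, 0].argsort()[::-1]]
    kept = []
    while len(dets) > 0:
        kept.append(dets[0])
        overlaps = iou(dets[0, 1:], dets[1:, 1:]) if len(dets) > 1 else np.array([])
        dets = dets[1:][overlaps <= iou_threshold]
    return np.array(kept)

def decode_like(preds, confidence_thresh=0.5, iou_threshold=0.45, top_k=200):
    """preds: rows of [class_id, conf, xmin, ymin, xmax, ymax] for one image."""
    survivors = []
    for class_id in np.unique(preds[:, 0]):
        cls = preds[(preds[:, 0] == class_id) & (preds[:, 1] > confidence_thresh)]
        for row in (greedy_nms(cls[:, 1:], iou_threshold) if len(cls) else []):
            survivors.append([class_id] + row.tolist())
    survivors = np.array(survivors)
    if len(survivors) > top_k:                      # keep only the top-k most confident boxes
        survivors = survivors[survivors[:, 1].argsort()[::-1][:top_k]]
    return survivors
```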
] | [
[
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.gca",
"numpy.linspace",
"numpy.set_printoptions",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.gca",
"numpy.linspace",
"numpy.set_printoptions",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
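One follow-up on the `CSVLogger` callback used in both notebooks above: because the history is streamed to disk after every epoch, the loss curves can be re-plotted even if the kernel died mid-training. A minimal sketch, assuming the default Keras `CSVLogger` column names (`epoch`, `loss`, `val_loss`) and the log file path configured in the Cone notebook:

```python
import csv
import matplotlib.pyplot as plt

epochs, loss, val_loss = [], [], []
with open('../ConeData/ss7_cone_training_log.csv') as f:   # path as set for the CSVLogger above
    for row in csv.DictReader(f):
        epochs.append(int(row['epoch']))
        loss.append(float(row['loss']))
        val_loss.append(float(row['val_loss']))

plt.figure(figsize=(20, 12))
plt.plot(epochs, loss, label='loss')
plt.plot(epochs, val_loss, label='val_loss')
plt.legend(loc='upper right', prop={'size': 24})
plt.show()
```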
altaregos/NiaPy | [
"74f1b2827778d9086603f4a8cb523f6b5537212a",
"74f1b2827778d9086603f4a8cb523f6b5537212a"
] | [
"niapy/problems/zakharov.py",
"niapy/algorithms/modified/saba.py"
] | [
"# encoding=utf8\n\"\"\"Implementations of Zakharov function.\"\"\"\n\nimport numpy as np\nfrom niapy.problems.problem import Problem\n\n__all__ = ['Zakharov']\n\n\nclass Zakharov(Problem):\n r\"\"\"Implementations of Zakharov functions.\n\n Date: 2018\n\n Author: Klemen Berkovič\n\n License: MIT\n\n Function:\n **Zakharov Function**\n\n :math:`f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4`\n\n **Input domain:**\n The function can be defined on any input domain but it is usually\n evaluated on the hypercube :math:`x_i ∈ [-5, 10]`, for all :math:`i = 1, 2,..., D`.\n\n **Global minimum:**\n :math:`f(\\textbf{x}^*) = 0` at :math:`\\textbf{x}^* = (0, \\cdots, 0)`\n\n LaTeX formats:\n Inline:\n $f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4$\n\n Equation:\n \\begin{equation} f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4 \\end{equation}\n\n Domain:\n $-5 \\leq x_i \\leq 10$\n\n Reference:\n https://www.sfu.ca/~ssurjano/zakharov.html\n\n \"\"\"\n\n def __init__(self, dimension=4, lower=-5.0, upper=10.0, *args, **kwargs):\n r\"\"\"Initialize Zakharov problem..\n\n Args:\n dimension (Optional[int]): Dimension of the problem.\n lower (Optional[Union[float, Iterable[float]]]): Lower bounds of the problem.\n upper (Optional[Union[float, Iterable[float]]]): Upper bounds of the problem.\n\n See Also:\n :func:`niapy.problems.Problem.__init__`\n\n \"\"\"\n super().__init__(dimension, lower, upper, *args, **kwargs)\n\n @staticmethod\n def latex_code():\n r\"\"\"Return the latex code of the problem.\n\n Returns:\n str: Latex code.\n\n \"\"\"\n return r'''$f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4$'''\n\n def _evaluate(self, x):\n sum1 = np.sum(x * x)\n sum2 = np.sum(0.5 * np.arange(1, self.dimension + 1) * x)\n return sum1 + sum2 ** 2 + sum2 ** 4\n",
"# encoding=utf8\nimport logging\n\nimport numpy as np\n\nfrom niapy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('niapy.algorithms.modified')\nlogger.setLevel('INFO')\n\n__all__ = ['AdaptiveBatAlgorithm', 'SelfAdaptiveBatAlgorithm']\n\n\nclass AdaptiveBatAlgorithm(Algorithm):\n r\"\"\"Implementation of Adaptive bat algorithm.\n\n Algorithm:\n Adaptive bat algorithm\n\n Date:\n April 2019\n\n Authors:\n Klemen Berkovič\n\n License:\n MIT\n\n Attributes:\n Name (List[str]): List of strings representing algorithm name.\n epsilon (float): Scaling factor.\n alpha (float): Constant for updating loudness.\n pulse_rate (float): Pulse rate.\n min_frequency (float): Minimum frequency.\n max_frequency (float): Maximum frequency.\n\n See Also:\n * :class:`niapy.algorithms.Algorithm`\n\n \"\"\"\n\n Name = ['AdaptiveBatAlgorithm', 'ABA']\n\n @staticmethod\n def info():\n r\"\"\"Get basic information about the algorithm.\n\n Returns:\n str: Basic information.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.info`\n\n \"\"\"\n return r\"\"\"TODO\"\"\"\n\n def __init__(self, population_size=100, starting_loudness=0.5, epsilon=0.001, alpha=1.0, pulse_rate=0.5,\n min_frequency=0.0, max_frequency=2.0, *args, **kwargs):\n \"\"\"Initialize AdaptiveBatAlgorithm.\n\n Args:\n population_size (Optional[int]): Population size.\n starting_loudness (Optional[float]): Starting loudness.\n epsilon (Optional[float]): Scaling factor.\n alpha (Optional[float]): Constant for updating loudness.\n pulse_rate (Optional[float]): Pulse rate.\n min_frequency (Optional[float]): Minimum frequency.\n max_frequency (Optional[float]): Maximum frequency.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.__init__`\n\n \"\"\"\n super().__init__(population_size, *args, **kwargs)\n self.starting_loudness = starting_loudness\n self.epsilon = epsilon\n self.alpha = alpha\n self.pulse_rate = pulse_rate\n self.min_frequency = min_frequency\n self.max_frequency = max_frequency\n\n def set_parameters(self, population_size=100, starting_loudness=0.5, epsilon=0.001, alpha=1.0, pulse_rate=0.5,\n min_frequency=0.0, max_frequency=2.0, **kwargs):\n r\"\"\"Set the parameters of the algorithm.\n\n Args:\n population_size (Optional[int]): Population size.\n starting_loudness (Optional[float]): Starting loudness.\n epsilon (Optional[float]): Scaling factor.\n alpha (Optional[float]): Constant for updating loudness.\n pulse_rate (Optional[float]): Pulse rate.\n min_frequency (Optional[float]): Minimum frequency.\n max_frequency (Optional[float]): Maximum frequency.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.set_parameters`\n\n \"\"\"\n super().set_parameters(population_size=population_size, **kwargs)\n self.starting_loudness = starting_loudness\n self.epsilon = epsilon\n self.alpha = alpha\n self.pulse_rate = pulse_rate\n self.min_frequency = min_frequency\n self.max_frequency = max_frequency\n\n def get_parameters(self):\n r\"\"\"Get algorithm parameters.\n\n Returns:\n Dict[str, Any]: Arguments values.\n\n See Also:\n * :func:`niapy.algorithms.algorithm.Algorithm.get_parameters`\n\n \"\"\"\n d = super().get_parameters()\n d.update({\n 'starting_loudness': self.starting_loudness,\n 'epsilon': self.epsilon,\n 'alpha': self.alpha,\n 'pulse_rate': self.pulse_rate,\n 'min_frequency': self.min_frequency,\n 'max_frequency': self.max_frequency\n })\n return d\n\n def init_population(self, task):\n r\"\"\"Initialize the starting population.\n\n Args:\n task (Task): Optimization task\n\n Returns:\n 
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population.\n 2. New population fitness/function values.\n 3. Additional arguments:\n * loudness (float): Loudness.\n * velocities (numpy.ndarray[float]): Velocity.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.init_population`\n\n \"\"\"\n population, fitness, d = super().init_population(task)\n loudness = np.full(self.population_size, self.starting_loudness)\n velocities = np.zeros((self.population_size, task.dimension))\n d.update({'loudness': loudness, 'velocities': velocities})\n return population, fitness, d\n\n def local_search(self, best, loudness, task, **kwargs):\n r\"\"\"Improve the best solution according to the Yang (2010).\n\n Args:\n best (numpy.ndarray): Global best individual.\n loudness (float): Loudness.\n task (Task): Optimization task.\n\n Returns:\n numpy.ndarray: New solution based on global best individual.\n\n \"\"\"\n return task.repair(best + self.epsilon * loudness * self.normal(0, 1, task.dimension), rng=self.rng)\n\n def update_loudness(self, loudness):\n r\"\"\"Update loudness when the prey is found.\n\n Args:\n loudness (float): Loudness.\n\n Returns:\n float: New loudness.\n\n \"\"\"\n new_loudness = loudness * self.alpha\n return new_loudness if new_loudness > 1e-13 else self.starting_loudness\n\n def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):\n r\"\"\"Core function of Bat Algorithm.\n\n Args:\n task (Task): Optimization task.\n population (numpy.ndarray): Current population\n population_fitness (numpy.ndarray[float]): Current population fitness/function values\n best_x (numpy.ndarray): Current best individual\n best_fitness (float): Current best individual function/fitness value\n params (Dict[str, Any]): Additional algorithm arguments\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population\n 2. New population fitness/function values\n 3. Additional arguments:\n * loudness (numpy.ndarray[float]): Loudness.\n * velocities (numpy.ndarray[float]): Velocities.\n\n \"\"\"\n loudness = params.pop('loudness')\n velocities = params.pop('velocities')\n\n for i in range(self.population_size):\n frequency = self.min_frequency + (self.max_frequency - self.min_frequency) * self.random()\n velocities[i] += (population[i] - best_x) * frequency\n if self.random() > self.pulse_rate:\n solution = self.local_search(best=best_x, loudness=loudness[i], task=task, i=i, Sol=population)\n else:\n solution = task.repair(population[i] + velocities[i], rng=self.rng)\n new_fitness = task.eval(solution)\n if (new_fitness <= population_fitness[i]) and (self.random() < loudness[i]):\n population[i], population_fitness[i] = solution, new_fitness\n if new_fitness <= best_fitness:\n best_x, best_fitness, loudness[i] = solution.copy(), new_fitness, self.update_loudness(loudness[i])\n return population, population_fitness, best_x, best_fitness, {'loudness': loudness, 'velocities': velocities}\n\n\nclass SelfAdaptiveBatAlgorithm(AdaptiveBatAlgorithm):\n r\"\"\"Implementation of Hybrid bat algorithm.\n\n Algorithm:\n Self Adaptive Bat Algorithm\n\n Date:\n April 2019\n\n Author:\n Klemen Berkovič\n\n License:\n MIT\n\n Reference paper:\n Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniški vestnik, 2013. 
1-7.\n\n Attributes:\n Name (List[str]): List of strings representing algorithm name.\n A_l (Optional[float]): Lower limit of loudness.\n A_u (Optional[float]): Upper limit of loudness.\n r_l (Optional[float]): Lower limit of pulse rate.\n r_u (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :class:`niapy.algorithms.basic.BatAlgorithm`\n\n \"\"\"\n\n Name = ['SelfAdaptiveBatAlgorithm', 'SABA']\n\n @staticmethod\n def info():\n r\"\"\"Get basic information about the algorithm.\n\n Returns:\n str: Basic information.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.info`\n\n \"\"\"\n return r\"\"\"Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniški vestnik, 2013. 1-7.\"\"\"\n\n def __init__(self, min_loudness=0.9, max_loudness=1.0, min_pulse_rate=0.001, max_pulse_rate=0.1, tao_1=0.1,\n tao_2=0.1, *args, **kwargs):\n \"\"\"Initialize SelfAdaptiveBatAlgorithm.\n\n Args:\n min_loudness (Optional[float]): Lower limit of loudness.\n max_loudness (Optional[float]): Upper limit of loudness.\n min_pulse_rate (Optional[float]): Lower limit of pulse rate.\n max_pulse_rate (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.__init__`\n\n \"\"\"\n super().__init__(*args, **kwargs)\n self.min_loudness = min_loudness\n self.max_loudness = max_loudness\n self.min_pulse_rate = min_pulse_rate\n self.max_pulse_rate = max_pulse_rate\n self.tao_1 = tao_1\n self.tao_2 = tao_2\n\n def set_parameters(self, min_loudness=0.9, max_loudness=1.0, min_pulse_rate=0.001, max_pulse_rate=0.1, tao_1=0.1, tao_2=0.1, **kwargs):\n r\"\"\"Set core parameters of HybridBatAlgorithm algorithm.\n\n Args:\n min_loudness (Optional[float]): Lower limit of loudness.\n max_loudness (Optional[float]): Upper limit of loudness.\n min_pulse_rate (Optional[float]): Lower limit of pulse rate.\n max_pulse_rate (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.set_parameters`\n\n \"\"\"\n super().set_parameters(**kwargs)\n self.min_loudness = min_loudness\n self.max_loudness = max_loudness\n self.min_pulse_rate = min_pulse_rate\n self.max_pulse_rate = max_pulse_rate\n self.tao_1 = tao_1\n self.tao_2 = tao_2\n\n def get_parameters(self):\n r\"\"\"Get parameters of the algorithm.\n\n Returns:\n Dict[str, Any]: Parameters of the algorithm.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.get_parameters`\n\n \"\"\"\n d = AdaptiveBatAlgorithm.get_parameters(self)\n d.update({\n 'min_loudness': self.min_loudness,\n 'max_loudness': self.max_loudness,\n 'min_pulse_rate': self.min_pulse_rate,\n 'max_pulse_rate': self.max_pulse_rate,\n 'tao_1': self.tao_1,\n 'tao_2': self.tao_2\n })\n return d\n\n def init_population(self, task):\n population, fitness, d = super().init_population(task)\n pulse_rates = np.full(self.population_size, self.pulse_rate)\n d.update({'pulse_rates': pulse_rates})\n return population, fitness, d\n\n def self_adaptation(self, loudness, pulse_rate):\n r\"\"\"Adaptation step.\n\n Args:\n loudness (float): Current loudness.\n pulse_rate (float): Current pulse rate.\n\n Returns:\n 
Tuple[float, float]:\n 1. New loudness.\n 2. Nwq pulse rate.\n\n \"\"\"\n return self.min_loudness + self.random() * (\n self.max_loudness - self.min_loudness) if self.random() < self.tao_1 else loudness, self.min_pulse_rate + self.random() * (\n self.max_pulse_rate - self.min_pulse_rate) if self.random() < self.tao_2 else pulse_rate\n\n def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):\n r\"\"\"Core function of Bat Algorithm.\n\n Args:\n task (Task): Optimization task.\n population (numpy.ndarray): Current population\n population_fitness (numpy.ndarray[float]): Current population fitness/function values\n best_x (numpy.ndarray): Current best individual\n best_fitness (float): Current best individual function/fitness value\n params (Dict[str, Any]): Additional algorithm arguments\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population\n 2. New population fitness/function values\n 3. Additional arguments:\n * loudness (numpy.ndarray[float]): Loudness.\n * pulse_rates (numpy.ndarray[float]): Pulse rate.\n * velocities (numpy.ndarray[float]): Velocities.\n\n \"\"\"\n loudness = params.pop('loudness')\n pulse_rates = params.pop('pulse_rates')\n velocities = params.pop('velocities')\n\n for i in range(self.population_size):\n loudness[i], pulse_rates[i] = self.self_adaptation(loudness[i], pulse_rates[i])\n frequency = self.min_frequency + (self.max_frequency - self.min_frequency) * self.random()\n velocities[i] += (population[i] - best_x) * frequency\n if self.random() > pulse_rates[i]:\n solution = self.local_search(best=best_x, loudness=loudness[i], task=task, i=i, population=population)\n else:\n solution = task.repair(population[i] + velocities[i], rng=self.rng)\n new_fitness = task.eval(solution)\n if (new_fitness <= population_fitness[i]) and (self.random() < (self.min_loudness - loudness[i]) / self.starting_loudness):\n population[i], population_fitness[i] = solution, new_fitness\n if new_fitness <= best_fitness:\n best_x, best_fitness = solution.copy(), new_fitness\n return population, population_fitness, best_x, best_fitness, {'loudness': loudness, 'pulse_rates': pulse_rates, 'velocities': velocities}\n"
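The one-line conditional expression in `self_adaptation()` above is easier to follow spelled out: with probability `tao_1` the loudness is resampled uniformly from `[min_loudness, max_loudness]`, otherwise it is kept, and the pulse rate is treated the same way with `tao_2`. A standalone NumPy restatement of that rule (default parameter values copied from the class):

```python
import numpy as np

rng = np.random.default_rng(42)

def self_adapt(loudness, pulse_rate,
               min_loudness=0.9, max_loudness=1.0,
               min_pulse_rate=0.001, max_pulse_rate=0.1,
               tao_1=0.1, tao_2=0.1):
    """Resample loudness with probability tao_1 and pulse rate with probability tao_2."""
    if rng.random() < tao_1:
        loudness = min_loudness + rng.random() * (max_loudness - min_loudness)
    if rng.random() < tao_2:
        pulse_rate = min_pulse_rate + rng.random() * (max_pulse_rate - min_pulse_rate)
    return loudness, pulse_rate

print(self_adapt(0.95, 0.05))
```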
] | [
[
"numpy.arange",
"numpy.sum"
],
[
"numpy.zeros",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
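To tie the two NiaPy files above together, the Zakharov problem can be minimized with the self-adaptive bat algorithm. A sketch assuming the niapy 2.x API (`niapy.task.Task` taking a `problem` argument and `Algorithm.run()` returning the best solution and its fitness); check the library documentation if these signatures differ in your version:

```python
from niapy.task import Task
from niapy.problems import Zakharov
from niapy.algorithms.modified import SelfAdaptiveBatAlgorithm

# Minimize the 10-dimensional Zakharov function defined above.
task = Task(problem=Zakharov(dimension=10, lower=-5.0, upper=10.0), max_iters=200)
algorithm = SelfAdaptiveBatAlgorithm(population_size=40)
best_x, best_fitness = algorithm.run(task)

print('best fitness:', best_fitness)   # the documented global minimum is 0 at the origin
```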
patrickbook/models | [
"718fb2c0d478ab6c9906a3dbf44099942a2c6426"
] | [
"official/nlp/modeling/networks/packed_sequence_embedding.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An embedding network supporting packed sequences and position ids.\"\"\"\n# pylint: disable=g-classes-have-attributes\nimport collections\nimport tensorflow as tf\n\nfrom official.modeling import tf_utils\nfrom official.nlp import keras_nlp\nfrom official.nlp.modeling import layers\n\n\[email protected]_keras_serializable(package='Text')\nclass PackedSequenceEmbedding(tf.keras.Model):\n \"\"\"An embedding network supporting packed sequences and position ids.\n\n This network implements an embedding layer similar to the one described in\n \"BERT: Pre-training of Deep Bidirectional Transformers for Language\n Understanding\" (https://arxiv.org/abs/1810.04805). On top of it, it supports\n to (1) pack multiple sequences into one sequence and (2) allow additional\n \"position_ids\" as input.\n\n Args:\n vocab_size: The size of the token vocabulary.\n type_vocab_size: The size of the type vocabulary.\n embedding_width: Width of token embeddings.\n hidden_size: The output size for this encoder.\n max_seq_length: The maximum sequence length for this encoder.\n initializer: The initializer for the embedding portion of this encoder.\n dropout_rate: The dropout rate to apply before the encoding layers.\n pack_multiple_sequences: If True, we can feed multiple sequences into one\n sequence for training and inference (they don't impact each other).\n use_position_id: Whether to expect `position_ids` as an input to the\n network. 
If False, the `position_ids` will be inferred: (1) when\n pack_multiple_sequences is False, we assume the position ids are 0, 1,\n 2, ..., seq_length - 1; (2) when pack_multiple_sequences is True, there\n may be multiple sub sequences, and for each sub sequence, its position\n ids start from 0, 1, 2, ...\n \"\"\"\n\n def __init__(self,\n vocab_size,\n type_vocab_size,\n embedding_width,\n hidden_size,\n max_seq_length,\n initializer,\n dropout_rate,\n use_position_id=False,\n pack_multiple_sequences=False,\n **kwargs):\n initializer = tf.keras.initializers.get(initializer)\n config_dict = {\n 'vocab_size': vocab_size,\n 'type_vocab_size': type_vocab_size,\n 'embedding_width': embedding_width,\n 'hidden_size': hidden_size,\n 'max_seq_length': max_seq_length,\n 'initializer': tf.keras.initializers.serialize(initializer),\n 'dropout_rate': dropout_rate,\n 'use_position_id': use_position_id,\n 'pack_multiple_sequences': pack_multiple_sequences,\n }\n\n word_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_word_ids')\n mask = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_mask')\n type_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_type_ids')\n inputs = {\n 'input_word_ids': word_ids,\n 'input_mask': mask,\n 'input_type_ids': type_ids,\n }\n if use_position_id:\n position_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='position_ids')\n inputs['position_ids'] = position_ids\n else:\n position_ids = None\n\n if pack_multiple_sequences:\n sub_seq_mask = PackedSequenceMask()(word_ids)\n else:\n sub_seq_mask = None\n\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n name='word_embeddings')\n word_embeddings = embedding_layer(word_ids)\n\n # Always uses dynamic slicing for simplicity.\n position_embedding_layer = PositionEmbeddingWithSubSeqMask(\n initializer=initializer,\n use_dynamic_slicing=True,\n max_sequence_length=max_seq_length,\n name='position_embedding')\n position_embeddings = position_embedding_layer(\n word_embeddings, position_ids, sub_seq_mask)\n\n type_embeddings = (\n layers.OnDeviceEmbedding(\n vocab_size=type_vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n use_one_hot=True,\n name='type_embeddings')(type_ids))\n\n embeddings = tf.keras.layers.Add()(\n [word_embeddings, position_embeddings, type_embeddings])\n embeddings = tf.keras.layers.LayerNormalization(\n name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(\n embeddings)\n embeddings = tf.keras.layers.Dropout(\n rate=dropout_rate, dtype=tf.float32)(\n embeddings)\n\n if embedding_width != hidden_size:\n embeddings = tf.keras.layers.experimental.EinsumDense(\n '...x,xy->...y',\n output_shape=hidden_size,\n bias_axes=None,\n kernel_initializer=initializer,\n name='embedding_projection')(\n embeddings)\n\n attention_mask = keras_nlp.layers.SelfAttentionMask()(embeddings, mask)\n if sub_seq_mask is not None:\n attention_mask = tf.keras.layers.Lambda(\n lambda x: x[0] * tf.cast(x[1], x[0].dtype))(\n [attention_mask, sub_seq_mask])\n\n outputs = [embeddings, attention_mask]\n super(PackedSequenceEmbedding, self).__init__(\n inputs=inputs, outputs=outputs, **kwargs)\n # TF does not track immutable attrs which do not contain Trackables,\n # so by creating a config namedtuple instead of a dict we avoid tracking it.\n config_cls = collections.namedtuple('Config', config_dict.keys())\n self._config = 
config_cls(**config_dict)\n self._embedding_layer = embedding_layer\n self._position_embedding_layer = position_embedding_layer\n\n def get_embedding_table(self):\n return self._embedding_layer.embeddings\n\n def get_config(self):\n return dict(self._config._asdict())\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\[email protected]_keras_serializable(package='Text')\nclass PackedSequenceMask(tf.keras.layers.Layer):\n \"\"\"A layer to create a mask to indicate multiple sub sequences.\"\"\"\n\n def call(self, input_ids):\n \"\"\"Implements call() for the layer.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n\n Returns:\n boolean Tensor of shape [batch_size, seq_length, seq_length]. [x, y, z]\n is True if for x'th instance in a batch, y'th token and z'th token are\n from the same sub sequence.\n \"\"\"\n # Suppose\n # - the first token in the parent sequence is [CLS].\n # - every sequence starts from [CLS].\n # - every sequence only contains one [CLS].\n seq_start_token = input_ids[:, 0:1]\n seq_start_loc = tf.cast(tf.equal(input_ids, seq_start_token), tf.int32)\n # Set different ids for different sub sequences.\n seq_ids = tf.expand_dims(tf.cumsum(seq_start_loc, -1), -1)\n return tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1]))\n\n\[email protected]_keras_serializable(package='Text')\nclass PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):\n \"\"\"Creates a positional embedding with sub-sequence masking.\n\n This layer creates a positional embedding as described in \"BERT: Pre-training\n of Deep Bidirectional Transformers for Language Understanding\"\n (https://arxiv.org/abs/1810.04805). On top of it, it supports\n `position_ids` and `sub_sequence_mask` tensors.\n\n This layer can be set up to either create a statically shaped slice or a\n dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor\n can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the\n input size must be fixed.\n\n Args:\n initializer: The initializer to use for the embedding weights. Defaults to\n \"glorot_uniform\".\n use_dynamic_slicing: Whether to use the dynamic slicing path.\n max_sequence_length: The maximum size of the dynamic sequence. 
Only\n applicable if `use_dynamic_slicing` is True.\n \"\"\"\n\n def __init__(self,\n initializer='glorot_uniform',\n use_dynamic_slicing=False,\n max_sequence_length=None,\n **kwargs):\n # We need to have a default dtype of float32, since the inputs (which Keras\n # usually uses to infer the dtype) will always be int32.\n if 'dtype' not in kwargs:\n kwargs['dtype'] = 'float32'\n\n super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)\n if use_dynamic_slicing and max_sequence_length is None:\n raise ValueError(\n 'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'\n )\n self._max_sequence_length = max_sequence_length\n self._initializer = tf.keras.initializers.get(initializer)\n self._use_dynamic_slicing = use_dynamic_slicing\n\n def get_config(self):\n config = {\n 'max_sequence_length': self._max_sequence_length,\n 'initializer': tf.keras.initializers.serialize(self._initializer),\n 'use_dynamic_slicing': self._use_dynamic_slicing,\n }\n base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dimension_list = input_shape.as_list()\n\n if len(dimension_list) != 3:\n raise ValueError('PositionEmbedding expects a 3-dimensional input tensor '\n 'of shape [batch, sequence, width]')\n seq_length = dimension_list[1]\n width = dimension_list[2]\n\n # If we are not using dynamic slicing, we must assume that the sequence\n # length is fixed and max_sequence_length should not be specified.\n if not self._use_dynamic_slicing:\n if seq_length is None:\n raise ValueError(\n 'PositionEmbedding must have `use_dynamic_slicing` set '\n 'to True (and max_sequence_length set) when the '\n 'sequence (1st) dimension of the input is None.')\n if self._max_sequence_length is not None:\n raise ValueError(\n 'When `use_dynamic_slicing` is False, max_sequence_length should '\n 'not be specified and we ought to use seq_length to get the '\n 'variable shape.')\n\n if self._max_sequence_length is not None:\n weight_sequence_length = self._max_sequence_length\n else:\n weight_sequence_length = seq_length\n\n self._position_embeddings = self.add_weight(\n 'embeddings',\n shape=[weight_sequence_length, width],\n initializer=self._initializer)\n\n super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)\n\n def call(self, inputs, position_ids=None, sub_sequence_mask=None):\n \"\"\"Implements call() for the layer.\n\n When `position_ids` is specified, it will return the position embeddings\n corresponding to this `position_ids`; otherwise, `position_ids` will be\n inferred in the following way:\n\n (1) When `sub_sequence_mask` is None, we assume the position ids are\n 0, 1, 2, ..., seq_length - 1.\n (2) When `sub_sequence_mask` is specified, there may be multiple sub\n sequences, and for each sub sequence, its position ids start from\n 0, 1, 2, ...\n\n Args:\n inputs: Word embeddings in shape [batch, seq_length, embedding_dim].\n position_ids: An optional int32 tensor in shape [batch, seq_length].\n sub_sequence_mask: An optional bool tensor in shape [batch, seq_length,\n seq_length]. 
[x, y, z] is True if for x'th instance in a batch, y'th\n token and z'th token are from the same sub sequence.\n\n Returns:\n The position embeddings in shape [batch, seq_length, embedding_dim].\n \"\"\"\n input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)\n if self._use_dynamic_slicing:\n position_embeddings = self._position_embeddings[:input_shape[1], :]\n else:\n position_embeddings = self._position_embeddings\n\n if position_ids is not None:\n return tf.gather(position_embeddings, position_ids)\n\n if sub_sequence_mask is None:\n return tf.broadcast_to(position_embeddings, input_shape)\n else:\n sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32)\n # For each sub sequence, its position ids start from 0, 1, 2, ...\n position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1\n return tf.gather(position_embeddings, position_ids)\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.transpose",
"tensorflow.broadcast_to",
"tensorflow.equal",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.cast",
"tensorflow.keras.layers.experimental.EinsumDense",
"tensorflow.gather",
"tensorflow.cumsum",
"tensorflow.keras.layers.Add",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
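The `PackedSequenceMask` / `PositionEmbeddingWithSubSeqMask` pair in the row above recovers per-sub-sequence position ids from the token ids alone. A minimal sketch of that trick, assuming TensorFlow 2.x and a toy batch in which token id 101 marks the start of every packed sub sequence (both the id and the batch are illustrative, not taken from the dataset):

```python
import tensorflow as tf

# Toy packed batch: two sub sequences, each starting with token id 101.
input_ids = tf.constant([[101, 7, 8, 101, 9, 6]])

# Same construction as PackedSequenceMask.call(): tokens sharing a cumulative
# count of start tokens belong to the same sub sequence.
start_loc = tf.cast(tf.equal(input_ids, input_ids[:, 0:1]), tf.int32)
seq_ids = tf.expand_dims(tf.cumsum(start_loc, -1), -1)
sub_seq_mask = tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1]))

# Same inference as PositionEmbeddingWithSubSeqMask.call() when no explicit
# position_ids are passed: positions restart at 0 inside every sub sequence.
position_ids = tf.linalg.diag_part(
    tf.cumsum(tf.cast(sub_seq_mask, tf.int32), -1)) - 1
print(position_ids.numpy())  # [[0 1 2 0 1 2]]
```

Because `sub_seq_mask` is block diagonal, the running sum along the last axis restarts in every block, so its diagonal minus one counts 0, 1, 2, ... within each sub sequence.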
konstin/esm | [
"a39894c079ce314e1c0aaa607e8ae498111910a0",
"a39894c079ce314e1c0aaa607e8ae498111910a0"
] | [
"esm/model.py",
"esm/pretrained.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .modules import TransformerLayer, PositionalEmbedding # noqa\n\n\nclass ProteinBertModel(nn.Module):\n @classmethod\n def add_args(cls, parser):\n parser.add_argument(\n \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\"\n )\n parser.add_argument(\n \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\"\n )\n parser.add_argument(\n \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to logits\"\n )\n parser.add_argument(\n \"--ffn_embed_dim\",\n default=5120,\n type=int,\n metavar=\"N\",\n help=\"embedding dimension for FFN\",\n )\n parser.add_argument(\n \"--attention_heads\",\n default=20,\n type=int,\n metavar=\"N\",\n help=\"number of attention heads\",\n )\n\n def __init__(self, args, alphabet_size, padding_idx):\n super().__init__()\n self.args = args\n self.alphabet_size = alphabet_size\n self.padding_idx = padding_idx\n self.embed_scale = math.sqrt(self.args.embed_dim)\n self._init_submodules()\n\n def _init_submodules(self):\n self.embed_tokens = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.embed_positions = PositionalEmbedding(self.args.embed_dim, self.padding_idx)\n self.layers = nn.ModuleList(\n [\n TransformerLayer(\n self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads\n )\n for _ in range(self.args.layers)\n ]\n )\n self.embed_out = nn.Parameter(\n torch.zeros((self.alphabet_size, self.args.embed_dim))\n )\n self.embed_out_bias = None\n if self.args.final_bias:\n self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))\n\n def forward(self, tokens, repr_layers=[]):\n assert tokens.ndim == 2\n padding_mask = tokens.eq(self.padding_idx)\n if not padding_mask.any():\n padding_mask = None\n\n x = self.embed_scale * self.embed_tokens(tokens)\n x = x + self.embed_positions(tokens)\n\n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n # (B, T, E) => (T, B, E)\n x = x.transpose(0, 1)\n\n for layer_idx, layer in enumerate(self.layers):\n x, _ = layer(x, self_attn_padding_mask=padding_mask)\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.transpose(0, 1)\n\n x = F.linear(x, self.embed_out, bias=self.embed_out_bias)\n\n # (T, B, E) => (B, T, E)\n x = x.transpose(0, 1)\n\n result = {\"logits\": x, \"representations\": hidden_representations}\n\n return result\n\n @property\n def num_layers(self):\n return self.args.layers\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport esm\nimport torch\nfrom argparse import Namespace\nfrom .constants import proteinseq_toks\n\ndef load_model_and_alphabet(model_name):\n if model_name.endswith(\".pt\"): # treat as filepath\n return load_model_and_alphabet_local(model_name)\n else:\n return load_model_and_alphabet_hub(model_name)\n\ndef load_model_and_alphabet_hub(model_name):\n alphabet = esm.Alphabet.from_dict(proteinseq_toks)\n\n url = f\"https://dl.fbaipublicfiles.com/fair-esm/models/{model_name}.pt\"\n if torch.cuda.is_available():\n model_data = torch.hub.load_state_dict_from_url(url, progress=False)\n else:\n model_data = torch.hub.load_state_dict_from_url(url, progress=False, map_location=torch.device('cpu'))\n\n # upgrade state dict\n pra = lambda s: ''.join(s.split('decoder_')[1:] if 'decoder' in s else s)\n prs = lambda s: ''.join(s.split('decoder.')[1:] if 'decoder' in s else s)\n model_args = {pra(arg[0]): arg[1] for arg in vars(model_data[\"args\"]).items()}\n model_state = {prs(arg[0]): arg[1] for arg in model_data[\"model\"].items()}\n\n model = esm.ProteinBertModel(\n Namespace(**model_args), len(alphabet), padding_idx=alphabet.padding_idx\n )\n model.load_state_dict(model_state)\n\n return model, alphabet\n\ndef load_model_and_alphabet_local(model_location):\n alphabet = esm.Alphabet.from_dict(proteinseq_toks)\n\n model_data = torch.load(model_location)\n\n # upgrade state dict\n pra = lambda s: ''.join(s.split('decoder_')[1:] if 'decoder' in s else s)\n prs = lambda s: ''.join(s.split('decoder.')[1:] if 'decoder' in s else s)\n model_args = {pra(arg[0]): arg[1] for arg in vars(model_data[\"args\"]).items()}\n model_state = {prs(arg[0]): arg[1] for arg in model_data[\"model\"].items()}\n\n model = esm.ProteinBertModel(\n Namespace(**model_args), len(alphabet), padding_idx=alphabet.padding_idx\n )\n model.load_state_dict(model_state)\n return model, alphabet\n\ndef esm1_t34_670M_UR50S_local():\n model_location = '/checkpoint/bioseq_nonsecure/br2020/br4/checkpoint94.pt'\n model, alphabet = load_model_and_alphabet_local(model_location)\n\n return model, alphabet\n\ndef esm1_t34_670M_UR50S_hub():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50S\")\n\ndef esm1_t34_670M_UR50S():\n \"\"\" 34 layer transformer model with 670M params, trained on Uniref50 Sparse.\n\n Returns a tuple of (ProteinBertModel, Alphabet).\n \"\"\"\n #return esm1_t34_670M_UR50S_hub()\n #return esm1_t34_670M_UR50S_local()\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50S\")\n\ndef esm1_t34_670M_UR50D():\n \"\"\" 34 layer transformer model with 670M params, trained on Uniref50 Dense.\n\n Returns a tuple of (ProteinBertModel, Alphabet).\n \"\"\"\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50D\")\n\ndef esm1_t34_670M_UR100():\n \"\"\" 34 layer transformer model with 670M params, trained on Uniref100.\n\n Returns a tuple of (ProteinBertModel, Alphabet).\n \"\"\"\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")\n\ndef esm1_t12_85M_UR50S():\n \"\"\" 12 layer transformer model with 85M params, trained on Uniref50 Sparse.\n\n Returns a tuple of (ProteinBertModel, Alphabet).\n \"\"\"\n return load_model_and_alphabet_hub(\"esm1_t12_85M_UR50S\")\n\ndef esm1_t6_43M_UR50S():\n \"\"\" 6 layer transformer model with 43M params, trained on Uniref50 Sparse.\n\n Returns a tuple of (ProteinBertModel, Alphabet).\n \"\"\"\n return 
load_model_and_alphabet_hub(\"esm1_t6_43M_UR50S\")\n"
] | [
[
"torch.nn.functional.linear",
"torch.nn.Embedding",
"torch.zeros"
],
[
"torch.device",
"torch.hub.load_state_dict_from_url",
"torch.cuda.is_available",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
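The `esm/pretrained.py` loaders in the row above all return a `(ProteinBertModel, Alphabet)` pair and fetch the checkpoint from dl.fbaipublicfiles.com via `torch.hub` on first use. A rough usage sketch, assuming this fork is installed as the `esm` package and that real inputs would be encoded with the returned `Alphabet` (the random token batch below is a placeholder only):

```python
import torch
from esm.pretrained import esm1_t12_85M_UR50S

model, alphabet = esm1_t12_85M_UR50S()  # downloads weights on first call
model.eval()

# Placeholder batch; in practice tokens come from the model's Alphabet.
tokens = torch.randint(0, len(alphabet), (1, 72))

with torch.no_grad():
    out = model(tokens, repr_layers=[12])

print(out["logits"].shape)               # (batch, seq_len, alphabet_size)
print(out["representations"][12].shape)  # final-layer features, (batch, seq_len, embed_dim)
```

The `repr_layers` argument selects which hidden layers end up in `out["representations"]`; layer 12 is the last layer of the t12 model.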
AryamanSrii/Mecha-Karen | [
"4a5c7318f8c458495eee72a13be5db8a0113ed28"
] | [
"Bot/src/funhouse/image.py"
] | [
"# !/usr/bin/python\n\n\"\"\"\nCopyright ©️: 2020 Seniatical / _-*™#7519\nLicense: Apache 2.0\nA permissive license whose main conditions require preservation of copyright and license notices.\nContributors provide an express grant of patent rights.\nLicensed works, modifications, and larger works may be distributed under different terms and without source code.\nFULL LICENSE CAN BE FOUND AT:\n https://www.apache.org/licenses/LICENSE-2.0.html\nAny violation to the license, will result in moderate action\nYou are legally required to mention (original author, license, source and any changes made)\n\"\"\"\n\nimport typing\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import BucketType\nfrom PIL import Image, ImageDraw\nfrom io import BytesIO\nimport aiohttp\nimport MK\nimport numpy as np\nimport random\nimport cv2\n\nfrom core._ import extract_\nfrom core._.image.effects import *\nfrom core._.image._ import sort_size, save_image\nfrom core._.image.cloud import APISESSION\n\nclass _Image(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.converter = commands.MemberConverter()\n self.vac_api = APISESSION.Client()\n self.client = MK.Async.Client(bot.env('API_TOKEN'))\n self.ses = aiohttp.ClientSession()\n self.cache = bot.cache\n self.loop = bot.loop\n self.beard_image = Image.open('./storage/images/beard.png')\n self.wasted_template = Image.open('./storage/images/wasted.png').resize((900, 900))\n\n self.emoji_c = commands.PartialEmojiConverter()\n\n bot.api_c = self.client\n\n @staticmethod\n def pixelate(image_to_pixelate: Image) -> Image:\n return image_to_pixelate.resize((32, 32), resample=Image.NEAREST).resize((1024, 1024), resample=Image.NEAREST)\n\n @staticmethod\n def quantize(image_to_quantize: Image) -> Image:\n return image_to_quantize.quantize()\n\n @commands.command(name='Trash')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def trash(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/trash.jpg')\n\n author = Image.open(_author).convert('RGBA').resize((130, 134))\n member = Image.open(_user).convert('RGBA').resize((105, 109))\n\n im.paste(author, (260, 120))\n im.paste(member, (105, 7))\n\n with BytesIO() as b:\n im.save(b, 'PNG')\n b.seek(0)\n file = discord.File(fp=b, filename='trash.png')\n return file\n\n author_av = BytesIO(await ctx.author.avatar.read())\n user_av = await extract_.get_stream(ctx, query=argument)\n\n if not user_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n await ctx.send(\n embed=discord.Embed(title='Hes getting recycled', colour=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://trash.png'),\n file=future.result())\n\n @commands.command(name='Slap')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def slap(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/slap.jpg')\n\n author = Image.open(_author).convert('RGBA').resize((310, 310))\n member = Image.open(_user).convert('RGBA').resize((320, 320))\n\n im = im.copy()\n im.paste(author, (465, 70))\n im.paste(member, (810, 350))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n return discord.File(buffer, filename='slapped.png')\n\n author_av = 
BytesIO(await ctx.author.avatar.read())\n user_av = await extract_.get_stream(ctx, query=argument)\n\n if not user_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='He just got SLAPPED!',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://slapped.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Spank')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def spank(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/spank.jpg').convert('RGBA')\n\n author = Image.open(_author).convert('RGBA').resize((230, 230))\n member = Image.open(_user).convert('RGBA').resize((320, 320))\n\n im = im.copy()\n im.paste(member, (750, 25))\n im.paste(author, (1200, 455))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='spanked.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n user_av = BytesIO(await ctx.author.avatar.read())\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='Who\\'s being a naughty boy',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://spanked.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Boot')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def boot(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/boot.jpg')\n\n _author = Image.open(_author).convert('RGBA').resize((50, 54))\n _user = Image.open(_user).convert('RGBA').resize((50, 54))\n\n im = im.copy()\n im.paste(_author, (183, 13))\n im.paste(_user, (33, 12))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='booted.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n user_av = BytesIO(await ctx.author.avatar.read())\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='Right in the sacks',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://booted.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Obese')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def obese(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/obese.jpg').convert('RGBA').resize((900, 900))\n\n _author = Image.open(_author).convert('RGBA').resize((220, 220))\n im.paste(_author, (457, 135))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='obese.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='He\\'s not that fat 
*yet*.',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://obese.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Bird')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def bird(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/bird.jpg').convert('RGBA').resize((900, 900))\n _author = Image.open(_author).convert('RGBA').resize((220, 220))\n im.paste(_author, (555, 60))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='bird.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='Somebody is preparing to migrate',\n colour=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://bird.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Delete')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def delete(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/delete.jpg').convert('RGB')\n\n _author = Image.open(_author).convert('RGBA').resize((196, 196))\n im.paste(_author, (121, 137))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='delete.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='Moving file to the recycle bin',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://delete.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Invert')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def invert(self, ctx, argument: str = None, animate: str = '--true', *size) -> typing.Union[discord.MessageReference, discord.Embed]:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.invert, stream, animate, *size)\n embed = discord.Embed(title='Inverted!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Equalize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def equalize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.equalize, stream, animate, *size)\n embed = discord.Embed(title='Equalized!', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Grayscale')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def grayscale(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.grayscale, stream, animate, *size)\n embed = discord.Embed(title='Grayscaled!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Mirror')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def mirror(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.mirror, stream, animate, *size)\n embed = discord.Embed(title='Mirrored!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Posterize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def posterize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.posterize, stream, animate, *size, {'bits': 1})\n embed = discord.Embed(title='Posterized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Solarize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def solarize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.solarize, stream, animate, *size, {'threshold': 255})\n embed = discord.Embed(title='Solarized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n 
except Exception:\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Transpose')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def transpose(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.exif_transpose, stream, animate, *size)\n embed = discord.Embed(title='Transposed!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Flip')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def flip(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.flip, stream, animate, *size)\n embed = discord.Embed(title='Flipped!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Gamma')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def gamma(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('gamma', str(img))\n except Exception as e:\n print(e)\n return await ctx.send('Invalid image URL passed.')\n\n file = discord.File(fp=img, filename='gamma.png')\n embed = discord.Embed(title='Gammafied!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://gamma.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Rainbow')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def rainbow(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('rainbow', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='autumn.png')\n embed = discord.Embed(title='Autumn Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://autumn.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Autumn')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def autumn(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('autumn', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='autumn.png')\n embed = discord.Embed(title='Autumn Filter', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://autumn.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Inferno')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def inferno(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('hsv', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='inferno.png')\n embed = discord.Embed(title='Inferno Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://inferno.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Twilight')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def twilight(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('twilight', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='twilight.png')\n embed = discord.Embed(title='Twilight Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://twilight.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Warp')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def warp(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('warp', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='warp.png')\n embed = discord.Embed(title='Warped Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://warp.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Blur')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def blur(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('blur', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='blur.png')\n embed = discord.Embed(title='You now look like a foggy mirror!',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://blur.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Swirl')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def swirl(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('swirl', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='swirl.png')\n embed = discord.Embed(title='Round and a round', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://swirl.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Achievement')\n @commands.cooldown(1, 10, BucketType.user)\n async def achievement(self, ctx, *, message: str = None):\n message = 'Nothing.' 
if not message else message\n message = message.replace(' ', '%20')\n url = 'https://minecraftskinstealer.com/achievement/{}/Achievement%20Earned!/{}'.format(random.randrange(40),\n message)\n embed = discord.Embed(colour=discord.Colour.red()).set_image(url=url)\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cartoon(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('cartoon', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n\n file = discord.File(fp=img, filename='cartoon.png')\n embed = discord.Embed(title='Cartoon Filter', color=ctx.author.color).set_image(url='attachment://cartoon.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def beard(self, ctx, *args):\n if not args:\n user = ctx.author\n pos_x: str = '290'\n pos_y: str = '250'\n beard_x: str = '300'\n beard_y = '300'\n else:\n try:\n user = await self.converter.convert(ctx, args[0])\n except commands.errors.MemberNotFound:\n user = ctx.author\n if len(args) > 1:\n pos_x = args[1]\n else:\n pos_x = '290'\n if len(args) > 2:\n pos_y = args[2]\n else:\n pos_y = '250'\n if len(args) > 3:\n beard_x = args[3]\n else:\n beard_x = '300'\n if len(args) > 4:\n beard_y = args[4]\n else:\n beard_y = '300'\n try:\n positions = [pos_x, pos_y, beard_x, beard_y]\n new_pos = list(map(int, positions))\n if any([i for i in new_pos if i > 900 or i < 1]):\n return await ctx.send('Markers cannot be larger than 900 or less than 1')\n except ValueError:\n return await ctx.send('Markers to place or resize the beard must be numbers!')\n user = user or ctx.author\n\n raw_beard = self.beard_image\n\n beard = raw_beard.resize((new_pos[2], new_pos[3]))\n\n avatar = Image.open(BytesIO(await user.avatar.with_format(format='png').read())).convert(\n 'RGBA').resize((900, 900))\n avatar.paste(beard, (new_pos[0], new_pos[1]), beard)\n\n with BytesIO() as buffer:\n avatar.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='bearded.jpg')\n\n embed = discord.Embed(title=f'Given {user.display_name} a nice beard', color=user.color).set_image(\n url='attachment://bearded.jpg')\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def wasted(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n\n def execute(image):\n img = Image.open(image).convert('RGB').resize((900, 900))\n img = img.point(lambda p: p * 0.5)\n\n img.paste(self.wasted_template, (0, 0), self.wasted_template)\n\n with BytesIO() as buffer:\n img.save(buffer, 'PNG')\n buffer.seek(0)\n file = discord.File(fp=buffer, filename='wasted.jpg')\n return file\n\n image = await self.loop.run_in_executor(None,\n execute,\n BytesIO(await user.avatar.with_format(format='png').read())\n )\n await ctx.send(embed=discord.Embed(title='Wasted', colour=user.colour).set_image(url='attachment://wasted.jpg'),\n file=image)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def gayify(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.send('Invalid image provided')\n\n file = await self.loop.run_in_executor(None, gayify_, stream, animate, *size)\n embed = discord.Embed(title=f'Gay Filter', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def distracted(self, ctx, user1: discord.Member = None, user2: discord.Member = None,\n user3: discord.Member = None):\n m1 = user1 or ctx.author\n m2 = user2 or ctx.author\n m3 = user3 or ctx.author\n user = await self.vac_api.distracted_bf(m1.avatar.with_format(format='png'),\n m2.avatar.with_format(format='png'),\n m3.avatar.with_format(format='png'))\n image_out = discord.File(fp=await user.read(), filename=\"distracted.png\")\n embed = discord.Embed(title=f'Oh no.', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://distracted.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def dos(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.dock_of_shame(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"dockofshame.png\")\n embed = discord.Embed(title=f'SHAME THEM!', color=user.colour).set_image(url='attachment://dockofshame.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def drip(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.drip(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"drip.png\")\n embed = discord.Embed(title=f'Speechless', color=user.colour).set_image(url='attachment://drip.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cr(self, ctx, *, text: str):\n user = await self.vac_api.car_reverse(text)\n image_out = discord.File(fp=await user.read(), filename=\"carreverse.png\")\n embed = discord.Embed(title=f'Car Reverse Meme', color=ctx.author.colour).set_image(\n url='attachment://carreverse.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cmm(self, ctx, *, text: str):\n user = await self.vac_api.change_my_mind(text)\n image_out = discord.File(fp=await user.read(), filename=\"changemymind.png\")\n embed = discord.Embed(title=f'Change My Mind.', color=ctx.author.colour).set_image(\n url='attachment://changemymind.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def heaven(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.heaven(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"heaven.png\")\n embed = discord.Embed(title=f'They have ascended.', color=user.colour).set_image(url='attachment://heaven.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def table_flip(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.table_flip(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"tableflip.png\")\n embed = discord.Embed(title=f'{user.display_name} looks fiesty.', color=user.colour).set_image(\n url='attachment://tableflip.png')\n await 
ctx.send(file=image_out, embed=embed)\n\n @commands.command(aliases=['color'], name='Colour')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def get_colour(self, ctx, colour):\n try:\n colour = int((str((await self.converter.convert(ctx, colour)).colour)).replace('#', '0x'), 16)\n except Exception:\n try:\n colour = int(colour.replace('#', '0x'), 16)\n except Exception:\n return await ctx.send('Invalid hex code provided.')\n with BytesIO() as b:\n new = Image.new(mode='RGB', size=(900, 900), color=colour)\n new.save(b, 'PNG')\n b.seek(0)\n await ctx.send(file=discord.File(fp=b, filename='{}.png'.format(colour)),\n embed=discord.Embed(title='Created new colour:', colour=colour).set_image(\n url='attachment://{}.png'.format(colour)))\n\n @commands.command(name='8bit')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def bittify(self, ctx, argument: str = None, animate: str = '--true', *size) -> discord.Embed:\n _io = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(_io, animate, size):\n avatar = Image.open(_io)\n duration = avatar.info.get('duration')\n loops = avatar.info.get('loop')\n\n if not size and not getattr(_io, 'discord', False):\n size = avatar.size\n else:\n size = sort_size(*size)\n\n if getattr(avatar, 'is_animated', False) and animate.lower() == '--true':\n frames = []\n for _ in range(avatar.n_frames):\n avatar.seek(_)\n frames.append(self.quantize(self.pixelate(avatar)).resize(size))\n return save_image(frames, filename='8bit.gif', duration=duration, loop=loops)\n\n eightbit = self.pixelate(avatar)\n eightbit = self.quantize(eightbit).resize(size)\n\n with BytesIO() as buffer:\n eightbit.save(buffer, format=\"PNG\")\n buffer.seek(0)\n\n file = discord.File(buffer, filename=\"8bit.png\")\n return file\n\n if not _io:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, _io, animate, size)\n await future\n\n embed = discord.Embed(\n title=\"8-Bit filter\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://{}\".format(future.result().filename))\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def oil(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n oil = cv2.xphoto.oilPainting(image, 7, 1)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(oil)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='oilpainting.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! 
Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Oil Painting\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://oilpainting.png\")\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(aliases=['watercolor'])\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def watercolour(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n water_colour = cv2.stylization(image, sigma_s=60, sigma_r=0.6)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(water_colour)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='watercolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Watercolour Painting\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://watercolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @commands.group(invoke_without_command=True)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def sketch(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(dst_gray)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='sketchnocolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Sketched your image\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://sketchnocolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @sketch.command(aliases=['color'], name='colour')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def sketch_colour(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(dst_color)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='sketchcolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! 
Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Sketched your image\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://sketchcolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @commands.command()\n async def expand(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n\n message = await ctx.send(embed=discord.Embed(description='<a:online:834143953221582927> | Building GIF',\n colour=discord.Colour.green()))\n\n def execute(image):\n images = []\n\n width = 900\n center = width // 2\n color_1 = (0, 255, 0)\n background_colour = (255, 255, 255)\n max_radius = int(center * 1.5)\n step = 55\n\n avatar = Image.open(image).convert('RGB')\n\n for i in range(1, max_radius, step):\n im = Image.new('RGB', (width, width), background_colour)\n\n image = avatar.resize((width, width))\n\n npImage = np.array(image)\n h, w = im.size\n\n alpha = Image.new('L', image.size, 0)\n draw = ImageDraw.Draw(alpha)\n draw.pieslice((center - i, center - i, center + i, center + i), 0, 360, fill=255)\n\n npAlpha = np.array(alpha)\n npImage = np.dstack((npImage, npAlpha))\n\n image = Image.fromarray(npImage).convert('RGBA')\n\n im.paste(image, (0, 0), image)\n\n images.append(im)\n\n with BytesIO() as buffer:\n images[0].save(buffer, format='GIF', optimize=False, duration=150, append_images=images[1:],\n save_all=True, quality=1, loop=0)\n buffer.seek(0)\n return discord.File(buffer, filename='expand.gif')\n\n image = BytesIO(await user.avatar.with_format(format='jpg').read())\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n gif_message = await ctx.send(file=future.result())\n\n return await message.edit(embed=discord.Embed(\n description='<:Done:835812226345598986> | [Message Link]({}) | [Image Link]({})'.format(\n gif_message.jump_url, gif_message.attachments[0].url),\n colour=discord.Colour.green()))\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def glitch(self, ctx, argument: str = None, level: str = 'low', animated: str = '--true',\n *size) -> typing.Union[typing.Optional[discord.Embed], discord.MessageReference]:\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n levels = {\n 'low': 2,\n 'medium': 5,\n 'high': 10\n }\n try:\n level = levels.get(level.lower()) if level.lower() in levels else float(level)\n except Exception:\n level = 2\n\n if level < 0 or level > 10:\n return await ctx.send('Max level for glitching images starts at 0 and is capped at 10!')\n\n future = self.loop.run_in_executor(None, glitch_, image, level, animated, size)\n await future\n try:\n return await ctx.send(embed=discord.Embed(\n title='Glitch Effect',\n colour=random.randint(0x000000, 0xFFFFFF)\n ).set_image(url='attachment://glitched.gif'), file=future.result())\n except Exception:\n return await ctx.send('Oops, this level was abit too high for your image - please retry with a lower level')\n\n @commands.command()\n @commands.cooldown(1, 30, commands.BucketType.user)\n async def image(self, ctx, *, query: str = None):\n if not query:\n return await ctx.send('Need to give an image to search for!')\n url = 'https://api.pexels.com/v1/search?query={}&per_page={}'.format(query, random.randint(1, 100))\n auth = self.bot.env('PEXEL_API_TOKEN')\n r = requests.get(url, headers={'Authorization': auth}).json()\n try:\n await ctx.send(\n embed=discord.Embed(\n 
title='Search results for {}'.format(\n query.title()\n ),\n colour=discord.Color.red(),\n ).set_image(url=random.choice(r['photos'])['src']['large2x'])\n )\n except IndexError:\n return await ctx.send('No Image was Found Under the Context **{}**'.format(query.title()))\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def spin(self, ctx, argument: str = None, animate: str = '--true') -> discord.Message:\n image = await extract_.get_stream(ctx, query=argument)\n if not image:\n return await ctx.send('Invalid image provided')\n\n future = await self.loop.run_in_executor(None, spin_, image, animate)\n return await ctx.send(embed=discord.Embed(\n title='Spun around and around',\n colour=random.randint(0x000000, 0xFFFFFF)\n ).set_image(url='attachment://spin.gif'), file=future)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def evilpatrick(self, ctx, argument: str = None) -> discord.MessageReference:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Invalid image provided')\n\n def execute(stream):\n image = Image.open(stream).resize((150, 150)).convert('RGB')\n frames = []\n\n with BytesIO() as buffer:\n with Image.open('./storage/images/evil.gif') as _base:\n for _ in range(_base.n_frames):\n _base.seek(_)\n\n temp = _base.copy().convert('RGBA')\n temp.paste(image, (205, 20))\n\n frames.append(temp)\n\n frames[0].save(\n buffer, 'GIF',\n append_images=frames[1:],\n loop=0, duration=(_base.info.get('duration') or 0),\n save_all=True\n )\n buffer.seek(0)\n return discord.File(fp=buffer, filename='evil.gif')\n image = await self.loop.run_in_executor(None, execute, stream)\n return await ctx.message.reply(\n embed=discord.Embed(\n title='Evil!',\n colour=discord.Colour.red()\n ).set_image(url='attachment://evil.gif'), file=image)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def salt(self, ctx, argument: str = None) -> discord.MessageReference:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Invalid image provided')\n\n def execute(stream):\n image = Image.open(stream).resize((300, 300)).convert('RGB')\n frames = []\n\n with BytesIO() as buffer:\n with Image.open('./storage/images/salty.gif') as _base:\n for _ in range(_base.n_frames):\n _base.seek(_)\n\n temp = _base.copy().resize((200, 200)).convert('RGBA')\n image_ = image.copy()\n image_.paste(temp, (120, 10), temp)\n\n frames.append(image_)\n\n frames[0].save(\n buffer, 'GIF',\n append_images=frames[1:],\n loop=0, duration=(_base.info.get('duration') or 0),\n save_all=True\n )\n buffer.seek(0)\n return discord.File(fp=buffer, filename='salty.gif')\n image = await self.loop.run_in_executor(None, execute, stream)\n return await ctx.message.reply(\n embed=discord.Embed(\n title='Salty!',\n colour=discord.Colour.red()\n ).set_image(url='attachment://salty.gif'), file=image)\n\ndef setup(bot):\n bot.add_cog(_Image(bot))\n"
] | [
[
"numpy.array",
"numpy.dstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
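The only NumPy calls recorded for the Mecha-Karen row above are `numpy.array` and `numpy.dstack`, which come from the `expand` command's circular-crop step (draw an alpha mask with `ImageDraw.pieslice`, then stack it onto the RGB array). A standalone sketch of that step, assuming Pillow and NumPy are available and that `avatar.png` is any local RGB image (the file name and circle size are illustrative):

```python
import numpy as np
from PIL import Image, ImageDraw

avatar = Image.open("avatar.png").convert("RGB").resize((900, 900))

# Grayscale mask: 255 inside the circle, 0 elsewhere.
alpha = Image.new("L", avatar.size, 0)
ImageDraw.Draw(alpha).pieslice((150, 150, 750, 750), 0, 360, fill=255)

# numpy.dstack appends the mask as a fourth (alpha) channel, as in _Image.expand().
rgba = np.dstack((np.array(avatar), np.array(alpha)))
Image.fromarray(rgba).save("avatar_circle.png")
```

Pasting such RGBA frames onto a plain background for increasing radii is what produces the animated "expand" GIF in that cog.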
NunoEdgarGFlowHub/pyfolio | [
"68efdcc2e2d0f140ddbc408a260c6318ac8b06d3"
] | [
"pyfolio/tears.py"
] | [
"#\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import division\n\nfrom time import time\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport scipy.stats\nimport pandas as pd\n\nfrom . import timeseries\nfrom . import utils\nfrom . import pos\nfrom . import txn\nfrom . import round_trips\nfrom . import plotting\nfrom . import _seaborn as sns\nfrom .plotting import plotting_context\n\ntry:\n from . import bayesian\nexcept ImportError:\n warnings.warn(\n \"Could not import bayesian submodule due to missing pymc3 dependency.\",\n ImportWarning)\n\n\ndef timer(msg_body, previous_time):\n current_time = time()\n run_time = current_time - previous_time\n message = \"\\nFinished \" + msg_body + \" (required {:.2f} seconds).\"\n print(message.format(run_time))\n\n return current_time\n\n\ndef create_full_tear_sheet(returns,\n positions=None,\n transactions=None,\n benchmark_rets=None,\n gross_lev=None,\n slippage=None,\n live_start_date=None,\n sector_mappings=None,\n bayesian=False,\n round_trips=False,\n hide_positions=False,\n cone_std=(1.0, 1.5, 2.0),\n bootstrap=False,\n set_context=True):\n \"\"\"\n Generate a number of tear sheets that are useful\n for analyzing a strategy's performance.\n\n - Fetches benchmarks if needed.\n - Creates tear sheets for returns, and significant events.\n If possible, also creates tear sheets for position analysis,\n transaction analysis, and Bayesian analysis.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - Time series with decimal returns.\n - Example:\n 2015-07-16 -0.012143\n 2015-07-17 0.045350\n 2015-07-20 0.030957\n 2015-07-21 0.004902\n positions : pd.DataFrame, optional\n Daily net position values.\n - Time series of dollar amount invested in each position and cash.\n - Days where stocks are not held can be represented by 0 or NaN.\n - Non-working capital is labelled 'cash'\n - Example:\n index 'AAPL' 'MSFT' cash\n 2004-01-09 13939.3800 -14012.9930 711.5585\n 2004-01-12 14492.6300 -14624.8700 27.1821\n 2004-01-13 -13853.2800 13653.6400 -43.6375\n transactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indicies.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\n gross_lev : pd.Series, optional\n The leverage of a strategy.\n - Time series of the sum of long and short exposure per share\n divided by net asset value.\n - Example:\n 2009-12-04 0.999932\n 2009-12-07 0.999783\n 2009-12-08 0.999880\n 2009-12-09 1.000283\n slippage : int/float, optional\n Basis points of slippage to apply to returns before generating\n tearsheet stats and plots.\n If a value is provided, slippage parameter sweep\n plots will be generated from the unadjusted returns.\n 
Transactions and positions must also be passed.\n - See txn.adjust_returns_for_slippage for more details.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period. This datetime should be normalized.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n bayesian: boolean, optional\n If True, causes the generation of a Bayesian tear sheet.\n round_trips: boolean, optional\n If True, causes the generation of a round trip tear sheet.\n cone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\n bootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\n set_context : boolean, optional\n If True, set default plotting style context.\n - See plotting.context().\n \"\"\"\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n\n # If the strategy's history is longer than the benchmark's, limit strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n if slippage is not None and transactions is not None:\n turnover = txn.get_turnover(positions, transactions,\n period=None, average=False)\n unadjusted_returns = returns.copy()\n returns = txn.adjust_returns_for_slippage(returns, turnover, slippage)\n else:\n unadjusted_returns = None\n\n create_returns_tear_sheet(\n returns,\n live_start_date=live_start_date,\n cone_std=cone_std,\n benchmark_rets=benchmark_rets,\n bootstrap=bootstrap,\n set_context=set_context)\n\n create_interesting_times_tear_sheet(returns,\n benchmark_rets=benchmark_rets,\n set_context=set_context)\n\n if positions is not None:\n create_position_tear_sheet(returns, positions,\n gross_lev=gross_lev,\n hide_positions=hide_positions,\n set_context=set_context,\n sector_mappings=sector_mappings)\n\n if transactions is not None:\n create_txn_tear_sheet(returns, positions, transactions,\n unadjusted_returns=unadjusted_returns,\n set_context=set_context)\n if round_trips:\n create_round_trip_tear_sheet(\n positions=positions,\n transactions=transactions,\n sector_mappings=sector_mappings)\n\n if bayesian:\n create_bayesian_tear_sheet(returns,\n live_start_date=live_start_date,\n benchmark_rets=benchmark_rets,\n set_context=set_context)\n\n\n@plotting_context\ndef create_returns_tear_sheet(returns, live_start_date=None,\n cone_std=(1.0, 1.5, 2.0),\n benchmark_rets=None,\n bootstrap=False,\n return_fig=False):\n \"\"\"\n Generate a number of plots for analyzing a strategy's returns.\n\n - Fetches benchmarks, then creates the plots on a single figure.\n - Plots: rolling returns (with cone), rolling beta, rolling sharpe,\n rolling Fama-French risk factors, drawdowns, underwater plot, monthly\n and annual return plots, daily similarity plots,\n and return quantile box plot.\n - Will also print the start and end dates of the strategy,\n performance statistics, drawdown periods, and the return range.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period.\n cone_std : float, or tuple, optional\n If float, The 
standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\n benchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n bootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n \"\"\"\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n # If the strategy's history is longer than the benchmark's, limit\n # strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n df_cum_rets = timeseries.cum_returns(returns, starting_value=1)\n print(\"Entire data start date: \" + str(df_cum_rets\n .index[0].strftime('%Y-%m-%d')))\n print(\"Entire data end date: \" + str(df_cum_rets\n .index[-1].strftime('%Y-%m-%d')))\n\n print('\\n')\n\n plotting.show_perf_stats(returns, benchmark_rets,\n bootstrap=bootstrap,\n live_start_date=live_start_date)\n\n if live_start_date is not None:\n vertical_sections = 11\n live_start_date = utils.get_utc_timestamp(live_start_date)\n else:\n vertical_sections = 10\n\n if bootstrap:\n vertical_sections += 1\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_rolling_returns = plt.subplot(gs[:2, :])\n ax_rolling_returns_vol_match = plt.subplot(gs[2, :],\n sharex=ax_rolling_returns)\n ax_rolling_beta = plt.subplot(gs[3, :], sharex=ax_rolling_returns)\n ax_rolling_sharpe = plt.subplot(gs[4, :], sharex=ax_rolling_returns)\n ax_rolling_risk = plt.subplot(gs[5, :], sharex=ax_rolling_returns)\n ax_drawdown = plt.subplot(gs[6, :], sharex=ax_rolling_returns)\n ax_underwater = plt.subplot(gs[7, :], sharex=ax_rolling_returns)\n ax_monthly_heatmap = plt.subplot(gs[8, 0])\n ax_annual_returns = plt.subplot(gs[8, 1])\n ax_monthly_dist = plt.subplot(gs[8, 2])\n ax_return_quantiles = plt.subplot(gs[9, :])\n\n plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=cone_std,\n ax=ax_rolling_returns)\n ax_rolling_returns.set_title(\n 'Cumulative Returns')\n\n plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=None,\n volatility_match=True,\n legend_loc=None,\n ax=ax_rolling_returns_vol_match)\n ax_rolling_returns_vol_match.set_title(\n 'Cumulative returns volatility matched to benchmark.')\n\n plotting.plot_rolling_beta(\n returns, benchmark_rets, ax=ax_rolling_beta)\n\n plotting.plot_rolling_sharpe(\n returns, ax=ax_rolling_sharpe)\n\n plotting.plot_rolling_fama_french(\n returns, ax=ax_rolling_risk)\n\n # Drawdowns\n plotting.plot_drawdown_periods(\n returns, top=5, ax=ax_drawdown)\n\n plotting.plot_drawdown_underwater(\n returns=returns, ax=ax_underwater)\n\n plotting.show_worst_drawdown_periods(returns)\n\n df_weekly = timeseries.aggregate_returns(returns, 'weekly')\n df_monthly = timeseries.aggregate_returns(returns, 'monthly')\n\n print('\\n')\n plotting.show_return_range(returns, df_weekly)\n\n plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)\n plotting.plot_annual_returns(returns, 
ax=ax_annual_returns)\n plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)\n\n plotting.plot_return_quantiles(\n returns,\n df_weekly,\n df_monthly,\n ax=ax_return_quantiles)\n\n if bootstrap:\n ax_bootstrap = plt.subplot(gs[10, :])\n plotting.plot_perf_stats(returns, benchmark_rets,\n ax=ax_bootstrap)\n\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_position_tear_sheet(returns, positions, gross_lev=None,\n show_and_plot_top_pos=2, hide_positions=False,\n return_fig=False, sector_mappings=None):\n \"\"\"\n Generate a number of plots for analyzing a\n strategy's positions and holdings.\n\n - Plots: gross leverage, exposures, top positions, and holdings.\n - Will also print the top positions held.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n gross_lev : pd.Series, optional\n The leverage of a strategy.\n - See full explanation in create_full_tear_sheet.\n show_and_plot_top_pos : int, optional\n By default, this is 2, and both prints and plots the\n top 10 positions.\n If this is 0, it will only plot; if 1, it will only print.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n Overrides show_and_plot_top_pos to 0 to suppress text output.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n \"\"\"\n\n if hide_positions:\n show_and_plot_top_pos = 0\n vertical_sections = 6 if sector_mappings is not None else 5\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_gross_leverage = plt.subplot(gs[0, :])\n ax_exposures = plt.subplot(gs[1, :], sharex=ax_gross_leverage)\n ax_top_positions = plt.subplot(gs[2, :], sharex=ax_gross_leverage)\n ax_max_median_pos = plt.subplot(gs[3, :], sharex=ax_gross_leverage)\n ax_holdings = plt.subplot(gs[4, :], sharex=ax_gross_leverage)\n\n positions_alloc = pos.get_percent_alloc(positions)\n\n if gross_lev is not None:\n plotting.plot_gross_leverage(returns, gross_lev, ax=ax_gross_leverage)\n\n plotting.plot_exposures(returns, positions_alloc, ax=ax_exposures)\n\n plotting.show_and_plot_top_positions(\n returns,\n positions_alloc,\n show_and_plot=show_and_plot_top_pos,\n hide_positions=hide_positions,\n ax=ax_top_positions)\n\n plotting.plot_max_median_position_concentration(positions,\n ax=ax_max_median_pos)\n\n plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)\n\n if sector_mappings is not None:\n sector_exposures = pos.get_sector_exposures(positions, sector_mappings)\n if len(sector_exposures.columns) > 1:\n sector_alloc = pos.get_percent_alloc(sector_exposures)\n sector_alloc = sector_alloc.drop('cash', axis='columns')\n ax_sector_alloc = plt.subplot(gs[5, :], sharex=ax_gross_leverage)\n plotting.plot_sector_allocations(returns, sector_alloc,\n ax=ax_sector_alloc)\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_txn_tear_sheet(returns, positions, transactions,\n 
unadjusted_returns=None, return_fig=False):\n \"\"\"\n Generate a number of plots for analyzing a strategy's transactions.\n\n Plots: turnover, daily volume, and a histogram of daily volume.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n unadjusted_returns : pd.Series, optional\n Daily unadjusted returns of the strategy, noncumulative.\n Will plot additional swippage sweep analysis.\n - See pyfolio.plotting.plot_swippage_sleep and\n pyfolio.plotting.plot_slippage_sensitivity\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n \"\"\"\n vertical_sections = 5 if unadjusted_returns is not None else 3\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_turnover = plt.subplot(gs[0, :])\n ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)\n ax_turnover_hist = plt.subplot(gs[2, :])\n\n plotting.plot_turnover(\n returns,\n transactions,\n positions,\n ax=ax_turnover)\n\n plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)\n\n try:\n plotting.plot_daily_turnover_hist(transactions, positions,\n ax=ax_turnover_hist)\n except ValueError:\n warnings.warn('Unable to generate turnover plot.', UserWarning)\n\n if unadjusted_returns is not None:\n ax_slippage_sweep = plt.subplot(gs[3, :])\n plotting.plot_slippage_sweep(unadjusted_returns,\n transactions,\n positions,\n ax=ax_slippage_sweep\n )\n ax_slippage_sensitivity = plt.subplot(gs[4, :])\n plotting.plot_slippage_sensitivity(unadjusted_returns,\n transactions,\n positions,\n ax=ax_slippage_sensitivity\n )\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_round_trip_tear_sheet(positions, transactions,\n sector_mappings=None,\n return_fig=False):\n \"\"\"\n Generate a number of figures and plots describing the duration,\n frequency, and profitability of trade \"round trips.\"\n A round trip is started when a new long or short position is\n opened and is only completed when the number of shares in that\n position returns to or crosses zero.\n\n Parameters\n ----------\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in create_full_tear_sheet.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n \"\"\"\n\n transactions_closed = round_trips.add_closing_transactions(positions,\n transactions)\n trades = round_trips.extract_round_trips(transactions_closed)\n\n if len(trades) < 5:\n warnings.warn(\n \"\"\"Fewer than 5 round-trip trades made.\n Skipping round trip tearsheet.\"\"\", UserWarning)\n return\n\n ndays = len(positions)\n\n print(trades.drop(['open_dt', 'close_dt', 'symbol'],\n axis='columns').describe())\n print('Percent of round trips profitable = {:.4}%'.format(\n (trades.pnl > 0).mean() * 100))\n\n winning_round_trips = trades[trades.pnl > 0]\n losing_round_trips = trades[trades.pnl < 0]\n print('Mean return per winning round trip = {:.4}'.format(\n winning_round_trips.returns.mean()))\n print('Mean return per losing round trip = {:.4}'.format(\n losing_round_trips.returns.mean()))\n\n print('A decision is made every {:.4} days.'.format(ndays / len(trades)))\n print('{:.4} trading decisions per day.'.format(len(trades) * 1. / ndays))\n print('{:.4} trading decisions per month.'.format(\n len(trades) * 1. / (ndays / 21)))\n\n plotting.show_profit_attribution(trades)\n\n if sector_mappings is not None:\n sector_trades = round_trips.apply_sector_mappings_to_round_trips(\n trades, sector_mappings)\n plotting.show_profit_attribution(sector_trades)\n\n fig = plt.figure(figsize=(14, 3 * 6))\n\n fig = plt.figure(figsize=(14, 3 * 6))\n gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)\n\n ax_trade_lifetimes = plt.subplot(gs[0, :])\n ax_prob_profit_trade = plt.subplot(gs[1, 0])\n ax_holding_time = plt.subplot(gs[1, 1])\n ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])\n ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])\n\n plotting.plot_round_trip_life_times(trades, ax=ax_trade_lifetimes)\n\n plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)\n\n trade_holding_times = [x.days for x in trades['duration']]\n sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)\n ax_holding_time.set(xlabel='holding time in days')\n\n sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)\n ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')\n\n sns.distplot(trades.returns * 100, kde=False,\n ax=ax_pnl_per_round_trip_pct)\n ax_pnl_per_round_trip_pct.set(\n xlabel='Round-trip returns in %')\n\n gs.tight_layout(fig)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_interesting_times_tear_sheet(\n returns, benchmark_rets=None, legend_loc='best', return_fig=False):\n \"\"\"\n Generate a number of returns plots around interesting points in time,\n like the flash crash and 9/11.\n\n Plots: returns around the dotcom bubble burst, Lehmann Brothers' failure,\n 9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing\n bubble burst, EZB IR, Great Recession (August 2007, March and September\n of 2008, Q1 & Q2 2009), flash crash, April and October 2014.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n benchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n legend_loc : plt.legend_loc, optional\n The legend's location.\n return_fig : boolean, 
optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n \"\"\"\n rets_interesting = timeseries.extract_interesting_date_ranges(returns)\n\n if len(rets_interesting) == 0:\n warnings.warn('Passed returns do not overlap with any'\n 'interesting times.', UserWarning)\n return\n\n print('\\nStress Events')\n print(np.round(pd.DataFrame(rets_interesting).describe().transpose().loc[\n :, ['mean', 'min', 'max']], 3))\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n # If the strategy's history is longer than the benchmark's, limit\n # strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n bmark_interesting = timeseries.extract_interesting_date_ranges(\n benchmark_rets)\n\n num_plots = len(rets_interesting)\n # 2 plots, 1 row; 3 plots, 2 rows; 4 plots, 2 rows; etc.\n num_rows = int((num_plots + 1) / 2.0)\n fig = plt.figure(figsize=(14, num_rows * 6.0))\n gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)\n\n for i, (name, rets_period) in enumerate(rets_interesting.items()):\n\n # i=0 -> 0, i=1 -> 0, i=2 -> 1 ;; i=0 -> 0, i=1 -> 1, i=2 -> 0\n ax = plt.subplot(gs[int(i / 2.0), i % 2])\n timeseries.cum_returns(rets_period).plot(\n ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)\n timeseries.cum_returns(bmark_interesting[name]).plot(\n ax=ax, color='gray', label='SPY', alpha=0.6)\n ax.legend(['algo',\n 'SPY'],\n loc=legend_loc)\n ax.set_title(name, size=14)\n ax.set_ylabel('Returns')\n ax.set_xlabel('')\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_bayesian_tear_sheet(returns, benchmark_rets=None,\n live_start_date=None, samples=2000,\n return_fig=False, stoch_vol=False):\n \"\"\"\n Generate a number of Bayesian distributions and a Bayesian\n cone plot of returns.\n\n Plots: Sharpe distribution, annual volatility distribution,\n annual alpha distribution, beta distribution, predicted 1 and 5\n day returns distributions, and a cumulative returns cone plot.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n benchmark_rets : pd.Series or pd.DataFrame, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n live_start_date : datetime, optional\n The point in time when the strategy began live\n trading, after its backtest period.\n samples : int, optional\n Number of posterior samples to draw.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n stoch_vol : boolean, optional\n If True, run and plot the stochastic volatility model\n \"\"\"\n\n if live_start_date is None:\n raise NotImplementedError(\n 'Bayesian tear sheet requires setting of live_start_date'\n )\n\n # start by benchmark is S&P500\n fama_french = False\n if benchmark_rets is None:\n benchmark_rets = pd.DataFrame(\n utils.get_symbol_rets('SPY',\n start=returns.index[0],\n end=returns.index[-1]))\n # unless user indicates otherwise\n elif isinstance(benchmark_rets, str) and (benchmark_rets ==\n 'Fama-French'):\n fama_french = True\n rolling_window = utils.APPROX_BDAYS_PER_MONTH * 6\n benchmark_rets = timeseries.rolling_fama_french(\n returns, rolling_window=rolling_window)\n\n live_start_date = utils.get_utc_timestamp(live_start_date)\n df_train = 
returns.loc[returns.index < live_start_date]\n df_test = returns.loc[returns.index >= live_start_date]\n\n # Run T model with missing data\n print(\"Running T model\")\n previous_time = time()\n # track the total run time of the Bayesian tear sheet\n start_time = previous_time\n\n trace_t, ppc_t = bayesian.run_model('t', df_train,\n returns_test=df_test,\n samples=samples, ppc=True)\n previous_time = timer(\"T model\", previous_time)\n\n # Compute BEST model\n print(\"\\nRunning BEST model\")\n trace_best = bayesian.run_model('best', df_train,\n returns_test=df_test,\n samples=samples)\n previous_time = timer(\"BEST model\", previous_time)\n\n # Plot results\n\n fig = plt.figure(figsize=(14, 10 * 2))\n gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)\n\n axs = []\n row = 0\n\n # Plot Bayesian cone\n ax_cone = plt.subplot(gs[row, :])\n bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)\n previous_time = timer(\"plotting Bayesian cone\", previous_time)\n\n # Plot BEST results\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n # Effect size across two\n axs.append(plt.subplot(gs[row, :]))\n\n bayesian.plot_best(trace=trace_best, axs=axs)\n previous_time = timer(\"plotting BEST results\", previous_time)\n\n # Compute Bayesian predictions\n row += 1\n ax_ret_pred_day = plt.subplot(gs[row, 0])\n ax_ret_pred_week = plt.subplot(gs[row, 1])\n day_pred = ppc_t[:, 0]\n p5 = scipy.stats.scoreatpercentile(day_pred, 5)\n sns.distplot(day_pred,\n ax=ax_ret_pred_day\n )\n ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)\n ax_ret_pred_day.set_xlabel('Predicted returns 1 day')\n ax_ret_pred_day.set_ylabel('Frequency')\n ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=ax_ret_pred_day.transAxes)\n previous_time = timer(\"computing Bayesian predictions\", previous_time)\n\n # Plot Bayesian VaRs\n week_pred = (\n np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]\n p5 = scipy.stats.scoreatpercentile(week_pred, 5)\n sns.distplot(week_pred,\n ax=ax_ret_pred_week\n )\n ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)\n ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')\n ax_ret_pred_week.set_ylabel('Frequency')\n ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=ax_ret_pred_week.transAxes)\n previous_time = timer(\"plotting Bayesian VaRs estimate\", previous_time)\n\n # Run alpha beta model\n print(\"\\nRunning alpha beta model\")\n benchmark_rets = benchmark_rets.loc[df_train.index]\n trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,\n bmark=benchmark_rets,\n samples=samples)\n previous_time = timer(\"running alpha beta model\", previous_time)\n\n # Plot alpha and beta\n row += 1\n ax_alpha = plt.subplot(gs[row, 0])\n ax_beta = plt.subplot(gs[row, 1])\n if fama_french:\n sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,\n ax=ax_alpha)\n betas = ['SMB', 'HML', 'UMD']\n nbeta = trace_alpha_beta['beta'].shape[1]\n for i in range(nbeta):\n sns.distplot(trace_alpha_beta['beta'][100:, i], ax=ax_beta,\n label=betas[i])\n plt.legend()\n else:\n sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,\n ax=ax_alpha)\n sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)\n 
ax_alpha.set_xlabel('Annual Alpha')\n ax_alpha.set_ylabel('Belief')\n ax_beta.set_xlabel('Beta')\n ax_beta.set_ylabel('Belief')\n previous_time = timer(\"plotting alpha beta model\", previous_time)\n\n if stoch_vol:\n # run stochastic volatility model\n returns_cutoff = 400\n print(\n \"\\nRunning stochastic volatility model on \"\n \"most recent {} days of returns.\".format(returns_cutoff)\n )\n if df_train.size > returns_cutoff:\n df_train_truncated = df_train[-returns_cutoff:]\n _, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)\n previous_time = timer(\n \"running stochastic volatility model\", previous_time)\n\n # plot log(sigma) and log(nu)\n print(\"\\nPlotting stochastic volatility model\")\n row += 1\n ax_sigma_log = plt.subplot(gs[row, 0])\n ax_nu_log = plt.subplot(gs[row, 1])\n sigma_log = trace_stoch_vol['sigma_log']\n sns.distplot(sigma_log, ax=ax_sigma_log)\n ax_sigma_log.set_xlabel('log(Sigma)')\n ax_sigma_log.set_ylabel('Belief')\n nu_log = trace_stoch_vol['nu_log']\n sns.distplot(nu_log, ax=ax_nu_log)\n ax_nu_log.set_xlabel('log(nu)')\n ax_nu_log.set_ylabel('Belief')\n\n # plot latent volatility\n row += 1\n ax_volatility = plt.subplot(gs[row, :])\n bayesian.plot_stoch_vol(\n df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)\n previous_time = timer(\n \"plotting stochastic volatility model\", previous_time)\n\n total_time = time() - start_time\n print(\"\\nTotal runtime was {:.2f} seconds.\".format(total_time))\n\n gs.tight_layout(fig)\n\n plt.show()\n if return_fig:\n return fig\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.DataFrame",
"numpy.cumprod",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
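The tears.py source captured in the row above documents, in the create_full_tear_sheet docstring, the expected shapes of `returns` (daily noncumulative pd.Series), `positions` (dollar values per symbol plus a 'cash' column), and `transactions` (one row per trade with amount, price, symbol). As a hedged illustration that is not part of the dataset record, the sketch below builds synthetic inputs in those shapes; the tickers, dates, and numbers are made up, and the final call is left commented out since it requires pyfolio itself.

    # Illustrative only: inputs shaped as the create_full_tear_sheet docstring
    # above describes. All values are synthetic.
    import numpy as np
    import pandas as pd

    idx = pd.bdate_range("2015-01-02", periods=250)          # daily index
    returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)

    # Daily net position values in dollars, plus the required 'cash' column.
    positions = pd.DataFrame(
        {"AAPL": 10000.0, "MSFT": -5000.0, "cash": 1000.0}, index=idx
    )

    # One row per executed trade: amount, price, symbol.
    transactions = pd.DataFrame(
        {"amount": [100, -50], "price": [120.0, 45.0], "symbol": ["AAPL", "MSFT"]},
        index=idx[:2],
    )

    # Passing benchmark_rets explicitly sidesteps the SPY fetch that the
    # function performs when benchmark_rets is None (see the source above).
    benchmark = pd.Series(np.random.normal(0.0004, 0.009, len(idx)), index=idx)

    # import pyfolio
    # pyfolio.create_full_tear_sheet(returns, positions=positions,
    #                                transactions=transactions,
    #                                benchmark_rets=benchmark)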
brsr/mapproj | [
"1ec1694149a69da6393ecb94650f7164e3cfd2e1",
"1ec1694149a69da6393ecb94650f7164e3cfd2e1"
] | [
"bin/circlepack.py",
"bin/invcomparison.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 14 14:15:06 2021\n\n@author: brsr\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mapproj\nimport fiona\nfrom shapely.geometry import Point, LineString, MultiPolygon, Polygon\nimport geopandas\nimport pyproj\ngeod = pyproj.Geod(a=1, f=0)\nn = 9\na = np.arctan(1/2)/np.pi*180\nactrlpts3 = np.array([[15+0, 15+36, 15-36],\n [-a, a, a]])\n#actrlpts3 = np.array([[ 0, 0, 90],\n# [90, 0, 0]])\nctrlpoly3 = mapproj.geodesics(actrlpts3[0], actrlpts3[1], geod, includepts=True)\ntgtpts3 = mapproj.complex_to_float2d(1j*np.exp(2j/3*np.arange(3)*np.pi)).T\nbp = mapproj.Barycentric(tgtpts3)\n\ngrid3 = mapproj.Barycentric.grid(1/8)\ngridp3 = mapproj.Barycentric.gridpolys(n=9)\n#%%\ngridbary = mapproj.transeach(bp.transform, gridp3)\nconformal = mapproj.ConformalTri3(actrlpts3, tgtpts3)\ninvframe = mapproj.transeach(conformal.invtransform, gridbary)#slooooow\ninvframev = mapproj.transeach(mapproj.UnitVector.transform, invframe)\ninvframe.plot()\n\n#%%\nres = geod.inv(actrlpts3[0], actrlpts3[1],\n np.roll(actrlpts3[0], -1), np.roll(actrlpts3[1], -1))\ncornerangle = np.pi/180*(res[0] - np.roll(res[1], 1)).mean() #np.pi*2/5 #\nedgelength = res[2].mean()\n\ninitial = conformal.ctrlpts_v\nanglesumtarget = np.ones(shape=(n+1,n+1))\nanglesumtarget = np.tril(anglesumtarget, -1)[::-1]\n#anglesumtarget[..., 0] = 0\n#anglesumtarget[-1] = 0\nanglesumtarget[anglesumtarget == 0] = np.nan\nind = np.arange(0,n)\nedgeweight = np.ones(n)*2\nedgeweight[[0, -1]] = 1\nedge1 = (ind, 0)\nedge2 = (0, ind)\nedge3 = (ind,ind[::-1])\nanglesumtarget[edge1] = 1/2\nanglesumtarget[edge2] = 1/2\nanglesumtarget[edge3] = 1/2\nanglesumtarget *= 2*np.pi\nanglesumtarget[0, 0] = cornerangle\nanglesumtarget[-2, 0] = cornerangle\nanglesumtarget[0, -2] = cornerangle\n\nmsplitframe = np.array([[0, 1, 2],\n [2, 0, 1]])\nmsplit1 = np.tile(msplitframe, (3, n, n))[..., :n,:n]\nmsplit = (msplit1 + np.arange(3)[:, np.newaxis, np.newaxis]) % 3\nmsplit = msplit == 0\nmsplit[:, ~np.isfinite(anglesumtarget[:-1,:-1])] = False\n#neighbors like this\n# n n\n# n x n\n# n n\n\nneighbors = np.array([[ 1, 1, 0, -1, -1, 0],\n [ 0, -1, -1, 0, 1, 1]])\ngrindex = np.array(np.meshgrid(ind, ind))\n\nneighborhood = neighbors[..., np.newaxis, np.newaxis] + grindex[:,np.newaxis]\n\nfindex = np.array(np.where(np.isfinite(anglesumtarget))).T\nr = np.ones(shape=anglesumtarget.shape, dtype=float)*cornerangle/(2*n-2)\nr[~np.isfinite(anglesumtarget)] = np.nan\nr[[0, -2, 0], [0, 0, -2]] /= 3\n#%%\nfor i in range(128):\n x = r[:-1, :-1]\n y = r[neighborhood[0], neighborhood[1]]\n z = np.roll(y, 1, axis=0)\n if np.any(x+y+z > np.pi):\n break\n locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/\n (np.sin(x+y)*np.sin(x+z)))\n #locos_x_yz = np.arccos(((x+y)**2 + (x+z)**2 - (y+z)**2)/\n # (2*(x+y)*(x+z)))\n anglesum = np.nansum(locos_x_yz, axis=0)\n pctdiff = (anglesum/anglesumtarget[:-1,:-1])\n pctdiff /= np.nanmean(pctdiff)\n #pctdiff -= np.clip(pctdiff, 0.9, 1.1)\n #pctdiff /= np.nanmean(pctdiff)\n #ind = np.unravel_index(np.nanargmax(abs(pctdiff)), pctdiff.shape)\n r[:-1, :-1] *= pctdiff\n r *= edgelength/(r[edge1]@edgeweight)\n print(i, np.nanmax(abs(pctdiff-1)))\n if np.nanmax(abs(pctdiff-1)) < 1E-7:\n break\n #print(ind, r[ind], pctdiff[ind])\n\n#print(r[edge1]@edgeweight, edgelength)\nprint(np.round(r[:-1,:-1], 3))\n#%%0.9999999999999746 1.0000000000000149\n#%%\nfor i in range(36*256):\n ind = findex[i % findex.shape[0]]\n x = r[ind[0], ind[1]]\n y = r[neighbors[0] + 
ind[0], neighbors[1] + ind[1]]\n z = np.roll(y, 1, axis=0)\n locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/\n (np.sin(x+y)*np.sin(x+z)))\n anglesum = np.nansum(locos_x_yz, axis=0)\n pctdiff = anglesum/anglesumtarget[ind[0],ind[1]]#np.clip(, 0.8, 1.2)\n r[ind[0], ind[1]] *= pctdiff\n r *= edgelength/(r[edge1]@edgeweight)\n #print(ind, r[ind[0], ind[1]], pctdiff)\n\nprint(r[edge1]@edgeweight, np.pi/2)\nprint(np.round(r[:-1,:-1], 3))\n#%%\nvertices = np.ones((3,n+1,n+1))*np.nan\nvertices[:,0,0] = initial[:,0]\nvertices[:,-2,0] = initial[:,1]\nvertices[:,0,-2] = initial[:,2]\n\nr1 = r[edge1]\nt = (r1[:-1] + r1[1:]).cumsum()/edgelength\nt = np.concatenate([[0,], t])\ne1 = mapproj.slerp(initial[:,0], initial[:,1], t[:, np.newaxis]).T\ne2 = mapproj.slerp(initial[:,0], initial[:,2], t[:, np.newaxis]).T\ne3 = mapproj.slerp(initial[:,2], initial[:,1], t[:, np.newaxis]).T\nvertices[:,edge1[0], edge1[1]] = e1\nvertices[:,edge2[0], edge2[1]] = e2\nvertices[:,edge3[0], edge3[1]] = e3\n#%%\nfor i in range(1, n-1):\n for j in range(1, n-i-1):\n index = np.array([i, j])\n indexnb = index[:,np.newaxis] + neighbors\n vertexnb = vertices[:, indexnb[0], indexnb[1]]\n rnb = r[indexnb[0], indexnb[1]]\n ri = r[i, j]\n filled = np.all(np.isfinite(vertexnb), axis=0)\n vertexnb = vertexnb[:, filled]\n rnb = rnb[filled]\n cl = np.cos(rnb+ri)\n lq = np.linalg.lstsq(vertexnb.T, cl)\n v = lq[0]\n norm = np.linalg.norm(v)\n v /= norm\n vertices[:, i, j] = v\n print(i, j, filled.sum(), lq, norm)\n\nvindex = np.all(np.isfinite(vertices), axis=0)\nresult = mapproj.UnitVector.invtransform_v(vertices)\n#%%\nfig, axes = plt.subplots(ncols = 3, figsize=(10, 8), sharex=True, sharey=True)\naxes[0].plot(vertices[0], vertices[1])\naxes[1].plot(vertices[0], vertices[2])\naxes[2].plot(vertices[1], vertices[2])\nfor ax in axes:\n ax.set_aspect('equal')\n#%%\nfig, ax = plt.subplots(figsize=(10, 8))\ninvframe.plot(ax=ax)\nax.scatter(*result, color='k')\nax.scatter(*actrlpts3, color='y')\n#%%\ntriframe = np.array([[[0,0,1],\n [0,1,0]],\n [[1,0,1],\n [1,1,0]]])\ntris = []\nfor i in range(n-1):\n for j in range(n-i-1):\n for tf in triframe:\n xy = result[:,i+tf[0], j+tf[1]]\n if np.all(np.isfinite(xy)):\n tris.append(Polygon(xy.T))\n\ngptris = geopandas.GeoSeries(tris)\n#use geopandas.intersect to determine which grid cell a point lands in",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 22 20:02:25 2020\n\n@author: brsr\n\"\"\"\nimport pyproj\npyproj.datadir.set_data_dir('/usr/local/share/proj')\nimport fiona\nimport geopandas\nimport pandas as pd\n#import shapely\nfrom shapely.geometry import Point, LineString, MultiPolygon, Polygon\n#import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import minimize_scalar#minimize, root_scalar\nimport copy\n\n#import os\n#os.chdir('Code/mapproj')\nimport mapproj\n\ngeod = pyproj.Geod(a=6371, f=0)\nworld = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))\na = np.arctan(1/2)/np.pi*180\nactrlpts3 = np.array([[15+0, 15+36, 15-36],\n [-a, a, a]])\nctrlpoly3 = mapproj.geodesics(actrlpts3[0], actrlpts3[1], geod, includepts=True)\na = 180/np.pi * np.arctan(1/np.sqrt(2))\nactrlpts4 = np.array([[-30, 60, 60, -30],\n [-a, -a, a, a]])\nctrlpoly4 = mapproj.geodesics(actrlpts4[0], actrlpts4[1], geod, includepts=True)\n\nctrlarea3, _ = geod.polygon_area_perimeter(actrlpts3[0],\n actrlpts3[1])\nctrlarea4, _ = geod.polygon_area_perimeter(actrlpts4[0],\n actrlpts4[1])\n\ntgtpts3 = mapproj.complex_to_float2d(1j*np.exp(2j/3*np.arange(3)*np.pi)).T\nbp = mapproj.Barycentric(tgtpts3)\n\ngrid3 = mapproj.Barycentric.grid()\ngrid4 = mapproj.UV.grid()\n\ngridp3 = mapproj.Barycentric.gridpolys()\ngridp4 = mapproj.UV.gridpolys()\n\ntestshape4 = geopandas.GeoSeries(Polygon(shell=[(0,0),(0.25,0),(0.25,0.25),\n (0.75,0.25),(0.75,0.5),(0.25,0.5),\n (0.25,0.75),(1,0.75),(1,1),(0,1)]))\n#testshape3 = mapproj.transeach(bp.invtransform, testshape4)\ntestshape3 = geopandas.GeoSeries(Polygon(shell=[(1,0,0),\n (0.75,0.25,0),\n (0.5,0.25,0.25),\n (0.5,0.5,0),\n (0.25,0.25,0.5),\n (0.25,0.75,0),\n (0,1,0),\n (0,0,1)]))\n#%% optimize\nprojs = {}\nnctrlpts = {}\n#invbary = {}\ninvframe = {}\ntestshapet = {}\n\nprojs_k = {'Naive Slerp Tri': mapproj.NSlerpTri(actrlpts3, k=1),#add different k vals\n 'Naive Slerp Tri~': mapproj.NSlerpTri(actrlpts3, k=1, exact=False),#add different k vals \n 'Naive Slerp Quad': mapproj.NSlerpQuad(actrlpts4, k=1),\n 'Naive Slerp Quad~': mapproj.NSlerpQuad(actrlpts4, k=1, exact=False),\n 'Naive Slerp Quad 2': mapproj.NSlerpQuad2(actrlpts4, k=1),\n 'Naive Slerp Quad 2~': mapproj.NSlerpQuad2(actrlpts4, k=1, exact=False),\n 'Elliptical': mapproj.EllipticalQuad(actrlpts4, k=1),\n 'Elliptical~': mapproj.EllipticalQuad(actrlpts4, k=1, exact=False),\n }\nfor name in projs_k:\n mp = projs_k[name]\n i = mp.nctrlpts\n #nctrlpts[name] = i\n if i == 3:\n gridp = gridp3\n else:\n gridp = gridp4\n def objective_a(k):\n mp.k = k\n iv = mapproj.transeach(mp.invtransform, gridp)\n arealist = []\n for p in iv.geometry:\n area, _ = geod.geometry_area_perimeter(p)\n arealist.append(area)\n return max(arealist)/min(arealist)\n def objective_l(k):\n mp.k = k\n iv = mapproj.transeach(mp.invtransform, gridp)\n alist = []\n for p in iv.geometry:\n coords = np.array(p.exterior.xy)\n l = geod.line_lengths(coords[0], coords[1])\n aspect = max(l)/min(l)\n alist.append(aspect) \n return max(alist)\n def objective_l2(k):\n mp.k = k\n iv = mapproj.transeach(mp.invtransform, gridp)\n alist = []\n for p in iv.geometry:\n coords = np.array(p.exterior.xy)\n l = geod.line_lengths(coords[0], coords[1])\n aspect = max(l)/min(l)\n alist.append(aspect) \n return np.mean(alist)\n objs = [objective_a, objective_l, objective_l2]\n for obj in objs:\n res = minimize_scalar(obj, bracket=[0,1])\n mp2 = copy.copy(mp)\n mp2.k = res.x\n print(name, res.x)\n if 
np.round(res.x, 7) not in [0,1]:\n projs[name + ' ' + str(mp2.k)] = mp2\n#%%\nprojs.update({'Areal': mapproj.Areal(actrlpts3),\n 'Fuller explicit': mapproj.FullerEq(actrlpts3),\n #'Fuller': mapproj.Fuller(actrlpts3, tweak=False),\n #'Fuller Tweaked': mapproj.Fuller(actrlpts3, tweak=True),\n 'Bisect': mapproj.BisectTri(actrlpts3),\n 'Bisect2': mapproj.BisectTri2(actrlpts3),\n 'Snyder Equal-Area 3': mapproj.SnyderEA3(actrlpts3),\n #'Snyder Symmetrized': mapproj.SnyderEASym(actrlpts3),#?\n #'Alfredo': mapproj.Alfredo(actrlpts3),#polygonal?\n #'Alfredo Tweaked': mapproj.Alfredo(actrlpts3, tweak=True),#not polygonal\n #'SEA': mapproj.SnyderEA(actrlpts3),\n 'Reverse Fuller': mapproj.ReverseFuller(actrlpts3),\n 'Reverse Fuller Tweak': mapproj.ReverseFuller(actrlpts3, tweak=True),\n 'Naive Slerp Tri 0': mapproj.NSlerpTri(actrlpts3, k=0),#add different k vals\n 'Naive Slerp Tri 1': mapproj.NSlerpTri(actrlpts3, k=1),#add different k vals\n 'Naive Slerp Tri~ 1': mapproj.NSlerpTri(actrlpts3, k=1, exact=False),#add different k vals \n 'Crider': mapproj.CriderEq(actrlpts4),\n #'Naive Slerp Quad k0': mapproj.NSlerpQuad(actrlpts4, k=0),\n 'Naive Slerp Quad 1': mapproj.NSlerpQuad(actrlpts4, k=1),\n 'Naive Slerp Quad~ 1': mapproj.NSlerpQuad(actrlpts4, k=1, exact=False),\n 'Naive Slerp Quad 2 0': mapproj.NSlerpQuad2(actrlpts4, k=0),\n 'Naive Slerp Quad 2 1': mapproj.NSlerpQuad2(actrlpts4, k=1),\n 'Naive Slerp Quad 2~ 1': mapproj.NSlerpQuad2(actrlpts4, k=1, exact=False),\n 'Elliptical 0': mapproj.EllipticalQuad(actrlpts4, k=0),\n 'Elliptical 1': mapproj.EllipticalQuad(actrlpts4, k=1),\n 'Elliptical~ 1': mapproj.EllipticalQuad(actrlpts4, k=1, exact=False),\n 'Snyder Equal-Area 4': mapproj.SnyderEA4(actrlpts4)\n })\n\nfor name in projs:\n print(name)\n mp = projs[name]\n i = mp.nctrlpts\n nctrlpts[name] = i\n #invbary[name] = mapproj.transeach(mp.invtransform, bary)\n if i == 3:\n invframe[name] = mapproj.transeach(mp.invtransform, gridp3)\n testshapet[name] = mapproj.transeach(mp.invtransform, testshape3)\n elif i == 4:\n invframe[name] = mapproj.transeach(mp.invtransform, gridp4)\n testshapet[name] = mapproj.transeach(mp.invtransform, testshape4)\n#%%\ntestshapez3 = mapproj.transeach(bp.transform, testshape3)\ngridpz3 = mapproj.transeach(bp.transform, gridp3)\nprojs2 = {'Conformal': mapproj.ConformalTri3(actrlpts3, tgtpts3),#slow\n #'Linear Trimetric': mapproj.LinearTrimetric(actrlpts3, geod),#no\n }\n \nfor name in projs2:\n print(name)\n mp = projs2[name]\n i = mp.nctrlpts\n nctrlpts[name] = i\n #invbary[name] = mapproj.transeach(mp.invtransform, bary)\n if i == 3:\n invframe[name] = mapproj.transeach(mp.invtransform, gridpz3)\n testshapet[name] = mapproj.transeach(mp.invtransform, testshapez3)\n elif i == 4:\n invframe[name] = mapproj.transeach(mp.invtransform, gridpz4)\n testshapet[name] = mapproj.transeach(mp.invtransform, testshapez4)\n#%%\ncrs = {'proj': 'longlat', 'datum': 'WGS84'}\ncrs3= {'proj': 'gnom',\n 'lat_0': 10.812316963571709,\n 'lon_0': 15}\nctrlpts3 = mapproj.arraytoptseries(actrlpts3)\nctrlpts3.crs = crs\ntgtptsg3 = ctrlpts3.to_crs(crs3)\nbg = mapproj.Barycentric(mapproj.ptseriestoarray(tgtptsg3))\ngridpzz3 = mapproj.transeach(bg.transform, gridp3)\ngridpzz3.crs = crs3\ntestshapezz3 = mapproj.transeach(bg.transform, testshape3)\ntestshapezz3.crs = crs3\nname = 'Gnomonic 3'\ninvframe[name] = gridpzz3.to_crs(crs)\ntestshapet[name] = testshapezz3.to_crs(crs)\nnctrlpts[name] = 3\n\ncrs4= {'proj': 'gnom',\n 'lat_0': 0,\n 'lon_0': 15}\nctrlpts4 = mapproj.arraytoptseries(actrlpts4)\nctrlpts4.crs 
= crs\ntgtptsg4 = ctrlpts4.to_crs(crs4)\nscale = np.array(tgtptsg4[1].xy[0])\ndef transform_01(x, y, scale=scale):\n return (2*x - 1)*scale, (2*y - 1)*scale\ngridpzz4 = mapproj.transeach(transform_01, gridp4)\ngridpzz4.crs = crs4\ntestshapezz4 = mapproj.transeach(transform_01, testshape4)\ntestshapezz4.crs = crs4\nname = 'Gnomonic 4'\ninvframe[name] = gridpzz4.to_crs(crs)\ntestshapet[name] = testshapezz4.to_crs(crs)\nnctrlpts[name] = 4\n#%%\nms = ['area', 'lengthrat']#'perim', 'anglediff',\n#atotal, ptotal = geod.polygon_area_perimeter(*actrlpts)\nareas = {}\nperims = {}\nangles = {}\nlengths = {}\ncycle3 = [0, 1, 2, 0]\ncycle4 = [0, 1, 2, 3, 0]\nfor name in invframe:\n iv = invframe[name]\n arealist = []\n perimlist = []\n anglelist = []\n lengthlist = []\n i = nctrlpts[name]\n for p in iv.geometry:\n area, perim = geod.geometry_area_perimeter(p)\n arealist.append(area)\n perimlist.append(perim)\n coords = np.array(p.exterior.xy)#[:]\n# cycle = cycle3 if i == 3 else cycle4\n l = geod.line_lengths(coords[0], coords[1])\n f, b, _ = geod.inv(coords[0], coords[1],\n np.roll(coords[0], -1), np.roll(coords[1], -1))\n angle = (np.roll(f, 1) - np.roll(b, -1)) % 360\n anglelist.append(angle)\n lengthlist.append(l)\n ctrlarea = ctrlarea3 if i == 3 else ctrlarea4\n areas[name] = np.array(arealist)/ctrlarea*len(iv) - 1\n perims[name] = np.array(perimlist)\n angles[name] = np.array(anglelist)\n lengths[name] = np.array(lengthlist)\n\nanglediff = {}\nlengthrat = {}\nfor name in lengths:\n angle = angles[name]\n anglediff[name] = angle.max(axis=1)\n length = lengths[name]\n lengthrat[name] = length.max(axis=1)/length.min(axis=1) - 1\n\n#ms = ['Areas', 'Perimeters', 'Angles', 'Lengths']\nfor name in invframe:\n iv = invframe[name]\n iv = geopandas.GeoDataFrame(geometry=iv.geometry, data={\n 'area': areas[name],\n 'perim': perims[name],\n 'anglediff': anglediff[name],\n 'lengthrat': lengthrat[name]})\n invframe[name] = iv\n \n#%% plots\nfor name in invframe:\n print(name)\n n = nctrlpts[name]\n ts = testshapet[name]\n ib = invframe[name]\n\n fig, axes = plt.subplots(ncols=3, figsize=(10, 4))\n fig.suptitle(name)\n ax = axes[0]\n\n ts.plot(ax=ax)\n #ib.plot(ax=ax, facecolor=None, edgecolor='k')\n\n axes1 = axes[1:]\n for mn, ax in zip(ms, axes1):\n ib.plot(column=mn, ax=ax, legend=True)\n ax.set_title(mn)\n\n for ax in axes:\n if n == 3:\n ctrlpoly3.plot(ax=ax, color='g')\n elif n == 4:\n ctrlpoly4.plot(ax=ax, color='g')\n\n #ax.legend(loc='best')\n\n#%% table\nprojnames = areas.keys()\nindex = pd.MultiIndex.from_product([projnames, ms],\n names=['Projection', 'Measure'])\ncols = ['min', 'max', 'measure']#'q1', 'q99',\ndat = pd.DataFrame(index = index, columns=cols)\n\nfor name, iv in invframe.items():\n a = iv['area']\n dat.loc[name, 'area'] = [a.min(), a.max(),\n (a.max() + 1) / (a.min() + 1) - 1]\n b = iv.lengthrat\n dat.loc[name, 'lengthrat'] = [b.min(), b.max(), b.mean()]\n\nns = np.array([x for x in nctrlpts.values()])\nindex = ns == 3\n#%% efficiency\nareas = dat.xs('area', level=1).measure\nlens = dat.xs('lengthrat', level=1).measure\nareasi = areas[index]\nlensi = lens[index]\nareasni = areas[~index]\nlensni = lens[~index]\n\nefi = np.ones(len(areasi), dtype=bool)\nfor a,l in zip(areasi, lensi):\n efi[(areasi > a) & (lensi > l)] = False\n \nefni = np.ones(len(areasni), dtype=bool)\nfor a,l in zip(areasni, lensni):\n efni[(areasni > a) & (lensni > l)] = False\n \n#%%\nfor m in ms:\n print(m)\n print(dat.xs(m, level=1)[index][efi].sort_values(['measure', 'max']))\nfor m in ms:\n print(m)\n 
print(dat.xs(m, level=1)[~index][efni].sort_values(['measure', 'max']))\n\n#in limit as grid cells get small\n#icosahedron:\n#grid cells near vertex has interior angles 2pi/5, 3pi/10, 3pi/10\n#so by law of sines\n#a/sin(2pi/5) = b/sin(3pi/10)\n#thus benchmark length ratio is\nb3 = np.sin(2*np.pi/5)/np.sin(3*np.pi/10) - 1\ncm3 = b3*3/len(gridp3)\n\n#%%\nfig, axes = plt.subplots(nrows = 2, figsize=(10, 8))\nax1, ax2 = axes\nax1.scatter(areasi +1, lensi, c=efi)\nfor n, x, y in zip(areas.index[index][efi], areas[index][efi] + 1, \n lens[index][efi]):\n ax1.annotate(n, (x, y), ha='center', va='bottom')\nax2.scatter(areasni +1, lensni, c=efni)\nfor n, x, y in zip(areas.index[~index][efni], areas[~index][efni] + 1, \n lens[~index][efni]):\n ax2.annotate(n, (x, y), ha='center', va='bottom')\n\nax1.set_xscale('log')\nax2.set_xscale('log')\n"
] | [
[
"numpy.arctan",
"numpy.concatenate",
"numpy.round",
"numpy.any",
"numpy.nanmean",
"numpy.roll",
"numpy.tril",
"numpy.arange",
"numpy.sin",
"numpy.nansum",
"numpy.linalg.lstsq",
"numpy.meshgrid",
"numpy.array",
"numpy.isfinite",
"matplotlib.pyplot.subplots",
"numpy.tile",
"numpy.ones",
"numpy.cos",
"numpy.linalg.norm"
],
[
"numpy.sqrt",
"numpy.arctan",
"numpy.arange",
"scipy.optimize.minimize_scalar",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.sin",
"numpy.round",
"numpy.mean",
"pandas.MultiIndex.from_product",
"numpy.array",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
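The circlepack.py script in the mapproj row above iterates circle radii so that, around each interior vertex, the angles between tangent neighbour circles sum to a target; the angle at a circle of radius x between neighbours of radii y and z is taken from the spherical law of cosines on the triangle with sides x+y, x+z, y+z (the planar variant is left commented out in that source). The standalone check below is illustrative, not part of the dataset record: it restates that formula and its flat-space limit so the step can be verified in isolation.

    # Illustrative only: the angle formula used inside the circle-packing loop,
    # plus its planar (small-radius) limit for a sanity check.
    import numpy as np

    def spherical_angle(x, y, z):
        # Angle opposite side (y+z) in a spherical triangle with sides x+y, x+z.
        num = np.cos(y + z) - np.cos(x + y) * np.cos(x + z)
        den = np.sin(x + y) * np.sin(x + z)
        return np.arccos(num / den)

    def planar_angle(x, y, z):
        # Same angle from the ordinary law of cosines (flat-space limit).
        a, b, c = x + y, x + z, y + z
        return np.arccos((a**2 + b**2 - c**2) / (2 * a * b))

    x, y, z = 1e-3, 2e-3, 1.5e-3        # small radii: sphere ~ plane
    print(spherical_angle(x, y, z))      # approximately equal to...
    print(planar_angle(x, y, z))         # ...the planar value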
odedzewi/coremltools | [
"fdd5630c423c0fc4f1a04c3f5a3c17b808a15505",
"fdd5630c423c0fc4f1a04c3f5a3c17b808a15505"
] | [
"coremltools/converters/mil/mil/ops/defs/scatter_gather.py",
"coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py"
] | [
"# Copyright (c) 2020, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nimport numpy as np\nimport numbers\n\nfrom coremltools.converters.mil.mil import Operation, types\nfrom coremltools.converters.mil.mil.input_type import (\n DefaultInputs,\n InputSpec,\n IntInputType,\n IntTensorInputType,\n TensorInputType,\n StringInputType,\n)\nfrom coremltools.converters.mil.mil.operation import precondition\nfrom coremltools.converters.mil.mil.ops.defs._op_reqs import register_op\nfrom coremltools.converters.mil.mil.types.symbolic import is_compatible_symbolic_vector, is_symbolic\n\nfrom coremltools.converters.mil.mil.operation import (\n SYMBOL,\n VALUE\n)\n\n\n@register_op(doc_str=\"\")\nclass gather(Operation):\n \"\"\"\n Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,\n similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.\n\n * If ``indices`` is scalar (0-D):\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]\n\n Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.\n\n * If ``indices`` is 1-D tensor:\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]\n\n The output has rank ``rank(x)``.\n\n * In general:\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]\n\n Where ``M = rank(x)``.\n\n Parameters\n ----------\n x: tensor<\\*D,T> (Required)\n indices: tensor<\\*N,i32> (Required)\n * Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``.\n axis: const i32 (Optional. Default=``0``)\n * Negative axis is supported.\n\n Returns\n -------\n tensor<\\*K,T>\n * Where ``K = D[:axis] + N + D[axis+1:]``.\n\n Attributes\n ----------\n T: fp32\n\n References\n ----------\n See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.\n\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntInputType(),\n axis=IntInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n )\n\n def __init__(self, **kwargs):\n super(gather, self).__init__(**kwargs)\n\n @precondition(allow=VALUE | SYMBOL)\n def value_inference(self):\n x = self.x.sym_val\n indices = self.indices.val\n if indices is None:\n # only allow x to be symbolic. 
indices cannot.\n return None\n scalar_indices = isinstance(indices, numbers.Integral)\n axis = self.axis.val\n if scalar_indices:\n res = np.take(x, [indices], axis)\n res2 = np.squeeze(res, axis=axis)\n if isinstance(res2, np.ndarray) and len(res2.shape) == 0:\n # res2 is a scalar, but represented as np.array(symbol,\n # dtype=np.object) which np.squeeze can't remove.\n return res2.item()\n return res2\n return np.take(x, indices, axis)\n\n def type_inference(self):\n out_type = self.x.dtype\n\n if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n output_rank = self.x.rank - 1 + self.indices.rank\n if output_rank == 0:\n # output scalar\n return out_type\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.x.rank\n out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]\n return types.tensor(out_type, out_shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``\n by operation ``mode``.\n\n Example: ``mode == update``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Example: ``mode == add``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +\n .. math::\n x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n data: tensor<\\*D, T> (Required)\n indices: tensor<[C],T> (Required)\n * 1-D tensor.\n updates: tensor<\\*K, T> (Required)\n * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.\n axis: const i32 (Optional)\n * Default to ``0``.\n mode: const string (Optional)\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n * Default value is ``update``.\n\n Returns\n -------\n tensor<\\*D, T>\n * With the same type and shape as input ``x``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n axis=IntInputType(const=True, optional=True),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter, self).__init__(**kwargs)\n\n def type_inference(self):\n if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.data.rank\n expected_updates_shape = (\n self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]\n )\n\n err = \"Updates shape {} is incorrect. 
It should be {}.\".format(self.updates.shape, expected_updates_shape)\n assert is_compatible_symbolic_vector(\n self.updates.shape, tuple(expected_updates_shape)\n ), err\n\n return self.data.sym_type\n\n\n@register_op(doc_str=\"\")\nclass gather_along_axis(Operation):\n \"\"\"\n Take the values along ``axis`` at locations ``indices``.\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n x: tensor<\\*D, T> (Required)\n indices: tensor<\\*K, T> (Required)\n * ``rank(indices) == rank(x)``.\n axis: const i32 (Optional):\n * Default to ``0``.\n\n Returns\n -------\n tensor<\\*D, T>:\n * Output tensor has the same shape as ``indices``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntTensorInputType(),\n axis=IntInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n )\n\n def __init__(self, **kwargs):\n super(gather_along_axis, self).__init__(**kwargs)\n\n @precondition(allow=VALUE)\n def value_inference(self):\n x = self.x.val\n indices = self.indices.val\n axis = self.axis.val\n return np.take_along_axis(x, indices, axis)\n\n def type_inference(self):\n\n if self.x.rank != self.indices.rank:\n raise ValueError(\n \"Rank mismatch between input and indices. \\\n Input rank: {}, indices rank: {}\".format(\n self.x.rank, self.indices.rank\n )\n )\n\n if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.x.rank\n\n for i in range(self.x.rank):\n if i != axis:\n assert self.x.shape[i] == self.indices.shape[i]\n\n return types.tensor(self.x.dtype, self.indices.shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter_along_axis(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``\n by operation ``mode``.\n\n Example: ``mode == update``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Example: ``mode == add``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +\n .. math::\n x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. 
math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n data: tensor<\\*D, T> (Required)\n indices: tensor<\\*K,T> (Required)\n * ``rank(indices) == rank(data)``.\n updates: tensor<\\*K, T> (Required)\n * Must be the same shape as ``indices``.\n axis: const i32 (Optional)\n * Default to ``0``.\n mode: const string (Optional)\n * Default to ``add``.\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n\n Returns\n -------\n tensor<\\*D, T>\n * With the same type and shape as input ``x``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n axis=IntInputType(const=True, optional=True),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter_along_axis, self).__init__(**kwargs)\n\n @precondition(allow=VALUE)\n def value_inference(self):\n data = np.copy(self.data.val)\n indices = self.indices.val\n updates = self.updates.val\n axis = self.axis.val\n np_output = data\n np.put_along_axis(np_output, indices, updates, axis=axis)\n return np_output\n\n def type_inference(self):\n if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.data.rank\n\n assert is_compatible_symbolic_vector(\n self.indices.shape, self.updates.shape\n )\n assert self.data.rank == self.indices.rank\n for i in range(self.data.rank):\n if i != axis:\n assert self.data.shape[i] == self.indices.shape[i]\n\n return self.data.sym_type\n\n\n@register_op(doc_str=\"\")\nclass gather_nd(Operation):\n \"\"\"\n Gather slices from ``x`` according to ``indices``, similar to `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.\n\n The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice\n of ``x``:\n\n .. 
math::\n output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]\n\n Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank\n ``rank(x) - indices.shape[-1]``.\n\n Parameters\n ----------\n x: tensor<\\*D,T> (Required)\n indices: tensor<\\*K,i32> (Required)\n\n Returns\n -------\n tensor<\\*V,T>\n * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.\n\n Attributes\n ----------\n T: fp32\n\n References\n ----------\n See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntTensorInputType(),\n )\n\n def __init__(self, **kwargs):\n super(gather_nd, self).__init__(**kwargs)\n\n def type_inference(self):\n assert self.indices.shape[-1] <= self.x.rank\n out_type = self.x.dtype\n out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]\n return types.tensor(out_type, out_shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter_nd(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices``.\n\n The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a\n slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``\n has rank ``rank(data) - indices.shape[-1]``.\n\n * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and\n the op updates ``output`` as follows:\n\n .. math::\n output[indices[i_0, ..., i_{K-2}]]= updates[indices[i_0, ..., i_{K-2}]]\n\n * Example: ``mode == add``. The update rule is:\n\n .. math::\n output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]\n\n Parameters\n ----------\n data: tensor<\\*D,T> (Required)\n indices: tensor<\\*K,i32> (Required)\n updates: tensor<\\*K, T> (Required)\n * Must be the shape as ``K[:-1]+data.shape[K[-1]:]``.\n mode: const string (Optional)\n * Default to ``add``.\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n\n Returns\n -------\n tensor<\\*D,T>\n * A tensor with the same shape and type as ``data``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter_nd, self).__init__(**kwargs)\n\n def type_inference(self):\n assert self.indices.shape[-1] <= self.data.rank\n expected_updates_shape = (\n self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]\n )\n assert is_compatible_symbolic_vector(\n self.updates.shape, tuple(expected_updates_shape)\n )\n return self.data.sym_type\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2020, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nfrom coremltools.converters.mil.mil.passes.pass_registry import register_pass\nfrom coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass\nfrom coremltools.converters.mil.input_types import ImageType\n# import mil internal ops to add it to the builder\nfrom coremltools.converters.mil.mil.ops import defs as _ops\nfrom coremltools.converters.mil.mil import Builder as mb\nfrom coremltools.converters.mil.mil.types import nptype_from_builtin\n\nimport numpy as np\n\n@register_pass(namespace=\"mil_backend\")\nclass insert_image_preprocessing_ops(AbstractGraphPass):\n \"\"\"\n Insert preprocessing ops, right after the input if its of type Image\n \"\"\"\n def apply(self, prog):\n for f_name, f in prog.functions.items():\n if f_name == 'main':\n _insert_image_preprocessing_ops(f, prog)\n\n\ndef _insert_image_preprocessing_ops(block, prog):\n input_types = list(prog.main_input_types)\n\n for input_type in input_types:\n if isinstance(input_type, ImageType):\n if input_type.name not in block.inputs:\n continue\n\n input_var = block.inputs[input_type.name]\n placeholder_op = block.placeholder_inputs[input_type.name]\n first_op = block.operations[0]\n old_var = placeholder_op.outputs[0]\n has_bias = np.any(np.array(input_type.bias) != 0)\n with block:\n last_output = input_var\n input_nptype = nptype_from_builtin(type(last_output.dtype()))\n if input_type.scale != 1:\n last_output = mb.mul(x=last_output,\n y=np.array(input_type.scale, dtype=input_nptype),\n before_op=first_op, name=input_var.name + \"__scaled__\")\n if has_bias:\n if input_type.color_layout == \"G\":\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype),\n before_op=first_op, name=input_var.name + \"__biased__\")\n else:\n if len(last_output.shape) == 3:\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]),\n before_op=first_op, name=input_var.name + \"__biased__\")\n elif len(last_output.shape) == 4:\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]),\n before_op=first_op, name=input_var.name + \"__biased__\")\n else:\n raise TypeError(\"Unsupported rank for image input type.\")\n\n if last_output != input_var:\n block.replace_uses_of_var_after_op(anchor_op=last_output.op,\n old_var=old_var,\n new_var=last_output)\n"
] | [
[
"numpy.take_along_axis",
"numpy.take",
"numpy.put_along_axis",
"numpy.squeeze",
"numpy.copy"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
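Note: the gather_nd docstring in the coremltools row above states the output-shape rule ``V = K[:-1] + D[K[-1]:]``. Below is a minimal NumPy sketch of that rule (the helper name gather_nd_reference is ours, not part of coremltools), useful only as a sanity check of the type_inference logic.

import numpy as np

def gather_nd_reference(x, indices):
    # Each indices[i_0, ..., i_{K-2}] addresses a slice of x along its leading axes.
    indices = np.asarray(indices)
    out_shape = indices.shape[:-1] + x.shape[indices.shape[-1]:]
    rows = indices.reshape(-1, indices.shape[-1])
    gathered = [x[tuple(row)] for row in rows]
    return np.stack(gathered).reshape(out_shape)

x = np.arange(2 * 3 * 4).reshape(2, 3, 4)
idx = np.array([[0, 1], [1, 2]])              # K = 2, indices.shape[-1] = 2
out = gather_nd_reference(x, idx)
assert out.shape == (2, 4)                    # K[:-1] + D[K[-1]:] = (2,) + (4,)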
Tabor-Research-Group/ChemOS | [
"50117f572e95e68dc4dccb624cedb28dbfc6e419",
"50117f572e95e68dc4dccb624cedb28dbfc6e419",
"50117f572e95e68dc4dccb624cedb28dbfc6e419"
] | [
"ParamGenerator/Spearmint/spearmint/utils/compression.py",
"ParamGenerator/Phoenics/ObservationParser/observation_parser.py",
"ParamGenerator/Phoenics/BayesianNeuralNetwork/pymc3_interface.py"
] | [
"# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\nimport zlib\nimport numpy as np\n\nCOMPRESS_TYPE = 'compressed array'\n\n# TODO: see if there is a better way to encode this than base64\n# It takes about 0.65 seconds to compress a 1000x1000 array on a 2011 Macbook air\ndef compress_array(a):\n return {'ctype' : COMPRESS_TYPE,\n 'shape' : list(a.shape),\n 'value' : (zlib.compress(a))}#.encode('base64'))}\n\n# It takes about 0.15 seconds to decompress a 1000x1000 array on a 2011 Macbook air\ndef decompress_array(a):\n# return np.fromstring(zlib.decompress(a['value'].decode('base64'))).reshape(a['shape'])\n return np.fromstring(zlib.decompress(a['value'])).reshape(a['shape'])\n\ndef compress_nested_container(u_container):\n if isinstance(u_container, dict):\n cdict = {}\n for key, value in u_container.items():\n if isinstance(value, dict) or isinstance(value, list):\n cdict[key] = compress_nested_container(value)\n else:\n if isinstance(value, np.ndarray):\n cdict[key] = compress_array(value)\n else:\n cdict[key] = value\n\n return cdict\n elif isinstance(u_container, list):\n clist = []\n for value in u_container:\n if isinstance(value, dict) or isinstance(value, list):\n clist.append(compress_nested_container(value))\n else:\n if isinstance(value, np.ndarray):\n clist.append(compress_array(value))\n else:\n clist.append(value)\n\n return clist\n\ndef decompress_nested_container(c_container):\n if isinstance(c_container, dict):\n# if c_container.has_key('ctype') and c_container['ctype'] == COMPRESS_TYPE:\n if 'ctype' in c_container.keys() and c_container['ctype'] == COMPRESS_TYPE:\n try:\n return decompress_array(c_container)\n except:\n raise Exception('Container does not contain a valid array.')\n else:\n udict = {}\n for key, value in c_container.items():\n if isinstance(value, dict) or isinstance(value, list):\n udict[key] = decompress_nested_container(value)\n else:\n udict[key] = value\n\n return udict\n elif isinstance(c_container, list):\n ulist = []\n for value in c_container:\n if isinstance(value, dict) or isinstance(value, list):\n ulist.append(decompress_nested_container(value))\n else:\n ulist.append(value)\n\n return ulist\n\ndef test_compression():\n b = np.random.randn(10)\n c = np.random.randn(5,1)\n e = np.random.randn(2,3)\n f = np.random.randn(1,2)\n g = np.random.randn(4,2,3)\n\n d = {'a': {'b': b, 'c': c}, 'e': [e,[f,g]]}\n\n dc = compress_nested_container(d)\n du = decompress_nested_container(dc)\n\n v1 = [d['a']['b'], d['a']['c'], d['e'][0], d['e'][1][0], d['e'][1][1]]\n v2 = [du['a']['b'], du['a']['c'], du['e'][0], du['e'][1][0], du['e'][1][1]]\n\n comp = [np.all(i==j) for i,j in zip(v1,v2)]\n\n return np.all(comp)\n\nif __name__ == '__main__':\n test_compression()\n",
"#!/usr/bin/env python\n\n__author__ = 'Florian Hase'\n\n#========================================================================\n\nimport numpy as np \n\nfrom ObservationParser.hierarchies import HierarchicalLossShaper\nfrom Utils.utils import VarDictParser, ObsDictParser\n\n#========================================================================\n\ndef heavyside(value):\n\tbeta = 50.\n\targ = - beta * value\n\treturn 1 / (1. + np.exp(arg))\n\n#========================================================================\n\nclass ObservationParser(VarDictParser, ObsDictParser):\n\n\tdef __init__(self, var_dicts, obs_dicts, softness = 0.01):\n\t\tVarDictParser.__init__(self, var_dicts)\n\t\tObsDictParser.__init__(self, obs_dicts)\n\n\t\tself.softness = softness\n\t\tself.loss_shaper = HierarchicalLossShaper(self.loss_tolerances, self.softness)\n\n\t\tself.all_lower = []\n\t\tself.all_upper = []\n\t\tfor var_index, full_var_dict in enumerate(self.var_dicts):\n\t\t\tvar_dict = full_var_dict[self.var_names[var_index]]\n\t\t\tif 'low' in var_dict:\n\t\t\t\tself.all_lower.extend([var_dict['low'] for i in range(self.var_sizes[var_index])])\n\t\t\t\tself.all_upper.extend([var_dict['high'] for i in range(self.var_sizes[var_index])])\n\t\t\telse:\n\t\t\t\tself.all_lower.extend([0. for i in range(self.var_sizes[var_index])])\n\t\t\t\tself.all_upper.extend([len(var_dict['options']) for i in range(self.var_sizes[var_index])])\n\t\tself.all_lower = np.array(self.all_lower)\n\t\tself.all_upper = np.array(self.all_upper)\n\n\t\tself.soft_lower = self.all_lower + 0.1 * (self.all_upper - self.all_lower)\n\t\tself.soft_upper = self.all_upper - 0.1 * (self.all_upper - self.all_lower)\n\t\tself.soft_lower[self._cats] = -10**6\n\t\tself.soft_upper[self._cats] = 10**6\n\n\n\n\tdef _get_mirrored_samples(self, sample):\n\t\t# first, we get the indices\n\t\tlower_indices = np.where(sample < self.soft_lower)[0]\n\t\tupper_indices = np.where(sample > self.soft_upper)[0]\n\t\tindex_dict = {index: 'lower' for index in lower_indices}\n\t\tfor index in upper_indices:\n\t\t\tindex_dict[index] = 'upper'\n\n\t\t# now we start the mirroring procedure\n\t\tsamples = []\n\t\tindex_dict_keys = list(index_dict.keys())\n\t\tindex_dict_values = list(index_dict.values())\n\t\tfor index in range(2**len(index_dict)):\n\t\t\tsample_copy = np.copy(sample)\n\t\t\tfor jndex in range(len(index_dict)):\n\n\t\t\t\tif (index // 2**jndex) % 2 == 1:\n\t\t\t\t\tsample_index = index_dict_keys[jndex]\n\t\t\t\t\tif index_dict_values[jndex] == 'lower':\n\t\t\t\t\t\tsample_copy[sample_index] = self.all_lower[sample_index] - (sample[sample_index] - self.all_lower[sample_index])\n\t\t\t\t\telif index_dict_values[jndex] == 'upper':\n\t\t\t\t\t\tsample_copy[sample_index] = self.all_upper[sample_index] + (self.all_upper[sample_index] - sample[sample_index])\n\t\t\tsamples.append(sample_copy)\n\t\tif len(samples) == 0:\n\t\t\tsamples.append(np.copy(sample))\n\t\treturn samples\n\n\n\n\n\tdef _rescale_losses(self, losses):\n\n\t\thier_losses = self.loss_shaper.rescale_losses(losses)\n\n\t\tif np.amin(hier_losses) != np.amax(hier_losses):\n\t\t\thier_losses = (hier_losses - np.amin(hier_losses)) / (np.amax(hier_losses) - np.amin(hier_losses))\n\t\t\thier_losses = np.sqrt(hier_losses)\n\t\telse:\n\t\t\thier_losses -= np.amin(hier_losses)\n\n\t\treturn hier_losses\n#\t\treturn losses[:, 0]\n\n\n\n\n\tdef _get_sample_from_categorical(self, var_index, sample):\n\t\toptions = self.var_options[var_index]\n\t\tparsed_sample = [options.index(element) for element in 
sample]\n\t\treturn parsed_sample\n\n\n\n\tdef parse(self, observ_dicts):\n\t\traw_samples, raw_losses = [], []\n\t\tsamples, losses = [], []\n\t\tfor observ_dict in observ_dicts:\n\n\t\t\t# first, we get the sample\n\t\t\tsample = []\n\t\t\tfor var_index, var_name in enumerate(self.var_names):\n\t\t\t\tobserved_sample = observ_dict[var_name]['samples']\n\t\t\t\tif self.var_types[var_index] == 'categorical':\n\t\t\t\t\tobserved_sample = self._get_sample_from_categorical(var_index, observed_sample)\n\t\t\t\ttry:\n\t\t\t\t\tsample.extend(observed_sample)\n\t\t\t\texcept TypeError:\n\t\t\t\t\tsample.append(observed_sample)\n\t\t\tsample = np.array(sample)\n\t\t\traw_samples.append(sample)\n\n\t\t\t# now we need to mirror the sample\n\t\t\tmirrored_samples = self._get_mirrored_samples(sample)\n\n\t\t\t# get the current losses\n#\t\t\tfor key, value in observ_dict.items():\n#\t\t\t\tprint(key, value)\n#\t\t\tquit()\n\n\t\t\tlosses = np.array([observ_dict['loss'][loss_name] for loss_name in self.loss_names])\n\n\t\t\t# and now add them to the lists\n\t\t\tfor sample in mirrored_samples:\n\t\t\t\tsamples.append(np.array(sample))\n\t\t\t\traw_losses.append(losses.copy())\n\n\t\tself._raw_obs_params = np.array(raw_samples)\n\t\tself._raw_obs_losses = np.array(raw_losses)\n\n\t\t# we close with rescaling the losses\n\t\tsamples = np.array(samples)\n\t\tlosses = self._rescale_losses(np.array(raw_losses))\n\n#\t\tfor index, element in enumerate(samples):\n#\t\t\tprint(index, element, losses[index])\n\n\t\treturn samples, losses\n",
"#!/usr/bin/env python \n\n__author__ = 'Florian Hase'\n\n#========================================================================\n\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\nimport pymc3 as pm \n\nfrom Utils.utils import VarDictParser\n#from BayesianNeuralNetwork.distributions import DiscreteLaplace\n\n#========================================================================\n\nclass Pymc3Network(VarDictParser):\n\n\tdef __init__(self, var_dicts, observed_params, observed_losses, batch_size, model_details):\n\t\tVarDictParser.__init__(self, var_dicts)\n\n\t\tself.observed_params = observed_params\n\t\tself.observed_losses = observed_losses\n\t\tself.num_obs = len(self.observed_losses)\n\t\tself.batch_size = batch_size\n\t\tself.model_details = model_details\n\n\t\tfor key, value in self.model_details.items():\n\t\t\tsetattr(self, str(key), value)\n\n\t\tself._process_network_inputs()\n\t\tself._get_weights_and_bias_shapes()\n\n\n\tdef __get_weights(self, index, shape, scale = None):\n\t\treturn pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape = shape)\n\n\tdef __get_biases(self, index, shape, scale = None):\n\t\treturn pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape)\n\n\tdef weight(self, index):\n\t\treturn getattr(self, 'w%d' % index)\n\n\tdef bias(self, index):\n\t\treturn getattr(self, 'b%d' % index)\n\n\tdef _get_weights_and_bias_shapes(self):\n\t\tself.weight_shapes = [[self.network_input.shape[1], self.hidden_shape]]\n\t\tself.bias_shapes = [[self.hidden_shape]]\n\t\tfor index in range(1, self.num_layers - 1):\n\t\t\tself.weight_shapes.append([self.hidden_shape, self.hidden_shape])\n\t\t\tself.bias_shapes.append([self.hidden_shape])\n\t\tself.weight_shapes.append([self.hidden_shape, self.network_input.shape[1]])\n\t\tself.bias_shapes.append([self.network_input.shape[1]])\n\n\n\tdef _process_network_inputs(self):\n\t\tself.network_input = np.zeros((self.num_obs, self.complete_size)) #+ 10.**-4\n\t\tself.network_output = np.zeros((self.num_obs, self.total_size))\n\t\tfor obs_index, obs in enumerate(self.observed_params):\n\t\t\tcurrent_index = 0\n\t\t\tfor var_index, value in enumerate(obs):\n\t\t\t\tif self.var_p_types[var_index] == 'categorical':\n\t\t\t\t\tself.network_input[obs_index, int(current_index + value)] += 1. 
#- 2 * 10.**-4\n\t\t\t\t\tself.network_output[obs_index, var_index] = value\n\t\t\t\t\tcurrent_index += len(self.var_p_options[var_index])\n\t\t\t\telse:\n\t\t\t\t\tself.network_input[obs_index, current_index] = value\n\t\t\t\t\tself.network_output[obs_index, var_index] = value\n\t\t\t\t\tcurrent_index += 1\n\n\n\t\tfor att in ['floats', 'ints', 'cats']:\n\t\t\tsetattr(self, att, np.array([False for i in range(self.complete_size)]))\n\n\t\tself.upper_rescalings = np.empty(self.complete_size)\n\t\tself.lower_rescalings = np.empty(self.complete_size)\n\t\tfor var_e_index, var_e_name in enumerate(self.var_e_names):\n\t\t\thigh = self.var_e_highs[var_e_index]\n\t\t\tlow = self.var_e_lows[var_e_index]\n\t\t\tif self.var_e_types[var_e_index] == 'float':\n\t\t\t\tself.upper_rescalings[var_e_index] = high + 0.1 * (high - low)\n\t\t\t\tself.lower_rescalings[var_e_index] = low - 0.1 * (high - low)\n\t\t\t\tself.floats[var_e_index] = True\n\t\t\telif self.var_e_types[var_e_index] == 'integer':\n\t\t\t\tself.upper_rescalings[var_e_index] = high# + np.ceil(0.1 * (high - low))\n\t\t\t\tself.lower_rescalings[var_e_index] = low# - np.ceil(0.1 * (high - low))\n\t\t\t\tself.ints[var_e_index] = True\n\t\t\telif self.var_e_types[var_e_index] == 'categorical':\n\t\t\t\tself.upper_rescalings[var_e_index] = 1.\n\t\t\t\tself.lower_rescalings[var_e_index] = 0.\n\t\t\t\tself.cats[var_e_index] = True\n\n\n\t\tself.network_input = 2. * (self.network_input - self.lower_rescalings) / (self.upper_rescalings - self.lower_rescalings) - 1.\n\n\n\n\tdef _create_model(self):\n\n\t\twith pm.Model() as self.model:\n\n\t\t\t# getting the location primers\n\t\t\tfor layer_index in range(self.num_layers):\n\t\t\t\tsetattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))\n\t\t\t\tsetattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))\n\n\t\t\t\tif layer_index == 0:\n\t\t\t\t\tfc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))\n\t\t\t\t\tsetattr(self, 'fc%d' % layer_index, fc)\n\t\t\t\telif 0 < layer_index < self.num_layers - 1:\n\t\t\t\t\tfc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))\n\t\t\t\t\tsetattr(self, 'fc%d' % layer_index, fc)\n\t\t\t\telse:\n\t\t\t\t\tself._loc = pm.Deterministic('bnn_out', pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) )\t\n\n\n\t\t\t# getting the precision / standard deviation / variance\n\t\t\tself.tau_rescaling = np.zeros((self.num_obs, self.network_input.shape[1]))\n\t\t\tfor obs_index in range(self.num_obs):\n\t\t\t\tself.tau_rescaling[obs_index] += self.var_e_ranges\n\t\t\tself.tau_rescaling = self.tau_rescaling**2\n\n\t\t\ttau = pm.Gamma('tau', self.num_obs**2, 1., shape = (self.num_obs, self.network_input.shape[1]))\n\t\t\tself.tau = tau / self.tau_rescaling\n\t\t\tself.scale = pm.Deterministic('scale', 1. / pm.math.sqrt(self.tau))\n\n\n\t\t\t# learn the floats\n\t\t\tself.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * self._loc + self.lower_rescalings)\n\t\t\tself.out_floats = pm.Normal('out_floats', self.loc[:, self.floats], tau = self.tau[:, self.floats], observed = self.network_output[:, self._floats])\n\n\n\t\t\t# learn the integers\n\t\t\tself.int_scale = pm.Deterministic('int_scale', 1. 
* self.scale)\n\t\t\tself.out_ints = DiscreteLaplace('out_ints', loc = self.loc[:, self.ints], scale = self.int_scale[:, self.ints], observed = self.network_output[:, self._ints])\n\n\n\t\t\t# learn the categories\n\t\t\tdist_counter, cat_var_index = 0, 0\n\t\t\t\n\t\t\tself.alpha = pm.Deterministic('alpha', (self.loc + 1.) * self.scale)\n\t\t\tself.num_cats = 0\n\t\t\tfor var_e_index, var_e_type in enumerate(self.var_e_types):\n\t\t\t\tif var_e_type == 'categorical' and self.var_e_begin[var_e_index] == var_e_index:\n\t\t\t\t\tbegin, end = self.var_e_begin[var_e_index], self.var_e_end[var_e_index]\n\t\t\t\t\tvar_e_name = self.var_e_names[var_e_index]\n\t\t\t\t\tparam_index = np.argwhere(self.var_p_names == var_e_name)[0, 0]\n\t\t\t\t\tself.param_index = param_index\n\n\t\t\t\t\tout_dirichlet = pm.Dirichlet('dirich_%d' % dist_counter, a = self.alpha[:, begin : end], shape = (self.num_obs, int(end - begin)) )\n\t\t\t\t\tout_cats = pm.Categorical('out_cats_%d' % dist_counter, p = out_dirichlet, observed = self.network_output[:, param_index])\n\t\t\t\t\tself.num_cats += 1\n\t\t\t\t\tdist_counter += 1\n\n\n\tdef _sample(self, num_epochs = None, num_draws = None):\n\t\tif not num_epochs: num_epochs = self.num_epochs\n\t\tif not num_draws: num_draws = self.num_draws\n\n\t\twith self.model:\n\t\t\t\n#\t\t\tapprox = pm.fit(method = 'svgd', n = 1000, obj_optimizer = pm.adam(learning_rate = self.learning_rate))\n#\t\t\tself.trace = approx.sample(draws = num_draws)\n\n\t\t\tapprox = pm.fit(n = num_epochs, obj_optimizer = pm.adam(learning_rate = self.learning_rate))\n\t\t\tself.trace = approx.sample(draws = num_draws)\n\n"
] | [
[
"numpy.all",
"numpy.random.randn"
],
[
"numpy.amax",
"numpy.sqrt",
"numpy.amin",
"numpy.copy",
"numpy.array",
"numpy.exp",
"numpy.where"
],
[
"numpy.zeros",
"numpy.empty",
"numpy.argwhere"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
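Note: the Spearmint compression.py in the row above round-trips NumPy arrays through zlib, but its decompress_array relies on np.fromstring with the default dtype, so the array comes back as float64 regardless of what was stored. A self-contained variant that also records the dtype is sketched below; the function names mirror the originals but this is our sketch, not the ChemOS API.

import zlib
import numpy as np

def compress_array(a):
    # Store shape and dtype alongside the compressed raw bytes.
    return {'ctype': 'compressed array', 'shape': list(a.shape),
            'dtype': str(a.dtype),
            'value': zlib.compress(np.ascontiguousarray(a).tobytes())}

def decompress_array(d):
    raw = zlib.decompress(d['value'])
    return np.frombuffer(raw, dtype=d['dtype']).reshape(d['shape'])

a = np.random.randn(100, 3).astype(np.float32)
assert np.array_equal(a, decompress_array(compress_array(a)))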
Hanscal/unlp | [
"93a630cac7957f1ddd38f34403ec6577a277e10a"
] | [
"unlp/unsupervised/Word2Vec/get_file.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@description: Download file.\n\"\"\"\n\nimport hashlib\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport time\nimport typing\nimport zipfile\nfrom pathlib import Path\n\nimport numpy as np\nimport six\nfrom six.moves.urllib.error import HTTPError\nfrom six.moves.urllib.error import URLError\nfrom six.moves.urllib.request import urlretrieve\n\n\nclass Progbar(object):\n \"\"\"\n Displays a progress bar.\n\n :param target: Total number of steps expected, None if unknown.\n :param width: Progress bar width on screen.\n :param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n :param stateful_metrics: Iterable of string names of metrics that\n should *not* be averaged over time. Metrics in this list\n will be displayed as-is. All others will be averaged\n by the progbar before display.\n :param interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n\n def __init__(\n self,\n target,\n width=30,\n verbose=1,\n interval=0.05,\n ):\n \"\"\"Init.\"\"\"\n self.target = target\n self.width = width\n self.verbose = verbose\n self.interval = interval\n\n self._dynamic_display = ((hasattr(sys.stdout,\n 'isatty') and sys.stdout.isatty()\n ) or 'ipykernel' in sys.modules)\n self._total_width = 0\n self._seen_so_far = 0\n self._start = time.time()\n self._last_update = 0\n\n def update(self, current):\n \"\"\"Updates the progress bar.\"\"\"\n self._seen_so_far = current\n\n now = time.time()\n info = ' - {0:.0f}s'.format(now - self._start)\n if self.verbose == 1:\n if (now - self._last_update < self.interval and self.target is not\n None and current < self.target):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.floor(np.log10(self.target))) + 1\n bar = '{2:{0:d}d}/{1} ['.format(\n numdigits, self.target, current)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '{0:7d}/Unknown'.format(current)\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n if self.target is not None and current < self.target:\n eta = int(time_per_unit * (self.target - current))\n if eta > 3600:\n eta_format = ('{0:d}:{1:02d}:{2:02d}'.format(\n eta // 3600, (eta % 3600) // 60, eta % 60))\n elif eta > 60:\n eta_format = '{0:d}:{1:02d}'.format(eta // 60, eta % 60)\n else:\n eta_format = '{0:d}s'.format(eta)\n\n info = ' - ETA: {0}'.format(eta_format)\n else:\n if time_per_unit >= 1:\n info += ' {0:.0f}s/step'.format(time_per_unit)\n elif time_per_unit >= 1e-3:\n info += ' {0:.0f}ms/step'.format(time_per_unit * 1e3)\n else:\n info += ' {0:.0f}us/step'.format(time_per_unit * 1e6)\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if self.target is not None and current >= self.target:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is None or current >= self.target:\n info += '\\n'\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now\n\n\ndef _extract_archive(file_path, path='.', archive_format='auto'):\n \"\"\"\n Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.\n\n :param file_path: path to the archive file\n :param path: path to extract the archive file\n :param archive_format: Archive format to try for extracting the file.\n Options are 'auto', 'tar', 'zip', and None.\n 'tar' includes tar, tar.gz, and tar.bz files.\n The default 'auto' is ['tar', 'zip'].\n None or an empty list will return no matches found.\n\n :return: True if a match was found and an archive extraction was completed,\n False otherwise.\n \"\"\"\n if archive_format is None:\n return False\n if archive_format == 'auto':\n archive_format = ['tar', 'zip']\n if isinstance(archive_format, six.string_types):\n archive_format = [archive_format]\n\n for archive_type in archive_format:\n if archive_type == 'tar':\n open_fn = tarfile.open\n is_match_fn = tarfile.is_tarfile\n if archive_type == 'zip':\n open_fn = zipfile.ZipFile\n is_match_fn = zipfile.is_zipfile\n\n if is_match_fn(file_path):\n with open_fn(file_path) as archive:\n try:\n archive.extractall(path)\n except (tarfile.TarError, RuntimeError,\n KeyboardInterrupt):\n if os.path.exists(path):\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n raise\n return True\n return False\n\n\ndef get_file(\n fname: str = None,\n origin: str = None,\n untar: bool = False,\n extract: bool = False,\n md5_hash: typing.Any = None,\n file_hash: typing.Any = None,\n hash_algorithm: str = 'auto',\n archive_format: str = 'auto',\n cache_subdir: typing.Union[Path, str] = 'data',\n cache_dir: typing.Union[Path, str] = 'dataset',\n verbose: int = 1\n) -> str:\n \"\"\"\n Downloads a file from a URL if it not already in the cache.\n\n By default the file at the url `origin` is downloaded to the\n cache_dir `~/.project/datasets`, placed in the cache_subdir `data`,\n and given the filename `fname`. The final location of a file\n `example.txt` would therefore be `~/.project/datasets/data/example.txt`.\n\n Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.\n Passing a hash will verify the file after download. 
The command line\n programs `shasum` and `sha256sum` can compute the hash.\n\n :param fname: Name of the file. If an absolute path `/path/to/file.txt` is\n specified the file will be saved at that location.\n :param origin: Original URL of the file.\n :param untar: Deprecated in favor of 'extract'. Boolean, whether the file\n should be decompressed.\n :param md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file\n for verification.\n :param file_hash: The expected hash string of the file after download.\n The sha256 and md5 hash algorithms are both supported.\n :param cache_subdir: Subdirectory under the cache dir where the file is\n saved. If an absolute path `/path/to/folder` is specified the file\n will be saved at that location.\n :param hash_algorithm: Select the hash algorithm to verify the file.\n options are 'md5', 'sha256', and 'auto'. The default 'auto' detects\n the hash algorithm in use.\n :papram extract: True tries extracting the file as an Archive, like tar\n or zip.\n :param archive_format: Archive format to try for extracting the file.\n Options are 'auto', 'tar', 'zip', and None.\n 'tar' includes tar, tar.gz, and tar.bz files.\n The default 'auto' is ['tar', 'zip'].\n None or an empty list will return no matches found.\n :param cache_dir: Location to store cached files, when None it defaults to\n the [project.USER_DATA_DIR](~/.project/datasets).\n :param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n\n :return: Path to the downloaded file.\n \"\"\"\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.text2vec')\n datadir = os.path.join(datadir_base, cache_subdir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated because the file hash '\n 'does not match the original value of file_hash.'\n ' We will re-download the data.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(\n target=total_size, verbose=verbose)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {} : {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt):\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath\n\n\ndef validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n \"\"\"\n Validates a file against a sha256 or md5 hash.\n\n :param fpath: path 
to the file being validated\n :param file_hash: The expected hash string of the file.\n The sha256 and md5 hash algorithms are both supported.\n :param algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.\n The default 'auto' detects the hash algorithm in use.\n :param chunk_size: Bytes to read at a time, important for large files.\n\n :return: Whether the file is valid.\n \"\"\"\n if ((algorithm == 'sha256') or (algorithm == 'auto' and len(\n file_hash) == 64)):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False\n\n\ndef hash_file(fpath, algorithm='sha256', chunk_size=65535):\n \"\"\"\n Calculates a file sha256 or md5 hash.\n\n :param fpath: path to the file being validated\n :param algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.\n The default 'auto' detects the hash algorithm in use.\n :param chunk_size: Bytes to read at a time, important for large files.\n\n :return: The file hash.\n \"\"\"\n if algorithm == 'sha256':\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()\n"
] | [
[
"numpy.log10"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
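Note: get_file.py in the unlp row above validates downloads by hashing the file in fixed-size chunks. A short usage sketch of the same chunked-hash idea follows; the file path and expected digest are placeholders, not values from the repository.

import hashlib

def sha256_of(path, chunk_size=65535):
    # Read the file in chunks so large downloads never need to fit in memory at once.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# expected = '<known-good sha256 hex digest>'   # placeholder
# valid = sha256_of('downloaded.tar.gz') == expected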
hahahawu/Tagger | [
"180a0412abf571797638d024b8dacf9d776ee6f9"
] | [
"scripts/input_converter.py"
] | [
"# input_converter.py\n# author: Playinf\n# email: [email protected]\n\n\nimport os\nimport six\nimport json\nimport random\nimport argparse\nimport tensorflow as tf\n\n\ndef load_vocab(filename):\n fd = open(filename, \"r\")\n\n count = 0\n vocab = {}\n for line in fd:\n word = line.strip()\n vocab[word] = count\n count += 1\n\n fd.close()\n return vocab\n\n\ndef to_json(dictionary):\n \"\"\" Convert python dictionary to JSON format \"\"\"\n return json.dumps(dictionary)\n\n\ndef to_dictionary(example):\n \"\"\" Convert JSON/tf.train.Example to python dictionary \"\"\"\n if isinstance(example, str):\n dictionary = json.loads(example)\n elif isinstance(example, tf.train.Example):\n dictionary = {}\n keys = example.features.feature.keys()\n values = example.features.feature.values()\n\n for (k, v) in zip(keys, values):\n int64_list = list(v.int64_list.value)\n float_list = list(v.float_list.value)\n bytes_list = list(v.bytes_list.value)\n\n if int64_list:\n dictionary[k] = int64_list\n elif float_list:\n dictionary[k] = float_list\n elif bytes_list:\n dictionary[k] = bytes_list\n else:\n raise ValueError(\"All lists are empty.\")\n else:\n raise ValueError(\"Unsupported format\")\n\n return dictionary\n\n\ndef to_example(dictionary):\n \"\"\" Convert python dictionary to tf.train.Example \"\"\"\n features = {}\n\n for (k, v) in six.iteritems(dictionary):\n if not v:\n raise ValueError(\"Empty generated field: %s\", str((k, v)))\n\n if isinstance(v[0], six.integer_types):\n int64_list = tf.train.Int64List(value=v)\n features[k] = tf.train.Feature(int64_list=int64_list)\n elif isinstance(v[0], float):\n float_list = tf.train.FloatList(value=v)\n features[k] = tf.train.Feature(float_list=float_list)\n elif isinstance(v[0], six.string_types):\n bytes_list = tf.train.BytesList(value=v)\n features[k] = tf.train.Feature(bytes_list=bytes_list)\n else:\n raise ValueError(\"Value is neither an int nor a float; \"\n \"v: %s type: %s\" % (str(v[0]), str(type(v[0]))))\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n\ndef read_records(filename):\n \"\"\" Read TensorFlow record \"\"\"\n reader = tf.python_io.tf_record_iterator(filename)\n records = []\n\n for record in reader:\n records.append(record)\n if len(records) % 10000 == 0:\n tf.logging.info(\"read: %d\", len(records))\n\n return records\n\n\ndef write_records(records, out_filename):\n \"\"\" Write to TensorFlow record \"\"\"\n writer = tf.python_io.TFRecordWriter(out_filename)\n\n for count, record in enumerate(records):\n writer.write(record)\n if count % 10000 == 0:\n tf.logging.info(\"write: %d\", count)\n\n writer.close()\n\n\ndef convert_record_to_json(pattern, output_name, output_dir, num_shards=1):\n \"\"\" Convert TensorFlow record to JSON format \"\"\"\n output_files = []\n writers = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.gfile.GFile(output_file, \"w\"))\n\n filenames = tf.gfile.Glob(pattern)\n records = []\n\n for filename in filenames:\n records.extend(read_records(filename))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n example = tf.train.Example()\n example.ParseFromString(record)\n features = to_dictionary(example)\n json_str = to_json(features)\n writers[shard].write(json_str + \"\\n\")\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\n# format:\n# pred-pos 
tokens ||| labels\ndef convert_plain_to_json(name, vocabs, output_name, output_dir, num_shards,\n lower=True, shuffle=True):\n \"\"\" Convert plain SRL data to TensorFlow record \"\"\"\n vocab_token = load_vocab(vocabs[0])\n vocab_label = load_vocab(vocabs[1])\n records = []\n unk = vocab_token[\"<unk>\"]\n\n with open(name) as fd:\n for line in fd:\n features, labels = line.strip().split(\"|||\")\n features = features.strip().split(\" \")\n labels = labels.strip().split(\" \")\n pred_pos = features[0]\n inputs = features[1:]\n\n if lower:\n inputs = [item.lower() for item in inputs]\n\n inputs = [vocab_token[item] if item in vocab_token else unk\n for item in inputs]\n labels = [vocab_label[item] for item in labels]\n preds = [0 for _ in inputs]\n preds[int(pred_pos)] = 1\n\n feature = {\n \"inputs\": inputs,\n \"preds\": preds,\n \"targets\": labels\n }\n records.append(feature)\n\n if shuffle:\n random.shuffle(records)\n\n writers = []\n output_files = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.gfile.GFile(output_file, \"w\"))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n features = record\n json_str = to_json(features)\n writers[shard].write(json_str + \"\\n\")\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\n# format:\n# pred-pos tokens ||| labels\ndef convert_plain_to_record(name, vocabs, output_name, output_dir, num_shards,\n lower=True, shuffle=True):\n \"\"\" Convert plain SRL data to TensorFlow record \"\"\"\n vocab_token = load_vocab(vocabs[0])\n vocab_label = load_vocab(vocabs[1])\n records = []\n unk = vocab_token[\"<unk>\"]\n\n with open(name) as fd:\n for line in fd:\n features, labels = line.strip().split(\"|||\")\n features = features.strip().split()\n labels = labels.strip().split()\n pred_pos = features[0]\n inputs = features[1:]\n\n if lower:\n inputs = [item.lower() for item in inputs]\n\n inputs = [vocab_token[item] if item in vocab_token else unk\n for item in inputs]\n labels = [vocab_label[item] for item in labels]\n preds = [0 for _ in inputs]\n preds[int(pred_pos)] = 1\n\n feature = {\n \"inputs\": inputs,\n \"preds\": preds,\n \"targets\": labels\n }\n records.append(feature)\n\n if shuffle:\n random.shuffle(records)\n\n output_files = []\n writers = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n example = to_example(record)\n writers[shard].write(example.SerializeToString())\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\ndef parse_args():\n msg = \"convert srl data to TensorFlow record format\"\n usage = \"srl_input_converter.py [<args>] [-h | --help]\"\n parser = argparse.ArgumentParser(description=msg, usage=usage)\n\n msg = \"path of source file\"\n parser.add_argument(\"--input_path\", required=True, type=str, help=msg)\n msg = \"output name\"\n parser.add_argument(\"--output_name\", required=True, type=str, help=msg)\n msg = \"output directory\"\n parser.add_argument(\"--output_dir\", required=True, type=str, help=msg)\n msg = \"path of vocabulary\"\n parser.add_argument(\"--vocab\", type=str, 
nargs=2, help=msg)\n msg = \"number of output shards\"\n parser.add_argument(\"--num_shards\", default=100, type=int, help=msg)\n msg = \"shuffle inputs\"\n parser.add_argument(\"--shuffle\", action=\"store_true\", help=msg)\n msg = \"use lowercase\"\n parser.add_argument(\"--lower\", action=\"store_true\", help=msg)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n convert_plain_to_record(args.input_path, args.vocab, args.output_name,\n args.output_dir, args.num_shards, args.lower,\n args.shuffle)\n"
] | [
[
"tensorflow.train.Feature",
"tensorflow.train.Example",
"tensorflow.gfile.GFile",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.gfile.Glob",
"tensorflow.logging.info",
"tensorflow.train.Features",
"tensorflow.train.FloatList",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.BytesList",
"tensorflow.train.Int64List"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
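Note: input_converter.py in the Tagger row above serializes {inputs, preds, targets} dictionaries as tf.train.Example records. A minimal round-trip sketch with toy values is given below; the TF 1.x tf.python_io writer used in the script is not needed for the conversion itself.

import tensorflow as tf

feature = {"inputs": [3, 7, 12], "preds": [0, 1, 0], "targets": [5, 5, 9]}
example = tf.train.Example(features=tf.train.Features(feature={
    k: tf.train.Feature(int64_list=tf.train.Int64List(value=v))
    for k, v in feature.items()
}))
serialized = example.SerializeToString()

# Parse back and check one field survived the round trip.
parsed = tf.train.Example()
parsed.ParseFromString(serialized)
assert list(parsed.features.feature["inputs"].int64_list.value) == feature["inputs"]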
dmachlanski/ce807 | [
"17c9b7ddd71906c018cd213a674f37cbed36856d"
] | [
"run.py"
] | [
"import numpy as np\nimport pandas as pd\nimport re, argparse, datetime\nfrom timeit import default_timer\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\nfrom sklearn.model_selection import train_test_split, cross_validate\nfrom sklearn.metrics import f1_score, make_scorer\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n\ndef get_parser():\n \"\"\" Builds the argument parser for the program. \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', type=str, dest='clf_key', default='dt', choices=['dt', 'xts', 'rf'], help='A classifier to use.')\n parser.add_argument('-m', type=str, dest='mode', default='test', choices=['cv', 'test'], help='Mode to run the program in (cross-validation or test).')\n parser.add_argument('-k', type=int, dest='cv', default=5, help='Number of folds in KFold cross-validation.')\n parser.add_argument('-d', '--data', type=str, dest='data_name', default='econbiz', help='Name of the dataset to use (econbiz or pubmed).')\n parser.add_argument('-f', type=float, dest='data_fraction', default=0.1, help='The fraction of the data to be used (0, 1>.')\n parser.add_argument('-t', type=float, dest='test_size', default=0.1, help='Test size (0, 1>.')\n parser.add_argument('--max_depth', type=int, dest='max_depth', default=None, help='The maximum depth of the tree.')\n parser.add_argument('--min_ss', type=int, dest='min_ss', default=2, help='The minimum number of samples required to split an internal tree node.')\n parser.add_argument('--max_features', type=str, dest='max_features', default=None, help='The number of features to consider when looking for the best split in the tree.')\n parser.add_argument('-n', type=int, dest='n_estimators', default=10, help='The number of estimators in the ensemble.')\n parser.add_argument('-j', type=int, dest='n_jobs', default=-1, help='The number of jobs to run in parallel.')\n parser.add_argument('-v', type=int, dest='verbose', default=0, help='Verbosity of the program.')\n parser.add_argument('-b', '--batch', dest='is_batch_mode', action='store_true', default=False, help='Whether the program runs in a batch mode (affects file locations).')\n\n return parser\n\ndef get_data(options):\n \"\"\" Loads and pre-processes the data. \"\"\"\n if options.verbose > 0:\n print(f'Loading data [dataset: {options.data_name}, fraction: {options.data_fraction}, test size: {options.test_size}]')\n \n # Load the data.\n location_prefix = '../../' if options.is_batch_mode else ''\n data = pd.read_csv(f'{location_prefix}data/{options.data_name}.csv')\n\n # Get raw values from the DataFrame.\n X_all = data['title'].values\n # Labels are separated by a '\\t' character. Convert them into a list of labels per each data row.\n Y_all = [x.split('\\t') for x in data['labels'].values]\n\n # Get only a fraction of the data if necessary\n if options.data_fraction < 1.0:\n data_slice = int(options.data_fraction * X_all.shape[0])\n X_raw, Y_raw = X_all[:data_slice], Y_all[:data_slice]\n else:\n X_raw, Y_raw = X_all, Y_all\n\n # Allow for tokens fitting into the following pattern only.\n word_regexp = r\"(?u)\\b[a-zA-Z_][a-zA-Z_]+\\b\"\n # Take only the most frequent 25k words. 
Use unigrams.\n terms = CountVectorizer(input='content', stop_words='english', binary=False, token_pattern=word_regexp, max_features=25000, ngram_range=(1, 1))\n X = terms.fit_transform(X_raw)\n\n # Binrize the labels (convert them into a sparse matrix of one-hot vectors).\n mlb = MultiLabelBinarizer(sparse_output=True)\n Y = mlb.fit_transform(Y_raw)\n\n return train_test_split(X, Y, test_size=options.test_size)\n\ndef get_model(options):\n \"\"\" Prepare a classifier for training. \"\"\"\n classifiers = {\n \"dt\" : DecisionTreeClassifier(max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features),\n \"xts\" : ExtraTreesClassifier(n_estimators=options.n_estimators,\n n_jobs=options.n_jobs,\n max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features),\n \"rf\" : RandomForestClassifier(n_estimators=options.n_estimators,\n n_jobs=options.n_jobs,\n max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features)\n }\n\n # Prepare the pipeline that consists of TF-IDF representation and a classifier.\n trf = TfidfTransformer(sublinear_tf=False, use_idf=True, norm='l2')\n clf = Pipeline([(\"trf\", trf), (\"clf\", classifiers[options.clf_key])])\n\n return clf\n\nif __name__ == \"__main__\":\n # Get and parse passed arguments.\n parser = get_parser()\n options = parser.parse_args()\n\n if options.verbose > 0:\n print('### Starting ###')\n print('Arguments:', options)\n\n X_train, X_test, Y_train, Y_test = get_data(options)\n\n clf = get_model(options)\n\n # The program can be run in either a 'cross-validation' or a 'test' mode.\n # The former performs k-fold cross-validation, while the latter fits the selected model\n # on the training data and runs predictions against the test set.\n # Both modes report samples-based F1-score, fitting time and prediction time (in seconds).\n if options.mode == 'cv':\n if options.verbose > 0:\n print(f'Running {options.cv}-fold cross-validation')\n\n scores = cross_validate(clf, X_train.toarray(), Y_train.toarray(), cv=options.cv,\n scoring=make_scorer(f1_score, average='samples'), n_jobs=options.n_jobs, verbose=options.verbose)\n\n test_score = scores['test_score']\n fit_time = scores['fit_time']\n score_time = scores['score_time']\n print(\"F1-score: %0.2f (+/- %0.2f)\" % (test_score.mean(), test_score.std()))\n print(\"Fit time: %0.2f (+/- %0.2f)\" % (fit_time.mean(), fit_time.std()))\n print(\"Prediction time: %0.2f (+/- %0.2f)\" % (score_time.mean(), score_time.std()))\n else:\n if options.verbose > 0:\n print('Training the model')\n \n fit_time_start = default_timer()\n clf.fit(X_train.toarray(), Y_train.toarray())\n fit_time_end = default_timer()\n\n if options.verbose > 0:\n print('Running predictions')\n\n pred_time_start = default_timer()\n Y_pred = clf.predict(X_test.toarray())\n pred_time_end = default_timer()\n\n test_score = f1_score(Y_test.toarray(), Y_pred, average='samples')\n print(\"F1-score: %0.2f\" % (test_score))\n print(\"Fit time: %0.2f\" % (fit_time_end - fit_time_start))\n print(\"Prediction time: %0.2f\" % (pred_time_end - pred_time_start))"
] | [
[
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MultiLabelBinarizer",
"sklearn.pipeline.Pipeline",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.make_scorer",
"sklearn.feature_extraction.text.TfidfTransformer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
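Note: run.py in the ce807 row above wires CountVectorizer features and binarized labels into a TfidfTransformer-plus-tree pipeline scored with samples-averaged F1. A toy-corpus sketch of that pipeline shape follows; the titles and labels are fabricated and the score is a training-set score only.

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score

titles = ["monetary policy and inflation", "labour market dynamics", "inflation targeting rules"]
labels = [["econ", "policy"], ["econ"], ["policy"]]

X = CountVectorizer(stop_words='english').fit_transform(titles)   # bag-of-words counts
Y = MultiLabelBinarizer().fit_transform(labels)                    # multilabel indicator matrix

clf = Pipeline([("trf", TfidfTransformer()), ("clf", DecisionTreeClassifier())])
clf.fit(X, Y)
print(f1_score(Y, clf.predict(X), average="samples"))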
josephgalestian/taichiV2-master | [
"12a63a05fdccc824205b1ee6545e4706bf473405"
] | [
"python/taichi/lang/kernel_impl.py"
] | [
"import ast\nimport functools\nimport inspect\nimport re\nimport sys\nimport textwrap\n\nimport numpy as np\nimport taichi.lang\nfrom taichi._lib import core as _ti_core\nfrom taichi.lang import impl, runtime_ops\nfrom taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,\n transform_tree)\nfrom taichi.lang.enums import Layout\nfrom taichi.lang.exception import (TaichiCompilationError,\n TaichiRuntimeTypeError, TaichiSyntaxError)\nfrom taichi.lang.expr import Expr\nfrom taichi.lang.matrix import MatrixType\nfrom taichi.lang.shell import _shell_pop_print, oinspect\nfrom taichi.lang.util import has_pytorch, to_taichi_type\nfrom taichi.linalg.sparse_matrix import sparse_matrix_builder\nfrom taichi.types import any_arr, primitive_types, template\n\nfrom taichi import _logging\n\nif has_pytorch():\n import torch\n\n\ndef func(fn):\n \"\"\"Marks a function as callable in Taichi-scope.\n\n This decorator transforms a Python function into a Taichi one. Taichi\n will JIT compile it into native instructions.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> @ti.func\n >>> def foo(x):\n >>> return x + 2\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> print(foo(40)) # 42\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n\n fun = Func(fn, _classfunc=is_classfunc)\n\n @functools.wraps(fn)\n def decorated(*args):\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef pyfunc(fn):\n \"\"\"Marks a function as callable in both Taichi and Python scopes.\n\n When called inside the Taichi scope, Taichi will JIT compile it into\n native instructions. Otherwise it will be invoked directly as a\n Python function.\n\n See also :func:`~taichi.lang.kernel_impl.func`.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True)\n\n @functools.wraps(fn)\n def decorated(*args):\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef _get_tree_and_ctx(self,\n excluded_parameters=(),\n is_kernel=True,\n arg_features=None,\n args=None,\n ast_builder=None):\n file = oinspect.getsourcefile(self.func)\n src, start_lineno = oinspect.getsourcelines(self.func)\n src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]\n tree = ast.parse(textwrap.dedent(\"\\n\".join(src)))\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n global_vars = _get_global_vars(self.func)\n\n for i, arg in enumerate(func_body.args.args):\n anno = arg.annotation\n if isinstance(anno, ast.Name):\n global_vars[anno.id] = self.argument_annotations[i]\n\n if isinstance(func_body.returns, ast.Name):\n global_vars[func_body.returns.id] = self.return_type\n\n if is_kernel or impl.get_runtime().experimental_real_function:\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,\n is_kernel=is_kernel,\n func=self,\n arg_features=arg_features,\n global_vars=global_vars,\n argument_data=args,\n src=src,\n start_lineno=start_lineno,\n file=file,\n ast_builder=ast_builder)\n\n\nclass Func:\n function_counter = 0\n\n def __init__(self, _func, _classfunc=False, _pyfunc=False):\n 
self.func = _func\n self.func_id = Func.function_counter\n Func.function_counter += 1\n self.compiled = None\n self.classfunc = _classfunc\n self.pyfunc = _pyfunc\n self.argument_annotations = []\n self.argument_names = []\n self.return_type = None\n self.extract_arguments()\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n self.argument_annotations, self.template_slot_locations)\n self.taichi_functions = {} # The |Function| class in C++\n\n def __call__(self, *args):\n if not impl.inside_kernel():\n if not self.pyfunc:\n raise TaichiSyntaxError(\n \"Taichi functions cannot be called from Python-scope.\"\n \" Use @ti.pyfunc if you wish to call Taichi functions \"\n \"from both Python-scope and Taichi-scope.\")\n return self.func(*args)\n\n if impl.get_runtime().experimental_real_function:\n if impl.get_runtime().current_kernel.is_grad:\n raise TaichiSyntaxError(\n \"Real function in gradient kernels unsupported.\")\n instance_id, _ = self.mapper.lookup(args)\n key = _ti_core.FunctionKey(self.func.__name__, self.func_id,\n instance_id)\n if self.compiled is None:\n self.compiled = {}\n if key.instance_id not in self.compiled:\n self.do_compile(key=key, args=args)\n return self.func_call_rvalue(key=key, args=args)\n tree, ctx = _get_tree_and_ctx(\n self,\n is_kernel=False,\n args=args,\n ast_builder=impl.get_runtime().prog.current_ast_builder())\n ret = transform_tree(tree, ctx)\n if not impl.get_runtime().experimental_real_function:\n if self.return_type and not ctx.returned:\n raise TaichiSyntaxError(\n \"Function has a return type but does not have a return statement\"\n )\n return ret\n\n def func_call_rvalue(self, key, args):\n # Skip the template args, e.g., |self|\n assert impl.get_runtime().experimental_real_function\n non_template_args = []\n for i, anno in enumerate(self.argument_annotations):\n if not isinstance(anno, template):\n non_template_args.append(args[i])\n non_template_args = impl.make_expr_group(non_template_args)\n return Expr(\n _ti_core.make_func_call_expr(\n self.taichi_functions[key.instance_id], non_template_args))\n\n def do_compile(self, key, args):\n tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)\n fn = impl.get_runtime().prog.create_function(key)\n\n def func_body():\n ctx.ast_builder = fn.ast_builder()\n transform_tree(tree, ctx)\n\n self.taichi_functions[key.instance_id] = fn\n self.compiled[key.instance_id] = func_body\n self.taichi_functions[key.instance_id].set_function_body(func_body)\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise TaichiSyntaxError(\n 'Taichi functions do not support variable positional parameters (i.e., *args)'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise TaichiSyntaxError(\n 'Taichi functions do not support keyword parameters')\n if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi functions only support \"positional or keyword\" 
parameters'\n )\n annotation = param.annotation\n if annotation is inspect.Parameter.empty:\n if i == 0 and self.classfunc:\n annotation = template()\n # TODO: pyfunc also need type annotation check when real function is enabled,\n # but that has to happen at runtime when we know which scope it's called from.\n elif not self.pyfunc and impl.get_runtime(\n ).experimental_real_function:\n raise TaichiSyntaxError(\n f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'\n )\n else:\n if not id(annotation\n ) in primitive_types.type_ids and not isinstance(\n annotation, template):\n raise TaichiSyntaxError(\n f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n\nclass TaichiCallableTemplateMapper:\n def __init__(self, annotations, template_slot_locations):\n self.annotations = annotations\n self.num_args = len(annotations)\n self.template_slot_locations = template_slot_locations\n self.mapping = {}\n\n @staticmethod\n def extract_arg(arg, anno):\n if isinstance(anno, template):\n if isinstance(arg, taichi.lang.snode.SNode):\n return arg.ptr\n if isinstance(arg, taichi.lang.expr.Expr):\n return arg.ptr.get_underlying_ptr_address()\n if isinstance(arg, _ti_core.Expr):\n return arg.get_underlying_ptr_address()\n if isinstance(arg, tuple):\n return tuple(\n TaichiCallableTemplateMapper.extract_arg(item, anno)\n for item in arg)\n return arg\n if isinstance(anno, any_arr):\n if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):\n anno.check_element_dim(arg, 0)\n anno.check_element_shape(())\n anno.check_field_dim(len(arg.shape))\n return arg.dtype, len(arg.shape), (), Layout.AOS\n if isinstance(arg, taichi.lang.matrix.VectorNdarray):\n anno.check_element_dim(arg, 1)\n anno.check_element_shape((arg.n, ))\n anno.check_field_dim(len(arg.shape))\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout\n if isinstance(arg, taichi.lang.matrix.MatrixNdarray):\n anno.check_element_dim(arg, 2)\n anno.check_element_shape((arg.n, arg.m))\n anno.check_field_dim(len(arg.shape))\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 2, (arg.n,\n arg.m), arg.layout\n # external arrays\n element_dim = 0 if anno.element_dim is None else anno.element_dim\n layout = Layout.AOS if anno.layout is None else anno.layout\n shape = tuple(arg.shape)\n if len(shape) < element_dim:\n raise ValueError(\n f\"Invalid argument into ti.any_arr() - required element_dim={element_dim}, \"\n f\"but the argument has only {len(shape)} dimensions\")\n element_shape = (\n ) if element_dim == 0 else shape[:\n element_dim] if layout == Layout.SOA else shape[\n -element_dim:]\n return to_taichi_type(arg.dtype), len(shape), element_shape, layout\n # Use '#' as a placeholder because other kinds of arguments are not involved in template instantiation\n return '#'\n\n def extract(self, args):\n extracted = []\n for arg, anno in zip(args, self.annotations):\n extracted.append(self.extract_arg(arg, anno))\n return tuple(extracted)\n\n def lookup(self, args):\n if len(args) != self.num_args:\n raise TypeError(\n f'{self.num_args} argument(s) needed but {len(args)} provided.'\n )\n\n key = self.extract(args)\n if key not in self.mapping:\n count = len(self.mapping)\n self.mapping[key] = count\n return self.mapping[key], key\n\n\ndef _get_global_vars(_func):\n # Discussions: https://github.com/taichi-dev/taichi/issues/282\n global_vars = 
_func.__globals__.copy()\n\n freevar_names = _func.__code__.co_freevars\n closure = _func.__closure__\n if closure:\n freevar_values = list(map(lambda x: x.cell_contents, closure))\n for name, value in zip(freevar_names, freevar_values):\n global_vars[name] = value\n\n return global_vars\n\n\nclass Kernel:\n counter = 0\n\n def __init__(self, _func, is_grad, _classkernel=False):\n self.func = _func\n self.kernel_counter = Kernel.counter\n Kernel.counter += 1\n self.is_grad = is_grad\n self.grad = None\n self.argument_annotations = []\n self.argument_names = []\n self.return_type = None\n self.classkernel = _classkernel\n self.extract_arguments()\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n self.argument_annotations, self.template_slot_locations)\n impl.get_runtime().kernels.append(self)\n self.reset()\n self.kernel_cpp = None\n\n def reset(self):\n self.runtime = impl.get_runtime()\n if self.is_grad:\n self.compiled_functions = self.runtime.compiled_grad_functions\n else:\n self.compiled_functions = self.runtime.compiled_functions\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support variable positional parameters (i.e., *args)'\n )\n if param.default is not inspect.Parameter.empty:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support default values for arguments'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support keyword parameters')\n if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi kernels only support \"positional or keyword\" parameters'\n )\n annotation = param.annotation\n if param.annotation is inspect.Parameter.empty:\n if i == 0 and self.classkernel: # The |self| parameter\n annotation = template()\n else:\n raise TaichiSyntaxError(\n 'Taichi kernels parameters must be type annotated')\n else:\n if isinstance(annotation, (template, any_arr)):\n pass\n elif id(annotation) in primitive_types.type_ids:\n pass\n elif isinstance(annotation, sparse_matrix_builder):\n pass\n elif isinstance(annotation, MatrixType):\n pass\n else:\n raise TaichiSyntaxError(\n f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n def materialize(self, key=None, args=None, arg_features=None):\n if key is None:\n key = (self.func, 0)\n self.runtime.materialize()\n if key in self.compiled_functions:\n return\n grad_suffix = \"\"\n if self.is_grad:\n grad_suffix = \"_grad\"\n kernel_name = f\"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}\"\n _logging.trace(f\"Compiling kernel {kernel_name}...\")\n\n tree, ctx = _get_tree_and_ctx(\n self,\n args=args,\n excluded_parameters=self.template_slot_locations,\n arg_features=arg_features)\n\n if self.is_grad:\n 
KernelSimplicityASTChecker(self.func).visit(tree)\n\n # Do not change the name of 'taichi_ast_generator'\n # The warning system needs this identifier to remove unnecessary messages\n def taichi_ast_generator(kernel_cxx):\n if self.runtime.inside_kernel:\n raise TaichiSyntaxError(\n \"Kernels cannot call other kernels. I.e., nested kernels are not allowed. \"\n \"Please check if you have direct/indirect invocation of kernels within kernels. \"\n \"Note that some methods provided by the Taichi standard library may invoke kernels, \"\n \"and please move their invocations to Python-scope.\")\n self.runtime.inside_kernel = True\n self.runtime.current_kernel = self\n try:\n ctx.ast_builder = kernel_cxx.ast_builder()\n transform_tree(tree, ctx)\n if not impl.get_runtime().experimental_real_function:\n if self.return_type and not ctx.returned:\n raise TaichiSyntaxError(\n \"Kernel has a return type but does not have a return statement\"\n )\n finally:\n self.runtime.inside_kernel = False\n self.runtime.current_kernel = None\n\n taichi_kernel = impl.get_runtime().prog.create_kernel(\n taichi_ast_generator, kernel_name, self.is_grad)\n\n self.kernel_cpp = taichi_kernel\n\n assert key not in self.compiled_functions\n self.compiled_functions[key] = self.get_function_body(taichi_kernel)\n\n def get_torch_callbacks(self, v, has_torch, is_ndarray=True):\n callbacks = []\n\n def get_call_back(u, v):\n def call_back():\n u.copy_(v)\n\n return call_back\n\n assert has_torch\n assert isinstance(v, torch.Tensor)\n if v._is_view():\n raise ValueError(\n \"Torch view tensors are not supported, please call tensor.clone() before passing it into taichi kernel.\"\n )\n tmp = v\n taichi_arch = self.runtime.prog.config.arch\n # Ndarray means its memory is allocated on the specified taichi arch.\n # Since torch only supports CPU & CUDA, torch-base ndarray only supports\n # taichi cpu/cuda backend as well.\n # Note I put x64/arm64/cuda here to be more specific.\n assert not is_ndarray or taichi_arch in (\n _ti_core.Arch.cuda, _ti_core.Arch.x64, _ti_core.Arch.arm64\n ), \"Torch-based ndarray is only supported on taichi x64/arm64/cuda backend.\"\n\n if str(v.device).startswith('cuda'):\n # External tensor on cuda\n if taichi_arch != _ti_core.Arch.cuda:\n # copy data back to cpu\n host_v = v.to(device='cpu', copy=True)\n tmp = host_v\n callbacks.append(get_call_back(v, host_v))\n else:\n # External tensor on cpu\n if taichi_arch == _ti_core.Arch.cuda:\n gpu_v = v.cuda()\n tmp = gpu_v\n callbacks.append(get_call_back(v, gpu_v))\n return tmp, callbacks\n\n def get_function_body(self, t_kernel):\n # The actual function body\n def func__(*args):\n assert len(args) == len(\n self.argument_annotations\n ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'\n\n tmps = []\n callbacks = []\n has_external_arrays = False\n has_torch = has_pytorch()\n ndarray_use_torch = impl.get_runtime().ndarray_use_torch\n\n actual_argument_slot = 0\n launch_ctx = t_kernel.make_launch_context()\n for i, v in enumerate(args):\n needed = self.argument_annotations[i]\n if isinstance(needed, template):\n continue\n provided = type(v)\n # Note: do not use sth like \"needed == f32\". 
That would be slow.\n if id(needed) in primitive_types.real_type_ids:\n if not isinstance(v, (float, int)):\n raise TaichiRuntimeTypeError(i, needed.to_string(),\n provided)\n launch_ctx.set_arg_float(actual_argument_slot, float(v))\n elif id(needed) in primitive_types.integer_type_ids:\n if not isinstance(v, int):\n raise TaichiRuntimeTypeError(i, needed.to_string(),\n provided)\n launch_ctx.set_arg_int(actual_argument_slot, int(v))\n elif isinstance(needed, sparse_matrix_builder):\n # Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument\n launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())\n elif isinstance(needed, any_arr) and isinstance(\n v, taichi.lang._ndarray.Ndarray):\n has_external_arrays = True\n v = v.arr\n if ndarray_use_torch:\n is_ndarray = True\n tmp, torch_callbacks = self.get_torch_callbacks(\n v, has_torch, is_ndarray)\n callbacks += torch_callbacks\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement(), v.shape)\n else:\n launch_ctx.set_arg_ndarray(actual_argument_slot, v)\n elif isinstance(needed, any_arr) and (self.match_ext_arr(v)):\n has_external_arrays = True\n is_numpy = isinstance(v, np.ndarray)\n if is_numpy:\n tmp = np.ascontiguousarray(v)\n # Purpose: DO NOT GC |tmp|!\n tmps.append(tmp)\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.ctypes.data),\n tmp.nbytes, v.shape)\n else:\n is_ndarray = False\n tmp, torch_callbacks = self.get_torch_callbacks(\n v, has_torch, is_ndarray)\n callbacks += torch_callbacks\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement(), v.shape)\n\n elif isinstance(needed, MatrixType):\n if id(needed.dtype) in primitive_types.real_type_ids:\n for a in range(needed.n):\n for b in range(needed.m):\n if not isinstance(v[a, b], (int, float)):\n raise TaichiRuntimeTypeError(\n i, needed.dtype.to_string(),\n type(v[a, b]))\n launch_ctx.set_arg_float(\n actual_argument_slot, float(v[a, b]))\n actual_argument_slot += 1\n elif id(needed.dtype) in primitive_types.integer_type_ids:\n for a in range(needed.n):\n for b in range(needed.m):\n if not isinstance(v[a, b], int):\n raise TaichiRuntimeTypeError(\n i, needed.dtype.to_string(),\n type(v[a, b]))\n launch_ctx.set_arg_int(actual_argument_slot,\n int(v[a, b]))\n actual_argument_slot += 1\n else:\n raise ValueError(\n f'Matrix dtype {needed.dtype} is not integer type or real type.'\n )\n continue\n else:\n raise ValueError(\n f'Argument type mismatch. Expecting {needed}, got {type(v)}.'\n )\n actual_argument_slot += 1\n # Both the class kernels and the plain-function kernels are unified now.\n # In both cases, |self.grad| is another Kernel instance that computes the\n # gradient. 
For class kernels, args[0] is always the kernel owner.\n if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:\n self.runtime.target_tape.insert(self, args)\n\n t_kernel(launch_ctx)\n\n ret = None\n ret_dt = self.return_type\n has_ret = ret_dt is not None\n\n if has_ret or (impl.current_cfg().async_mode\n and has_external_arrays):\n runtime_ops.sync()\n\n if has_ret:\n if id(ret_dt) in primitive_types.integer_type_ids:\n ret = t_kernel.get_ret_int(0)\n else:\n ret = t_kernel.get_ret_float(0)\n\n if callbacks:\n for c in callbacks:\n c()\n\n return ret\n\n return func__\n\n @staticmethod\n def match_ext_arr(v):\n has_array = isinstance(v, np.ndarray)\n if not has_array and has_pytorch():\n has_array = isinstance(v, torch.Tensor)\n return has_array\n\n def ensure_compiled(self, *args):\n instance_id, arg_features = self.mapper.lookup(args)\n key = (self.func, instance_id)\n self.materialize(key=key, args=args, arg_features=arg_features)\n return key\n\n # For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__\n # Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)\n @_shell_pop_print\n def __call__(self, *args, **kwargs):\n if self.is_grad and impl.current_cfg().opt_level == 0:\n _logging.warn(\n \"\"\"opt_level = 1 is enforced to enable gradient computation.\"\"\"\n )\n impl.current_cfg().opt_level = 1\n assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'\n key = self.ensure_compiled(*args)\n return self.compiled_functions[key](*args)\n\n\n# For a Taichi class definition like below:\n#\n# @ti.data_oriented\n# class X:\n# @ti.kernel\n# def foo(self):\n# ...\n#\n# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is\n# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',\n# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class\n# inherits, i.e. 
class X(object):, then in both versions, |code_context| is\n# 'class X(object):'...\n_KERNEL_CLASS_STACKFRAME_STMT_RES = [\n re.compile(r'@(\\w+\\.)?data_oriented'),\n re.compile(r'class '),\n]\n\n\ndef _inside_class(level_of_class_stackframe):\n try:\n maybe_class_frame = sys._getframe(level_of_class_stackframe)\n statement_list = inspect.getframeinfo(maybe_class_frame)[3]\n first_statment = statement_list[0].strip()\n for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:\n if pat.match(first_statment):\n return True\n except:\n pass\n return False\n\n\ndef _kernel_impl(_func, level_of_class_stackframe, verbose=False):\n # Can decorators determine if a function is being defined inside a class?\n # https://stackoverflow.com/a/8793684/12003165\n is_classkernel = _inside_class(level_of_class_stackframe + 1)\n\n if verbose:\n print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')\n primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)\n adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)\n # Having |primal| contains |grad| makes the tape work.\n primal.grad = adjoint\n\n if is_classkernel:\n # For class kernels, their primal/adjoint callables are constructed\n # when the kernel is accessed via the instance inside\n # _BoundedDifferentiableMethod.\n # This is because we need to bind the kernel or |grad| to the instance\n # owning the kernel, which is not known until the kernel is accessed.\n #\n # See also: _BoundedDifferentiableMethod, data_oriented.\n @functools.wraps(_func)\n def wrapped(*args, **kwargs):\n # If we reach here (we should never), it means the class is not decorated\n # with @ti.data_oriented, otherwise getattr would have intercepted the call.\n clsobj = type(args[0])\n assert not hasattr(clsobj, '_data_oriented')\n raise TaichiSyntaxError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n else:\n\n @functools.wraps(_func)\n def wrapped(*args, **kwargs):\n try:\n return primal(*args, **kwargs)\n except TaichiCompilationError as e:\n raise type(e)('\\n' + str(e)) from None\n\n wrapped.grad = adjoint\n\n wrapped._is_wrapped_kernel = True\n wrapped._is_classkernel = is_classkernel\n wrapped._primal = primal\n wrapped._adjoint = adjoint\n return wrapped\n\n\ndef kernel(fn):\n \"\"\"Marks a function as a Taichi kernel.\n\n A Taichi kernel is a function written in Python, and gets JIT compiled by\n Taichi into native CPU/GPU instructions (e.g. 
a series of CUDA kernels).\n The top-level ``for`` loops are automatically parallelized, and distributed\n to either a CPU thread pool or massively parallel GPUs.\n\n Kernel's gradient kernel would be generated automatically by the AutoDiff system.\n\n See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.\n\n Args:\n fn (Callable): the Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> x = ti.field(ti.i32, shape=(4, 8))\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> # Assigns all the elements of `x` in parallel.\n >>> for i in x:\n >>> x[i] = i\n \"\"\"\n return _kernel_impl(fn, level_of_class_stackframe=3)\n\n\nclass _BoundedDifferentiableMethod:\n def __init__(self, kernel_owner, wrapped_kernel_func):\n clsobj = type(kernel_owner)\n if not getattr(clsobj, '_data_oriented', False):\n raise TaichiSyntaxError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n self._kernel_owner = kernel_owner\n self._primal = wrapped_kernel_func._primal\n self._adjoint = wrapped_kernel_func._adjoint\n self._is_staticmethod = wrapped_kernel_func._is_staticmethod\n self.__name__ = None\n\n def __call__(self, *args, **kwargs):\n if self._is_staticmethod:\n return self._primal(*args, **kwargs)\n return self._primal(self._kernel_owner, *args, **kwargs)\n\n def grad(self, *args, **kwargs):\n return self._adjoint(self._kernel_owner, *args, **kwargs)\n\n\ndef data_oriented(cls):\n \"\"\"Marks a class as Taichi compatible.\n\n To allow for modularized code, Taichi provides this decorator so that\n Taichi kernels can be defined inside a class.\n\n See also https://docs.taichi.graphics/lang/articles/advanced/odop\n\n Example::\n\n >>> @ti.data_oriented\n >>> class TiArray:\n >>> def __init__(self, n):\n >>> self.x = ti.field(ti.f32, shape=n)\n >>>\n >>> @ti.kernel\n >>> def inc(self):\n >>> for i in self.x:\n >>> self.x[i] += 1.0\n >>>\n >>> a = TiArray(32)\n >>> a.inc()\n\n Args:\n cls (Class): the class to be decorated\n\n Returns:\n The decorated class.\n \"\"\"\n def _getattr(self, item):\n method = cls.__dict__.get(item, None)\n is_property = method.__class__ == property\n is_staticmethod = method.__class__ == staticmethod\n if is_property:\n x = method.fget\n else:\n x = super(cls, self).__getattribute__(item)\n if hasattr(x, '_is_wrapped_kernel'):\n if inspect.ismethod(x):\n wrapped = x.__func__\n else:\n wrapped = x\n wrapped._is_staticmethod = is_staticmethod\n assert inspect.isfunction(wrapped)\n if wrapped._is_classkernel:\n ret = _BoundedDifferentiableMethod(self, wrapped)\n ret.__name__ = wrapped.__name__\n if is_property:\n return ret()\n return ret\n if is_property:\n return x(self)\n return x\n\n cls.__getattribute__ = _getattr\n cls._data_oriented = True\n\n return cls\n\n\n__all__ = [\"data_oriented\", \"func\", \"kernel\"]\n"
] | [
[
"numpy.ascontiguousarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AI-Huang/XOR_Gate_NN | [
"d97c7fd7e5b046e84bd862081ab800b9ccbb1672"
] | [
"xor_gate_nn/datasets/keras_fn/datasets.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Date : Feb-09-21 22:23\n# @Author : Kelly Hwong ([email protected])\n\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass XOR_Dataset(tf.keras.utils.Sequence):\n \"\"\"XOR_Dataset.\"\"\"\n\n def __init__(\n self,\n batch_size=1,\n shuffle=False,\n seed=42,\n ):\n self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n self.y = np.array([[0], [1], [1], [0]])\n\n assert batch_size <= 4\n self.batch_size = batch_size # one by one learning\n self.index = self._set_index_array()\n self.shuffle = shuffle\n\n def __getitem__(self, batch_index):\n \"\"\"Gets batch at batch_index `batch_index`.\n\n Arguments:\n batch_index: batch_index of the batch in the Sequence.\n\n Returns:\n batch_x, batch_y: a batch of sequence data.\n \"\"\"\n batch_size = self.batch_size\n\n sample_index = \\\n self.index[batch_index * batch_size:(batch_index+1) * batch_size]\n\n batch_x = np.empty((batch_size, 2))\n batch_y = np.empty(batch_size)\n\n for _, i in enumerate(sample_index):\n batch_x[_, ] = self.X[i, :]\n batch_y[_] = self.y[i, :]\n\n return batch_x, batch_y\n\n def __len__(self):\n \"\"\"Number of batches in the Sequence.\n Returns:\n The number of batches in the Sequence.\n \"\"\"\n return int(np.ceil(self.index.shape[0] / self.batch_size))\n\n def __iter__(self):\n \"\"\"Create a generator that iterate over the Sequence.\"\"\"\n for item in (self[i] for i in range(len(self))):\n yield item\n\n def _set_index_array(self):\n \"\"\"_set_index_array\n \"\"\"\n N = 4\n return np.arange(0, N)\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.empty",
"numpy.ceil"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
austinpeel/jax | [
"ca766caa02296023bd6714bb7fdba064a45e2258",
"ca766caa02296023bd6714bb7fdba064a45e2258",
"ca766caa02296023bd6714bb7fdba064a45e2258"
] | [
"jax/experimental/loops.py",
"jax/random.py",
"jax/interpreters/masking.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loops is an **experimental** module for syntactic sugar for loops and control-flow.\n\nThe current implementation should convert loops correctly to JAX internal\nrepresentation, and most transformations should work (see below), but we have\nnot yet fine-tuned the performance of the resulting XLA compilation!\n\nBy default, loops and control-flow in JAX are executed and inlined during tracing.\nFor example, in the following code the `for` loop is unrolled during JAX tracing::\n\n arr = np.zeros(5)\n for i in range(arr.shape[0]):\n arr[i] += 2.\n if i % 2 == 0:\n arr[i] += 1.\n\nIn order to capture the structured control-flow one has to use the higher-order\nJAX operations, which require you to express the body of the loops and\nconditionals as functions, and the array updates using a functional style that\nreturns an updated array, e.g.::\n\n arr = np.zeros(5)\n def loop_body(i, acc_arr):\n arr1 = ops.index_update(acc_arr, i, acc_arr[i] + 2.)\n return lax.cond(i % 2 == 0,\n arr1,\n lambda arr1: ops.index_update(arr1, i, arr1[i] + 1),\n arr1,\n lambda arr1: arr1)\n arr = lax.fori_loop(0, arr.shape[0], loop_body, arr)\n\nThe default notation quickly gets unreadable with deeper nested loops.\nWith the utilities in this module you can write loops and conditionals that\nlook closer to plain Python, as long as you keep the loop-carried state in a\nspecial `loops.scope` object and use `for` loops over special\n`scope.range` iterators::\n\n from jax.experimental import loops\n with loops.Scope() as s:\n s.arr = np.zeros(5) # Create the mutable state of the loop as `scope` fields.\n for i in s.range(s.arr.shape[0]):\n s.arr = ops.index_update(s.arr, i, s.arr[i] + 2.)\n for _ in s.cond_range(i % 2 == 0): # Conditionals as loops with 0 or 1 iterations\n s.arr = ops.index_update(s.arr, i, s.arr[i] + 1.)\n\nLoops constructed with `range` must have literal constant bounds. If you need\nloops with dynamic bounds, you can use the more general `while_range` iterator.\nHowever, in that case that `grad` transformation is not supported::\n\n s.idx = start\n for _ in s.while_range(lambda: s.idx < end):\n s.idx += 1\n\nNotes:\n * Loops and conditionals to be functionalized can appear only inside scopes\n constructed with `loops.Scope` and they must use one of the `Scope.range`\n iterators. All other loops are unrolled during tracing, as usual in JAX.\n * Only scope data (stored in fields of the scope object) is functionalized.\n All other state, e.g., in other Python variables, will not be considered as\n being part of the loop output. 
All references to the mutable state should be\n through the scope: `s.arr`.\n * Conceptually, this model is still \"functional\" in the sense that a loop over\n a `Scope.range` behaves as a function whose input and output is the scope data.\n * Scopes should be passed down to callees that need to use loop\n functionalization, or they may be nested.\n * The programming model is that the loop body over a `scope.range` is traced\n only once, using abstract shape values, similar to how JAX traces function\n bodies.\n\nRestrictions:\n * The tracing of the loop body should not exit prematurely with `return`,\n `exception`, `break`. This would be detected and reported as errors when we\n encounter unnested scopes.\n * The loop index variable should not be used after the loop. Similarly, one\n should not use outside the loop data computed in the loop body, except data\n stored in fields of the scope object.\n * No new mutable state can be created inside a loop to be functionalized.\n All mutable state must be created outside all loops and conditionals.\n * For a `while` loop, the conditional function is not allowed to modify the\n scope state. This is a checked error. Also, for `while` loops the `grad`\n transformation does not work. An alternative that allows `grad` is a bounded\n loop (`range`).\n\nTransformations:\n * All transformations are supported, except `grad` is not supported for\n `Scope.while_range` loops.\n * `vmap` is very useful for such loops because it pushes more work into the\n inner-loops, which should help performance for accelerators.\n\nFor usage example, see tests/loops_test.py.\n\"\"\"\n\n\nimport copy\nfrom functools import partial\nimport itertools\nimport numpy as np\nimport traceback\nfrom typing import Any, List, cast\n\nfrom jax import abstract_arrays\nfrom jax import lax, core\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax import tree_util\nfrom jax import numpy as jnp\nfrom jax.interpreters import partial_eval as pe\nfrom jax.util import safe_map\nfrom jax.config import config\n\n\nclass Scope(object):\n \"\"\"A scope context manager to keep the state of loop bodies for functionalization.\n\n Usage::\n\n with Scope() as s:\n s.data = 0.\n for i in s.range(5):\n s.data += 1.\n return s.data\n\n \"\"\"\n\n def __init__(self):\n self._mutable_state = {} # state to be functionalized, indexed by name.\n self._active_ranges = [] # stack of active ranges, last one is the innermost.\n self._count_subtraces = 0 # How many net started subtraces, for error recovery\n\n def range(self, first, second=None, third=None):\n \"\"\"Creates an iterator for bounded iterations to be functionalized.\n\n The body is converted to a `lax.scan`, for which all JAX transformations work.\n The `first`, `second`, and `third` arguments must be integer literals.\n\n Usage::\n\n range(5) # start=0, end=5, step=1\n range(1, 5) # start=1, end=5, step=1\n range(1, 5, 2) # start=1, end=5, step=2\n\n s.out = 1.\n for i in scope.range(5):\n s.out += 1.\n \"\"\"\n if third is not None:\n start = int(first)\n stop = int(second)\n step = int(third)\n else:\n step = 1\n if second is not None:\n start = int(first)\n stop = int(second)\n else:\n start = 0\n stop = int(first)\n return _BodyTracer(self, _BoundedLoopBuilder(start, stop, step))\n\n def cond_range(self, pred):\n \"\"\"Creates a conditional iterator with 0 or 1 iterations based on the boolean.\n\n The body is converted to a `lax.cond`. 
All JAX transformations work.\n\n Usage::\n\n for _ in scope.cond_range(s.field < 0.):\n s.field = - s.field\n \"\"\"\n # TODO: share these checks with lax_control_flow.cond\n if len(np.shape(pred)) != 0:\n raise TypeError(\n \"Pred must be a scalar, got {} of shape {}.\".format(pred, np.shape(pred)))\n\n try:\n pred_dtype = np.result_type(pred)\n except TypeError as err:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred)) from err\n\n if pred_dtype.kind != 'b':\n if pred_dtype.kind in 'iuf':\n pred = pred != 0\n else:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred_dtype))\n\n return _BodyTracer(self, _CondBuilder(pred))\n\n def while_range(self, cond_func):\n \"\"\"Creates an iterator that continues as long as `cond_func` returns true.\n\n The body is converted to a `lax.while_loop`.\n The `grad` transformation does not work.\n\n Usage::\n\n for _ in scope.while_range(lambda: s.loss > 1.e-5):\n s.loss = loss(...)\n\n Args:\n cond_func: a lambda with no arguments, the condition for the \"while\".\n \"\"\"\n return _BodyTracer(self, _WhileBuilder(cond_func))\n\n def _push_range(self, range_):\n for ar in self._active_ranges:\n if ar is range_:\n raise ValueError(\"Range is reused nested inside itself.\")\n self._active_ranges.append(range_)\n\n def _pop_range(self, range_):\n if not (range_ is self._active_ranges[-1]):\n self._error_premature_exit_range()\n self._active_ranges.pop()\n\n def _error_premature_exit_range(self):\n \"\"\"Raises error about premature exit from a range\"\"\"\n msg = \"Some ranges have exited prematurely. The innermost such range is at\\n{}\"\n raise ValueError(msg.format(self._active_ranges[-1].location()))\n\n def __getattr__(self, key):\n \"\"\"Accessor for scope data.\n\n Called only if the attribute is not found, which will happen when we read\n scope data that has been stored in self._mutable_state.\n \"\"\"\n mt_val = self._mutable_state.get(key)\n if mt_val is None:\n raise AttributeError(\n \"Reading uninitialized data '{}' from the scope.\".format(key))\n return mt_val\n\n def __setattr__(self, key, value):\n \"\"\"Update scope data to be functionalized.\n\n Called for *all* attribute setting.\n \"\"\"\n if key in [\"_active_ranges\", \"_mutable_state\", \"_count_subtraces\"]:\n object.__setattr__(self, key, value)\n else:\n if self._active_ranges and key not in self._mutable_state:\n raise ValueError(\n \"New mutable state '{}' cannot be created inside a loop.\".format(key))\n self._mutable_state[key] = value\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n try:\n if exc_type is None:\n if self._active_ranges: # We have some ranges that we did not exit properly\n self._error_premature_exit_range()\n return True\n else:\n # The exception may come from inside one or more ranges. We let the current\n # exception propagate, assuming it terminates the tracing. 
If not, the\n # tracers may be left in an inconsistent state.\n return False # re-raise\n finally:\n # Ensure we leave the global trace_state as we found it\n while self._count_subtraces > 0:\n self.end_subtrace()\n\n def start_subtrace(self):\n \"\"\"Starts a nested trace, returns the Trace object.\"\"\"\n # TODO: This follows the __enter__ part of core.new_main.\n if config.omnistaging_enabled:\n level = core.thread_local_state.trace_state.trace_stack.next_level()\n main = core.MainTrace(level, pe.JaxprTrace)\n core.thread_local_state.trace_state.trace_stack.push(main)\n self._count_subtraces += 1\n return pe.JaxprTrace(main, core.cur_sublevel())\n else:\n level = core.thread_local_state.trace_state.trace_stack.next_level(False)\n main = core.MainTrace(level, pe.JaxprTrace)\n core.thread_local_state.trace_state.trace_stack.push(main, False)\n self._count_subtraces += 1\n return pe.JaxprTrace(main, core.cur_sublevel())\n\n def end_subtrace(self):\n # TODO: This follows the __exit__ part of core.new_main\n if config.omnistaging_enabled:\n core.thread_local_state.trace_state.trace_stack.pop()\n else:\n core.thread_local_state.trace_state.trace_stack.pop(False)\n self._count_subtraces -= 1\n\n\nclass _BodyTracer(object):\n \"\"\"Traces the body of the loop and builds a functional control-flow representation.\n\n This class is also an iterator, only the first iteration is traced.\n \"\"\"\n\n def __init__(self, scope, loop_builder):\n \"\"\"\n Params:\n scope: the current scope\n loop_builder: instance of _LoopBuilder\n \"\"\"\n self.scope = scope\n self.loop_builder = loop_builder\n self.first_iteration = True # If we are tracing the first iteration\n # Stack trace, without this line and the s.range function\n self.stack = traceback.StackSummary.from_list(\n cast(List[Any], traceback.extract_stack()[:-2]))\n\n # Next are state kept from the start of the first iteration to the end of the iteration.\n self.carried_state_initial = {}\n # The parameters that were created for state upon entering an arbitrary iteration.\n self.carried_state_vars = {}\n\n self.trace = None\n # List of scope fields carried through the loop\n self.carried_state_names = None\n self.init_tree = None # The PyTreeDef corresponding to carried_state_names\n self.init_vals = None # The values corresponding to self.init_tree\n\n def location(self):\n \"\"\"A multiline string representing the source location of the range.\"\"\"\n if self.stack is not None:\n return \" \".join(self.stack.format())\n else:\n return \"\"\n\n def __iter__(self):\n \"\"\"Called before starting the first iteration.\"\"\"\n self.first_iteration = True # In case we reuse the range\n return self\n\n def __next__(self):\n if self.first_iteration:\n self.first_iteration = False\n self.scope._push_range(self)\n self.start_tracing_body()\n return self._index_var\n else:\n self.end_tracing_body()\n self.scope._pop_range(self)\n raise StopIteration # Trace only one iteration.\n\n def next(self): # For PY2\n return self.__next__()\n\n def start_tracing_body(self):\n \"\"\"Called upon starting the tracing of the loop body.\"\"\"\n # Make a copy of the current value of the mutable state\n self.carried_state_initial = copy.copy(self.scope._mutable_state)\n # The entire state is carried.\n self.carried_state_names = sorted(self.scope._mutable_state.keys())\n\n # TODO: This is the first part of partial_eval.trace_to_subjaxpr. 
Share.\n self.trace = self.scope.start_subtrace()\n # Set the scope._mutable_state to new tracing variables.\n for key, initial in self.carried_state_initial.items():\n mt_aval = _BodyTracer.abstractify(initial)\n mt_pval = pe.PartialVal.unknown(mt_aval)\n mt_var = self.trace.new_arg(mt_pval)\n self.carried_state_vars[key] = mt_var\n self.scope._mutable_state[key] = mt_var\n\n index_var_aval = _BodyTracer.abstractify(0)\n index_var_pval = pe.PartialVal.unknown(index_var_aval)\n self._index_var = self.trace.new_arg(index_var_pval)\n\n def end_tracing_body(self):\n \"\"\"Called when we are done tracing one iteration of the body.\"\"\"\n # We will turn the body of the loop into a function that takes some values\n # for the scope state (carried_state_names) and returns the values for the\n # same state fields after one execution of the body. For some of the ranges,\n # e.g., scope.range, the function will also take the index_var as last parameter.\n in_tracers = [self.carried_state_vars[ms] for ms in self.carried_state_names]\n if self.loop_builder.can_use_index_var():\n in_tracers += [self._index_var]\n\n # Make the jaxpr for the body of the loop\n # TODO: See which mutable state was changed in the one iteration.\n # For now, we assume all state changes.\n body_out_tracers = tuple([self.scope._mutable_state[ms]\n for ms in self.carried_state_names])\n try:\n # If the body actually uses the index variable, and is not allowed to\n # (e.g., cond_range and while_range), then in_tracers will not contain\n # the tracer for the index_var, and trace_to_jaxpr_finalize will throw\n # an assertion error.\n body_closed_jaxpr, body_const_vals = _BodyTracer.trace_to_jaxpr_finalize(\n in_tracers=in_tracers,\n out_tracers=body_out_tracers,\n trace=self.trace)\n except core.UnexpectedTracerError as e:\n if \"Tracer not among input tracers\" in str(e):\n raise ValueError(\"Body of cond_range or while_range should not use the \"\n \"index variable returned by iterator.\") from e\n raise\n # End the subtrace for the loop body, before we trace the condition\n self.scope.end_subtrace()\n\n carried_init_val = tuple([self.carried_state_initial[ms]\n for ms in self.carried_state_names])\n carried_init_vals, carried_tree = tree_util.tree_flatten(carried_init_val)\n\n carried_out_vals = self.loop_builder.build_output_vals(\n self.scope, self.carried_state_names, carried_tree,\n carried_init_vals, body_closed_jaxpr, body_const_vals)\n carried_mutable_state_unflattened = tree_util.tree_unflatten(carried_tree,\n carried_out_vals)\n\n # Update the mutable state with the values of the changed vars, after the loop.\n for ms, mv in zip(self.carried_state_names, carried_mutable_state_unflattened):\n self.scope._mutable_state[ms] = mv\n\n @staticmethod\n def abstractify(x):\n return abstract_arrays.raise_to_shaped(core.get_aval(x))\n\n @staticmethod\n def trace_to_jaxpr_finalize(in_tracers, out_tracers, trace, instantiate=True):\n # TODO: This is the final part of the partial_eval.trace_to_subjaxpr. Share.\n instantiate = [instantiate] * len(out_tracers)\n out_tracers = safe_map(trace.full_raise, safe_map(core.full_lower, out_tracers))\n out_tracers = safe_map(partial(pe.instantiate_const_at, trace),\n instantiate, out_tracers)\n jaxpr, consts, env = pe.tracers_to_jaxpr(in_tracers, out_tracers)\n assert not env # TODO: this is from partial_eval.trace_to_jaxpr. 
Share.\n closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())\n return closed_jaxpr, consts\n\n\nclass _LoopBuilder(object):\n \"\"\"Abstract superclass for the loop builders\"\"\"\n\n def can_use_index_var(self):\n \"\"\"Whether this kind of loop can use the index var returned by the range iterator.\"\"\"\n raise NotImplementedError\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n \"\"\"Builds the output values for the loop carried state.\n\n Params:\n scope: the current Scope object.\n carried_state_names: the list of names of mutable state fields that is\n carried through the body.\n carried_tree: the PyTreeDef for the tuple of carried_state_names.\n init_vals: the initial values on body entry corresponding to the init_tree.\n body_closed_jaxpr: the Jaxpr for the body returning the new values of\n carried_state_names.\n body_const_vals: the constant values for the body.\n\n Returns:\n the output tracer corresponding to the lax primitive representing the loop.\n \"\"\"\n raise NotImplementedError\n\n def __str__(self):\n raise NotImplementedError\n\n\nclass _BoundedLoopBuilder(_LoopBuilder):\n \"\"\"Builds a lax operation corresponding to a bounded range iteration.\"\"\"\n\n def __init__(self, start, stop, step):\n self.start = start\n self.stop = stop\n self.step = step\n self._index_var = None # The parameter for the index variable\n\n def can_use_index_var(self):\n return True\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n arange_val = jnp.arange(self.start, stop=self.stop, step=self.step)\n return lax_control_flow.scan_p.bind(*itertools.chain(body_const_vals,\n init_vals, [arange_val]),\n reverse=False, length=arange_val.shape[0],\n jaxpr=body_closed_jaxpr,\n num_consts=len(body_const_vals),\n num_carry=len(init_vals),\n linear=(False,) * (len(body_const_vals) +\n len(init_vals) + 1),\n unroll=1)\n\n\nclass _CondBuilder(_LoopBuilder):\n \"\"\"Builds a lax.cond operation.\"\"\"\n\n def __init__(self, pred):\n self.index = lax.convert_element_type(pred, np.int32)\n\n def can_use_index_var(self):\n return False\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n # Simulate a pass-through false branch\n in_vals, in_tree = tree_util.tree_flatten(\n (body_const_vals, tree_util.tree_unflatten(carried_tree, init_vals)))\n in_avals = safe_map(_BodyTracer.abstractify, in_vals)\n pass_through_closed_jaxpr, pass_through_const_vals, _ = (\n lax_control_flow._initial_style_jaxpr(\n lambda *args: args[1],\n in_tree,\n tuple(in_avals)))\n assert len(pass_through_const_vals) == 0\n args = list(itertools.chain(body_const_vals, init_vals))\n return lax_control_flow.cond_p.bind(\n self.index, *args,\n branches=(pass_through_closed_jaxpr, body_closed_jaxpr),\n linear=(False,) * len(args))\n\n\nclass _WhileBuilder(_LoopBuilder):\n \"\"\"Builds a lax.while operation.\"\"\"\n\n def __init__(self, cond_func):\n self.cond_func = cond_func # Function with 0 arguments (can reference the scope)\n\n def can_use_index_var(self):\n return False\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n # Trace the conditional function. cond_func takes 0 arguments, but\n # for lax.while we need a conditional function that takes the\n # carried_state_names. 
_initial_style_jaxpr will start its own trace and\n # will create tracers for all the carried state. We must put these values\n # in the scope._mutable_state before we trace the conditional\n # function.\n def cond_func_wrapped(*args):\n assert len(args) == len(carried_state_names)\n for ms, init_ms in zip(carried_state_names, args):\n scope._mutable_state[ms] = init_ms\n res = self.cond_func()\n # Conditional function is not allowed to modify the scope state\n for ms, init_ms in zip(carried_state_names, args):\n if not (scope._mutable_state[ms] is init_ms):\n msg = \"Conditional function modifies scope.{} field.\"\n raise ValueError(msg.format(ms))\n return res\n\n init_avals = safe_map(_BodyTracer.abstractify, init_vals)\n cond_jaxpr, cond_consts, cond_tree = (\n lax_control_flow._initial_style_jaxpr(cond_func_wrapped,\n carried_tree,\n tuple(init_avals)))\n # TODO: share these checks with lax_control_flow.while\n if not tree_util.treedef_is_leaf(cond_tree):\n msg = \"cond_fun must return a boolean scalar, but got pytree {}.\"\n raise TypeError(msg.format(cond_tree))\n if cond_jaxpr.out_avals != [abstract_arrays.ShapedArray((), np.bool_)]:\n msg = \"cond_fun must return a boolean scalar, but got output type(s) {}.\"\n raise TypeError(msg.format(cond_jaxpr.out_avals))\n\n return lax_control_flow.while_p.bind(*itertools.chain(cond_consts,\n body_const_vals,\n init_vals),\n cond_nconsts=len(cond_consts),\n cond_jaxpr=cond_jaxpr,\n body_nconsts=len(body_const_vals),\n body_jaxpr=body_closed_jaxpr)\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"JAX pseudo-random number generators (PRNGs).\n\nExample usage:\n\n>>> rng = jax.random.PRNGKey(seed)\n>>> for i in range(num_steps):\n... rng, rng_input = jax.random.split(rng)\n... params = compiled_update(rng_input, params, next(batches))\n\nContext:\n\nAmong other requirements, the JAX PRNG aims to:\n(a) ensure reproducibility,\n(b) parallelize well, both in terms of vectorization (generating array values)\nand multi-replica, multi-core computation. In particular it should not use\nsequencing constraints between random function calls.\n\nThe approach is based on:\n1. \"Parallel random numbers: as easy as 1, 2, 3\" (Salmon et al. 2011)\n2. \"Splittable pseudorandom number generators using cryptographic hashing\"\n(Claessen et al. 2013)\n\nSee also https://github.com/google/jax/blob/master/design_notes/prng.md\nfor the design and its motivation.\n\"\"\"\n\n\nfrom functools import partial\nfrom typing import Optional, Sequence, Union\nimport warnings\n\nimport numpy as np\n\nfrom . import lax\nfrom . import numpy as jnp\nfrom . import dtypes\nfrom .api import jit, vmap\nfrom jax._src.numpy.lax_numpy import _constant_like, asarray\nfrom jax.lib import xla_bridge\nfrom jax.lib import xla_client\nfrom jax.lib import cuda_prng\nfrom jax import core\nfrom jax import abstract_arrays\nfrom jax.numpy.linalg import cholesky\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import xla\nfrom jax.util import prod\n\n\n_UINT_DTYPES = {8: jnp.uint8, 16: jnp.uint16, 32: jnp.uint32, 64: jnp.uint64}\n\n\ndef PRNGKey(seed: int) -> jnp.ndarray:\n \"\"\"Create a pseudo-random number generator (PRNG) key given an integer seed.\n\n Args:\n seed: a 64- or 32-bit integer used as the value of the key.\n\n Returns:\n A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. 
The\n key is constructed from a 64-bit seed by effectively bit-casting to a pair\n of uint32 values (or from a 32-bit seed by first padding out with zeros).\n \"\"\"\n if np.shape(seed):\n raise TypeError(\"PRNGKey seed must be a scalar.\")\n convert = lambda k: lax.reshape(lax.convert_element_type(k, np.uint32), [1])\n if isinstance(seed, (int, np.ndarray)):\n # Special handling of raw integer values, which may have be 64bit even\n # when jax_enable_x64=False and we don't want to drop the top 32 bits\n k1 = convert(np.bitwise_and(np.right_shift(seed, 32), 0xFFFFFFFF))\n else:\n k1 = convert(lax.shift_right_logical(seed, lax._const(seed, 32)))\n k2 = convert(jnp.bitwise_and(seed, 0xFFFFFFFF))\n return lax.concatenate([k1, k2], 0)\n\ndef _is_prng_key(key: jnp.ndarray) -> bool:\n try:\n return key.shape == (2,) and key.dtype == np.uint32\n except AttributeError:\n return False\n\n\n### utilities\n\n\n# TODO(mattjj,jakevdp): add more info to error message, use this utility more\ndef _asarray(x):\n \"\"\"A more restrictive jnp.asarray, only accepts JAX arrays and np.ndarrays.\"\"\"\n if not isinstance(x, (np.ndarray, jnp.ndarray)):\n raise TypeError(f\"Function requires array input, got {x} of type {type(x)}.\")\n return jnp.asarray(x)\n\n\ndef _make_rotate_left(dtype):\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(\"_rotate_left only accepts integer dtypes.\")\n nbits = np.array(jnp.iinfo(dtype).bits, dtype)\n\n def _rotate_left(x, d):\n if lax.dtype(d) != dtype:\n d = lax.convert_element_type(d, dtype)\n if lax.dtype(x) != dtype:\n x = lax.convert_element_type(x, dtype)\n return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)\n return _rotate_left\n\n\ndef _bit_stats(bits):\n \"\"\"This is a debugging function to compute the statistics of bit fields.\"\"\"\n return np.array([list(map(int, np.binary_repr(x, 64))) for x in bits]).mean(0)\n\n\n### hash function and split\n\ndef _threefry2x32_abstract_eval(*args):\n if any(a.dtype != jnp.uint32 for a in args):\n raise TypeError(\"Arguments to threefry2x32 must have uint32 type, got {}\"\n .format(args))\n if all(isinstance(arg, abstract_arrays.ShapedArray) for arg in args):\n shape = lax._broadcasting_shape_rule(*args)\n aval = abstract_arrays.ShapedArray(shape, jnp.dtype(jnp.uint32))\n else:\n aval = abstract_arrays.UnshapedArray(jnp.dtype(jnp.uint32))\n return (aval,) * 2\n\nrotate_left = _make_rotate_left(np.uint32)\n\ndef apply_round(v, rot):\n v = v[:]\n v[0] = v[0] + v[1]\n v[1] = rotate_left(v[1], rot)\n v[1] = v[0] ^ v[1]\n return v\n\ndef rotate_list(xs):\n return xs[1:] + xs[:1]\n\ndef rolled_loop_step(i, state):\n x, ks, rotations = state\n for r in rotations[0]:\n x = apply_round(x, r)\n new_x = [x[0] + ks[0], x[1] + ks[1] + asarray(i + 1, dtype=np.uint32)]\n return new_x, rotate_list(ks), rotate_list(rotations)\n\ndef _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n x = [x1, x2]\n\n rotations = [np.array([13, 15, 26, 6], dtype=np.uint32),\n np.array([17, 29, 16, 24], dtype=np.uint32)]\n ks = [key1, key2, key1 ^ key2 ^ np.uint32(0x1BD11BDA)]\n\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1]\n\n if use_rolled_loops:\n x, _, _ = lax.fori_loop(0, 5, rolled_loop_step, (x, rotate_list(ks), rotations))\n\n else:\n for r in rotations[0]:\n x = 
apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + np.uint32(1)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(2)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1] + np.uint32(3)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + np.uint32(4)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(5)\n\n return tuple(x)\n\n\ndef _threefry2x32_gpu_translation_rule(c, k1, k2, x1, x2):\n shape = lax.broadcast_shapes(\n c.get_shape(k1).dimensions(), c.get_shape(k2).dimensions(),\n c.get_shape(x1).dimensions(), c.get_shape(x2).dimensions())\n rank = len(shape)\n if 0 in shape:\n zeros = xla_client.ops.Broadcast(\n xla_bridge.constant(c, np.array(0, np.uint32)), shape)\n return xla_client.ops.Tuple(c, [zeros, zeros])\n def _broadcast(x):\n ndims = c.get_shape(x).rank()\n return xla_client.ops.BroadcastInDim(x, shape,\n tuple(range(rank - ndims, rank)))\n return cuda_prng.threefry2x32(\n c, (_broadcast(k1), _broadcast(k2)), (_broadcast(x1), _broadcast(x2)))\n\nthreefry2x32_p = core.Primitive(\"threefry2x32\")\nthreefry2x32_p.multiple_results = True\nthreefry2x32_p.def_impl(partial(xla.apply_primitive, threefry2x32_p))\nthreefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)\nbatching.defbroadcasting(threefry2x32_p)\nxla.translations[threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True)\nxla.backend_specific_translations['cpu'][threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=True),\n multiple_results=True)\nif cuda_prng:\n xla.backend_specific_translations['gpu'][threefry2x32_p] = \\\n _threefry2x32_gpu_translation_rule\n\n@jit\ndef threefry_2x32(keypair, count):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n key1, key2 = keypair\n if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == np.uint32:\n msg = \"threefry_2x32 requires uint32 arguments, got {}\"\n raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))\n\n odd_size = count.size % 2\n if odd_size:\n x = list(jnp.split(jnp.concatenate([count.ravel(), np.uint32([0])]), 2))\n else:\n x = list(jnp.split(count.ravel(), 2))\n\n x = threefry2x32_p.bind(key1, key2, x[0], x[1])\n out = jnp.concatenate(x)\n assert out.dtype == np.uint32\n return lax.reshape(out[:-1] if odd_size else out, count.shape)\n\n\ndef split(key: jnp.ndarray, num: int = 2) -> jnp.ndarray:\n \"\"\"Splits a PRNG key into `num` new keys by adding a leading axis.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n num: optional, a positive integer indicating the number of keys to produce\n (default 2).\n\n Returns:\n An array with shape (num, 2) and dtype uint32 representing `num` new keys.\n \"\"\"\n return _split(key, int(num)) # type: ignore\n\n@partial(jit, static_argnums=(1,))\ndef _split(key, num) -> jnp.ndarray:\n counts = lax.iota(np.uint32, num * 2)\n return lax.reshape(threefry_2x32(key, counts), (num, 2))\n\n\ndef fold_in(key, data):\n \"\"\"Folds in data to a PRNG key to form a new PRNG key.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n data: a 32bit integer representing data to 
be folded in to the key.\n\n Returns:\n A new PRNGKey that is a deterministic function of the inputs and is\n statistically safe for producing a stream of new pseudo-random values.\n \"\"\"\n return _fold_in(key, data)\n\n@jit\ndef _fold_in(key, data):\n return threefry_2x32(key, PRNGKey(data))\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _random_bits(key, bit_width, shape):\n \"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\n if not _is_prng_key(key):\n raise TypeError(\"_random_bits got invalid prng key.\")\n if bit_width not in (8, 16, 32, 64):\n raise TypeError(\"requires 8-, 16-, 32- or 64-bit field width.\")\n size = prod(shape)\n max_count = int(np.ceil(bit_width * size / 32))\n\n nblocks, rem = divmod(max_count, jnp.iinfo(np.uint32).max)\n if not nblocks:\n bits = threefry_2x32(key, lax.iota(np.uint32, rem))\n else:\n *subkeys, last_key = split(key, nblocks + 1)\n blocks = [threefry_2x32(k, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))\n for k in subkeys]\n last = threefry_2x32(last_key, lax.iota(np.uint32, rem))\n bits = lax.concatenate(blocks + [last], 0)\n\n dtype = _UINT_DTYPES[bit_width]\n if bit_width == 64:\n bits = [lax.convert_element_type(x, dtype) for x in jnp.split(bits, 2)]\n bits = lax.shift_left(bits[0], dtype(32)) | bits[1]\n elif bit_width in [8, 16]:\n # this is essentially bits.view(dtype)[:size]\n bits = lax.bitwise_and(\n np.uint32(np.iinfo(dtype).max),\n lax.shift_right_logical(\n lax.broadcast(bits, (1,)),\n lax.mul(\n np.uint32(bit_width),\n lax.broadcasted_iota(np.uint32, (32 // bit_width, 1), 0)\n )\n )\n )\n bits = lax.reshape(bits, (np.uint32(max_count * 32 // bit_width),), (1, 0))\n bits = lax.convert_element_type(bits, dtype)[:size]\n return lax.reshape(bits, shape)\n\n\n### random samplers\n\n\ndef _check_shape(name, shape, *param_shapes):\n shape = abstract_arrays.canonicalize_shape(shape)\n\n if param_shapes:\n shape_ = lax.broadcast_shapes(shape, *param_shapes)\n if shape != shape_:\n msg = (\"{} parameter shapes must be broadcast-compatible with shape \"\n \"argument, and the result of broadcasting the shapes must equal \"\n \"the shape argument, but got result {} for shape argument {}.\")\n raise ValueError(msg.format(name, shape_, shape))\n\n\ndef uniform(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: np.dtype = dtypes.float_,\n minval: Union[float, jnp.ndarray] = 0.,\n maxval: Union[float, jnp.ndarray] = 1.) -> jnp.ndarray:\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n minval: optional, a minimum (inclusive) value broadcast-compatible with shape for the range (default 0).\n maxval: optional, a maximum (exclusive) value broadcast-compatible with shape for the range (default 1).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `uniform` must be a float dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _uniform(key, shape, dtype, minval, maxval) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _uniform(key, shape, dtype, minval, maxval) -> jnp.ndarray:\n _check_shape(\"uniform\", shape)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"uniform only accepts floating point dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, len(shape))\n maxval = lax.broadcast_to_rank(maxval, len(shape))\n\n finfo = jnp.finfo(dtype)\n nbits, nmant = finfo.bits, finfo.nmant\n\n if nbits not in (16, 32, 64):\n raise TypeError(\"uniform only accepts 32- or 64-bit dtypes.\")\n\n bits = _random_bits(key, nbits, shape)\n\n # The strategy here is to randomize only the mantissa bits with an exponent of\n # 1 (after applying the bias), then shift and scale to the desired range. The\n # bit-level transformation we use relies on Numpy and XLA having bit-for-bit\n # equivalent float representations, which might not be true on all platforms.\n float_bits = lax.bitwise_or(\n lax.shift_right_logical(bits, np.array(nbits - nmant, lax.dtype(bits))),\n np.array(1., dtype).view(_UINT_DTYPES[nbits]))\n floats = lax.bitcast_convert_type(float_bits, dtype) - np.array(1., dtype)\n return lax.max(\n minval,\n lax.reshape(floats * (maxval - minval) + minval, shape))\n\n\ndef randint(key: jnp.ndarray,\n shape: Sequence[int],\n minval: Union[int, jnp.ndarray],\n maxval: Union[int, jnp.ndarray],\n dtype: np.dtype = dtypes.int_):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: a tuple of nonnegative integers representing the shape.\n minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n (inclusive) value for the range.\n maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n (exclusive) value for the range.\n dtype: optional, an int dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _randint(key, shape, minval, maxval, dtype)\n\n@partial(jit, static_argnums=(1, 4))\ndef _randint(key, shape, minval, maxval, dtype):\n _check_shape(\"randint\", shape, np.shape(minval), np.shape(maxval))\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(\"randint only accepts integer dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, len(shape))\n maxval = lax.broadcast_to_rank(maxval, len(shape))\n nbits = jnp.iinfo(dtype).bits\n\n if nbits not in (8, 16, 32, 64):\n raise TypeError(\"randint only 
accepts 8-, 16-, 32-, or 64-bit dtypes.\")\n\n # if we don't have minval < maxval, just always return minval\n # https://github.com/google/jax/issues/222\n maxval = lax.max(lax.add(minval, np.array(1, dtype)), maxval)\n\n # This algorithm is biased whenever (maxval - minval) is not a power of 2.\n # We generate double the number of random bits required by the dtype so as to\n # reduce that bias.\n k1, k2 = split(key)\n rbits = lambda key: _random_bits(key, nbits, shape)\n higher_bits, lower_bits = rbits(k1), rbits(k2)\n\n unsigned_dtype = _UINT_DTYPES[nbits]\n span = lax.convert_element_type(maxval - minval, unsigned_dtype)\n\n # To compute a remainder operation on an integer that might have twice as many\n # bits as we can represent in the native unsigned dtype, we compute a\n # multiplier equal to 2**nbits % span. To avoid overflow, we use the identity:\n # (a * b) % N = [(a % N) * (b % N)] % N\n multiplier = lax.rem(lax._const(span, 2 ** (nbits // 2)), span)\n multiplier = lax.rem(lax.mul(multiplier, multiplier), span)\n\n random_offset = lax.add(lax.mul(lax.rem(higher_bits, span), multiplier),\n lax.rem(lower_bits, span))\n random_offset = lax.rem(random_offset, span)\n return lax.add(minval, lax.convert_element_type(random_offset, dtype))\n\n\ndef shuffle(key: jnp.ndarray, x: jnp.ndarray, axis: int = 0) -> jnp.ndarray:\n \"\"\"Shuffle the elements of an array uniformly at random along an axis.\n\n Args:\n key: a PRNGKey used as the random key.\n x: the array to be shuffled.\n axis: optional, an int axis along which to shuffle (default 0).\n\n Returns:\n A shuffled version of x.\n \"\"\"\n msg = (\"jax.random.shuffle is deprecated and will be removed in a future release. \"\n \"Use jax.random.permutation\")\n warnings.warn(msg, FutureWarning)\n return _shuffle(key, x, axis) # type: ignore\n\n\ndef permutation(key, x):\n \"\"\"\n Permute elements of an array along its first axis or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Args:n\n key: a PRNGKey used as the random key.\n x: the array or integer range to be shuffled.\n\n Returns:\n A shuffled version of x or array range\n \"\"\"\n if not np.ndim(x):\n # scalar case, must be a concrete integer\n if not np.issubdtype(lax.dtype(x), np.integer):\n raise TypeError(\"x must be an integer or at least 1-dimensional\")\n x = int(x)\n return _shuffle(key, jnp.arange(x), 0)\n elif np.ndim(x) == 1:\n return _shuffle(key, x, 0)\n else:\n ind = _shuffle(key, jnp.arange(x.shape[0]), 0) # type: ignore[attribute-error]\n return x[ind]\n\n\n@partial(jit, static_argnums=(2,))\ndef _shuffle(key, x, axis) -> jnp.ndarray:\n # On parallel architectures, Fisher-Yates is more expensive than doing\n # multiple sorts. This algorithm is based on one developed and analyzed by\n # tjablin@. We sort according to randomly-generated 32bit keys, but those keys\n # may have collisions. If we repeat the process, using fresh 32bit keys for\n # each sort, then whenever all pairs of elements have been assigned distinct\n # keys at some iteration (or equivalently when the strings formed by\n # concatenating the successive keys for each element are all distinct) then we\n # are guaranteed to have a perfect sample (assuming that either the sort is\n # stable or that any bias is not value-dependent). Since checking uniqueness\n # at runtime may be expensive, we use a heuristic static stop criterion\n # developed by tjablin@. 
See tensorflow/compiler/tf2xla/random_ops.cc for more\n # info, and for the original implementation of this algorithm. See also\n # Section 2 of http://people.csail.mit.edu/costis/6896sp11/lec5s.pdf for\n # another analysis (where the keys are generated one bit at a time).\n exponent = 3 # see tjablin@'s analysis for explanation of this parameter\n uint32max = jnp.iinfo(np.uint32).max\n num_rounds = int(np.ceil(exponent * np.log(x.size) / np.log(uint32max)))\n\n for _ in range(num_rounds):\n key, subkey = split(key)\n sort_keys = _random_bits(subkey, 32, x.shape)\n _, x = lax.sort_key_val(sort_keys, x, axis)\n\n return x\n\n\ndef choice(key, a, shape=(), replace=True, p=None):\n \"\"\"Generates a random sample from a given 1-D array.\n\n Args:\n key: a PRNGKey used as the random key.\n a : 1D array or int. If an ndarray, a random sample is generated from\n its elements. If an int, the random sample is generated as if a were\n arange(a).\n shape : tuple of ints, optional. Output shape. If the given shape is,\n e.g., ``(m, n)``, then ``m * n`` samples are drawn. Default is (),\n in which case a single value is returned.\n replace : boolean. Whether the sample is with or without replacement.\n default is True.\n p : 1-D array-like, The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribution over all\n entries in a.\n\n Returns:\n An array of shape `shape` containing samples from `a`.\n \"\"\"\n if not isinstance(shape, Sequence):\n raise TypeError(\"shape argument of jax.random.choice must be a sequence, \"\n f\"got {shape}\")\n if np.ndim(a) not in [0, 1]:\n raise ValueError(\"a must be an integer or 1-dimensional\")\n if np.ndim(a) == 0:\n a = int(a)\n else:\n a = _asarray(a)\n n_inputs = a if np.ndim(a) == 0 else len(a)\n n_draws = prod(shape)\n if n_draws == 0:\n return jnp.zeros(shape, dtype=lax.dtype(a))\n if n_inputs <= 0:\n raise ValueError(\"a must be greater than 0 unless no samples are taken\")\n if not replace and n_draws > n_inputs:\n raise ValueError(\"Cannot take a larger sample than population when 'replace=False'\")\n\n if p is None:\n if replace:\n ind = randint(key, shape, 0, n_inputs)\n result = ind if np.ndim(a) == 0 else a[ind]\n else:\n result = permutation(key, a)[:n_draws]\n else:\n if p.shape != (n_inputs,):\n raise ValueError(\"p must be None or match the shape of a\")\n if replace:\n p_cuml = jnp.cumsum(p)\n r = p_cuml[-1] * (1 - uniform(key, shape))\n ind = jnp.searchsorted(p_cuml, r)\n result = ind if np.ndim(a) == 0 else a[ind]\n else:\n # Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/\n g = -gumbel(key, (n_inputs,)) - jnp.log(p)\n ind = jnp.argsort(g)[:n_draws]\n result = ind if np.ndim(a) == 0 else a[ind]\n return result.reshape(shape)\n\n\ndef normal(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: np.dtype = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample standard normal random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `normal` must be a float dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _normal(key, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal(key, shape, dtype) -> jnp.ndarray:\n _check_shape(\"normal\", shape)\n lo = np.nextafter(np.array(-1., dtype), 0., dtype=dtype)\n hi = np.array(1., dtype)\n u = uniform(key, shape, dtype, lo, hi)\n return np.array(np.sqrt(2), dtype) * lax.erf_inv(u)\n\n\ndef multivariate_normal(key: jnp.ndarray,\n mean: jnp.ndarray,\n cov: jnp.ndarray,\n shape: Optional[Sequence[int]] = None,\n dtype: np.dtype = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample multivariate normal random values with given mean and covariance.\n\n Args:\n key: a PRNGKey used as the random key.\n mean: a mean vector of shape ``(..., n)``.\n cov: a positive definite covariance matrix of shape ``(..., n, n)``. The\n batch shape ``...`` must be broadcast-compatible with that of ``mean``.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and\n ``cov.shape[:-2]``. The default (None) produces a result batch shape by\n broadcasting together the batch shapes of ``mean`` and ``cov``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + mean.shape[-1:]`` if ``shape`` is not None, or else\n ``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `multivariate_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _multivariate_normal(key, mean, cov, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(3, 4))\ndef _multivariate_normal(key, mean, cov, shape, dtype) -> jnp.ndarray:\n if not np.ndim(mean) >= 1:\n msg = \"multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}\"\n raise ValueError(msg.format(np.ndim(mean)))\n if not np.ndim(cov) >= 2:\n msg = \"multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}\"\n raise ValueError(msg.format(np.ndim(cov)))\n n = mean.shape[-1]\n if np.shape(cov)[-2:] != (n, n):\n msg = (\"multivariate_normal requires cov.shape == (..., n, n) for n={n}, \"\n \"but got cov.shape == {shape}.\")\n raise ValueError(msg.format(n=n, shape=np.shape(cov)))\n\n if shape is None:\n shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2])\n else:\n _check_shape(\"normal\", shape, mean.shape[:-1], cov.shape[:-2])\n\n chol_factor = cholesky(cov)\n normal_samples = normal(key, shape + mean.shape[-1:], dtype)\n return mean + jnp.einsum('...ij,...j->...i', chol_factor, normal_samples)\n\n\ndef truncated_normal(key: jnp.ndarray,\n lower: Union[float, jnp.ndarray],\n upper: Union[float, jnp.ndarray],\n shape: Optional[Sequence[int]] = None,\n dtype: np.dtype = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample 
truncated standard normal random values with given shape and dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lower: a float or array of floats representing the lower bound for\n truncation. Must be broadcast-compatible with ``upper``.\n upper: a float or array of floats representing the upper bound for\n truncation. Must be broadcast-compatible with ``lower``.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``lower`` and ``upper``. The\n default (None) produces a result shape by broadcasting ``lower`` and\n ``upper``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.\n Returns values in the open interval ``(lower, upper)``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `truncated_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _truncated_normal(key, lower, upper, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(3, 4))\ndef _truncated_normal(key, lower, upper, shape, dtype) -> jnp.ndarray:\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, np.shape(lower), np.shape(upper))\n\n sqrt2 = np.array(np.sqrt(2), dtype)\n lower = lax.convert_element_type(lower, dtype)\n upper = lax.convert_element_type(upper, dtype)\n a = lax.erf(lower / sqrt2)\n b = lax.erf(upper / sqrt2)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=a, maxval=b)\n out = sqrt2 * lax.erf_inv(u)\n # Clamp the value to the open interval (lower, upper) to make sure that\n # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.\n return jnp.clip(\n out,\n lax.nextafter(lax.stop_gradient(lower), np.array(np.inf, dtype=dtype)),\n lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))\n\n\ndef bernoulli(key: jnp.ndarray,\n p: jnp.ndarray = np.float32(0.5),\n shape: Optional[Sequence[int]] = None) -> jnp.ndarray:\n \"\"\"Sample Bernoulli random values with given shape and mean.\n\n Args:\n key: a PRNGKey used as the random key.\n p: optional, a float or array of floats for the mean of the random\n variables. Must be broadcast-compatible with ``shape``. Default 0.5.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Must be broadcast-compatible with ``p.shape``. 
The default (None)\n produces a result shape equal to ``p.shape``.\n\n Returns:\n A random array with boolean dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``p.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(lax.dtype(p))\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n if not jnp.issubdtype(dtype, np.floating):\n msg = \"bernoulli probability `p` must have a floating dtype, got {}.\"\n raise TypeError(msg.format(dtype))\n p = lax.convert_element_type(p, dtype)\n return _bernoulli(key, p, shape) # type: ignore\n\n@partial(jit, static_argnums=(2,))\ndef _bernoulli(key, p, shape) -> jnp.ndarray:\n if shape is None:\n shape = np.shape(p)\n else:\n _check_shape(\"bernoulli\", shape, np.shape(p))\n\n return uniform(key, shape, lax.dtype(p)) < p\n\n\ndef beta(key: jnp.ndarray,\n a: Union[float, jnp.ndarray],\n b: Union[float, jnp.ndarray],\n shape: Optional[Sequence[int]] = None,\n dtype: np.dtype = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Beta random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the first parameter \"alpha\".\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the second parameter \"beta\".\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a`` and ``b``. The default\n (None) produces a result shape by broadcasting ``a`` and ``b``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``a`` and ``b``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `beta` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _beta(key, a, b, shape, dtype)\n\ndef _beta(key, a, b, shape, dtype):\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(a), np.shape(b))\n else:\n _check_shape(\"beta\", shape, np.shape(a), np.shape(b))\n\n a = lax.convert_element_type(a, dtype)\n b = lax.convert_element_type(b, dtype)\n key_a, key_b = split(key)\n a = jnp.broadcast_to(a, shape)\n b = jnp.broadcast_to(b, shape)\n gamma_a = gamma(key_a, a, shape, dtype)\n gamma_b = gamma(key_b, b, shape, dtype)\n return gamma_a / (gamma_a + gamma_b)\n\n\ndef cauchy(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample Cauchy random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `cauchy` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _cauchy(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _cauchy(key, shape, dtype):\n _check_shape(\"cauchy\", shape)\n u = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)\n pi = _constant_like(u, np.pi)\n return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))\n\n\ndef dirichlet(key, alpha, shape=None, dtype=dtypes.float_):\n \"\"\"Sample Dirichlet random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n alpha: an array of shape ``(..., n)`` used as the concentration\n parameter of the random variables.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n element of value ``n``. Must be broadcast-compatible with\n ``alpha.shape[:-1]``. The default (None) produces a result shape equal to\n ``alpha.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else\n ``alpha.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `dirichlet` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _dirichlet(key, alpha, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _dirichlet(key, alpha, shape, dtype):\n if not np.ndim(alpha) >= 1:\n msg = \"dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}\"\n raise ValueError(msg.format(np.ndim(alpha)))\n\n if shape is None:\n shape = np.shape(alpha)[:-1]\n else:\n _check_shape(\"dirichlet\", shape, np.shape(alpha)[:-1])\n\n alpha = lax.convert_element_type(alpha, dtype)\n gamma_samples = gamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)\n return gamma_samples / jnp.sum(gamma_samples, axis=-1, keepdims=True)\n\n\ndef exponential(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample Exponential random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `exponential` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _exponential(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _exponential(key, shape, dtype):\n _check_shape(\"exponential\", shape)\n u = uniform(key, shape, dtype)\n # taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)\n return lax.neg(lax.log1p(lax.neg(u)))\n\n\ndef _gamma_one(key, alpha):\n # Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang\n # The algorithm can also be founded in:\n # https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables\n zero = _constant_like(alpha, 0)\n one = _constant_like(alpha, 1)\n minus_one = _constant_like(alpha, -1)\n one_over_two = _constant_like(alpha, 0.5)\n one_over_three = _constant_like(alpha, 1. / 3.)\n squeeze_const = _constant_like(alpha, 0.0331)\n dtype = lax.dtype(alpha)\n\n key, subkey = split(key)\n # for alpha < 1, we boost alpha to alpha + 1 and get a sample according to\n # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)\n boost = lax.select(lax.ge(alpha, one),\n one,\n lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))\n alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))\n\n d = lax.sub(alpha, one_over_three)\n c = lax.div(one_over_three, lax.pow(d, one_over_two))\n\n def _cond_fn(kXVU):\n _, X, V, U = kXVU\n # TODO: use lax.cond when its batching rule is supported\n # The reason is to avoid evaluating second condition which involves log+log\n # if the first condition is satisfied\n cond = lax.bitwise_and(lax.ge(U, lax.sub(one, lax.mul(squeeze_const, lax.mul(X, X)))),\n lax.ge(lax.log(U), lax.add(lax.mul(X, one_over_two),\n lax.mul(d, lax.add(lax.sub(one, V),\n lax.log(V))))))\n return cond\n\n def _body_fn(kXVU):\n def _next_kxv(kxv):\n key = kxv[0]\n key, subkey = split(key)\n x = normal(subkey, (), dtype=dtype)\n v = lax.add(one, lax.mul(x, c))\n return key, x, v\n\n key = kXVU[0]\n key, x_key, U_key = split(key, 3)\n _, x, v = lax.while_loop(lambda kxv: lax.le(kxv[2], zero), _next_kxv, (x_key, zero, minus_one))\n X = lax.mul(x, x)\n V = lax.mul(lax.mul(v, v), v)\n U = uniform(U_key, (), dtype=dtype)\n return key, X, V, U\n\n # initial state is chosen such that _cond_fn will return True\n _, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _constant_like(alpha, 2)))\n z = lax.mul(lax.mul(d, V), boost)\n return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)\n\n\ndef _gamma_grad(sample, a):\n samples = jnp.reshape(sample, -1)\n alphas = jnp.reshape(a, -1)\n if xla_bridge.get_backend().platform == 'cpu':\n grads = lax.map(lambda args: lax.random_gamma_grad(*args), (alphas, samples))\n else:\n grads = vmap(lax.random_gamma_grad)(alphas, samples)\n return grads.reshape(np.shape(a))\n\ndef _gamma_impl(key, a, use_vmap=False):\n a_shape = jnp.shape(a)\n # split key to match the shape of a\n key_ndim = jnp.ndim(key) - 1\n key = jnp.reshape(key, (-1, 2))\n key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))\n keys = jnp.reshape(key, (-1, 2))\n alphas = jnp.reshape(a, -1)\n if use_vmap:\n samples = 
vmap(_gamma_one)(keys, alphas)\n else:\n samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))\n\n return jnp.reshape(samples, a_shape)\n\ndef _gamma_batching_rule(batched_args, batch_dims):\n k, a = batched_args\n bk, ba = batch_dims\n size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)\n k = batching.bdim_at_front(k, bk, size)\n a = batching.bdim_at_front(a, ba, size)\n return random_gamma_p.bind(k, a), 0\n\nrandom_gamma_p = core.Primitive('random_gamma')\nrandom_gamma_p.def_impl(_gamma_impl)\nrandom_gamma_p.def_abstract_eval(lambda key, a: abstract_arrays.raise_to_shaped(a))\nad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: tangent * _gamma_grad(ans, a))\nxla.translations[random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=True),\n multiple_results=False)\nxla.backend_specific_translations['cpu'][random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=False),\n multiple_results=False)\nbatching.primitive_batchers[random_gamma_p] = _gamma_batching_rule\n\ndef gamma(key, a, shape=None, dtype=dtypes.float_):\n \"\"\"Sample Gamma random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a``. The default (None)\n produces a result shape equal to ``a.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``a.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gamma` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _gamma(key, a, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _gamma(key, a, shape, dtype):\n if shape is None:\n shape = np.shape(a)\n else:\n _check_shape(\"gamma\", shape, np.shape(a))\n\n a = lax.convert_element_type(a, dtype)\n if np.shape(a) != shape:\n a = jnp.broadcast_to(a, shape)\n return random_gamma_p.bind(key, a)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_knuth(key, lam, shape, dtype, max_iters):\n # Knuth's algorithm for generating Poisson random variates.\n # Reference:\n # https://en.wikipedia.org/wiki/Poisson_distribution#Generating_Poisson-distributed_random_variables\n\n def body_fn(carry):\n i, k, rng, log_prod = carry\n rng, subkey = split(rng)\n k = lax.select(log_prod > -lam, k + 1, k)\n u = uniform(subkey, shape, np.float32)\n return i + 1, k, rng, log_prod + jnp.log(u)\n\n def cond_fn(carry):\n i, log_prod = carry[0], carry[3]\n return (log_prod > -lam).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, 0, dtype, shape)\n log_rate_init = lax.full_like(lam, 0, np.float32, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, key, log_rate_init))[1]\n return (k - 1).astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_rejection(key, lam, shape, dtype, max_iters):\n # Transformed rejection due to Hormann.\n # Reference:\n # http://citeseer.ist.psu.edu/viewdoc/citations;jsessionid=1BEB35946CC807879F55D42512E5490C?doi=10.1.1.48.3054.\n log_lam = lax.log(lam)\n b = 0.931 + 2.53 * 
lax.sqrt(lam)\n a = -0.059 + 0.02483 * b\n inv_alpha = 1.1239 + 1.1328 / (b - 3.4)\n v_r = 0.9277 - 3.6224 / (b - 2)\n\n def body_fn(carry):\n i, k_out, accepted, key = carry\n key, subkey_0, subkey_1 = split(key, 3)\n\n u = uniform(subkey_0, shape, lam.dtype) - 0.5\n v = uniform(subkey_1, shape, lam.dtype)\n u_shifted = 0.5 - abs(u)\n\n k = lax.floor((2 * a / u_shifted + b) * u + lam + 0.43)\n s = lax.log(v * inv_alpha / (a / (u_shifted * u_shifted) + b))\n t = -lam + k * log_lam - lax.lgamma(k + 1)\n\n accept1 = (u_shifted >= 0.07) & (v <= v_r)\n reject = (k < 0) | ((u_shifted < 0.013) & (v > u_shifted))\n accept2 = s <= t\n accept = accept1 | (~reject & accept2)\n\n k_out = lax.select(accept, k, k_out)\n accepted |= accept\n\n return i + 1, k_out, accepted, key\n\n def cond_fn(carry):\n i, k_out, accepted, key = carry\n return (~accepted).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, -1, lam.dtype, shape)\n accepted = lax.full_like(lam, False, jnp.bool_, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, accepted, key))[1]\n return k.astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3))\ndef _poisson(key, lam, shape, dtype):\n # The implementation matches TensorFlow and NumPy:\n # https://github.com/tensorflow/tensorflow/blob/v2.2.0-rc3/tensorflow/core/kernels/random_poisson_op.cc\n # https://github.com/numpy/numpy/blob/v1.18.3/numpy/random/src/distributions/distributions.c#L574\n # For lambda < 10, we use the Knuth algorithm; otherwise, we use transformed\n # rejection sampling.\n use_knuth = lam < 10\n lam_knuth = lax.select(use_knuth, lam, lax.full_like(lam, 0.0))\n # The acceptance probability for rejection sampling maxes out at 89% as\n # λ -> ∞, so pick some arbitrary large value.\n lam_rejection = lax.select(use_knuth, lax.full_like(lam, 1e5), lam)\n max_iters = dtype.type(jnp.iinfo(dtype).max) # insanely conservative\n return lax.select(\n use_knuth,\n _poisson_knuth(key, lam_knuth, shape, dtype, max_iters),\n _poisson_rejection(key, lam_rejection, shape, dtype, max_iters),\n )\n\n\ndef poisson(key, lam, shape=(), dtype=dtypes.int_):\n \"\"\"Sample Poisson random values with given shape and integer dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lam: rate parameter (mean of the distribution), must be >= 0.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a integer dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n if np.shape(lam) != shape:\n lam = jnp.broadcast_to(lam, shape)\n lam = lax.convert_element_type(lam, np.float32)\n return _poisson(key, lam, shape, dtype)\n\n\ndef gumbel(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample Gumbel random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gumbel` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _gumbel(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _gumbel(key, shape, dtype):\n _check_shape(\"gumbel\", shape)\n return -jnp.log(-jnp.log(\n uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)))\n\n\ndef categorical(key, logits, axis=-1, shape=None):\n \"\"\"Sample random values from categorical distributions.\n\n Args:\n key: a PRNGKey used as the random key.\n logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,\n so that `softmax(logits, axis)` gives the corresponding probabilities.\n axis: Axis along which logits belong to the same categorical distribution.\n shape: Optional, a tuple of nonnegative integers representing the result shape.\n Must be broadcast-compatible with ``np.delete(logits.shape, axis)``.\n The default (None) produces a result shape equal to ``np.delete(logits.shape, axis)``.\n\n Returns:\n A random array with int dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``np.delete(logits.shape, axis)``.\n \"\"\"\n\n if axis >= 0:\n axis -= len(logits.shape)\n\n batch_shape = tuple(np.delete(logits.shape, axis))\n if shape is None:\n shape = batch_shape\n else:\n _check_shape(\"categorical\", shape, batch_shape)\n\n sample_shape = shape[:len(shape)-len(batch_shape)]\n return jnp.argmax(gumbel(key, sample_shape + logits.shape, logits.dtype) + logits, axis=axis)\n\n\ndef laplace(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample Laplace random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `laplace` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _laplace(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _laplace(key, shape, dtype):\n _check_shape(\"laplace\", shape)\n u = uniform(\n key, shape, dtype, minval=-1. + jnp.finfo(dtype).epsneg, maxval=1.)\n return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))\n\n\ndef logistic(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample logistic random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `logistic` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _logistic(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _logistic(key, shape, dtype):\n # Mathematically, we can compute the distribution by generating uniformly-distributed\n # numbers x in the open interval (a, b) and computing:\n # z = log[ (x - a) / (b - x))\n # It's important to avoid x=a or x=b, which lead to infinite values for z.\n # The uniform() function generates pseudorandom floating point numbers x in the\n # semi-closed interval [0, 1), so if used directly with (a,b)=(0,1), it will\n # lead to infinite output in a small number of cases (as many as 1 in 2^23 for float32).\n #\n # Instead, we let (a, b) = (-ε, 1) where ε is the smallest step between floating point\n # values: then numbers in the interval (-ε, 1) are approximated by standard uniformly\n # drawn numbers in [0, 1).\n _check_shape(\"logistic\", shape)\n x = uniform(key, shape, dtype)\n eps = jnp.finfo(dtype).eps\n return lax.log(lax.div(lax.add(lax._const(x, eps), x), lax.sub(lax._const(x, 1), x)))\n\n\ndef pareto(key, b, shape=None, dtype=dtypes.float_):\n \"\"\"Sample Pareto random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``b``. The default (None)\n produces a result shape equal to ``b.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``b.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `pareto` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = abstract_arrays.canonicalize_shape(shape)\n return _pareto(key, b, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _pareto(key, b, shape, dtype):\n if shape is None:\n shape = np.shape(b)\n else:\n _check_shape(\"pareto\", shape)\n\n b = lax.convert_element_type(b, dtype)\n e = exponential(key, shape, dtype)\n return lax.exp(e / b)\n\n\ndef t(key, df, shape=(), dtype=dtypes.float_):\n \"\"\"Sample Student's t random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n df: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``df``. 
The default (None)\n produces a result shape equal to ``df.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``df.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `t` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _t(key, df, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _t(key, df, shape, dtype):\n if shape is None:\n shape = np.shape(df)\n else:\n _check_shape(\"t\", shape, np.shape(df))\n\n df = lax.convert_element_type(df, dtype)\n key_n, key_g = split(key)\n n = normal(key_n, shape, dtype)\n two = _constant_like(n, 2)\n half_df = lax.div(df, two)\n g = gamma(key_n, half_df, shape, dtype)\n return n * jnp.sqrt(half_df / g)\n\n\ndef rademacher(key, shape, dtype=dtypes.int_):\n \"\"\"Sample from a Rademacher distribution.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`. Each element in the output has\n a 50% change of being 1 or -1.\n\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _rademacher(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _rademacher(key, shape, dtype):\n bernoulli_samples = bernoulli(key=key, p=0.5, shape=shape)\n return (2 * bernoulli_samples - 1).astype(dtype)\n\n\ndef maxwell(key, shape=(), dtype=dtypes.float_):\n \"\"\"Sample from a one sided Maxwell distribution.\n\n The scipy counterpart is `scipy.stats.maxwell`.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`.\n\n \"\"\"\n # Generate samples using:\n # sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `maxwell` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _maxwell(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _maxwell(key, shape, dtype):\n shape = shape + (3,)\n norm_rvs = normal(key=key, shape=shape, dtype=dtype)\n return jnp.linalg.norm(norm_rvs, axis=-1)\n\n\ndef double_sided_maxwell(key, loc, scale, shape=(), dtype=dtypes.float_):\n \"\"\"Sample from a double sided Maxwell distribution.\n\n Samples using:\n loc + scale* sgn(U-0.5)* one_sided_maxwell U~Unif;\n\n Args:\n key: a PRNGKey key.\n loc: The location parameter of the distribution.\n scale: The scale parameter of the distribution.\n shape: The shape added to the parameters loc and scale broadcastable shape.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `double_sided_maxwell` must be a float\"\n f\" dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _double_sided_maxwell(key, loc, scale, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2, 3, 4))\ndef _double_sided_maxwell(key, loc, scale, shape, dtype):\n params_shapes = lax.broadcast_shapes(np.shape(loc), 
np.shape(scale))\n if not shape:\n shape = params_shapes\n\n shape = shape + params_shapes\n maxwell_key, rademacher_key = split(key)\n maxwell_rvs = maxwell(maxwell_key, shape=shape, dtype=dtype)\n # Generate random signs for the symmetric variates.\n random_sign = rademacher(rademacher_key, shape=shape, dtype=dtype)\n assert random_sign.shape == maxwell_rvs.shape\n\n return random_sign * maxwell_rvs * scale + loc\n\n\ndef weibull_min(key, scale, concentration, shape=(), dtype=dtypes.float_):\n \"\"\"Sample from a Weibull distribution.\n\n The scipy counterpart is `scipy.stats.weibull_min`.\n\n Args:\n key: a PRNGKey key.\n scale: The scale parameter of the distribution.\n concentration: The concentration parameter of the distribution.\n shape: The shape added to the parameters loc and scale broadcastable shape.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `weibull_min` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = abstract_arrays.canonicalize_shape(shape)\n return _weibull_min(key, scale, concentration, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2, 3, 4))\ndef _weibull_min(key, scale, concentration, shape, dtype):\n random_uniform = uniform(\n key=key, shape=shape, minval=0, maxval=1, dtype=dtype)\n\n # Inverse weibull CDF.\n return jnp.power(-jnp.log1p(-random_uniform), 1.0/concentration) * scale\n",
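The samplers defined above all follow the same explicit key discipline: every draw consumes a PRNG key, and independent draws require subkeys produced by ``split``. Below is a minimal, illustrative usage sketch of that pattern; it assumes these functions are exposed through the public jax.random namespace, as the module docstrings suggest.

# Usage sketch (assumption: the samplers above are re-exported as jax.random.*).
from jax import random

key = random.PRNGKey(0)                    # a (2,)-shaped uint32 key, as required by threefry_2x32
key, k_u, k_n, k_i = random.split(key, 4)  # independent draws need independent subkeys

u = random.uniform(k_u, shape=(3,), minval=0.0, maxval=1.0)    # floats in [0, 1)
z = random.normal(k_n, shape=(3,))                             # standard normal draws
ints = random.randint(k_i, shape=(3,), minval=0, maxval=10)    # integers in [0, 10)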
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager\nfrom collections import Counter, namedtuple\nfrom functools import partial, reduce\nfrom itertools import chain, product\nimport operator as op\nimport string\nfrom typing import Callable, Dict, Sequence, Union, Tuple\n\nimport numpy as np\n\nfrom .. import abstract_arrays\nfrom .. import core, dtypes\nfrom ..tree_util import tree_unflatten\nfrom ..core import Trace, Tracer\nfrom ..util import safe_map, safe_zip, unzip2, prod, wrap_name\nfrom ..abstract_arrays import ShapedArray\nfrom .. import linear_util as lu\n\nmap = safe_map\nzip = safe_zip\n\nmasking_rules: Dict[core.Primitive, Callable] = {}\n\ndef defvectorized(prim):\n masking_rules[prim] = partial(vectorized_masking_rule, prim)\n\ndef defnaryop(prim):\n masking_rules[prim] = partial(naryop_masking_rule, prim)\n\ndef vectorized_masking_rule(prim, padded_vals, logical_shapes, **params):\n del logical_shapes # Unused.\n padded_val, = padded_vals\n return prim.bind(padded_val, **params)\n\ndef naryop_masking_rule(prim, padded_vals, logical_shapes):\n del logical_shapes # Unused.\n return prim.bind(*padded_vals)\n\nShapeEnvs = namedtuple(\"ShapeEnvs\", [\"logical\", \"padded\"])\nshape_envs = ShapeEnvs({}, {}) # TODO(mattjj): make this a stack for efficiency\n\ndef is_tracing():\n return bool(shape_envs.padded)\n\n@contextmanager\ndef extend_shape_envs(logical_env, padded_env):\n global shape_envs\n new_logical = dict(chain(shape_envs.logical.items(), logical_env.items()))\n new_padded = dict(chain(shape_envs.padded.items(), padded_env.items()))\n shape_envs, prev = ShapeEnvs(new_logical, new_padded), shape_envs\n try:\n yield\n finally:\n shape_envs = prev\n\ndef shape_as_value(shape):\n assert is_tracing() or not is_polymorphic(shape)\n return eval_poly_shape(shape, shape_envs.logical)\n\ndef padded_shape_as_value(shape):\n assert is_tracing() or not is_polymorphic(shape)\n return eval_poly_shape(shape, shape_envs.padded)\n\ndef mask_fun(fun, logical_env, padded_env, in_vals, polymorphic_shapes):\n env_keys, padded_env_vals = unzip2(sorted(padded_env.items()))\n logical_env_vals = [logical_env[k] for k in env_keys]\n # Make padded_env hashable\n padded_env = (env_keys, padded_env_vals)\n with core.new_main(MaskTrace) as main:\n fun, out_shapes = mask_subtrace(fun, main, polymorphic_shapes, padded_env)\n out_vals = fun.call_wrapped(*(logical_env_vals + in_vals))\n del main\n return out_vals, out_shapes()\n\[email protected]_with_aux\ndef mask_subtrace(main, shapes, padded_env, *in_vals):\n env_keys, _ = padded_env\n logical_env_vals, in_vals = in_vals[:len(env_keys)], in_vals[len(env_keys):]\n logical_env = dict(zip(env_keys, logical_env_vals))\n padded_env = dict(zip(*padded_env))\n trace = MaskTrace(main, core.cur_sublevel())\n in_tracers = [MaskTracer(trace, x, s).full_lower()\n for x, s in zip(in_vals, shapes)]\n with extend_shape_envs(logical_env, padded_env):\n outs = yield in_tracers, {}\n out_tracers = 
map(trace.full_raise, outs)\n out_vals, out_shapes = unzip2((t.val, t.polymorphic_shape) for t in out_tracers)\n yield out_vals, out_shapes\n\ndef eval_poly_shape(shape, values_dict):\n return tuple(eval_poly(dim, values_dict) for dim in shape)\n\ndef eval_poly(poly, values_dict):\n return poly.evaluate(values_dict) if type(poly) is Poly else poly\n\ndef _ensure_poly(p: 'Size') -> 'Poly':\n if isinstance(p, Poly): return p\n return Poly({Mon(): p})\n\ndef _polys_to_ints(shape):\n return tuple(int(d) if type(d) is Poly and d.is_constant else d\n for d in shape)\n\ndef is_polymorphic(shape: Sequence['Size']):\n return any(map(lambda d: type(d) is Poly, shape))\n\nclass Poly(dict):\n \"\"\"Polynomial with integer coefficients for polymorphic shapes.\"\"\"\n\n def __init__(self, coeffs: Dict['Mon', int]):\n # Makes sure Polynomials are always in canonical form\n coeffs = {mon: op.index(coeff)\n for mon, coeff in coeffs.items() if coeff != 0}\n coeffs = coeffs or {Mon(): 0}\n super().__init__(coeffs)\n\n def __add__(self, other: 'Size') -> 'Poly':\n coeffs = self.copy()\n for mon, coeff in _ensure_poly(other).items():\n coeffs[mon] = coeffs.get(mon, 0) + coeff\n return Poly(coeffs)\n\n def __sub__(self, other: 'Size') -> 'Poly':\n return self + -other\n\n def __neg__(self) -> 'Poly':\n return Poly({mon: -coeff for mon, coeff in self.items()})\n\n def __mul__(self, other: 'Size') -> 'Poly':\n other = _ensure_poly(other)\n coeffs: Dict[Mon, int] = {}\n for (mon1, coeff1), (mon2, coeff2) in product(self.items(), other.items()):\n mon = mon1 * mon2\n coeffs[mon] = coeffs.get(mon, 0) + coeff1 * coeff2\n return Poly(coeffs)\n\n def __rmul__(self, other: 'Size') -> 'Poly':\n return self * other # multiplication commutes\n\n def __radd__(self, other: 'Size') -> 'Poly':\n return self + other # addition commutes\n\n def __rsub__(self, other: 'Size') -> 'Poly':\n return _ensure_poly(other) - self\n\n def __floordiv__(self, divisor: 'Size') -> 'Poly':\n q, _ = divmod(self, divisor) # type: ignore\n return q\n\n def __mod__(self, divisor: 'Size') -> int:\n _, r = divmod(self, divisor) # type: ignore\n return r\n\n def __divmod__(self, divisor: 'Size') -> Tuple['Poly', int]:\n \"\"\"\n Floor division with remainder (divmod) generalized to polynomials. 
To allow\n ensuring '0 <= remainder < divisor' for consistency with integer divmod, the\n divisor must divide the dividend (up to a constant for constant divisors).\n :return: Quotient resulting from polynomial division and integer remainder.\n \"\"\"\n divisor = _ensure_poly(divisor)\n dmon, dcount = divisor._leading_term\n dividend, quotient, remainder = self, _ensure_poly(0), _ensure_poly(0)\n while dividend != 0: # invariant: dividend == divisor*quotient + remainder\n mon, count = dividend._leading_term\n qcount, rcount = divmod(count, dcount)\n try:\n qmon = mon // dmon\n except ValueError:\n raise ValueError(f\"Stride {divisor} must divide size {self} \"\n f\"(up to a constant for constant divisors).\")\n r = Poly({mon: rcount})\n q = Poly({qmon: qcount})\n quotient += q\n remainder += r\n dividend -= q * divisor + r\n return quotient, int(remainder)\n\n def __rdivmod__(self, dividend: 'Size') -> Tuple['Poly', int]:\n return divmod(_ensure_poly(dividend), self) # type: ignore\n\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n def __eq__(self, other):\n return super().__eq__(_ensure_poly(other))\n\n def __ne__(self, other):\n return not self == other\n\n def __ge__(self, other: 'Size'):\n diff = self - other\n if diff.is_constant: return int(diff) >= 0\n\n # Assume nonconstant polynomials are positive, allows use in shape rules:\n if _ensure_poly(other).is_constant and other <= 1: return True\n elif self.is_constant and self <= 0: return False\n\n raise ValueError(f\"Polynomial comparison {self} >= {other} is inconclusive.\")\n\n def __le__(self, other: 'Size'):\n return _ensure_poly(other) >= self\n\n def __lt__(self, other: 'Size'):\n return not (self >= other)\n\n def __gt__(self, other: 'Size'):\n return not (_ensure_poly(other) >= self)\n\n def __str__(self):\n return ' + '.join(f'{c} {mon}' if c != 1 or mon.degree == 0 else str(mon)\n for mon, c in sorted(self.items(), reverse=True)).strip()\n\n def __repr__(self):\n return str(self)\n\n def __int__(self):\n assert self.is_constant, f\"casting polynomial '{self}' to integer\"\n return op.index(next(iter(self.values())))\n\n def evaluate(self, env):\n prod = lambda xs: reduce(op.mul, xs) if xs else 1\n terms = [mul(coeff, prod([pow(env[id], deg) for id, deg in mon.items()]))\n for mon, coeff in self.items()]\n return sum(terms) if len(terms) > 1 else terms[0]\n\n @property\n def is_constant(self):\n return len(self) == 1 and next(iter(self)).degree == 0\n\n @property\n def _leading_term(self) -> Tuple['Mon', int]:\n \"\"\"Returns the highest degree term that comes first lexicographically.\"\"\"\n return max(self.items())\n\nSize = Union[int, Poly]\n\ndef pow(x, deg):\n try:\n deg = int(deg)\n except:\n return x ** deg\n else:\n return 1 if deg == 0 else x if deg == 1 else x ** deg\n\ndef mul(coeff, mon):\n try:\n coeff = int(coeff)\n except:\n return coeff * mon\n else:\n return 0 if coeff == 0 else mon if coeff == 1 else coeff * mon\n\n\nabstract_arrays._DIMENSION_TYPES.add(Poly)\n\nclass Mon(dict):\n \"\"\"Represents a multivariate monomial, such as n^3 * m.\"\"\"\n def __hash__(self):\n return hash(frozenset(self.items()))\n\n def __str__(self):\n return ' '.join(f'{key}^{exponent}' if exponent != 1 else str(key)\n for key, exponent in sorted(self.items()))\n\n def __lt__(self, other: 'Mon'):\n \"\"\"\n Comparison to another monomial in graded reverse lexicographic order.\n \"\"\"\n self_key = -self.degree, tuple(sorted(self))\n other_key = -other.degree, tuple(sorted(other))\n return self_key > 
other_key\n\n def __mul__(self, other: 'Mon') -> 'Mon':\n \"\"\"\n Returns the product with another monomial. Example: (n^2*m) * n == n^3 * m.\n \"\"\"\n return Mon(Counter(self) + Counter(other))\n\n @property\n def degree(self):\n return sum(self.values())\n\n def __floordiv__(self, divisor: 'Mon') -> 'Mon':\n \"\"\"\n Divides by another monomial. Raises a ValueError if impossible.\n For example, (n^3 * m) // n == n^2*m, but n // m fails.\n \"\"\"\n d = Counter(self)\n for key, exponent in divisor.items():\n diff = self.get(key, 0) - exponent\n if diff < 0: raise ValueError(f\"Cannot divide {self} by {divisor}.\")\n elif diff == 0: del d[key]\n elif diff > 0: d[key] = diff\n return Mon(d)\n\nclass ShapeError(Exception): pass\n\nclass ShapeSyntaxError(Exception): pass\n\n# To denote some shape expressions (for annotations) we use a small language.\n#\n# data ShapeSpec = ShapeSpec [Dim]\n# data Dim = Id PyObj\n# | Lit Int\n# | Mul Dim Dim\n# | Add Dim Dim\n# | MonomorphicDim\n#\n# We'll also make a simple concrete syntax for annotation. The grammar is\n#\n# shape_spec ::= '(' dims ')'\n# dims ::= dim ',' dims | ''\n# dim ::= str | int | dim '*' dim | dim '+' dim | '_'\n#\n# ShapeSpecs can have some monomorphic dims inside them, which must be replaced\n# with concrete shapes when known.\n\nclass ShapeSpec(tuple):\n def __str__(self):\n return 'ShapeSpec({})'.format(', '.join(map(str, self)))\n\ndef finalize_spec(polymorphic_shape, padded_shape):\n # TODO: what if polymorphic_shape has a constant that does not match padded_shape?\n return tuple(_parse_lit(d) if e is _monomorphic_dim else e\n for e, d in zip(polymorphic_shape, padded_shape))\n\ndef parse_spec(spec=''):\n if not spec:\n return ShapeSpec(())\n if spec[0] == '(':\n if spec[-1] != ')': raise ShapeSyntaxError(spec)\n spec = spec[1:-1]\n dims = map(_parse_dim, spec.replace(' ', '').strip(',').split(','))\n return ShapeSpec(dims)\n\ndef _parse_dim(spec):\n if '+' in spec:\n return np.sum(map(_parse_dim, spec.split('+')))\n elif '*' in spec:\n return prod(map(_parse_dim, spec.split('*')))\n elif spec.isdigit() or spec.startswith('-') and spec[1:].isdigit():\n return _parse_lit(spec)\n elif spec[0] in _identifiers:\n return _parse_id(spec)\n elif spec == '_':\n return _monomorphic_dim\n else:\n raise ShapeSyntaxError(spec)\n\n_identifiers = frozenset(string.ascii_lowercase)\n\ndef _parse_id(name): return Poly({Mon({name: 1}): 1})\n\ndef _parse_lit(val_str): return int(val_str)\n\nclass MonomorphicDim(object):\n def __str__(self): return '_'\n\n_monomorphic_dim = MonomorphicDim()\n\n# Two convenient ways to provide shape annotations:\n# 1. '(m, n)'\n# 2. 
s_['m', 'n']\n\nclass S_(object):\n def __getitem__(self, idx):\n return parse_spec(('(' + ','.join(map(str, idx)) + ')')\n if type(idx) is tuple else str(idx))\n\ns_ = S_()\n\ndef _shape_spec_consistent(spec, expr):\n return all(a == b for a, b in zip(spec, expr) if a is not _monomorphic_dim)\n\nclass MaskTracer(Tracer):\n __slots__ = [\"val\", \"polymorphic_shape\"]\n\n def __init__(self, trace, val, polymorphic_shape):\n super().__init__(trace)\n self.val = val\n self.polymorphic_shape = polymorphic_shape\n\n @property\n def aval(self):\n return ShapedArray(self.polymorphic_shape, self.dtype)\n\n @property\n def dtype(self):\n return dtypes.dtype(self.val)\n\n def is_pure(self):\n return all(type(poly) is not Poly or poly.is_constant\n for poly in self.polymorphic_shape)\n\n def full_lower(self):\n if self.is_pure():\n return core.full_lower(self.val)\n else:\n return self\n\n\nclass MaskTrace(Trace):\n def pure(self, val):\n return MaskTracer(self, val, np.shape(val))\n\n def lift(self, val):\n return MaskTracer(self, val, np.shape(val))\n\n def sublift(self, val):\n return MaskTracer(self, val.val, val.polymorphic_shape)\n\n def process_primitive(self, primitive, tracers, params):\n masking_rule = masking_rules.get(primitive)\n if masking_rule is None:\n raise NotImplementedError(\n f'Masking rule for {primitive} not implemented yet.')\n out_aval = primitive.abstract_eval(*(t.aval for t in tracers), **params)\n vals, polymorphic_shapes = unzip2((t.val, t.polymorphic_shape) for t in tracers)\n logical_shapes = map(shape_as_value, polymorphic_shapes)\n # TODO(mattjj): generalize mask rule signature\n if primitive.name == 'reshape': params['polymorphic_shapes'] = polymorphic_shapes\n out = masking_rule(vals, logical_shapes, **params)\n if primitive.multiple_results:\n out_shapes = map(_polys_to_ints, [o.shape for o in out_aval])\n return map(partial(MaskTracer, self), out, out_shapes)\n else:\n return MaskTracer(self, out, _polys_to_ints(out_aval.shape))\n\n def process_call(self, call_primitive, f, tracers, params):\n assert call_primitive.multiple_results\n params = dict(params, name=wrap_name(params.get('name', f.__name__), 'mask'))\n vals, shapes = unzip2((t.val, t.polymorphic_shape) for t in tracers)\n if not any(is_polymorphic(s) for s in shapes):\n return call_primitive.bind(f, *vals, **params)\n else:\n logical_env, padded_env = shape_envs\n env_keys, padded_env_vals = unzip2(sorted(padded_env.items()))\n logical_env_vals = tuple(logical_env[k] for k in env_keys)\n # Make padded_env hashable\n padded_env = (env_keys, padded_env_vals)\n f, shapes_out = mask_subtrace(f, self.main, shapes, padded_env)\n if 'donated_invars' in params:\n params = dict(params, donated_invars=((False,) * len(logical_env_vals) +\n params['donated_invars']))\n vals_out = call_primitive.bind(f, *(logical_env_vals + vals), **params)\n return [MaskTracer(self, v, s) for v, s in zip(vals_out, shapes_out())]\n\n def post_process_call(self, call_primitive, out_tracers, params):\n vals, shapes = unzip2((t.val, t.polymorphic_shape) for t in out_tracers)\n main = self.main\n def todo(vals):\n trace = MaskTrace(main, core.cur_sublevel())\n return map(partial(MaskTracer, trace), vals, shapes)\n return vals, todo\n\nclass UniqueId:\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return self.name\n\n def __lt__(self, other):\n return self.name < other.name\n\nclass UniqueIds(dict):\n def __missing__(self, key):\n unique_id = UniqueId(key)\n self[key] = unique_id\n return unique_id\n\ndef 
remap_ids(names, shape_spec):\n return ShapeSpec(Poly({Mon({names[id] : deg for id, deg in mon.items()})\n : coeff for mon, coeff in poly.items()})\n if isinstance(poly, Poly) else\n poly for poly in shape_spec)\n\ndef bind_shapes(polymorphic_shapes, padded_shapes):\n env = {}\n for polymorphic_shape, padded_shape in zip(polymorphic_shapes, padded_shapes):\n for poly, d in zip(polymorphic_shape, padded_shape):\n if type(poly) is not Poly or poly.is_constant:\n if int(poly) != d: raise ShapeError\n else:\n poly = poly.copy()\n const_coeff = poly.pop(Mon({}), 0)\n (mon, linear_coeff), = poly.items()\n (id, index), = mon.items()\n if index != 1: raise ShapeError\n d, r = divmod(d - const_coeff, linear_coeff)\n assert r == 0\n if env.setdefault(id, d) != d: raise ShapeError\n return env\n\ndef check_shapes(specs, spec_tree, shapes, tree, message_prefix=\"Output\"):\n if spec_tree != tree or not all(map(_shape_spec_consistent, specs, shapes)):\n specs = tree_unflatten(spec_tree, specs)\n shapes = tree_unflatten(tree, shapes)\n raise ShapeError(f\"{message_prefix} shapes should be {specs} but are {shapes}.\")\n"
] | [
[
"numpy.result_type",
"numpy.shape"
],
[
"numpy.right_shift",
"numpy.log",
"numpy.sqrt",
"numpy.uint32",
"numpy.ndim",
"numpy.ceil",
"numpy.delete",
"numpy.binary_repr",
"numpy.shape",
"numpy.iinfo",
"numpy.float32",
"numpy.array"
],
[
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
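The masking code in the record above orders monomials by the key (-total degree, sorted variable names), so that max() over a polynomial's terms picks the leading term. A minimal standalone sketch of that ordering follows; the helper names mon_key and leading_term are illustrative only and are not part of the jax module.

def mon_key(mon):
    # Mirrors the key used in Mon.__lt__ above: (-total degree, sorted variable names).
    return (-sum(mon.values()), tuple(sorted(mon)))

def leading_term(poly):
    # poly maps monomials (tuples of (variable, exponent) pairs) to coefficients;
    # the leading term is the monomial with the smallest key, i.e. the highest degree.
    return min(poly, key=lambda mon_items: mon_key(dict(mon_items)))

n3m = (("n", 3), ("m", 1))   # n^3 * m, degree 4
m2 = (("m", 2),)             # m^2, degree 2
poly = {n3m: 2, m2: 5}       # 2 n^3 m + 5 m^2
print(leading_term(poly))    # (('n', 3), ('m', 1))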
ko-ya346/python_asr | [
"251d8a4ff810fbeb5f7b63229139944195ab7cb5"
] | [
"04dnn_hmm/02_train_dnn.py"
] | [
"# -*- coding: utf-8 -*-\n\n#\n# DNNを学習します.\n#\n\n# Pytorchを用いた処理に必要なモジュールをインポート\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch import optim\n\n# 作成したDatasetクラスをインポート\nfrom my_dataset import SequenceDataset\n\n# 数値演算用モジュール(numpy)をインポート\nimport numpy as np\n\n# プロット用モジュール(matplotlib)をインポート\nimport matplotlib.pyplot as plt\n\n# hmmfunc.pyからMonoPhoneHMMクラスをインポート\nfrom hmmfunc import MonoPhoneHMM\n\n# モデルの定義をインポート\nfrom my_model import MyDNN\n\n# json形式の入出力を行うモジュールをインポート\nimport json\n\n# os, sys, shutilモジュールをインポート\nimport os\nimport sys\nimport shutil\n\n#\n# メイン関数\n#\nif __name__ == \"__main__\":\n \n #\n # 設定ここから\n #\n\n # 訓練データの特徴量リスト\n train_feat_scp = \\\n '../01compute_features/mfcc/train_small/feats.scp'\n # 訓練データのラベル(アライメント)ファイル\n train_label_file = \\\n './exp/data/train_small/alignment'\n \n # 訓練データから計算された\n # 特徴量の平均/標準偏差ファイル\n mean_std_file = \\\n '../01compute_features/mfcc/train_small/mean_std.txt'\n\n # 開発データの特徴量リスト\n dev_feat_scp = \\\n '../01compute_features/mfcc/dev/feats.scp'\n # 開発データのラベル(アライメント)ファイル\n dev_label_file = \\\n './exp/data/dev/alignment'\n\n # HMMファイル\n # HMMファイルは音素数と状態数の\n # 情報を得るためだけに使う\n hmm_file = '../03gmm_hmm/exp/model_3state_2mix/10.hmm'\n\n # 学習結果を出力するディレクトリ\n output_dir = os.path.join('exp', 'model_dnn')\n\n # ミニバッチに含める発話数\n batch_size = 5\n\n # 最大エポック数\n max_num_epoch = 60\n\n # 中間層のレイヤー数\n num_layers = 4\n\n # 中間層の次元数\n hidden_dim = 1024\n\n # splice: 前後 n フレームの特徴量を結合する\n # 次元数は(splice*2+1)倍になる\n splice = 5\n\n # 初期学習率\n initial_learning_rate = 0.008\n\n # 学習率の減衰やEarly stoppingの\n # 判定を開始するエポック数\n # (= 最低限このエポックまではどれだけ\n # validation結果が悪くても学習を続ける)\n lr_decay_start_epoch = 7\n\n # 学習率を減衰する割合\n # (減衰後学習率 <- 現在の学習率*lr_decay_factor)\n # 1.0以上なら,減衰させない\n lr_decay_factor = 0.5\n\n # Early stoppingの閾値\n # 最低損失値を更新しない場合が\n # 何エポック続けば学習を打ち切るか\n early_stop_threshold = 3\n\n #\n # 設定ここまで\n #\n\n # 出力ディレクトリが存在しない場合は作成する\n os.makedirs(output_dir, exist_ok=True)\n\n # 設定を辞書形式にする\n config = {'num_layers': num_layers, \n 'hidden_dim': hidden_dim,\n 'splice': splice,\n 'batch_size': batch_size,\n 'max_num_epoch': max_num_epoch,\n 'initial_learning_rate': initial_learning_rate,\n 'lr_decay_start_epoch': lr_decay_start_epoch, \n 'lr_decay_factor': lr_decay_factor,\n 'early_stop_threshold': early_stop_threshold}\n\n # 設定をJSON形式で保存する\n conf_file = os.path.join(output_dir, 'config.json')\n with open(conf_file, mode='w') as f:\n json.dump(config, f, indent=4)\n\n # 特徴量の平均/標準偏差ファイルを読み込む\n with open(mean_std_file, mode='r') as f:\n # 全行読み込み\n lines = f.readlines()\n # 1行目(0始まり)が平均値ベクトル(mean),\n # 3行目が標準偏差ベクトル(std)\n mean_line = lines[1]\n std_line = lines[3]\n # スペース区切りのリストに変換\n feat_mean = mean_line.split()\n feat_std = std_line.split()\n # numpy arrayに変換\n feat_mean = np.array(feat_mean, \n dtype=np.float32)\n feat_std = np.array(feat_std, \n dtype=np.float32)\n # 平均/標準偏差ファイルをコピーする\n shutil.copyfile(mean_std_file,\n os.path.join(output_dir, 'mean_std.txt'))\n\n # 次元数の情報を得る\n feat_dim = np.size(feat_mean)\n\n # DNNの出力層の次元数を得るために,\n # HMMの音素数と状態数を得る\n # MonoPhoneHMMクラスを呼び出す\n hmm = MonoPhoneHMM()\n # HMMを読み込む\n hmm.load_hmm(hmm_file)\n # DNNの出力層の次元数は音素数x状態数\n dim_out = hmm.num_phones * hmm.num_states\n # バッチデータ作成の際にラベルを埋める値\n # はdim_out以上の値にする\n pad_index = dim_out\n \n # ニューラルネットワークモデルを作成する\n # 入力特徴量の次元数は\n # feat_dim * (2*splice+1)\n dim_in = feat_dim * (2*splice+1)\n model = MyDNN(dim_in=dim_in,\n dim_hidden=hidden_dim,\n dim_out=dim_out, \n num_layers=num_layers)\n print(model)\n\n # オプティマイザを定義\n # ここでは momentum stochastic 
gradient descent\n # を使用\n optimizer = optim.SGD(model.parameters(), \n lr=initial_learning_rate,\n momentum=0.99)\n\n # 訓練データのデータセットを作成する\n # padding_indexはdim_out以上の値に設定する\n train_dataset = SequenceDataset(train_feat_scp,\n train_label_file,\n feat_mean,\n feat_std,\n pad_index,\n splice)\n # 開発データのデータセットを作成する\n dev_dataset = SequenceDataset(dev_feat_scp,\n dev_label_file,\n feat_mean,\n feat_std,\n pad_index,\n splice)\n \n # 訓練データのDataLoaderを呼び出す\n # 訓練データはシャッフルして用いる\n # (num_workerは大きい程処理が速くなりますが,\n # PCに負担が出ます.PCのスペックに応じて\n # 設定してください)\n train_loader = DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n # 開発データのDataLoaderを呼び出す\n # 開発データはデータはシャッフルしない\n dev_loader = DataLoader(dev_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4)\n\n # クロスエントロピーを損失関数として用いる\n criterion = \\\n nn.CrossEntropyLoss(ignore_index=pad_index)\n\n # CUDAが使える場合はモデルパラメータをGPUに,\n # そうでなければCPUに配置する\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n model = model.to(device)\n\n # モデルをトレーニングモードに設定する\n model.train()\n\n # 訓練データの処理と開発データの処理を\n # for でシンプルに記述するために,辞書データ化しておく\n dataset_loader = {'train': train_loader,\n 'validation': dev_loader}\n\n # 各エポックにおける損失値と誤り率の履歴\n loss_history = {'train': [],\n 'validation': []}\n error_history = {'train': [],\n 'validation': []}\n \n # 本プログラムでは,validation時の損失値が\n # 最も低かったモデルを保存する.\n # そのため,最も低い損失値,\n # そのときのモデルとエポック数を記憶しておく\n best_loss = -1\n best_model = None\n best_epoch = 0\n # Early stoppingフラグ.Trueになると学習を打ち切る\n early_stop_flag = False\n # Early stopping判定用(損失値の最低値が\n # 更新されないエポックが何回続いているか)のカウンタ\n counter_for_early_stop = 0\n\n # ログファイルの準備\n log_file = open(os.path.join(output_dir,\n 'log.txt'),\n mode='w')\n log_file.write('epoch\\ttrain loss\\t'\\\n 'train err\\tvalid loss\\tvalid err')\n\n # エポックの数だけループ\n for epoch in range(max_num_epoch):\n # early stopフラグが立っている場合は,\n # 学習を打ち切る\n if early_stop_flag:\n print(' Early stopping.'\\\n ' (early_stop_threshold = %d)' \\\n % (early_stop_threshold))\n log_file.write('\\n Early stopping.'\\\n ' (early_stop_threshold = %d)' \\\n % (early_stop_threshold))\n break\n\n # エポック数を表示\n print('epoch %d/%d:' % (epoch+1, max_num_epoch))\n log_file.write('\\n%d\\t' % (epoch+1))\n\n # trainフェーズとvalidationフェーズを交互に実施する\n for phase in ['train', 'validation']:\n # このエポックにおける累積損失値と発話数\n total_loss = 0\n total_utt = 0\n # このエポックにおける累積認識誤り文字数と総文字数\n total_error = 0\n total_frames = 0\n\n # 各フェーズのDataLoaderから1ミニバッチ\n # ずつ取り出して処理する.\n # これを全ミニバッチ処理が終わるまで繰り返す.\n # ミニバッチに含まれるデータは,\n # 音声特徴量,ラベル,フレーム数,\n # ラベル長,発話ID\n for (features, labels, feat_len,\n label_len, utt_ids) \\\n in dataset_loader[phase]:\n\n # CUDAが使える場合はデータをGPUに,\n # そうでなければCPUに配置する\n features, labels = \\\n features.to(device), labels.to(device)\n\n # 勾配をリセット\n optimizer.zero_grad()\n\n # モデルの出力を計算(フォワード処理)\n outputs = model(features)\n\n # この時点でoutputsは\n # [バッチサイズ, フレーム数, ラベル数]\n # の3次元テンソル.\n # CrossEntropyLossを使うためには\n # [サンプル数, ラベル数]の2次元テンソル\n # にする必要があるので,viewを使って\n # 変形する\n b_size, f_size, _ = outputs.size()\n outputs = outputs.view(b_size * f_size,\n dim_out)\n # labelsは[バッチサイズ, フレーム]の\n # 2次元テンソル.\n # CrossEntropyLossを使うためには\n # [サンプル数]の1次元テンソルにする\n # 必要があるので.viewを使って変形する.\n # 1次元への変形はview(-1)で良い.\n # (view(b_size*f_size)でも良い)\n labels = labels.view(-1)\n \n # 損失値を計算する.\n loss = criterion(outputs, labels)\n \n # 訓練フェーズの場合は,\n # 誤差逆伝搬を実行し,\n # モデルパラメータを更新する\n if phase == 'train':\n # 勾配を計算する\n loss.backward()\n # オプティマイザにより,\n # パラメータを更新する\n optimizer.step()\n\n # 損失値を累積する\n 
total_loss += loss.item()\n # 処理した発話数をカウントする\n total_utt += b_size\n\n #\n # フレーム単位の誤り率を計算する\n #\n # 推定ラベルを得る\n _, hyp = torch.max(outputs, 1)\n # ラベルにpad_indexを埋めた\n # フレームを取り除く\n hyp = hyp[labels != pad_index]\n ref = labels[labels != pad_index]\n # 推定ラベルと正解ラベルが不一致な\n # フレーム数を得る\n error = (hyp != ref).sum()\n\n # 誤りフレーム数を累積する\n total_error += error\n # 総フレーム数を累積する\n total_frames += len(ref)\n \n #\n # このフェーズにおいて,1エポック終了\n # 損失値,認識エラー率,モデルの保存等を行う\n # \n\n # 損失値の累積値を,処理した発話数で割る\n epoch_loss = total_loss / total_utt\n # 画面とログファイルに出力する\n print(' %s loss: %f' \\\n % (phase, epoch_loss))\n log_file.write('%.6f\\t' % (epoch_loss))\n # 履歴に加える\n loss_history[phase].append(epoch_loss)\n\n # 総誤りフレーム数を,総フレーム数で\n # 割ってエラー率に換算\n epoch_error = 100.0 * total_error \\\n / total_frames\n # 画面とログファイルに出力する\n print(' %s error rate: %f %%' \\\n % (phase, epoch_error))\n log_file.write('%.6f\\t' % (epoch_error))\n # 履歴に加える\n error_history[phase].append(epoch_error)\n\n #\n # validationフェーズ特有の処理\n #\n if phase == 'validation':\n if epoch == 0 or best_loss > epoch_loss:\n # 損失値が最低値を更新した場合は,\n # その時のモデルを保存する\n best_loss = epoch_loss\n torch.save(model.state_dict(),\n output_dir+'/best_model.pt')\n best_epoch = epoch\n # Early stopping判定用の\n # カウンタをリセットする\n counter_for_early_stop = 0\n else:\n # 最低値を更新しておらず,\n if epoch+1 >= lr_decay_start_epoch:\n # かつlr_decay_start_epoch以上の\n # エポックに達している場合\n if counter_for_early_stop+1 \\\n >= early_stop_threshold:\n # 更新していないエポックが,\n # 閾値回数以上続いている場合,\n # Early stopping フラグを立てる\n early_stop_flag = True\n else:\n # Early stopping条件に\n # 達していない場合は\n # 学習率を減衰させて学習続行\n if lr_decay_factor < 1.0:\n for i, param_group \\\n in enumerate(\\\n optimizer.param_groups):\n if i == 0:\n lr = param_group['lr']\n dlr = lr_decay_factor \\\n * lr\n print(' (Decay '\\\n 'learning rate:'\\\n ' %f -> %f)' \\\n % (lr, dlr))\n log_file.write(\\\n '(Decay learning'\\\n ' rate: %f -> %f)'\\\n % (lr, dlr))\n param_group['lr'] = dlr\n # Early stopping判定用の\n # カウンタを増やす\n counter_for_early_stop += 1\n \n #\n # 全エポック終了\n # 学習済みモデルの保存とログの書き込みを行う\n #\n print('---------------Summary'\\\n '------------------')\n log_file.write('\\n---------------Summary'\\\n '------------------\\n')\n\n # 最終エポックのモデルを保存する\n torch.save(model.state_dict(), \n os.path.join(output_dir,'final_model.pt'))\n print('Final epoch model -> %s/final_model.pt' \\\n % (output_dir))\n log_file.write('Final epoch model ->'\\\n ' %s/final_model.pt\\n' \\\n % (output_dir))\n\n # 最終エポックの情報\n for phase in ['train', 'validation']:\n # 最終エポックの損失値を出力\n print(' %s loss: %f' \\\n % (phase, loss_history[phase][-1]))\n log_file.write(' %s loss: %f\\n' \\\n % (phase, loss_history[phase][-1]))\n # 最終エポックのエラー率を出力 \n print(' %s error rate: %f %%' \\\n % (phase, error_history[phase][-1]))\n log_file.write(' %s error rate: %f %%\\n' \\\n % (phase, error_history[phase][-1]))\n\n # ベストエポックの情報\n # (validationの損失が最小だったエポック)\n print('Best epoch model (%d-th epoch)'\\\n ' -> %s/best_model.pt' \\\n % (best_epoch+1, output_dir))\n log_file.write('Best epoch model (%d-th epoch)'\\\n ' -> %s/best_model.pt\\n' \\\n % (best_epoch+1, output_dir))\n for phase in ['train', 'validation']:\n # ベストエポックの損失値を出力\n print(' %s loss: %f' \\\n % (phase, loss_history[phase][best_epoch]))\n log_file.write(' %s loss: %f\\n' \\\n % (phase, loss_history[phase][best_epoch]))\n # ベストエポックのエラー率を出力\n print(' %s error rate: %f %%' \\\n % (phase, error_history[phase][best_epoch]))\n log_file.write(' %s error rate: %f %%\\n' \\\n % (phase, error_history[phase][best_epoch]))\n\n # 損失値の履歴(Learning 
Curve)グラフにして保存する\n fig1 = plt.figure()\n for phase in ['train', 'validation']:\n plt.plot(loss_history[phase],\n label=phase+' loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n fig1.legend()\n fig1.savefig(output_dir+'/loss.png')\n\n # 認識誤り率の履歴グラフにして保存する\n fig2 = plt.figure()\n for phase in ['train', 'validation']:\n plt.plot(error_history[phase],\n label=phase+' error')\n plt.xlabel('Epoch')\n plt.ylabel('Error [%]')\n fig2.legend()\n fig2.savefig(output_dir+'/error.png')\n\n # ログファイルを閉じる\n log_file.close()\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.plot",
"numpy.size",
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"torch.device",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
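The training loop in the record above computes a frame-level error rate by dropping padded frames before comparing argmax predictions against reference labels. A self-contained sketch of just that step, using random tensors and made-up sizes and pad index, is:

import torch

num_frames, num_classes = 8, 5
pad_index = num_classes                                 # padding label sits outside the class range
outputs = torch.randn(num_frames, num_classes)          # flattened [batch*frames, classes]
labels = torch.randint(0, num_classes, (num_frames,))   # flattened [batch*frames]
labels[-2:] = pad_index                                 # pretend the last two frames are padding

_, hyp = torch.max(outputs, 1)        # predicted label per frame
hyp = hyp[labels != pad_index]        # discard padded frames
ref = labels[labels != pad_index]
error_rate = 100.0 * (hyp != ref).sum().item() / len(ref)
print(f"frame error rate: {error_rate:.1f} %")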
SilviaVec/Realtime-Action-Recognition | [
"330a64fc1b2158b1884a1ee86b9cc875925fc121"
] | [
"src/s2_put_skeleton_txts_to_a_single_txt.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n'''\nRead multiple skeletons txts and saved them into a single txt.\nIf an image doesn't have skeleton, discard it.\nIf an image label is not `CLASSES`, discard it.\nInput:\n `skeletons/00001.txt` ~ `skeletons/xxxxx.txt` from `SRC_DETECTED_SKELETONS_FOLDER`.\nOutput:\n `skeletons_info.txt`. The filepath is `DST_ALL_SKELETONS_TXT`.\n'''\n\nimport numpy as np\nimport simplejson\nimport collections\n\nif True: # Include project path\n import sys\n import os\n ROOT = os.path.dirname(os.path.abspath(__file__))+\"/../\"\n CURR_PATH = os.path.dirname(os.path.abspath(__file__))+\"/\"\n sys.path.append(ROOT)\n\n # import utils.lib_feature_proc # This is no needed,\n # because this script only transfer (part of) the data from many txts to a single txt,\n # without doing any data analsysis.\n\nimport utils.lib_commons as lib_commons\n\n\ndef par(path): # Pre-Append ROOT to the path if it's not absolute\n return ROOT + path if (path and path[0] != \"/\") else path\n\n# -- Settings\n\n\ncfg_all = lib_commons.read_yaml(ROOT + \"config/config.yaml\")\ncfg = cfg_all[\"s2_put_skeleton_txts_to_a_single_txt.py\"]\n\nCLASSES = np.array(cfg_all[\"classes\"])\n\nSKELETON_FILENAME_FORMAT = cfg_all[\"skeleton_filename_format\"]\n\nSRC_DETECTED_SKELETONS_FOLDER = par(cfg[\"input\"][\"detected_skeletons_folder\"])\nDST_ALL_SKELETONS_TXT = par(cfg[\"output\"][\"all_skeletons_txt\"])\n\nIDX_PERSON = 0 # Only use the skeleton of the 0th person in each image\nIDX_ACTION_LABEL = 3 # [1, 7, 54, \"jump\", \"jump_03-02-12-34-01-795/00240.jpg\"]\n\n# -- Helper function\n\n\ndef read_skeletons_from_ith_txt(i):\n ''' \n Arguments:\n i {int}: the ith skeleton txt. Zero-based index.\n If there are mutliple people, then there are multiple skeletons' data in this txt.\n Return:\n skeletons_in_ith_txt {list of list}:\n Length of each skeleton data is supposed to be 56 = 5 image info + 51 xyz positions. \n '''\n filename = SRC_DETECTED_SKELETONS_FOLDER + \\\n SKELETON_FILENAME_FORMAT.format(i)\n skeletons_in_ith_txt = lib_commons.read_listlist(filename)\n return skeletons_in_ith_txt\n\n\ndef get_length_of_one_skeleton_data(filepaths):\n ''' Find a non-empty txt file, and then get the length of one skeleton data.\n The data length should be 59, where:\n 59 = 5 + 54.\n 5: [cnt_action, cnt_clip, cnt_image, action_label, filepath]\n See utils.lib_io.get_training_imgs_info for more details\n 54: 18 joints * 3 xyz positions\n '''\n for i in range(len(filepaths)):\n skeletons = read_skeletons_from_ith_txt(i)\n if len(skeletons):\n skeleton = skeletons[IDX_PERSON]\n data_size = len(skeleton)\n assert(data_size == 59) #MODIFIED\n return data_size\n raise RuntimeError(f\"No valid txt under: {SRC_DETECTED_SKELETONS_FOLDER}.\")\n\n\n# -- Main\nif __name__ == \"__main__\":\n ''' Read multiple skeletons txts and saved them into a single txt. 
'''\n\n    # -- Get skeleton filenames\n    filepaths = lib_commons.get_filenames(SRC_DETECTED_SKELETONS_FOLDER,\n                                          use_sort=True, with_folder_path=True)\n    num_skeletons = len(filepaths)\n\n    # -- Check data length of one skeleton\n    data_length = get_length_of_one_skeleton_data(filepaths)\n    print(f\"Data length of one skeleton is {data_length}\")\n\n    # -- Read in skeletons and push to all_skeletons\n    all_skeletons = []\n    labels_cnt = collections.defaultdict(int)\n    for i in range(num_skeletons):\n\n        # Read skeletons from a txt\n        skeletons = read_skeletons_from_ith_txt(i)\n        if not skeletons:  # If empty, discard this image.\n            continue\n        skeleton = skeletons[IDX_PERSON]\n        label = skeleton[IDX_ACTION_LABEL]\n        if label not in CLASSES:  # If invalid label, discard this image.\n            continue\n        labels_cnt[label] += 1\n\n        # Push to result\n        all_skeletons.append(skeleton)\n\n        # Print\n        if i == 1 or i % 100 == 0:\n            print(\"{}/{}\".format(i, num_skeletons))\n\n    # -- Save to txt\n    with open(DST_ALL_SKELETONS_TXT, 'w') as f:\n        simplejson.dump(all_skeletons, f)\n\n    print(f\"There are {len(all_skeletons)} skeleton data.\")\n    print(f\"They are saved to {DST_ALL_SKELETONS_TXT}\")\n    print(\"Number of each action: \")\n    for label in CLASSES:\n        print(f\"    {label}: {labels_cnt[label]}\")\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
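The aggregation script in the record above keeps only skeleton rows whose label is in CLASSES and counts them per label. An in-memory sketch of that filtering, with made-up rows and labels, is:

import collections
import numpy as np

CLASSES = np.array(["jump", "walk", "wave"])   # illustrative label set
IDX_ACTION_LABEL = 3

records = [
    [1, 1, 240, "jump", "img_a.jpg"] + [0.0] * 54,
    [1, 2, 241, "dance", "img_b.jpg"] + [0.0] * 54,   # dropped: label not in CLASSES
    [2, 1, 10, "walk", "img_c.jpg"] + [0.0] * 54,
]

kept, labels_cnt = [], collections.defaultdict(int)
for rec in records:
    label = rec[IDX_ACTION_LABEL]
    if label not in CLASSES:
        continue
    labels_cnt[label] += 1
    kept.append(rec)

print(len(kept), dict(labels_cnt))   # 2 {'jump': 1, 'walk': 1}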
dlee0156/bilateral-connectome | [
"26fe165341bb79379fecdd8bc5d7b5bfe3983fdc"
] | [
"pkg/pkg/stats/fisher_exact_nonunity.py"
] | [
"from scipy.stats import nchypergeom_fisher\nimport numpy as np\n\n\ndef fisher_exact_nonunity(table, alternative=\"two-sided\", null_odds=1):\n \"\"\"Perform a Fisher exact test on a 2x2 contingency table.\n Parameters\n ----------\n table : array_like of ints\n A 2x2 contingency table. Elements must be non-negative integers.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n * 'two-sided'\n * 'less': one-sided\n * 'greater': one-sided\n See the Notes for more details.\n null_odds : float, optional (default=1)\n A (possibly non-unity) null odds ratio.\n Returns\n -------\n oddsratio : float\n This is prior odds ratio and not a posterior estimate.\n p_value : float\n P-value, the probability of obtaining a distribution at least as\n extreme as the one that was actually observed, assuming that the\n null hypothesis is true.\n See Also\n --------\n chi2_contingency : Chi-square test of independence of variables in a\n contingency table. This can be used as an alternative to\n `fisher_exact` when the numbers in the table are large.\n barnard_exact : Barnard's exact test, which is a more powerful alternative\n than Fisher's exact test for 2x2 contingency tables.\n boschloo_exact : Boschloo's exact test, which is a more powerful alternative\n than Fisher's exact test for 2x2 contingency tables.\n Notes\n -----\n *Null hypothesis and p-values*\n The null hypothesis is that the input table is from the hypergeometric\n distribution with parameters (as used in `hypergeom`)\n ``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the\n input table is ``[[a, b], [c, d]]``. This distribution has support\n ``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values\n in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x``\n can be interpreted as the upper-left element of a 2x2 table, so the\n tables in the distribution have form::\n [ x n - x ]\n [N - x M - (n + N) + x]\n For example, if::\n table = [6 2]\n [1 4]\n then the support is ``2 <= x <= 7``, and the tables in the distribution\n are::\n [2 6] [3 5] [4 4] [5 3] [6 2] [7 1]\n [5 0] [4 1] [3 2] [2 3] [1 4] [0 5]\n The probability of each table is given by the hypergeometric distribution\n ``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to\n three significant digits)::\n x 2 3 4 5 6 7\n p 0.0163 0.163 0.408 0.326 0.0816 0.00466\n These can be computed with::\n >>> from scipy.stats import hypergeom\n >>> table = np.array([[6, 2], [1, 4]])\n >>> M = table.sum()\n >>> n = table[0].sum()\n >>> N = table[:, 0].sum()\n >>> start, end = hypergeom.support(M, n, N)\n >>> hypergeom.pmf(np.arange(start, end+1), M, n, N)\n array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,\n 0.004662 ])\n The two-sided p-value is the probability that, under the null hypothesis,\n a random table would have a probability equal to or less than the\n probability of the input table. For our example, the probability of\n the input table (where ``x = 6``) is 0.0816. 
The x values where the\n probability does not exceed this are 2, 6 and 7, so the two-sided p-value\n is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::\n >>> from scipy.stats import fisher_exact\n >>> oddsr, p = fisher_exact(table, alternative='two-sided')\n >>> p\n 0.10256410256410257\n The one-sided p-value for ``alternative='greater'`` is the probability\n that a random table has ``x >= a``, which in our example is ``x >= 6``,\n or ``0.0816 + 0.00466 ~= 0.08626``::\n >>> oddsr, p = fisher_exact(table, alternative='greater')\n >>> p\n 0.08624708624708627\n This is equivalent to computing the survival function of the\n distribution at ``x = 5`` (one less than ``x`` from the input table,\n because we want to include the probability of ``x = 6`` in the sum)::\n >>> hypergeom.sf(5, M, n, N)\n 0.08624708624708627\n For ``alternative='less'``, the one-sided p-value is the probability\n that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),\n or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::\n >>> oddsr, p = fisher_exact(table, alternative='less')\n >>> p\n 0.9953379953379957\n This is equivalent to computing the cumulative distribution function\n of the distribution at ``x = 6``:\n >>> hypergeom.cdf(6, M, n, N)\n 0.9953379953379957\n *Odds ratio*\n The calculated odds ratio is different from the one R uses. This SciPy\n implementation returns the (more common) \"unconditional Maximum\n Likelihood Estimate\", while R uses the \"conditional Maximum Likelihood\n Estimate\".\n Examples\n --------\n Say we spend a few days counting whales and sharks in the Atlantic and\n Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the\n Indian ocean 2 whales and 5 sharks. Then our contingency table is::\n Atlantic Indian\n whales 8 2\n sharks 1 5\n We use this table to find the p-value:\n >>> from scipy.stats import fisher_exact\n >>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])\n >>> pvalue\n 0.0349...\n The probability that we would observe this or an even more imbalanced ratio\n by chance is about 3.5%. 
A commonly used significance level is 5%--if we\n adopt that, we can therefore conclude that our observed imbalance is\n statistically significant; whales prefer the Atlantic while sharks prefer\n the Indian ocean.\n \"\"\"\n dist = nchypergeom_fisher\n\n # int32 is not enough for the algorithm\n c = np.asarray(table, dtype=np.int64)\n if not c.shape == (2, 2):\n raise ValueError(\"The input `table` must be of shape (2, 2).\")\n\n if np.any(c < 0):\n raise ValueError(\"All values in `table` must be nonnegative.\")\n\n if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):\n # If both values in a row or column are zero, the p-value is 1 and\n # the odds ratio is NaN.\n return np.nan, 1.0\n\n if c[1, 0] > 0 and c[0, 1] > 0:\n oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])\n else:\n oddsratio = np.inf\n\n n1 = c[0, 0] + c[0, 1]\n n2 = c[1, 0] + c[1, 1]\n n = c[0, 0] + c[1, 0]\n\n rv = dist(n1 + n2, n1, n, null_odds)\n\n def binary_search(n, n1, n2, side):\n \"\"\"Binary search for where to begin halves in two-sided test.\"\"\"\n if side == \"upper\":\n minval = mode\n maxval = n\n else:\n minval = 0\n maxval = mode\n guess = -1\n while maxval - minval > 1:\n if maxval == minval + 1 and guess == minval:\n guess = maxval\n else:\n guess = (maxval + minval) // 2\n pguess = rv.pmf(guess)\n if side == \"upper\":\n ng = guess - 1\n else:\n ng = guess + 1\n if pguess <= pexact < rv.pmf(ng):\n break\n elif pguess < pexact:\n maxval = guess\n else:\n minval = guess\n if guess == -1:\n guess = minval\n if side == \"upper\":\n while guess > 0 and rv.pmf(guess) < pexact * epsilon:\n guess -= 1\n while rv.pmf(guess) > pexact / epsilon:\n guess += 1\n else:\n while rv.pmf(guess) < pexact * epsilon:\n guess += 1\n while guess > 0 and rv.pmf(guess) > pexact / epsilon:\n guess -= 1\n return guess\n\n if alternative == \"less\":\n pvalue = rv.cdf(c[0, 0])\n elif alternative == \"greater\":\n # Same formula as the 'less' case, but with the second column.\n pvalue = rv.sf(c[0, 0] - 1)\n elif alternative == \"two-sided\":\n mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))\n pexact = dist.pmf(c[0, 0], n1 + n2, n1, n, null_odds)\n pmode = dist.pmf(mode, n1 + n2, n1, n, null_odds)\n\n epsilon = 1 - 1e-4\n if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:\n return oddsratio, 1.0\n\n elif c[0, 0] < mode:\n plower = dist.cdf(c[0, 0], n1 + n2, n1, n, null_odds)\n if dist.pmf(n, n1 + n2, n1, n, null_odds) > pexact / epsilon:\n return oddsratio, plower\n\n guess = binary_search(n, n1, n2, \"upper\")\n pvalue = plower + dist.sf(guess - 1, n1 + n2, n1, n, null_odds)\n else:\n pupper = dist.sf(c[0, 0] - 1, n1 + n2, n1, n, null_odds)\n if dist.pmf(0, n1 + n2, n1, n, null_odds) > pexact / epsilon:\n return oddsratio, pupper\n\n guess = binary_search(n, n1, n2, \"lower\")\n pvalue = pupper + dist.cdf(guess, n1 + n2, n1, n, null_odds)\n else:\n msg = \"`alternative` should be one of {'two-sided', 'less', 'greater'}\"\n raise ValueError(msg)\n\n pvalue = min(pvalue, 1.0)\n\n return oddsratio, pvalue\n"
] | [
[
"numpy.asarray",
"numpy.abs",
"numpy.maximum",
"numpy.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
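fisher_exact_nonunity in the record above delegates to scipy's Fisher noncentral hypergeometric distribution. A minimal usage sketch of the one-sided 'less' branch, with a made-up table and null odds ratio, is:

import numpy as np
from scipy.stats import nchypergeom_fisher

table = np.array([[8, 2], [1, 5]])
null_odds = 2.0                      # non-unity null odds ratio

n1 = table[0].sum()                  # first row total
n2 = table[1].sum()                  # second row total
n = table[:, 0].sum()                # first column total

# Mirrors rv = dist(n1 + n2, n1, n, null_odds) and pvalue = rv.cdf(c[0, 0]) above.
rv = nchypergeom_fisher(n1 + n2, n1, n, null_odds)
print(rv.cdf(table[0, 0]))           # one-sided 'less' p-value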
JunaidAkhter/vmc_jax | [
"4f0dcc9f32cb6885cad3c5d797d9f9e01247f737",
"4f0dcc9f32cb6885cad3c5d797d9f9e01247f737",
"4f0dcc9f32cb6885cad3c5d797d9f9e01247f737"
] | [
"sg_sr/sr_data/sr_cplx/svd/cpxrbm.py",
"tests/symmetries_t.py",
"sg_sr/sr_data/sr_cplx/cpxmpo_3nodes5.py"
] | [
"import sys\n# Find jVMC package\n#sys.path.append(\"/Users/akhter/githesis-/jvmc/vmc_jax\")\nsys.path.append(\"/Users/akhter/thesis/vmc_jax\")\n\n\nimport jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\n\nimport jax.random as random\nimport jax.numpy as jnp\nimport numpy as np\nfrom jax.tree_util import tree_flatten, tree_unflatten\nimport jVMC\n\nimport tensornetwork as tn\ntn.set_default_backend(\"jax\")\n\nimport functools\nfrom typing import Any, Callable, Sequence, Optional\nimport flax\nfrom flax import linen as nn\nfrom flax import optim\nfrom jax import lax\nfrom functools import partial\n\nimport jVMC.nets.initializers as init\nimport jVMC.global_defs as global_defs\n\nimport time\n\n\n\n\n\n# DMRG energies produced with the TeNPy library https://github.com/tenpy/tenpy\n#DMRG_energies = {\"10\": -1.0545844370449059, \"20\": -1.0900383739, \"100\": -1.1194665474274852}\n\nL = 16 # system size\ng = -0.7 # strength of external field\n\n# Set up hamiltonian for open boundary conditions\nhamiltonian = jVMC.operator.BranchFreeOperator()\nfor l in range(L - 1):\n hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))\n hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))\nhamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))\n\ndef svd(dp,shape, rank=L):\n\n \"\"\"Takes in the concatenated matrix and spits out the copressed one\"\"\"\n \n #getting the real and the complex parts of the matrix\n real_matrix = jnp.reshape(dp[:L*h], (L,h)) \n complex_matrix = jnp.reshape(dp[L*h:], (L,h))\n print(\"real_matrix\", real_matrix, \"complex_matrix:\", complex_matrix)\n #creating the W matrix from the real and the complex parts \n matrix = jax.lax.complex(real_matrix, complex_matrix)\n print(\"matrix:\", matrix)\n #Now that we have the matrix we can svd it and reject some of the singular values. \n tensor1 = jnp.reshape(matrix, shape)\n print(\"tensor1_shape and atype:\", tensor1.shape, type(tensor1))\n #reshaping the matrix in a tensor of given shape e.g. 
a four legged tensor\n node = tn.Node(tensor1)\n #now we perform the svd of the node keeping the left two and the right two legs as they are \n u, vh, _ = tn.split_node(node, left_edges=[node[0], node[1]], right_edges=[node[2],node[3]], max_singular_values=r)\n print(\"shape of u:\", u.shape, \"shape of vh:\", vh.shape)\n node_contracted = (u @ vh).tensor\n matrix_returned = jnp.reshape(node_contracted, (matrix.shape))\n print(\"shape of matrix_returned:\", matrix_returned.shape)\n return matrix_returned\n \n\ndef simulate(rng, iterations, rank, t_step):\n net = net_init\n psi = jVMC.vqs.NQS(net, seed=rng) # Variational wave function\n\n\n # Set up sampler\n #tic = time.perf_counter()\n sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,\n numChains=100, sweepSteps=L,\n numSamples=30000, thermalizationSweeps=25)\n #toc = time.perf_counter()\n \n #print(\" == Total time for sampling step: %fs\\n\" % (toc - tic))\n\n # Set up TDVP\n tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,\n svdTol=1e-8, diagonalShift=10, makeReal='real')\n\n stepper = jVMC.util.stepper.Euler(timeStep=t_step) # ODE integrator\n\n\n res = []\n \n for n in range(iterations):\n dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)\n print(\"dp_inserted\", dp)\n dp = svd(dp, (4,4,2,2), rank = r)\n \n dp = jnp.concatenate([p.ravel() for p in tree_flatten(dp)[0]])\n dp = jnp.concatenate([dp.real, dp.imag])\n print(\"dp_returned\", dp)\n psi.set_parameters(dp)\n\n print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)\n\n res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])\n np.savetxt('dp', dp) \n return np.array(res)\n\n\n#iterations = 2500\n#rng_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\niterations = 2\nrng_list = [0, 1]\ntime_step = 12e-2 \nh = L\nnet_init = jVMC.nets.CpxRBM(numHidden = h, bias = False)\n\n#rank_list = jnp.arange(L/2, L+1)\nrank_list = [8,9]\nresults = []\nfor j,rng in enumerate(rng_list):\n \n E_0_aarray = np.zeros((iterations, len(rng_list)))#an empty two dimensional array corresponding to the D and \"rng\".\n\n for r in rank_list:\n \n #print(\"rng:\", rng)\n res = simulate(rng, iterations, rank=r, t_step = time_step)\n E_0 = res + 1.0660513358196495#this energy is for 16 spins\n #adding the energy values obtained to the first entry of the row\n #print(\"length\", len(E_0))\n E_0_aarray[:, j] = E_0[:, 0]\n #print(\"final_energy:\", E_0[-1])\n \n results.apend(E_0_aarray)\n\n#print(\"E_array\", E_0_aarray)\n\nnp.savetxt('cpxrbm_16_h16_sr_12t', np.array(results), header='Data for CpxRBM with h = 16 for 1 initializations')\n",
"import sys\n# Find jVMC package\nsys.path.append(sys.path[0] + \"/..\")\n\nimport unittest\n\nimport jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\nimport jax.random as random\nimport jax.numpy as jnp\n\nimport numpy as np\n\nimport jVMC\nimport jVMC.util.symmetries as symmetries\n\nimport jVMC.global_defs as global_defs\n\nimport time\n\n\nclass TestSymmetries(unittest.TestCase):\n\n def test_symmetries2D(self):\n L = 3\n rotation_f = 4\n reflection_f = 2\n translation_f = L**2\n for rotation in [True, False]:\n for reflection in [True, False]:\n for translation in [True, False]:\n orbit = symmetries.get_orbit_2d_square(L, rotation=rotation, reflection=reflection, translation=translation)\n self.assertTrue(orbit.shape[0] == (rotation_f if rotation else 1) * (reflection_f if reflection else 1) * (translation_f if translation else 1))\n self.assertTrue(np.issubdtype(orbit.dtype, np.integer))\n\n def test_symmetries1D(self):\n L = 3\n reflection_f = 2\n translation_f = L\n for translation in [True, False]:\n for reflection in [True, False]:\n orbit = symmetries.get_orbit_1d(L, reflection=reflection, translation=translation)\n self.assertTrue(orbit.shape[0] == (reflection_f if reflection else 1) * (translation_f if translation else 1))\n self.assertTrue(np.issubdtype(orbit.dtype, np.integer))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import sys\n# Find jVMC package\nsys.path.append(\"/Users/akhter/githesis-/jvmc/vmc_jax\")\n\nimport jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\n\nimport jax.random as random\nimport jax.numpy as jnp\nimport numpy as np\n\nimport jVMC\n\n\n\nimport functools\nfrom typing import Any, Callable, Sequence, Optional\nimport flax\nfrom flax import linen as nn\nfrom flax import optim\nfrom jax import lax\nfrom functools import partial\n\n\n\n\n\n\nimport tensornetwork as tn\ntn.set_default_backend(\"jax\")\n\n\nimport jVMC.nets.initializers as init\nimport jVMC.global_defs as global_defs\n\n\nimport time\nstart = time.time()\n\n\n# DMRG energies produced with the TeNPy library https://github.com/tenpy/tenpy\n#DMRG_energies = {\"10\": -1.0545844370449059, \"20\": -1.0900383739, \"100\": -1.1194665474274852}\n\nL = 12 # system size\ng = -0.7 # strength of external field\n\n\n#FIRST WE DEFINE AN MPO LAYER\n\nclass MPO(nn.Module):\n \"\"\"MPO with \"n\" nodes\n Acts on: \n x: Input data vector of any shape without the batch number.\n batching will be taken care by vmap function.\n Arguments:\n num_nodes: the number of nodes that we want.\n inp_dims: list containing input dimensions for each node.\n oup_dim: Output dimension (same for every node).\n D: Bond dimension\n Returns:\n An n dimensional array\n Note: One must know the dimension of \"x\"(without the batch)\n before the mpo acts on \"x\" and choose the number of nodes\n and the input dimensions so that the product of the input \n dimensions of MPO is same as the total dimensionality of \"x\"\n \"\"\" \n num_nodes: int \n inp_dims: Sequence[int]\n oup_dims: Sequence[int] \n D: int \n \n @nn.compact\n def __call__(self, x):\n n = self.num_nodes\n inp_dms = self.inp_dims\n oup_dms = self.oup_dims\n D = self.D\n #print(\"Input_dimension:\", inp_dms)\n x = x.reshape(inp_dms) #reshaping to feed to mpo\n #print(\"reshaped_x:\", x.shape)\n nodes = [] #empty list in which we will store nodes(which are basically just arrays) \n legs = [] #empty list in which we are going to store the sequences of contractions \n nodes.append(x) # adding the data as the first node to the list\n legs.append([ i for i in range(1,n+1)]) # naming list for input legs from the data\n #print('n:', n, 'input_dimensions:', inp_dms, 'output_dimensions:', oup_dm, 'D:', D)\n \n a = 1/(inp_dms[1]*inp_dms[2]*oup_dms[0]*(D**3)*oup_dms[1]**2 * oup_dms[2]**2)\n b = 1/(inp_dms[0]*inp_dms[2]*oup_dms[0]**2*(D**2)*oup_dms[1] * oup_dms[2]**2)\n c = 1/(inp_dms[1]*inp_dms[0]*oup_dms[2]*(D**3)*oup_dms[1]**2 * oup_dms[0]**2)\n d = (a*b*c)**(1/5)\n \n for i, dm in enumerate(inp_dms):\n if i == 0:\n #print('i:', i, 'dm:', dm)\n nodes.append(self.param('a'+str(i), partial(init.cplx_init1, var = d**2/a), (oup_dms[i],dm,D))) # include the node name later\n legs.append([-1,1,n+1])\n elif i == n-1:\n #print('i:', i, 'dm:', dm)\n nodes.append(self.param('a'+str(i), partial(init.cplx_init1, var = d**2/c), (dm,oup_dms[i],D)))\n legs.append([n, -n, 2*n-1])\n\n else:\n #print('i:', i, 'dm:', dm)\n nodes.append(self.param('a'+str(i), partial(init.cplx_init1, var = d**2/b), (dm,D,oup_dms[i],D)))\n legs.append([i+1, n+2, -(i+1), n+1])\n # creating the bias which we need to add at the end\n #bias = self.param('bias', self.kernel_init, [oup_dm]*n)\n \n result = tn.ncon(nodes, legs) # bias must be added here if the above line in ucommented. 
\n result = result \n \n return result\n\n# This class defines the network structure of a complex RBM\nclass MyNet(flax.linen.Module):\n num_nodes: int \n inp_dims: Sequence[int] \n oup_dims: Sequence[int] \n D: int \n\n @flax.linen.compact\n def __call__(self, s):\n\n\n # introducing the mpo layer\n def apply_mpo(single_config):\n return MPO(num_nodes = self.num_nodes, inp_dims = self.inp_dims, \\\n oup_dims = self.oup_dims, D = self.D)(single_config)\n\n return jnp.sum(jnp.log(jnp.cosh(apply_mpo(2 * s - 1))))\n\n\n\n# Initialize net\n#net = MyNet(num_nodes = 2, inp_dims = jnp.array([5,2]), oup_dim = 6, D = 7) # D = 07 in reality\n\n\n\n# Set up hamiltonian for open boundary conditions\nhamiltonian = jVMC.operator.BranchFreeOperator()\nfor l in range(L - 1):\n hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))\n hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))\nhamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))\n\n\n#Printing the shape of parameters\nnet = MyNet(num_nodes = 3, inp_dims = (2,3,2), oup_dims = (4,3,3), D=5) \nparams = net.init(jax.random.PRNGKey(1),jnp.zeros((L,), dtype=global_defs.tCpx)) # the \"dtype\" here is not so important\nprint(\"Shape of the model\", jax.tree_map(np.shape, params))\n\n\ndef simulate(rng, iterations, D):\n net = MyNet(num_nodes = 3, inp_dims = (2,3,2), oup_dims = (4,3,3), D=D) \n psi = jVMC.vqs.NQS(net, seed=rng) # Variational wave function\n #Checking the dhape of the mpo and the values of the initialized parameters\n #params = net.init(jax.random.PRNGKey(1),jnp.zeros((L,), dtype=global_defs.tCpx)) # the \"dtype\" here is not so important\n #print(\"Shape of the model\", jax.tree_map(np.shape, params))\n #print(\"parameters:\", params)\n\n\n # Set up sampler\n sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,\n numChains=100, sweepSteps=L,\n numSamples=30000, thermalizationSweeps=25)\n\n # Set up TDVP\n tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,\n svdTol=1e-8, diagonalShift=50, makeReal='real')\n\n stepper = jVMC.util.stepper.Euler(timeStep=1e-2) # ODE integrator\n\n\n\n res = []\n for n in range(iterations):\n dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)\n psi.set_parameters(dp)\n\n print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)\n\n res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])\n\n return np.array(res)\n\n\n#CREATING DATA\n\niterations = 1500\nrng_list = [0,1,2,3,4,5,6,7,8,9,10]\n#iterations = 1\n#rng_list = [0]\n\nE_0_aarray = np.zeros((iterations, len(rng_list)))#an empty two dimensional array corresponding to the D and \"rng\".\n\n\n\nfor j,rng in enumerate(rng_list):\n print(\"rng:\", rng)\n res = simulate(rng, iterations, D=5)\n E_0 = res + 1.0660513358196495#this energy is for 12 spins\n #adding the energy values obtained to the first entry of the row\n #print(\"length\", len(E_0))\n E_0_aarray[:, j] = E_0[:, 0]\n #print(\"final_energy:\", E_0[-1])\n\n\nnp.savetxt('cpxmpo_12_avg_d5_232_gs', E_0_aarray, header='Data for rlmpo with D = 5 for 10 different initializations')\n\n"
] | [
[
"numpy.savetxt",
"numpy.array"
],
[
"numpy.issubdtype"
],
[
"numpy.savetxt",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
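The svd() helper in the first file of the record above compresses the complex RBM weight matrix by reshaping it into a four-legged tensor and keeping only the top singular values. A plain-NumPy sketch of that rank truncation (shapes and rank are illustrative, and tensornetwork is deliberately not used here) is:

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(8, 8)) + 1j * rng.normal(size=(8, 8))   # stand-in weight matrix

T = W.reshape(4, 4, 2, 2)                  # four-legged tensor, as in svd()
M = T.reshape(4 * 4, 2 * 2)                # group legs (0, 1) against legs (2, 3)
U, s, Vh = np.linalg.svd(M, full_matrices=False)
r = 3                                      # keep only the top-3 singular values
M_r = (U[:, :r] * s[:r]) @ Vh[:r, :]       # rank-r approximation
W_r = M_r.reshape(4, 4, 2, 2).reshape(8, 8)
print(np.linalg.norm(W - W_r))             # Frobenius-norm truncation error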
haidi-ustc/scikit-nano | [
"ef9b24165ba37918b3f520657f7311ba139b3e7d",
"ef9b24165ba37918b3f520657f7311ba139b3e7d",
"ef9b24165ba37918b3f520657f7311ba139b3e7d",
"ef9b24165ba37918b3f520657f7311ba139b3e7d"
] | [
"sknano/structures/_nanotube_bundle.py",
"sknano/core/atoms/_image_atoms.py",
"sknano/core/molecules/_molecules.py",
"sknano/core/refdata/setup.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n==============================================================================\nNanotube bundle base class (:mod:`sknano.structures._nanotube_bundle`)\n==============================================================================\n\n.. currentmodule:: sknano.structures._nanotube_bundle\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n__docformat__ = 'restructuredtext en'\n\nimport numbers\n\nimport numpy as np\n\nfrom sknano.core.atoms import Atom, vdw_radius_from_basis\nfrom sknano.core.refdata import aCC, grams_per_Da\nfrom sknano.core.math import Vector\nfrom ._extras import get_chiral_indices\n\n__all__ = ['compute_bundle_density', 'NanotubeBundleMixin',\n 'NanotubeBundleBase']\n\n\ndef compute_bundle_density(*Ch, r_vdw=None, bond=None,\n element1=None, element2=None):\n \"\"\"Compute nanotube bundle mass density \\\n :math:`\\\\rho_{\\\\mathrm{bundle}}(n, m)` in :math:`\\\\mathrm{g/cm^3}`.\n\n .. math::\n\n \\\\rho_{\\\\mathrm{bundle}}(n, m) = \\\\frac{8\\\\pi^2 m_{\\\\mathrm{C}}\n \\\\sqrt{n^2 + m^2 + nm}}{9\\\\sqrt{3}a_{\\\\mathrm{CC}}^3 \\\\times\n \\\\left(\\\\sqrt{n^2 + m^2 + nm} +\n \\\\frac{\\\\pi d_{\\\\mathrm{vdW}}}{\\\\sqrt{3}a_{\\\\mathrm{CC}}}\\\\right)^2}\n\n Parameters\n ----------\n *Ch : {:class:`python:tuple` or :class:`python:int`\\ s}\n Either a 2-tuple of ints or 2 integers giving the chiral indices\n of the nanotube chiral vector\n :math:`\\\\mathbf{C}_h = n\\\\mathbf{a}_1 + m\\\\mathbf{a}_2 = (n, m)`.\n r_vdw : int\n van der Waals radius of nanotube atoms\n bond : float, optional\n Bond length.\n\n Returns\n -------\n float\n :math:`\\\\rho_{\\\\mathrm{bundle}}` in units of\n :math:`\\\\mathrm{\\\\frac{g}{cm^3}}`\n\n \"\"\"\n n, m, _ = get_chiral_indices(*Ch)\n\n if bond is None:\n bond = aCC\n\n if element1 is None:\n element1 = 'C'\n if element2 is None:\n element2 = 'C'\n\n if r_vdw is None:\n r_vdw = vdw_radius_from_basis(element1, element2)\n\n if element1 == element2:\n bundle_density = 8 * np.pi ** 2 * Atom(element1).mass * \\\n np.sqrt(n ** 2 + m ** 2 + n * m) / \\\n (9 * np.sqrt(3) * bond ** 3 *\n (np.sqrt(n ** 2 + m ** 2 + n * m) +\n 2 * np.pi * r_vdw / (np.sqrt(3) * bond)) ** 2)\n else:\n bundle_density = 0\n\n # there are 1.6605e-24 grams / Da and 1e-8 cm / angstrom\n bundle_density *= grams_per_Da / (1e-8) ** 3\n return bundle_density\n\n\nclass NanotubeBundleMixin:\n \"\"\"Mixin class for nanotube bundles.\"\"\"\n\n @property\n def nx(self):\n \"\"\"Number of nanotubes along the :math:`x`-axis.\"\"\"\n return self._nx\n\n @nx.setter\n def nx(self, value):\n \"\"\"Set :math:`n_x`\"\"\"\n if not (isinstance(value, numbers.Number) or value > 0):\n raise TypeError('Expected a positive integer.')\n self._nx = int(value)\n\n @nx.deleter\n def nx(self):\n del self._nx\n\n @property\n def ny(self):\n \"\"\"Number of nanotubes along the :math:`y`-axis.\"\"\"\n return self._ny\n\n @ny.setter\n def ny(self, value):\n \"\"\"Set :math:`n_y`\"\"\"\n if not (isinstance(value, numbers.Number) or value > 0):\n raise TypeError('Expected a positive integer.')\n self._ny = int(value)\n\n @ny.deleter\n def ny(self):\n del self._ny\n\n @property\n def Lx(self):\n return self.nx * (self.dt + 2 * self.vdw_radius) / 10\n\n @property\n def Ly(self):\n return self.ny * (self.dt + 2 * self.vdw_radius) / 10\n\n @property\n def bundle_geometry(self):\n return self._bundle_geometry\n\n @bundle_geometry.setter\n def bundle_geometry(self, value):\n if value is not None and value 
not in self._bundle_geometries:\n print('Unrecognized `bundle_geometry`: {!r}'.format(value))\n value = None\n self._bundle_geometry = value\n\n @property\n def bundle_packing(self):\n return self._bundle_packing\n\n @bundle_packing.setter\n def bundle_packing(self, value):\n if value is None and \\\n self.bundle_geometry in ('square', 'rectangle'):\n value = 'ccp'\n elif value is None and \\\n self.bundle_geometry in ('triangle', 'hexagon'):\n value = 'hcp'\n\n if value is not None and value not in ('ccp', 'hcp'):\n raise ValueError('Expected value to be `hcp` or `ccp`')\n\n self._bundle_packing = value\n # self.generate_bundle_coords()\n\n @bundle_packing.deleter\n def bundle_packing(self):\n del self._bundle_packing\n\n @property\n def bundle_mass(self):\n return self.Ntubes * self.tube_mass\n\n @property\n def Natoms(self):\n \"\"\"Number of atoms in nanotube bundle.\n\n **Returns total number of atoms in nanotube bundle.**\n Use :attr:`~NanotubeBundleMixin.Natoms_per_tube` to\n get a list of the number of atoms in each nanotube in\n the bundle.\n\n \"\"\"\n return np.asarray(self.Natoms_list).sum()\n\n @property\n def Natoms_per_bundle(self):\n return self.Natoms\n\n @property\n def Natoms_list(self):\n return [nanotube.Natoms for nanotube in self.bundle_list]\n\n @property\n def Ntubes(self):\n return len(self.bundle_coords)\n\n @property\n def Natoms_per_tube(self):\n \"\"\"Alias for :attr:`~NanotubeBundleMixin.Natoms_list`.\"\"\"\n return self.Natoms_list\n\n def generate_bundle_coords(self):\n \"\"\"Generate coordinates of bundle tubes.\"\"\"\n self.r1 = Vector()\n self.r2 = Vector()\n self.bundle_coords = []\n\n self.r1.x = self.dt + 2 * self.vdw_radius\n if self.bundle_packing in ('cubic', 'ccp'):\n self.r2.y = self.r1.x\n else:\n self.r2.x = self.r1.x * np.cos(2 * np.pi / 3)\n self.r2.y = self.r1.x * np.sin(2 * np.pi / 3)\n if self.bundle_packing is None:\n self._bundle_packing = 'hcp'\n\n if self.bundle_geometry == 'hexagon':\n nrows = max(self.nx, self.ny, 3)\n if nrows % 2 != 1:\n nrows += 1\n\n ntubes_per_end_rows = int((nrows + 1) / 2)\n\n row = 0\n ntubes_per_row = nrows\n while ntubes_per_row >= ntubes_per_end_rows:\n if row == 0:\n for n in range(ntubes_per_row):\n dr = n * self.r1\n self.bundle_coords.append(dr)\n else:\n for nx in range(ntubes_per_row):\n for ny in (-row, row):\n dr = Vector()\n dr.x = abs(ny * self.r2.x)\n dr.y = ny * self.r2.y\n dr = nx * self.r1 + dr\n self.bundle_coords.append(dr)\n row += 1\n ntubes_per_row = nrows - row\n\n elif self.bundle_geometry == 'rectangle':\n Lx = 10 * self.Lx\n for nx in range(self.nx):\n for ny in range(self.ny):\n dr = nx * self.r1 + ny * self.r2\n while dr.x < 0:\n dr.x += Lx\n self.bundle_coords.append(dr)\n\n elif self.bundle_geometry == 'square':\n pass\n elif self.bundle_geometry == 'triangle':\n pass\n else:\n for nx in range(self.nx):\n for ny in range(self.ny):\n dr = nx * self.r1 + ny * self.r2\n self.bundle_coords.append(dr)\n\n\nclass NanotubeBundleBase(NanotubeBundleMixin):\n \"\"\"Nanotube bundle structure base class.\"\"\"\n\n _bundle_geometries = ['square', 'rectangle', 'hexagon']\n\n def __init__(self, *args, nx=1, ny=1, bundle_packing=None,\n bundle_geometry=None, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self.nx = nx\n self.ny = ny\n self.bundle_geometry = bundle_geometry\n self.bundle_packing = bundle_packing\n self.bundle_list = []\n self.generate_bundle_coords()\n\n def todict(self):\n attrdict = super().todict()\n attrdict.update(dict(nx=self.nx, ny=self.ny,\n 
bundle_packing=self.bundle_packing,\n bundle_geometry=self.bundle_geometry))\n return attrdict\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n===============================================================================\nAtom classes with image id attributes (:mod:`sknano.core.atoms._image_atoms`)\n===============================================================================\n\n.. currentmodule:: sknano.core.atoms._image_atoms\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n\n__docformat__ = 'restructuredtext en'\n\nfrom functools import total_ordering\nfrom operator import attrgetter\n\nimport numbers\nimport numpy as np\n\nfrom sknano.core.math import Point\nfrom ._atoms import Atom, Atoms\n\n__all__ = ['ImageAtom', 'ImageAtoms']\n\n\n@total_ordering\nclass ImageAtom(Atom):\n \"\"\"An `Atom` sub-class with image count attributes.\n\n Parameters\n ----------\n ix, iy, iz : int, optional\n :math:`x, y, z` `ImageAtom` image count\n\n \"\"\"\n def __init__(self, *args, ix=None, iy=None, iz=None, **kwargs):\n\n super().__init__(*args, **kwargs)\n self._i = Point([ix, iy, iz], dtype=int)\n self.fmtstr = super().fmtstr + \", ix={ix:d}, iy={iy:d}, iz={iz:d}\"\n\n def __eq__(self, other):\n return self.i == other.i and super().__eq__(other)\n\n def __lt__(self, other):\n return (self.i < other.i and super().__le__(other)) or \\\n (self.i <= other.i and super().__lt__(other))\n\n def __dir__(self):\n attrs = super().__dir__()\n attrs.extend(['ix', 'iy', 'iz'])\n return attrs\n\n @property\n def ix(self):\n \"\"\":math:`i_x` image flag.\"\"\"\n return self.i.x\n\n @ix.setter\n def ix(self, value):\n if not isinstance(value, numbers.Number):\n raise TypeError('Expected a number')\n self.i.x = int(value)\n\n @property\n def iy(self):\n \"\"\":math:`i_y` image flag.\"\"\"\n return self.i.y\n\n @iy.setter\n def iy(self, value):\n if not isinstance(value, numbers.Number):\n raise TypeError('Expected a number')\n self.i.y = int(value)\n\n @property\n def iz(self):\n \"\"\":math:`i_z` image flag.\"\"\"\n return self.i.z\n\n @iz.setter\n def iz(self, value):\n if not isinstance(value, numbers.Number):\n raise TypeError('Expected a number')\n self.i.z = int(value)\n\n @property\n def i(self):\n \"\"\":math:`i_x, i_y, i_z` image flags\n\n Returns\n -------\n `Point`\n\n \"\"\"\n return self._i\n\n @i.setter\n def i(self, value):\n \"\"\"Set :math:`i_x, i_y, i_z` image flags.\n\n Parameters\n ----------\n value : array_like\n\n \"\"\"\n if not isinstance(value, (list, np.ndarray)):\n raise TypeError('Expected an array_like object')\n self._i[:] = Point(value, nd=3, dtype=int)\n\n def todict(self):\n super_dict = super().todict()\n super_dict.update(dict(ix=self.ix, iy=self.iy, iz=self.iz))\n return super_dict\n\n\nclass ImageAtoms(Atoms):\n \"\"\"An `Atoms` sub-class for `ImageAtom`\\ s.\n\n Sub-class of `Atoms` class, and a container class for lists of\n :class:`~sknano.core.atoms.ImageAtom` instances.\n\n Parameters\n ----------\n atoms : {None, sequence, `ImageAtoms`}, optional\n if not `None`, then a list of `ImageAtom` instance objects or an\n existing `ImageAtoms` instance object.\n\n \"\"\"\n @property\n def __atom_class__(self):\n return ImageAtom\n\n def sort(self, key=attrgetter('i'), reverse=False):\n super().sort(key=key, reverse=reverse)\n\n @property\n def images(self):\n \"\"\":class:`~numpy:numpy.ndarray` of `ImageAtom` images.\"\"\"\n return np.asarray([atom.i for atom in self])\n\n @property\n def i(self):\n \"\"\"Alias for :attr:`~ImageAtoms.images`.\"\"\"\n return self.images\n\n @property\n def ix(self):\n 
\"\"\":class:`~numpy:numpy.ndarray` of `Atom`\\ s :math:`i_x` values.\"\"\"\n return self.i[:, 0]\n\n @property\n def iy(self):\n \"\"\":class:`~numpy:numpy.ndarray` of `Atom`\\ s :math:`i_y` values.\"\"\"\n return self.i[:, 1]\n\n @property\n def iz(self):\n \"\"\":class:`~numpy:numpy.ndarray` of `Atom`\\ s :math:`i_z` values.\"\"\"\n return self.i[:, 2]\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n==============================================================================\nBase class for structure molecules (:mod:`sknano.core.molecules._molecules`)\n==============================================================================\n\n.. currentmodule:: sknano.core.molecules._molecules\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n__docformat__ = 'restructuredtext en'\n\nfrom collections import OrderedDict\nfrom operator import attrgetter\n\nimport numpy as np\n\nfrom sknano.core import UserList, xyz\nfrom sknano.core.math import Vector, transformation_matrix\nfrom sknano.core.geometric_regions import Cuboid # , Rectangle\n\n__all__ = ['Molecules']\n\n\nclass Molecules(UserList):\n \"\"\"Base class for collection of `Molecule` objects.\n\n Parameters\n ----------\n molecules : {None, sequence, `Molecules`}, optional\n if not `None`, then a list of `Molecule` instance objects or an\n existing `Molecules` instance object.\n\n \"\"\"\n _moleculeattrs = []\n\n def __init__(self, molecules=None):\n super().__init__(initlist=molecules)\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n \"\"\"Return canonical string representation of `Molecules`.\"\"\"\n return \"Molecules(molecules={!r})\".format(self.data)\n\n def sort(self, key=attrgetter('id'), reverse=False):\n super().sort(key=key, reverse=reverse)\n\n @property\n def Nmolecules(self):\n \"\"\"Number of molecules in `Molecules`.\"\"\"\n return len(self)\n\n @property\n def CM(self):\n \"\"\"Center-of-Mass coordinates of `Molecules`.\n\n Returns\n -------\n ndarray\n 3-element ndarray specifying center-of-mass coordinates of\n `Molecules`.\n\n \"\"\"\n masses = np.asarray([self.masses])\n coords = self.coords\n MxR = masses.T * coords\n return Vector(np.sum(MxR, axis=0) / np.sum(masses))\n\n @property\n def M(self):\n \"\"\"Total mass of `Molecules`.\"\"\"\n #return math.fsum(self.masses)\n return self.masses.sum()\n\n @property\n def coords(self):\n \"\"\"Return list of `Molecule` coordinates.\"\"\"\n return np.asarray([molecule.r for molecule in self])\n\n @property\n def masses(self):\n \"\"\"Return list of `Molecule` masses.\"\"\"\n return np.asarray([molecule.m for molecule in self])\n\n @property\n def symbols(self):\n \"\"\"Return list of `Molecule` symbols.\"\"\"\n return np.asarray([molecule.symbol for molecule in self])\n\n @property\n def x(self):\n \"\"\"Return :math:`x` coordinates of `Molecule` objects as array.\"\"\"\n return self.coords[:,0]\n\n @property\n def y(self):\n \"\"\"Return :math:`y` coordinates of `Molecule` objects as array.\"\"\"\n return self.coords[:,1]\n\n @property\n def z(self):\n \"\"\"Return :math:`z` coordinates of `Molecule` objects as array.\"\"\"\n return self.coords[:,2]\n\n @property\n def bounds(self):\n \"\"\"Return bounds of `Molecules`.\"\"\"\n return Cuboid(pmin=[self.x.min(), self.y.min(), self.z.min()],\n pmax=[self.x.max(), self.y.max(), self.z.max()])\n\n def center_CM(self, axes=None):\n \"\"\"Center molecules on CM coordinates.\"\"\"\n dr = -self.CM\n self.translate(dr)\n\n def clip_bounds(self, region, center_before_clipping=False):\n \"\"\"Remove molecules outside the given limits along given dimension.\n\n Parameters\n ----------\n region : :class:`~sknano.core.geometric_regions.`GeometricRegion`\n\n \"\"\"\n CM0 = None\n if center_before_clipping:\n CM0 = self.CM\n self.translate(-CM0)\n\n self.data = \\\n np.asarray(self)[np.logical_and(\n np.logical_and(\n 
self.x <= region.limits['x']['max'],\n np.logical_and(\n self.y <= region.limits['y']['max'],\n self.z <= region.limits['z']['max'])),\n np.logical_and(\n self.x >= region.limits['x']['min'],\n np.logical_and(\n self.y >= region.limits['y']['min'],\n self.z >= region.limits['z']['min'])))].tolist()\n\n if CM0 is not None:\n self.translate(CM0)\n\n def filter(self, condition, invert=False):\n \"\"\"Filter `Molecules` by `condition`.\n\n Parameters\n ----------\n condition : array_like, bool\n invert : bool, optional\n\n Returns\n -------\n filtered_molecules : `Molecules`\n\n \"\"\"\n return self.__class__(molecules=np.asarray(self)[condition].tolist())\n\n def get_molecules(self, asarray=False):\n \"\"\"Return list of `Molecules`.\n\n Parameters\n ----------\n asarray : bool, optional\n\n Returns\n -------\n sequence or ndarray\n\n \"\"\"\n if asarray:\n return np.asarray(self)\n else:\n return self\n\n def get_coords(self, asdict=False):\n \"\"\"Return molecule coords.\n\n Parameters\n ----------\n asdict : bool, optional\n\n Returns\n -------\n coords : :py:class:`python:~collections.OrderedDict` or ndarray\n\n \"\"\"\n coords = self.coords\n if asdict:\n return OrderedDict(list(zip(xyz, coords.T)))\n else:\n return coords\n\n def rezero_coords(self, epsilon=1.0e-10):\n \"\"\"Alias for :meth:`Molecules.rezero`.\"\"\"\n self.rezero(epsilon=epsilon)\n\n def rezero_xyz(self, epsilon=1.0e-10):\n self.rezero(epsilon=epsilon)\n\n def rezero(self, epsilon=1.0e-10):\n \"\"\"Set really really small coordinates to zero.\n\n Set all coordinates with absolute value less than\n epsilon to zero.\n\n Parameters\n ----------\n epsilon : float\n smallest allowed absolute value of any :math:`x,y,z` component.\n\n \"\"\"\n [molecule.rezero(epsilon=epsilon) for molecule in self]\n\n def rotate(self, angle=None, axis=None, anchor_point=None,\n rot_point=None, from_vector=None, to_vector=None,\n degrees=False, transform_matrix=None, verbose=False, **kwargs):\n \"\"\"Rotate `Molecule` position vectors.\n\n Parameters\n ----------\n angle : float\n axis : :class:`~sknano.core.math.Vector`, optional\n anchor_point : :class:`~sknano.core.math.Point`, optional\n rot_point : :class:`~sknano.core.math.Point`, optional\n from_vector, to_vector : :class:`~sknano.core.math.Vector`, optional\n degrees : bool, optional\n transform_matrix : :class:`~numpy:numpy.ndarray`\n\n \"\"\"\n if transform_matrix is None:\n transform_matrix = \\\n transformation_matrix(angle=angle, axis=axis,\n anchor_point=anchor_point,\n rot_point=rot_point,\n from_vector=from_vector,\n to_vector=to_vector, degrees=degrees,\n verbose=verbose, **kwargs)\n [molecule.rotate(transform_matrix=transform_matrix)\n for molecule in self]\n\n def translate(self, t, fix_anchor_points=True):\n \"\"\"Translate `Molecule` position vectors by :class:`Vector` `t`.\n\n Parameters\n ----------\n t : :class:`Vector`\n fix_anchor_points : bool, optional\n\n \"\"\"\n [molecule.translate(t, fix_anchor_point=fix_anchor_points)\n for molecule in self]\n",
"#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\nfrom __future__ import unicode_literals\n\n\ndef configuration(parent_package='core', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('refdata', parent_package, top_path)\n config.add_data_files('*.json')\n config.add_data_files('*.yaml')\n config.add_data_dir('tests')\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n"
] | [
[
"numpy.asarray",
"numpy.cos",
"numpy.sqrt",
"numpy.sin"
],
[
"numpy.asarray"
],
[
"numpy.asarray",
"numpy.logical_and",
"numpy.sum"
],
[
"numpy.distutils.misc_util.Configuration"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nagapavan525/wtfml | [
"f2211addbe423a51b4dbbdec5a40d09649412452"
] | [
"wtfml/data_loaders/image/classification.py"
] | [
"\"\"\"\n__author__: Abhishek Thakur\n\"\"\"\n\nimport torch\n\nimport numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageFile\n\ntry:\n import torch_xla.core.xla_model as xm\n\n _xla_available = True\nexcept ImportError:\n _xla_available = False\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass ClassificationDataset:\n def __init__(self, image_paths, targets, resize, augmentations=None):\n \"\"\"\n :param image_paths: list of paths to images\n :param targets: numpy array\n :param resize: tuple or None\n :param augmentations: albumentations augmentations\n \"\"\"\n self.image_paths = image_paths\n self.targets = targets\n self.resize = resize\n self.augmentations = augmentations\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, item):\n image = Image.open(self.image_paths[item])\n targets = self.targets[item]\n if self.resize is not None:\n image = image.resize(\n (self.resize[1], self.resize[0]), resample=Image.BILINEAR\n )\n image = np.array(image)\n if self.augmentations is not None:\n augmented = self.augmentations(image=image)\n image = augmented[\"image\"]\n image = np.transpose(image, (2, 0, 1)).astype(np.float32)\n return {\n \"image\": torch.tensor(image),\n \"targets\": torch.tensor(targets),\n }\n\n\nclass ClassificationDataLoader:\n def __init__(self, image_paths, targets, resize, augmentations=None):\n \"\"\"\n :param image_paths: list of paths to images\n :param targets: numpy array\n :param resize: tuple or None\n :param augmentations: albumentations augmentations\n \"\"\"\n self.image_paths = image_paths\n self.targets = targets\n self.resize = resize\n self.augmentations = augmentations\n self.dataset = ClassificationDataset(\n image_paths=self.image_paths,\n targets=self.targets,\n resize=self.resize,\n augmentations=self.augmentations,\n )\n\n def fetch(self, batch_size, num_workers, drop_last=False, shuffle=True, tpu=False):\n \"\"\"\n :param batch_size: batch size\n :param num_workers: number of processes to use\n :param drop_last: drop the last batch?\n :param shuffle: True/False\n :param tpu: True/False, to use tpu or not\n \"\"\"\n sampler = None\n if tpu:\n sampler = torch.utils.data.distributed.DistributedSampler(\n self.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=shuffle,\n )\n\n data_loader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=batch_size,\n sampler=sampler,\n drop_last=drop_last,\n num_workers=num_workers,\n )\n return data_loader\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.transpose",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jcrist/pyblis | [
"d9c67d40a15c656a4681ba1b9ca0c52eff40163c"
] | [
"pyblis/tests/utils.py"
] | [
"import pytest\n\nimport numpy as np\n\n\nall_dtypes = pytest.mark.parametrize('dtype', ['f4', 'f8', 'c8', 'c16'])\n\n\nclass Base(object):\n def rand(self, dtype, shape=()):\n a = np.random.normal(size=shape).astype(dtype)\n if np.issubdtype(dtype, np.complexfloating):\n a += np.random.normal(size=a.shape) * 1j\n return a if a.shape else a.reshape((1,))[0]\n\n def call_base(self, *args, **kwargs):\n return self.call(*args, **kwargs)\n\n\nclass NumbaMixin(object):\n @property\n def error_cls(self):\n import numba\n return numba.errors.TypingError\n\n @classmethod\n def setup_class(cls):\n base, full = cls.compile()\n cls.base = staticmethod(base)\n cls.full = staticmethod(full)\n\n def call(self, *args, **kwargs):\n return self.full(*args, **kwargs)\n\n def call_base(self, *args, **kwargs):\n return self.base(*args, **kwargs)\n"
] | [
[
"numpy.issubdtype",
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FrancisDinh/Smart-Energy-Project | [
"16b021e127d9ac5c01653abc31d8cc5d0a7a05c6"
] | [
"application/DemandSideNew/Building/DemandProfile.py"
] | [
"import os, sys\nimport json\nimport os.path\nimport numpy\n\nclass DemandProfile:\n def __init__(self):\n cwd = os.getcwd()\n self.fname = cwd + '/demand-profile.json'\n \n def get_data(self):\n demand={}\n with open(self.fname) as demand_info:\n demand = json.load(demand_info)\n return demand\n\n def calculate_total_demand(self):\n data = self.get_data()\n total_energy_data=[]\n num=0\n total_demand = numpy.zeros(24)\n for i in data:\n value = i[str(1+num)][\"Circulation Pump\"]+i[str(1+num)][\"Dish Washer\"]+i[str(1+num)][\"Freezer\"]+i[str(1+num)][\"Washing Machine\"]\n total_demand[num] = value\n num+=1\n return total_demand\n\n#sample object\n#sample = DemandProfile()\n#print(sample.calculate_total_demand())"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Koukyosyumei/Senjyu | [
"70faa45e13cb3b1ccdee8a40146a03d60abe11e5"
] | [
"src/senjyu/ml/clustering/kmeans.py"
] | [
"import numpy as np\nfrom mpi4py import MPI\n\n\nclass Kmeans:\n def __init__(self, k=3, num_iterations=100, seed=42):\n self.k = k\n self.num_iterations = num_iterations\n self.centorids = None\n self.dim = None\n self.n = None\n\n np.random.seed(seed)\n\n def train(self, X, parallel=False):\n if parallel:\n pass\n else:\n return self._train_standalone(X)\n\n def _init_distiution(self, args=None):\n self.args = args\n self.comm = MPI.COMM_WORLD\n self.rank = self.comm.Get_rank()\n self.size = self.comm.Get_size()\n\n def _em_standalone(self, X):\n # E-step\n distance = np.zeros((self.k, self.n))\n for cluster_id in range(self.k):\n distance[cluster_id, :] = np.linalg.norm(\n X - self.centorids[cluster_id, :], axis=1\n )\n pred = np.argmin(distance, axis=0)\n\n # M-step\n for cluster_id in range(self.k):\n self.centorids[cluster_id, :] = np.mean(X[pred == cluster_id, :], axis=0)\n\n return pred\n\n def _train_standalone(self, X):\n self.n = X.shape[0]\n self.dim = X.shape[1]\n self.centorids = np.random.normal(0, 1, (self.k, self.dim))\n\n for _ in range(self.num_iterations):\n pred = self._em_standalone(X)\n\n return pred\n"
] | [
[
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.argmin",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Aaron-YunZhao/xalpha | [
"76dc6390cb5714b1c004f7e79e4af832ad1e6fa5"
] | [
"xalpha/realtime.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodule for realtime watch and notfication\n\"\"\"\n\nimport datetime as dt\nimport smtplib\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr, parseaddr\nfrom re import match\n\nimport pandas as pd\n\nfrom xalpha.cons import today\nfrom xalpha.info import _download, fundinfo\nfrom xalpha.trade import trade\n\n\ndef _format_addr(s):\n \"\"\"\n parse the email sender and receiver, Chinese encode and support\n\n :param s: eg. 'name <[email protected]>, name2 <[email protected]>'\n \"\"\"\n name, addr = parseaddr(s)\n return formataddr((Header(name, \"utf-8\").encode(), addr))\n\n\ndef mail(\n title,\n content,\n sender=None,\n receiver=None,\n password=None,\n server=None,\n port=None,\n sender_name=\"sender\",\n receiver_name=None,\n):\n \"\"\"\n send email\n\n :param title: str, title of the email\n :param content: str, content of the email, plain text only\n :param conf: all other paramters can be import as a dictionay, eg.conf = {'sender': '[email protected]',\n 'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',\n 'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.\n The receiver_name and sender_name options can be omitted.\n \"\"\"\n ret = True\n try:\n if receiver_name is None:\n receiver_name = [\"receiver\" for _ in receiver]\n msg = MIMEText(content, \"plain\", \"utf-8\")\n msg[\"From\"] = _format_addr(\"%s <%s>\" % (sender_name, sender))\n # 括号里的对应发件人邮箱昵称、发件人邮箱账号\n receivestr = \"\"\n for i, s in enumerate(receiver):\n receivestr += receiver_name[i]\n receivestr += \" <\"\n receivestr += s\n receivestr += \">, \"\n msg[\"To\"] = _format_addr(receivestr) # 括号里的对应收件人邮箱昵称、收件人邮箱账号\n msg[\"Subject\"] = title # 邮件的主题,即标题\n\n server = smtplib.SMTP_SSL(server, port) # 发件人邮箱中的SMTP服务器和端口号\n server.login(sender, password) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(\n sender, receiver, msg.as_string()\n ) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit()\n except Exception:\n ret = False\n return ret\n\n\nclass rtdata:\n \"\"\"\n get real time data of specific funds\n\n :param code: string of six digitals for funds\n \"\"\"\n\n def __init__(self, code):\n url = \"http://fundgz.1234567.com.cn/js/\" + code + \".js\"\n page = _download(url)\n self.code = code\n self.rtvalue = float(match(r'.*\"gsz\":\"(\\d*\\.\\d*)\",.*', page.text)[1])\n self.name = match(r'.*\"name\":\"([^,]*)\",.*', page.text)[1]\n self.time = dt.datetime.strptime(\n match(r'.*\"gztime\":\"([\\d\\s\\-\\:]*)\".*', page.text)[1], \"%Y-%m-%d %H:%M\"\n )\n\n\ndef rfundinfo(\n code, round_label=0, dividend_label=0, fetch=False, save=False, path=\"\", form=\"csv\"\n):\n \"\"\"\n give a fundinfo object with todays estimate netvalue at running time\n\n :param code: string of six digitals for funds\n :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init\n :param save: boolean, when open the save option, info classes automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n :returns: the fundinfo object\n \"\"\"\n fundobj = fundinfo(\n code,\n round_label=round_label,\n dividend_label=dividend_label,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n )\n rt = rtdata(code)\n rtdate = dt.datetime.combine(rt.time, dt.time.min)\n rtvalue = rt.rtvalue\n if (rtdate - fundobj.price.iloc[-1].date).days > 0:\n fundobj.price = 
fundobj.price.append(\n pd.DataFrame(\n [[rtdate, rtvalue, fundobj.price.iloc[-1].totvalue, 0]],\n columns=[\"date\", \"netvalue\", \"totvalue\", \"comment\"],\n ),\n ignore_index=True,\n )\n return fundobj\n\n\nclass review:\n \"\"\"\n review policys and give the realtime purchase suggestions\n\n :param policylist: list of policy object\n :param namelist: list of names of corresponding policy, default as 0 to n-1\n :param date: object of datetime, check date, today is prefered, date other than is not guaranteed\n \"\"\"\n\n def __init__(self, policylist, namelist=None, date=today()):\n self.warn = []\n self.message = []\n self.policylist = policylist\n if namelist is None:\n self.namelist = [i for i in range(len(policylist))]\n else:\n self.namelist = namelist\n assert len(self.policylist) == len(self.namelist)\n for i, policy in enumerate(policylist):\n row = policy.status[policy.status[\"date\"] == date]\n if len(row) == 1:\n warn = (\n policy.aim.name,\n policy.aim.code,\n row.iloc[0].loc[policy.aim.code],\n self.namelist[i],\n )\n self.warn.append(warn)\n if warn[2] > 0:\n sug = \"买入%s元\" % warn[2]\n elif warn[2] < 0:\n ratio = -warn[2] / 0.005 * 100\n share = (\n trade(fundinfo(warn[1]), policy.status)\n .briefdailyreport()\n .get(\"currentshare\", 0)\n )\n share = -warn[2] / 0.005 * share\n sug = \"卖出%s%%的份额,也即%s份额\" % (ratio, share)\n self.message.append(\n \"根据%s计划,建议%s,%s(%s)\" % (warn[3], sug, warn[0], warn[1])\n )\n self.content = \"\\n\".join(map(str, self.message))\n\n def __str__(self):\n return self.content\n\n def notification(self, conf):\n \"\"\"\n send email of self.content, at least support for qq email sender\n\n :param conf: the configuration dictionary for email send settings, no ** before the dict in needed.\n eg.conf = {'sender': '[email protected]',\n 'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',\n 'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.\n The receiver_name and sender_name options can be omitted.\n \"\"\"\n if self.content:\n ret = mail(\"Notification\", self.content, **conf)\n if ret:\n print(\"邮件发送成功\")\n else:\n print(\"邮件发送失败\")\n else:\n print(\"没有提醒待发送\")\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ngduyanhece/ConvLab | [
"a04582a77537c1a706fbf64715baa9ad0be1301a",
"a04582a77537c1a706fbf64715baa9ad0be1301a",
"a04582a77537c1a706fbf64715baa9ad0be1301a"
] | [
"convlab/modules/e2e/multiwoz/Mem2Seq/utils/utils_babi_mem2seq.py",
"convlab/modules/word_policy/multiwoz/larl/latent_dialog/models_task.py",
"convlab/modules/word_dst/multiwoz/mdbt/mdbt_util.py"
] | [
"# Modified by Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\n\nimport torch\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nfrom utils.config import *\nfrom utils.until_temp import entityList\n\n\ndef hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\nMEM_TOKEN_SIZE = 3\n\nclass Lang:\n def __init__(self):\n self.word2index = {}\n self.word2count = {}\n self.index2word = {UNK_token: 'UNK', PAD_token: \"PAD\", EOS_token: \"EOS\", SOS_token: \"SOS\"}\n self.n_words = 4 # Count default tokens\n \n def index_words(self, story, trg=False):\n if trg:\n for word in story.split(' '):\n self.index_word(word)\n else:\n for word_triple in story:\n for word in word_triple:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\nclass Dataset(data.Dataset):\n \"\"\"Custom data.Dataset compatible with data.DataLoader.\"\"\"\n def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):\n \"\"\"Reads source and target sequences from txt files.\"\"\"\n self.src_seqs = src_seq\n self.trg_seqs = trg_seq\n self.index_seqs = index_seq \n self.gate_seq = gate_seq \n self.num_total_seqs = len(self.src_seqs)\n self.src_word2id = src_word2id\n self.trg_word2id = trg_word2id\n self.max_len = max_len\n self.conv_seq = conv_seq\n self.ent = ent\n self.ID = ID\n self.kb_arr = kb_arr\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (source and target).\"\"\"\n src_seq = self.src_seqs[index]\n trg_seq = self.trg_seqs[index]\n index_s = self.index_seqs[index]\n gete_s = self.gate_seq[index]\n src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)\n trg_seq = self.preprocess(trg_seq, self.trg_word2id)\n index_s = self.preprocess_inde(index_s,src_seq)\n gete_s = self.preprocess_gate(gete_s)\n conv_seq = self.conv_seq[index]\n conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)\n ID = self.ID[index]\n kb_arr = self.kb_arr[index]\n \n return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr\n\n def __len__(self):\n return self.num_total_seqs\n \n def preprocess(self, sequence, word2id, trg=True):\n \"\"\"Converts words to ids.\"\"\"\n if trg:\n story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]\n else:\n story = []\n for i, word_triple in enumerate(sequence):\n story.append([])\n for ii, word in enumerate(word_triple):\n temp = word2id[word] if word in word2id else UNK_token\n story[i].append(temp)\n try:\n story = torch.Tensor(story)\n except:\n print(sequence)\n print(story)\n return story\n\n def preprocess_inde(self, sequence, src_seq):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [len(src_seq)-1]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_gate(self, sequence):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [0]\n sequence = torch.Tensor(sequence)\n return sequence\n\ndef collate_fn(data):\n def merge(sequences,max_len):\n lengths = [len(seq) for seq in sequences]\n if (max_len):\n padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i,:end,:] = seq[:end]\n else:\n 
padded_seqs = torch.ones(len(sequences), max(lengths)).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i, :end] = seq[:end]\n return padded_seqs, lengths\n\n # sort a list by sequence length (descending order) to use pack_padded_sequence\n data.sort(key=lambda x: len(x[0]), reverse=True)\n # seperate source and target sequences\n src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)\n # merge sequences (from tuple of 1D tensor to 2D tensor)\n src_seqs, src_lengths = merge(src_seqs,max_len)\n trg_seqs, trg_lengths = merge(trg_seqs,None)\n ind_seqs, _ = merge(ind_seqs,None)\n gete_s, _ = merge(gete_s,None)\n conv_seqs, conv_lengths = merge(conv_seq, max_len)\n \n src_seqs = Variable(src_seqs).transpose(0,1)\n trg_seqs = Variable(trg_seqs).transpose(0,1)\n ind_seqs = Variable(ind_seqs).transpose(0,1)\n gete_s = Variable(gete_s).transpose(0,1)\n conv_seqs = Variable(conv_seqs).transpose(0,1)\n\n if USE_CUDA:\n src_seqs = src_seqs.cuda()\n trg_seqs = trg_seqs.cuda()\n ind_seqs = ind_seqs.cuda()\n gete_s = gete_s.cuda()\n conv_seqs = conv_seqs.cuda()\n return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr\n\ndef read_langs(file_name, entity, max_line = None):\n logging.info((\"Reading lines from {}\".format(file_name)))\n data=[]\n contex_arr = []\n conversation_arr = []\n kb_arr = []\n u=None\n r=None\n user_counter = 0\n system_counter = 0\n system_res_counter = 0\n KB_counter = 0\n dialog_counter = 0\n with open(file_name) as fin:\n cnt_ptr = 0\n cnt_voc = 0\n max_r_len = 0\n cnt_lin = 1\n time_counter = 1 \n for line in fin:\n line=line.strip()\n if line:\n nid, line = line.split(' ', 1)\n if '\\t' in line:\n u, r = line.split('\\t')\n if u!='<SILENCE>': user_counter += 1\n system_counter += 1\n\n gen_u = generate_memory(u, \"$u\", str(time_counter)) \n contex_arr += gen_u\n conversation_arr += gen_u\n\n r_index = []\n gate = []\n for key in r.split(' '):\n if ENTPTR: \n if (key in entity):\n index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else:\n index = len(contex_arr) \n cnt_voc +=1 \n else: \n index = len(contex_arr) \n gate.append(0) \n cnt_voc +=1 \n else:\n index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else: \n index = len(contex_arr)\n gate.append(0) \n cnt_voc +=1 \n r_index.append(index)\n system_res_counter += 1 \n\n if len(r_index) > max_r_len: \n max_r_len = len(r_index)\n contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]\n \n ent = []\n for key in r.split(' '):\n if(key in entity):\n ent.append(key)\n\n data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])\n gen_r = generate_memory(r, \"$s\", str(time_counter)) \n contex_arr += gen_r\n conversation_arr += gen_r\n\n time_counter += 1\n else:\n KB_counter += 1\n r=line\n if USEKB:\n temp = generate_memory(r, \"\", \"\") \n contex_arr += temp\n kb_arr += temp\n else:\n cnt_lin+=1\n if(max_line and cnt_lin>=max_line):\n break\n contex_arr=[]\n conversation_arr = []\n kb_arr = []\n time_counter = 1\n dialog_counter += 1\n max_len = max([len(d[0]) for d in data])\n logging.info(\"Pointer percentace= {} \".format(cnt_ptr/(cnt_ptr+cnt_voc)))\n logging.info(\"Max responce Len: {}\".format(max_r_len))\n logging.info(\"Max Input Len: 
{}\".format(max_len))\n logging.info(\"Avg. User Utterances: {}\".format(user_counter*1.0/dialog_counter))\n logging.info(\"Avg. Bot Utterances: {}\".format(system_counter*1.0/dialog_counter))\n logging.info(\"Avg. KB results: {}\".format(KB_counter*1.0/dialog_counter))\n logging.info(\"Avg. responce Len: {}\".format(system_res_counter*1.0/system_counter))\n \n print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])\n return data, max_len, max_r_len\n\ndef generate_memory(sent, speaker, time):\n sent_new = []\n sent_token = sent.split(' ')\n if speaker==\"$u\" or speaker==\"$s\":\n for word in sent_token:\n temp = [word, speaker, 't'+str(time)] + [\"PAD\"]*(MEM_TOKEN_SIZE-3)\n sent_new.append(temp)\n else:\n if sent_token[1]==\"R_rating\":\n sent_token = sent_token + [\"PAD\"]*(MEM_TOKEN_SIZE-len(sent_token))\n else:\n sent_token = sent_token[::-1] + [\"PAD\"]*(MEM_TOKEN_SIZE-len(sent_token))\n sent_new.append(sent_token)\n return sent_new\n\ndef get_seq(pairs,lang,batch_size,type,max_len): \n x_seq = []\n y_seq = []\n ptr_seq = []\n gate_seq = []\n conv_seq = []\n ent = []\n ID = []\n kb_arr = []\n for pair in pairs:\n x_seq.append(pair[0])\n y_seq.append(pair[1])\n ptr_seq.append(pair[2])\n gate_seq.append(pair[3])\n conv_seq.append(pair[4])\n ent.append(pair[5])\n ID.append(pair[6])\n kb_arr.append(pair[7])\n if(type):\n lang.index_words(pair[0])\n lang.index_words(pair[1], trg=True)\n \n dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=type,\n collate_fn=collate_fn)\n return data_loader\n\ndef prepare_data_seq(task,batch_size=100,shuffle=True):\n file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)\n file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)\n file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)\n if (int(task) != 6):\n file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)\n\n if int(task)!=6:\n ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))\n else:\n ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))\n\n pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)\n pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)\n pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)\n\n max_r_test_OOV = 0\n max_len_test_OOV = 0\n if (int(task) != 6):\n pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)\n \n max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1\n max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1\n lang = Lang()\n \n train = get_seq(pair_train,lang,batch_size,True,max_len)\n dev = get_seq(pair_dev,lang,batch_size,False,max_len)\n test = get_seq(pair_test,lang,batch_size,False,max_len)\n if (int(task) != 6):\n testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)\n else:\n testOOV = []\n \n logging.info(\"Read %s sentence pairs train\" % len(pair_train))\n logging.info(\"Read %s sentence pairs dev\" % len(pair_dev))\n logging.info(\"Read %s sentence pairs test\" % len(pair_test))\n if (int(task) != 6):\n logging.info(\"Read %s sentence pairs test\" % len(pair_test_OOV)) \n logging.info(\"Max len Input %s \" % max_len)\n logging.info(\"Vocab_size %s \" % lang.n_words)\n 
logging.info(\"USE_CUDA={}\".format(USE_CUDA))\n\n return train, dev, test, testOOV, lang, max_len, max_r",
"import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.base_models import BaseModel\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.corpora import SYS, EOS, PAD, BOS\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.utils import INT, FLOAT, LONG, Pack, cast_type\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.enc2dec.encoders import RnnUttEncoder\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.enc2dec.decoders import DecoderRNN, GEN, TEACH_FORCE\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog.criterions import NLLEntropy, CatKLLoss, Entropy, NormKLLoss\nfrom convlab.modules.word_policy.multiwoz.larl.latent_dialog import nn_lib\nimport numpy as np\n\n\nclass SysPerfectBD2Word(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Word, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.policy = nn.Sequential(nn.Linear(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.dec_cell_size), nn.Tanh(), nn.Dropout(config.dropout))\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=self.utt_encoder.output_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # pack attention context\n if self.config.dec_use_attn:\n attn_context = enc_outs\n else:\n attn_context = None\n\n # create decoder initial states\n dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n # h_dec_init_state = utt_summary.squeeze(1).unsqueeze(0)\n dec_init_state = 
tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n dec_inputs=dec_inputs,\n # (batch_size, response_size-1)\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n # (batch_size, max_ctx_len, ctx_cell_size)\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n return ret_dict, labels\n if return_latent:\n return Pack(nll=self.nll(dec_outputs, labels),\n latent_action=dec_init_state)\n else:\n return Pack(nll=self.nll(dec_outputs, labels))\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # pack attention context\n if self.config.dec_use_attn:\n attn_context = enc_outs\n else:\n attn_context = None\n\n # create decoder initial states\n dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)\n\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=temp)\n return logprobs, outs\n\n\nclass SysPerfectBD2Cat(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Cat, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n self.k_size = config.k_size\n self.y_size = config.y_size\n self.simple_posterior = config.simple_posterior\n self.contextual_posterior = config.contextual_posterior\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.c2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.y_size, config.k_size, is_lstm=False)\n self.z_embedding = nn.Linear(self.y_size * self.k_size, config.dec_cell_size, bias=False)\n self.gumbel_connector = nn_lib.GumbelConnector(config.use_gpu)\n if not self.simple_posterior:\n if self.contextual_posterior:\n self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,\n config.y_size, config.k_size, is_lstm=False)\n else:\n self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size, config.y_size, config.k_size, is_lstm=False)\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n 
hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=config.dec_cell_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n self.cat_kl_loss = CatKLLoss()\n self.entropy_loss = Entropy()\n self.log_uniform_y = Variable(th.log(th.ones(1) / config.k_size))\n self.eye = Variable(th.eye(self.config.y_size).unsqueeze(0))\n self.beta = self.config.beta if hasattr(self.config, 'beta') else 0.0\n if self.use_gpu:\n self.log_uniform_y = self.log_uniform_y.cuda()\n self.eye = self.eye.cuda()\n\n def valid_loss(self, loss, batch_cnt=None):\n if self.simple_posterior:\n total_loss = loss.nll\n if self.config.use_pr > 0.0:\n total_loss += self.beta * loss.pi_kl\n else:\n total_loss = loss.nll + loss.pi_kl\n\n if self.config.use_mi:\n total_loss += (loss.b_pr * self.beta)\n\n if self.config.use_diversity:\n total_loss += loss.diversity\n\n return total_loss\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n if self.simple_posterior:\n logits_qy, log_qy = self.c2z(enc_last)\n sample_y = self.gumbel_connector(logits_qy, hard=mode==GEN)\n log_py = self.log_uniform_y\n else:\n logits_py, log_py = self.c2z(enc_last)\n # encode response and use posterior to find q(z|x, c)\n x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))\n if self.contextual_posterior:\n logits_qy, log_qy = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))\n else:\n logits_qy, log_qy = self.xc2z(x_h.squeeze(1))\n\n # use prior at inference time, otherwise use posterior\n if mode == GEN or (use_py is not None and use_py is True):\n sample_y = self.gumbel_connector(logits_py, hard=False)\n else:\n sample_y = self.gumbel_connector(logits_qy, hard=True)\n\n # pack attention context\n if self.config.dec_use_attn:\n z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)\n attn_context = []\n temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)\n for z_id in range(self.y_size):\n attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))\n attn_context = th.cat(attn_context, dim=1)\n dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)\n else:\n dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n 
dec_inputs=dec_inputs,\n # (batch_size, response_size-1)\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n # (batch_size, max_ctx_len, ctx_cell_size)\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n ret_dict['sample_z'] = sample_y\n ret_dict['log_qy'] = log_qy\n return ret_dict, labels\n\n else:\n result = Pack(nll=self.nll(dec_outputs, labels))\n # regularization qy to be uniform\n avg_log_qy = th.exp(log_qy.view(-1, self.config.y_size, self.config.k_size))\n avg_log_qy = th.log(th.mean(avg_log_qy, dim=0) + 1e-15)\n b_pr = self.cat_kl_loss(avg_log_qy, self.log_uniform_y, batch_size, unit_average=True)\n mi = self.entropy_loss(avg_log_qy, unit_average=True) - self.entropy_loss(log_qy, unit_average=True)\n pi_kl = self.cat_kl_loss(log_qy, log_py, batch_size, unit_average=True)\n q_y = th.exp(log_qy).view(-1, self.config.y_size, self.config.k_size) # b\n p = th.pow(th.bmm(q_y, th.transpose(q_y, 1, 2)) - self.eye, 2)\n\n result['pi_kl'] = pi_kl\n\n result['diversity'] = th.mean(p)\n result['nll'] = self.nll(dec_outputs, labels)\n result['b_pr'] = b_pr\n result['mi'] = mi\n return result\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n if self.simple_posterior:\n logits_py, log_qy = self.c2z(enc_last)\n else:\n logits_py, log_qy = self.c2z(enc_last)\n\n qy = F.softmax(logits_py / temp, dim=1) # (batch_size, vocab_size, )\n log_qy = F.log_softmax(logits_py, dim=1) # (batch_size, vocab_size, )\n idx = th.multinomial(qy, 1).detach()\n logprob_sample_z = log_qy.gather(1, idx).view(-1, self.y_size)\n joint_logpz = th.sum(logprob_sample_z, dim=1)\n sample_y = cast_type(Variable(th.zeros(log_qy.size())), FLOAT, self.use_gpu)\n sample_y.scatter_(1, idx, 1.0)\n\n # pack attention context\n if self.config.dec_use_attn:\n z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)\n attn_context = []\n temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)\n for z_id in range(self.y_size):\n attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))\n attn_context = th.cat(attn_context, dim=1)\n dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)\n else:\n dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=0.1)\n return logprobs, outs, joint_logpz, sample_y\n\n\nclass SysPerfectBD2Gauss(BaseModel):\n def __init__(self, corpus, config):\n super(SysPerfectBD2Gauss, self).__init__(config)\n self.vocab = corpus.vocab\n self.vocab_dict = corpus.vocab_dict\n self.vocab_size = len(self.vocab)\n self.bos_id = 
self.vocab_dict[BOS]\n self.eos_id = self.vocab_dict[EOS]\n self.pad_id = self.vocab_dict[PAD]\n self.bs_size = corpus.bs_size\n self.db_size = corpus.db_size\n self.y_size = config.y_size\n self.simple_posterior = config.simple_posterior\n\n self.embedding = None\n self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,\n embedding_dim=config.embed_size,\n feat_size=0,\n goal_nhid=0,\n rnn_cell=config.utt_rnn_cell,\n utt_cell_size=config.utt_cell_size,\n num_layers=config.num_layers,\n input_dropout_p=config.dropout,\n output_dropout_p=config.dropout,\n bidirectional=config.bi_utt_cell,\n variable_lengths=False,\n use_attn=config.enc_use_attn,\n embedding=self.embedding)\n\n self.c2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size + self.db_size + self.bs_size,\n config.y_size, is_lstm=False)\n self.gauss_connector = nn_lib.GaussianConnector(self.use_gpu)\n self.z_embedding = nn.Linear(self.y_size, config.dec_cell_size)\n if not self.simple_posterior:\n self.xc2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,\n config.y_size, is_lstm=False)\n\n self.decoder = DecoderRNN(input_dropout_p=config.dropout,\n rnn_cell=config.dec_rnn_cell,\n input_size=config.embed_size,\n hidden_size=config.dec_cell_size,\n num_layers=config.num_layers,\n output_dropout_p=config.dropout,\n bidirectional=False,\n vocab_size=self.vocab_size,\n use_attn=config.dec_use_attn,\n ctx_cell_size=config.dec_cell_size,\n attn_mode=config.dec_attn_mode,\n sys_id=self.bos_id,\n eos_id=self.eos_id,\n use_gpu=config.use_gpu,\n max_dec_len=config.max_dec_len,\n embedding=self.embedding)\n\n self.nll = NLLEntropy(self.pad_id, config.avg_type)\n self.gauss_kl = NormKLLoss(unit_average=True)\n self.zero = cast_type(th.zeros(1), FLOAT, self.use_gpu)\n\n def valid_loss(self, loss, batch_cnt=None):\n if self.simple_posterior:\n total_loss = loss.nll\n if self.config.use_pr > 0.0:\n total_loss += self.config.beta * loss.pi_kl\n else:\n total_loss = loss.nll + loss.pi_kl\n\n return total_loss\n\n def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # get decoder inputs\n dec_inputs = out_utts[:, :-1]\n labels = out_utts[:, 1:].contiguous()\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n\n # create decoder initial states\n if self.simple_posterior:\n q_mu, q_logvar = self.c2z(enc_last)\n sample_z = self.gauss_connector(q_mu, q_logvar)\n p_mu, p_logvar = self.zero, self.zero\n else:\n p_mu, p_logvar = self.c2z(enc_last)\n # encode response and use posterior to find q(z|x, c)\n x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))\n q_mu, q_logvar = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))\n\n # use prior at inference time, otherwise use posterior\n if mode == GEN or use_py:\n sample_z = self.gauss_connector(p_mu, p_logvar)\n else:\n sample_z = self.gauss_connector(q_mu, q_logvar)\n\n # pack attention context\n dec_init_state = 
self.z_embedding(sample_z.unsqueeze(0))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,\n dec_inputs=dec_inputs,\n dec_init_state=dec_init_state, # tuple: (h, c)\n attn_context=attn_context,\n mode=mode,\n gen_type=gen_type,\n beam_size=self.config.beam_size) # (batch_size, goal_nhid)\n if mode == GEN:\n ret_dict['sample_z'] = sample_z\n return ret_dict, labels\n\n else:\n result = Pack(nll=self.nll(dec_outputs, labels))\n pi_kl = self.gauss_kl(q_mu, q_logvar, p_mu, p_logvar)\n result['pi_kl'] = pi_kl\n result['nll'] = self.nll(dec_outputs, labels)\n return result\n\n def gaussian_logprob(self, mu, logvar, sample_z):\n var = th.exp(logvar)\n constant = float(-0.5 * np.log(2*np.pi))\n logprob = constant - 0.5 * logvar - th.pow((mu-sample_z), 2) / (2.0*var)\n return logprob\n\n def forward_rl(self, data_feed, max_words, temp=0.1):\n ctx_lens = data_feed['context_lens'] # (batch_size, )\n short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)\n bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)\n batch_size = len(ctx_lens)\n\n utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))\n\n # create decoder initial states\n enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)\n # create decoder initial states\n p_mu, p_logvar = self.c2z(enc_last)\n\n sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()\n logprob_sample_z = self.gaussian_logprob(p_mu, self.zero, sample_z)\n joint_logpz = th.sum(logprob_sample_z, dim=1)\n\n # pack attention context\n dec_init_state = self.z_embedding(sample_z.unsqueeze(0))\n attn_context = None\n\n # decode\n if self.config.dec_rnn_cell == 'lstm':\n dec_init_state = tuple([dec_init_state, dec_init_state])\n\n # decode\n logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,\n dec_init_state=dec_init_state,\n attn_context=attn_context,\n vocab=self.vocab,\n max_words=max_words,\n temp=0.1)\n return logprobs, outs, joint_logpz, sample_z\n\n",
"# -*- coding: utf-8 -*-\n\n# Modified by Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport json\nimport math\nimport os\nimport time\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nDATA_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))), 'data/mdbt')\nVALIDATION_URL = os.path.join(DATA_PATH, \"data/validate.json\")\nWORD_VECTORS_URL = os.path.join(DATA_PATH, \"word-vectors/paragram_300_sl999.txt\")\nTRAINING_URL = os.path.join(DATA_PATH, \"data/train.json\")\nONTOLOGY_URL = os.path.join(DATA_PATH, \"data/ontology.json\")\nTESTING_URL = os.path.join(DATA_PATH, \"data/test.json\")\nMODEL_URL = os.path.join(DATA_PATH, \"models/model-1\")\nGRAPH_URL = os.path.join(DATA_PATH, \"graphs/graph-1\")\nRESULTS_URL = os.path.join(DATA_PATH, \"results/log-1.txt\")\n\n#ROOT_URL = '../../data/mdbt'\n\n#VALIDATION_URL = \"./data/mdbt/data/validate.json\"\n#WORD_VECTORS_URL = \"./data/mdbt/word-vectors/paragram_300_sl999.txt\"\n#TRAINING_URL = \"./data/mdbt/data/train.json\"\n#ONTOLOGY_URL = \"./data/mdbt/data/ontology.json\"\n#TESTING_URL = \"./data/mdbt/data/test.json\"\n#MODEL_URL = \"./data/mdbt/models/model-1\"\n#GRAPH_URL = \"./data/mdbt/graphs/graph-1\"\n#RESULTS_URL = \"./data/mdbt/results/log-1.txt\"\n\n\ndomains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi']\n\ntrain_batch_size = 64\nbatches_per_eval = 10\nno_epochs = 600\ndevice = \"gpu\"\nstart_batch = 0\n\nnum_slots = 0\n\nbooking_slots = {}\n\nnetwork = \"lstm\"\nbidirect = True\nlstm_num_hidden = 50\nmax_utterance_length = 50\nvector_dimension = 300\nmax_no_turns = 22\n\n\n# model.py\ndef get_available_devs():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\nclass GRU(tf.nn.rnn_cell.RNNCell):\n '''\n Create a Gated Recurrent unit to unroll the network through time\n for combining the current and previous belief states\n '''\n\n def __init__(self, W_h, U_h, M_h, W_m, U_m, label_size, reuse=None, binary_output=False):\n super(GRU, self).__init__(_reuse=reuse)\n self.label_size = label_size\n self.M_h = M_h\n self.W_m = W_m\n self.U_m = U_m\n self.U_h = U_h\n self.W_h = W_h\n self.binary_output = binary_output\n\n def __call__(self, inputs, state, scope=None):\n state_only = tf.slice(state, [0, self.label_size], [-1, -1])\n output_only = tf.slice(state, [0, 0], [-1, self.label_size])\n new_state = tf.tanh(tf.matmul(inputs, self.U_m) + tf.matmul(state_only, self.W_m))\n output = tf.matmul(inputs, self.U_h) + tf.matmul(output_only, self.W_h) + tf.matmul(state_only, self.M_h)\n if self.binary_output:\n output_ = tf.sigmoid(output)\n else:\n output_ = tf.nn.softmax(output)\n state = tf.concat([output_, new_state], 1)\n return output, state\n\n @property\n def state_size(self):\n return tf.shape(self.W_m)[0] + self.label_size\n\n @property\n def output_size(self):\n return tf.shape(self.W_h)[0]\n\n\ndef define_CNN_model(utter, num_filters=300, name=\"r\"):\n \"\"\"\n Better code for defining the CNN model.\n \"\"\"\n filter_sizes = [1, 2, 3]\n W = []\n b = []\n for i, filter_size in enumerate(filter_sizes):\n filter_shape = [filter_size, vector_dimension, 1, num_filters]\n W.append(tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"F_W\"))\n b.append(tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"F_b\"))\n\n utter = 
tf.reshape(utter, [-1, max_utterance_length, vector_dimension])\n\n hidden_representation = tf.zeros([num_filters], tf.float32)\n\n pooled_outputs = []\n for i, filter_size in enumerate(filter_sizes):\n # with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolution Layer\n conv = tf.nn.conv2d(\n tf.expand_dims(utter, -1),\n W[i],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv_R\")\n # Apply nonlinearity\n h = tf.nn.relu(tf.nn.bias_add(conv, b[i]), name=\"relu\")\n # Maxpooling over the outputs\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, max_utterance_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding='VALID',\n name=\"r_\")\n pooled_outputs.append(pooled)\n\n hidden_representation += tf.reshape(tf.concat(pooled, 3), [-1, num_filters])\n\n hidden_representation = tf.reshape(hidden_representation, [-1, max_no_turns, num_filters], name=name)\n\n return hidden_representation\n\n\ndef lstm_model(text_input, utterance_length, num_hidden, name, net_type, bidir):\n '''\n Define an Lstm model that will run across the user input and system act\n :param text_input: [batch_size, max_num_turns, max_utterance_size, vector_dimension]\n :param utterance_length: number words in every utterance [batch_size, max_num_turns, 1]\n :param num_hidden: -- int --\n :param name: The name of lstm network\n :param net_type: type of the network (\"lstm\" or \"gru\" or \"rnn\")\n :param bidir: use a bidirectional network -- bool --\n :return: output at each state [batch_size, max_num_turns, max_utterance_size, num_hidden],\n output of the final state [batch_size, max_num_turns, num_hidden]\n '''\n with tf.variable_scope(name):\n\n text_input = tf.reshape(text_input, [-1, max_utterance_length, vector_dimension])\n utterance_length = tf.reshape(utterance_length, [-1])\n\n def rnn(net_typ, num_units):\n if net_typ == \"lstm\":\n return tf.nn.rnn_cell.LSTMCell(num_units)\n elif net_typ == \"gru\":\n return tf.nn.rnn_cell.GRUCell(num_units)\n else:\n return tf.nn.rnn_cell.BasicRNNCell(num_units)\n\n if bidir:\n assert num_hidden % 2 == 0\n rev_cell = rnn(net_type, num_hidden // 2)\n cell = rnn(net_type, num_hidden // 2)\n _, lspd = tf.nn.bidirectional_dynamic_rnn(cell, rev_cell, text_input, dtype=tf.float32,\n sequence_length=utterance_length)\n if net_type == \"lstm\":\n lspd = (lspd[0].h, lspd[1].h)\n\n last_state = tf.concat(lspd, 1)\n else:\n cell = rnn(net_type, num_hidden)\n _, last_state = tf.nn.dynamic_rnn(cell, text_input, dtype=tf.float32, sequence_length=utterance_length)\n if net_type == \"lstm\":\n last_state = last_state.h\n\n last_state = tf.reshape(last_state, [-1, max_no_turns, num_hidden])\n\n return last_state\n\n\ndef model_definition(ontology, num_slots, slots, num_hidden=None, net_type=None, bidir=None, test=False, dev=None):\n '''\n Create neural belief tracker model that is defined in my notes. 
It consists of encoding the user and system input,\n then use the ontology to decode the encoder in manner that detects if a domain-slot-value class is mentioned\n :param ontology: numpy array of the embedded vectors of the ontology [num_slots, 3*vector_dimension]\n :param num_slots: number of ontology classes --int--\n :param slots: indices of the values of each slot list of lists of ints\n :param num_hidden: Number of hidden units or dimension of the hidden space\n :param net_type: The type of the encoder network cnn, lstm, gru, rnn ...etc\n :param bidir: For recurrent networks should it be bidirectional\n :param test: This is testing mode (no back-propagation)\n :param dev: Device to run the model on (cpu or gpu)\n :return: All input variable/placeholders output metrics (precision, recall, f1-score) and trainer\n '''\n # print('model definition')\n # print(ontology, num_slots, slots, num_hidden, net_type, bidir, test, dev)\n global lstm_num_hidden\n\n if not net_type:\n net_type = network\n else:\n print(\"\\tMDBT: Setting up the type of the network to {}..............................\".format(net_type))\n if bidir == None:\n bidir = bidirect\n else:\n pass\n # print(\"\\tMDBT: Setting up type of the recurrent network to bidirectional {}...........................\".format(bidir))\n if num_hidden:\n lstm_num_hidden = num_hidden\n print(\"\\tMDBT: Setting up type of the dimension of the hidden space to {}.........................\".format(num_hidden))\n\n ontology = tf.constant(ontology, dtype=tf.float32)\n\n # ----------------------------------- Define the input variables --------------------------------------------------\n user_input = tf.placeholder(tf.float32, [None, max_no_turns, max_utterance_length, vector_dimension], name=\"user\")\n system_input = tf.placeholder(tf.float32, [None, max_no_turns, max_utterance_length, vector_dimension], name=\"sys\")\n num_turns = tf.placeholder(tf.int32, [None], name=\"num_turns\")\n user_utterance_lengths = tf.placeholder(tf.int32, [None, max_no_turns], name=\"user_sen_len\")\n sys_utterance_lengths = tf.placeholder(tf.int32, [None, max_no_turns], name=\"sys_sen_len\")\n labels = tf.placeholder(tf.float32, [None, max_no_turns, num_slots], name=\"labels\")\n domain_labels = tf.placeholder(tf.float32, [None, max_no_turns, num_slots], name=\"domain_labels\")\n # dropout placeholder, 0.5 for training, 1.0 for validation/testing:\n keep_prob = tf.placeholder(\"float\")\n\n # ------------------------------------ Create the Encoder networks ------------------------------------------------\n devs = ['/device:CPU:0']\n if dev == 'gpu':\n devs = get_available_devs()\n\n if net_type == \"cnn\":\n with tf.device(devs[1 % len(devs)]):\n # Encode the domain of the user input using a LSTM network\n usr_dom_en = define_CNN_model(user_input, num_filters=lstm_num_hidden, name=\"h_u_d\")\n # Encode the domain of the system act using a LSTM network\n sys_dom_en = define_CNN_model(system_input, num_filters=lstm_num_hidden, name=\"h_s_d\")\n\n with tf.device(devs[2 % len(devs)]):\n # Encode the slot of the user input using a CNN network\n usr_slot_en = define_CNN_model(user_input, num_filters=lstm_num_hidden, name=\"h_u_s\")\n # Encode the slot of the system act using a CNN network\n sys_slot_en = define_CNN_model(system_input, num_filters=lstm_num_hidden, name=\"h_s_s\")\n # Encode the value of the user input using a CNN network\n usr_val_en = define_CNN_model(user_input, num_filters=lstm_num_hidden, name=\"h_u_v\")\n # Encode the value of the system act using a CNN 
network\n sys_val_en = define_CNN_model(system_input, num_filters=lstm_num_hidden, name=\"h_s_v\")\n # Encode the user using a CNN network\n usr_en = define_CNN_model(user_input, num_filters=lstm_num_hidden // 5, name=\"h_u\")\n\n else:\n\n with tf.device(devs[1 % len(devs)]):\n # Encode the domain of the user input using a LSTM network\n usr_dom_en = lstm_model(user_input, user_utterance_lengths, lstm_num_hidden, \"h_u_d\", net_type, bidir)\n usr_dom_en = tf.nn.dropout(usr_dom_en, keep_prob, name=\"h_u_d_out\")\n # Encode the domain of the system act using a LSTM network\n sys_dom_en = lstm_model(system_input, sys_utterance_lengths, lstm_num_hidden, \"h_s_d\", net_type, bidir)\n sys_dom_en = tf.nn.dropout(sys_dom_en, keep_prob, name=\"h_s_d_out\")\n\n with tf.device(devs[2 % len(devs)]):\n # Encode the slot of the user input using a LSTM network\n usr_slot_en = lstm_model(user_input, user_utterance_lengths, lstm_num_hidden, \"h_u_s\", net_type, bidir)\n usr_slot_en = tf.nn.dropout(usr_slot_en, keep_prob, name=\"h_u_s_out\")\n # Encode the slot of the system act using a LSTM network\n sys_slot_en = lstm_model(system_input, sys_utterance_lengths, lstm_num_hidden, \"h_s_s\", net_type, bidir)\n sys_slot_en = tf.nn.dropout(sys_slot_en, keep_prob, name=\"h_s_s_out\")\n # Encode the value of the user input using a LSTM network\n usr_val_en = lstm_model(user_input, user_utterance_lengths, lstm_num_hidden, \"h_u_v\", net_type, bidir)\n usr_val_en = tf.nn.dropout(usr_val_en, keep_prob, name=\"h_u_v_out\")\n # Encode the value of the system act using a LSTM network\n sys_val_en = lstm_model(system_input, sys_utterance_lengths, lstm_num_hidden, \"h_s_v\", net_type, bidir)\n sys_val_en = tf.nn.dropout(sys_val_en, keep_prob, name=\"h_s_v_out\")\n # Encode the user using a LSTM network\n usr_en = lstm_model(user_input, user_utterance_lengths, lstm_num_hidden // 5, \"h_u\", net_type, bidir)\n usr_en = tf.nn.dropout(usr_en, keep_prob, name=\"h_u_out\")\n\n with tf.device(devs[1 % len(devs)]):\n usr_dom_en = tf.tile(tf.expand_dims(usr_dom_en, axis=2), [1, 1, num_slots, 1], name=\"h_u_d\")\n sys_dom_en = tf.tile(tf.expand_dims(sys_dom_en, axis=2), [1, 1, num_slots, 1], name=\"h_s_d\")\n with tf.device(devs[2 % len(devs)]):\n usr_slot_en = tf.tile(tf.expand_dims(usr_slot_en, axis=2), [1, 1, num_slots, 1], name=\"h_u_s\")\n sys_slot_en = tf.tile(tf.expand_dims(sys_slot_en, axis=2), [1, 1, num_slots, 1], name=\"h_s_s\")\n usr_val_en = tf.tile(tf.expand_dims(usr_val_en, axis=2), [1, 1, num_slots, 1], name=\"h_u_v\")\n sys_val_en = tf.tile(tf.expand_dims(sys_val_en, axis=2), [1, 1, num_slots, 1], name=\"h_s_v\")\n usr_en = tf.tile(tf.expand_dims(usr_en, axis=2), [1, 1, num_slots, 1], name=\"h_u\")\n\n # All encoding vectors have size [batch_size, max_turns, num_slots, num_hidden]\n\n # Matrix that transforms the ontology from the embedding space to the hidden representation\n with tf.device(devs[1 % len(devs)]):\n W_onto_domain = tf.Variable(tf.random_normal([vector_dimension, lstm_num_hidden]), name=\"W_onto_domain\")\n W_onto_slot = tf.Variable(tf.random_normal([vector_dimension, lstm_num_hidden]), name=\"W_onto_slot\")\n W_onto_value = tf.Variable(tf.random_normal([vector_dimension, lstm_num_hidden]), name=\"W_onto_value\")\n\n # And biases\n b_onto_domain = tf.Variable(tf.zeros([lstm_num_hidden]), name=\"b_onto_domain\")\n b_onto_slot = tf.Variable(tf.zeros([lstm_num_hidden]), name=\"b_onto_slot\")\n b_onto_value = tf.Variable(tf.zeros([lstm_num_hidden]), name=\"b_onto_value\")\n\n # Apply the transformation 
from the embedding space of the ontology to the hidden space\n domain_vec = tf.slice(ontology, begin=[0, 0], size=[-1, vector_dimension])\n slot_vec = tf.slice(ontology, begin=[0, vector_dimension], size=[-1, vector_dimension])\n value_vec = tf.slice(ontology, begin=[0, 2 * vector_dimension], size=[-1, vector_dimension])\n # Each [num_slots, vector_dimension]\n d = tf.nn.dropout(tf.tanh(tf.matmul(domain_vec, W_onto_domain) + b_onto_domain), keep_prob, name=\"d\")\n s = tf.nn.dropout(tf.tanh(tf.matmul(slot_vec, W_onto_slot) + b_onto_slot), keep_prob, name=\"s\")\n v = tf.nn.dropout(tf.tanh(tf.matmul(value_vec, W_onto_value) + b_onto_value), keep_prob, name=\"v\")\n # Each [num_slots, num_hidden]\n\n # Apply the comparison mechanism for all the user and system utterances and ontology values\n domain_user = tf.multiply(usr_dom_en, d, name=\"domain_user\")\n domain_sys = tf.multiply(sys_dom_en, d, name=\"domain_sys\")\n slot_user = tf.multiply(usr_slot_en, s, name=\"slot_user\")\n slot_sys = tf.multiply(sys_slot_en, s, name=\"slot_sys\")\n value_user = tf.multiply(usr_val_en, v, name=\"value_user\")\n value_sys = tf.multiply(sys_val_en, v, name=\"value_sys\")\n # All of size [batch_size, max_turns, num_slots, num_hidden]\n\n # -------------- Domain Detection -------------------------------------------------------------------------\n W_domain = tf.Variable(tf.random_normal([2 * lstm_num_hidden]), name=\"W_domain\")\n b_domain = tf.Variable(tf.zeros([1]), name=\"b_domain\")\n y_d = tf.sigmoid(tf.reduce_sum(tf.multiply(tf.concat([domain_user, domain_sys], axis=3), W_domain), axis=3)\n + b_domain) # [batch_size, max_turns, num_slots]\n\n # -------- Run through each of the 3 case ( inform, request, confirm) and decode the inferred state ---------\n # 1 Inform (User is informing the system about the goal, e.g. \"I am looking for a place to stay in the centre\")\n W_inform = tf.Variable(tf.random_normal([2 * lstm_num_hidden]), name=\"W_inform\")\n b_inform = tf.Variable(tf.random_normal([1]), name=\"b_inform\")\n inform = tf.add(tf.reduce_sum(tf.multiply(tf.concat([slot_user, value_user], axis=3), W_inform), axis=3), b_inform,\n name=\"inform\") # [batch_size, max_turns, num_slots]\n\n # 2 Request (The system is requesting information from the user, e.g. \"what type of food would you like?\")\n with tf.device(devs[2 % len(devs)]):\n W_request = tf.Variable(tf.random_normal([2 * lstm_num_hidden]), name=\"W_request\")\n b_request = tf.Variable(tf.random_normal([1]), name=\"b_request\")\n request = tf.add(tf.reduce_sum(tf.multiply(tf.concat([slot_sys, value_user], axis=3), W_request), axis=3),\n b_request, name=\"request\") # [batch_size, max_turns, num_slots]\n\n # 3 Confirm (The system is confirming values given by the user, e.g. 
\"How about turkish food?\")\n with tf.device(devs[3 % len(devs)]):\n size = 2 * lstm_num_hidden + lstm_num_hidden // 5\n W_confirm = tf.Variable(tf.random_normal([size]), name=\"W_confirm\")\n b_confirm = tf.Variable(tf.random_normal([1]), name=\"b_confirm\")\n confirm = tf.add(\n tf.reduce_sum(tf.multiply(tf.concat([slot_sys, value_sys, usr_en], axis=3), W_confirm), axis=3),\n b_confirm, name=\"confirm\") # [batch_size, max_turns, num_slots]\n\n output = inform + request + confirm\n\n # -------------------- Adding the belief update RNN with memory cell (Taken from previous model) -------------------\n with tf.device(devs[2 % len(devs)]):\n domain_memory = tf.Variable(tf.random_normal([1, 1]), name=\"domain_memory\")\n domain_current = tf.Variable(tf.random_normal([1, 1]), name=\"domain_current\")\n domain_M_h = tf.Variable(tf.random_normal([1, 1]), name=\"domain_M_h\")\n domain_W_m = tf.Variable(tf.random_normal([1, 1], name=\"domain_W_m\"))\n domain_U_m = tf.Variable(tf.random_normal([1, 1]), name=\"domain_U_m\")\n a_memory = tf.Variable(tf.random_normal([1, 1]), name=\"a_memory\")\n b_memory = tf.Variable(tf.random_normal([1, 1]), name=\"b_memory\")\n a_current = tf.Variable(tf.random_normal([1, 1]), name=\"a_current\")\n b_current = tf.Variable(tf.random_normal([1, 1]), name=\"b_current\")\n M_h_a = tf.Variable(tf.random_normal([1, 1]), name=\"M_h_a\")\n M_h_b = tf.Variable(tf.random_normal([1, 1]), name=\"M_h_b\")\n W_m_a = tf.Variable(tf.random_normal([1, 1]), name=\"W_m_a\")\n W_m_b = tf.Variable(tf.random_normal([1, 1]), name=\"W_m_b\")\n U_m_a = tf.Variable(tf.random_normal([1, 1]), name=\"U_m_a\")\n U_m_b = tf.Variable(tf.random_normal([1, 1]), name=\"U_m_b\")\n\n # ---------------------------------- Unroll the domain over time --------------------------------------------------\n with tf.device(devs[1 % len(devs)]):\n cell = GRU(domain_memory * tf.diag(tf.ones(num_slots)), domain_current * tf.diag(tf.ones(num_slots)),\n domain_M_h * tf.diag(tf.ones(num_slots)), domain_W_m * tf.diag(tf.ones(num_slots)),\n domain_U_m * tf.diag(tf.ones(num_slots)), num_slots,\n binary_output=True)\n\n y_d, _ = tf.nn.dynamic_rnn(cell, y_d, sequence_length=num_turns, dtype=tf.float32)\n\n domain_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=domain_labels, logits=y_d), axis=2,\n name=\"domain_loss\") / (num_slots / len(slots))\n\n y_d = tf.sigmoid(y_d)\n\n with tf.device(devs[0 % len(devs)]):\n\n loss = [None for _ in range(len(slots))]\n slot_pred = [None for _ in range(len(slots))]\n slot_label = [None for _ in range(len(slots))]\n val_pred = [None for _ in range(len(slots))]\n val_label = [None for _ in range(len(slots))]\n y = [None for _ in range(len(slots))]\n y_pred = [None for _ in range(len(slots))]\n for i in range(len(slots)):\n\n num_values = slots[i] + 1 # For the none case\n size = sum(slots[:i + 1]) - slots[i]\n if test:\n domain_output = tf.slice(tf.round(y_d), begin=[0, 0, size], size=[-1, -1, slots[i]])\n else:\n domain_output = tf.slice(domain_labels, begin=[0, 0, size], size=[-1, -1, slots[i]])\n max_val = tf.expand_dims(tf.reduce_max(domain_output, axis=2), axis=2)\n # tf.assert_less_equal(max_val, 1.0)\n # tf.assert_equal(tf.round(max_val), max_val)\n domain_output = tf.concat([tf.zeros(tf.shape(domain_output)), 1 - max_val], axis=2)\n\n slot_output = tf.slice(output, begin=[0, 0, size], size=[-1, -1, slots[i]])\n slot_output = tf.concat([slot_output, tf.zeros([tf.shape(output)[0], max_no_turns, 1])], axis=2)\n\n labels_output = tf.slice(labels, begin=[0, 0, 
size], size=[-1, -1, slots[i]])\n max_val = tf.expand_dims(tf.reduce_max(labels_output, axis=2), axis=2)\n # tf.assert_less_equal(max_val, 1.0)\n # tf.assert_equal(tf.round(max_val), max_val)\n slot_label[i] = max_val\n # [Batch_size, max_turns, 1]\n labels_output = tf.argmax(tf.concat([labels_output, 1 - max_val], axis=2), axis=2)\n # [Batch_size, max_turns]\n val_label[i] = tf.cast(tf.expand_dims(labels_output, axis=2), dtype=\"float\")\n # [Batch_size, max_turns, 1]\n\n diag_memory = a_memory * tf.diag(tf.ones(num_values))\n non_diag_memory = tf.matrix_set_diag(b_memory * tf.ones([num_values, num_values]), tf.zeros(num_values))\n W_memory = diag_memory + non_diag_memory\n\n diag_current = a_current * tf.diag(tf.ones(num_values))\n non_diag_current = tf.matrix_set_diag(b_current * tf.ones([num_values, num_values]), tf.zeros(num_values))\n W_current = diag_current + non_diag_current\n\n diag_M_h = M_h_a * tf.diag(tf.ones(num_values))\n non_diag_M_h = tf.matrix_set_diag(M_h_b * tf.ones([num_values, num_values]), tf.zeros(num_values))\n M_h = diag_M_h + non_diag_M_h\n\n diag_U_m = U_m_a * tf.diag(tf.ones(num_values))\n non_diag_U_m = tf.matrix_set_diag(U_m_b * tf.ones([num_values, num_values]), tf.zeros(num_values))\n U_m = diag_U_m + non_diag_U_m\n\n diag_W_m = W_m_a * tf.diag(tf.ones(num_values))\n non_diag_W_m = tf.matrix_set_diag(W_m_b * tf.ones([num_values, num_values]), tf.zeros(num_values))\n W_m = diag_W_m + non_diag_W_m\n\n cell = GRU(W_memory, W_current, M_h, W_m, U_m, num_values)\n y_predict, _ = tf.nn.dynamic_rnn(cell, slot_output, sequence_length=num_turns, dtype=tf.float32)\n\n y_predict = y_predict + 1000000.0 * domain_output\n # [Batch_size, max_turns, num_values]\n\n y[i] = tf.nn.softmax(y_predict)\n val_pred[i] = tf.cast(tf.expand_dims(tf.argmax(y[i], axis=2), axis=2), dtype=\"float32\")\n # [Batch_size, max_turns, 1]\n y_pred[i] = tf.slice(tf.one_hot(tf.argmax(y[i], axis=2), dtype=tf.float32, depth=num_values),\n begin=[0, 0, 0], size=[-1, -1, num_values - 1])\n y[i] = tf.slice(y[i], begin=[0, 0, 0], size=[-1, -1, num_values - 1])\n slot_pred[i] = tf.cast(tf.reduce_max(y_pred[i], axis=2, keep_dims=True), dtype=\"float32\")\n # [Batch_size, max_turns, 1]\n loss[i] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_output, logits=y_predict)\n # [Batch_size, max_turns]\n\n # ---------------- Compute the output and the loss function (cross_entropy) and add to optimizer--------------------\n cross_entropy = tf.add_n(loss, name=\"cross_entropy\")\n # Add the error from the domains\n cross_entropy = tf.add(cross_entropy, domain_loss, name=\"total_loss\")\n\n y = tf.concat(y, axis=2, name=\"y\")\n\n mask = tf.cast(tf.sequence_mask(num_turns, maxlen=max_no_turns), dtype=tf.float32)\n mask_extended = tf.tile(tf.expand_dims(mask, axis=2), [1, 1, num_slots])\n cross_entropy = tf.reduce_sum(mask * cross_entropy, axis=1) / tf.cast(num_turns, dtype=tf.float32)\n\n optimizer = tf.train.AdamOptimizer(0.001)\n train_step = optimizer.minimize(cross_entropy, colocate_gradients_with_ops=True)\n\n # ----------------- Get the precision, recall f1-score and accuracy -----------------------------------------------\n\n # Domain accuracy\n true_predictions = tf.reshape(domain_labels, [-1, num_slots])\n predictions = tf.reshape(tf.round(y_d) * mask_extended, [-1, num_slots])\n\n y_d = tf.reshape(y_d * mask_extended, [-1, num_slots])\n\n _, _, _, domain_accuracy = get_metrics(predictions, true_predictions, num_turns, mask_extended, num_slots)\n\n mask_extended_2 = 
tf.tile(tf.expand_dims(mask, axis=2), [1, 1, len(slots)])\n\n # Slot accuracy\n true_predictions = tf.reshape(tf.concat(slot_label, axis=2), [-1, len(slots)])\n predictions = tf.reshape(tf.concat(slot_pred, axis=2) * mask_extended_2, [-1, len(slots)])\n\n _, _, _, slot_accuracy = get_metrics(predictions, true_predictions, num_turns, mask_extended_2, len(slots))\n\n # accuracy\n if test:\n value_accuracy = []\n mask_extended_3 = tf.expand_dims(mask, axis=2)\n for i in range(len(slots)):\n true_predictions = tf.reshape(val_label[i] * mask_extended_3, [-1, 1])\n predictions = tf.reshape(val_pred[i] * mask_extended_3, [-1, 1])\n\n _, _, _, value_acc = get_metrics(predictions, true_predictions, num_turns, mask_extended_3, 1)\n value_accuracy.append(value_acc)\n\n value_accuracy = tf.stack(value_accuracy)\n else:\n true_predictions = tf.reshape(tf.concat(val_label, axis=2) * mask_extended_2, [-1, len(slots)])\n predictions = tf.reshape(tf.concat(val_pred, axis=2) * mask_extended_2, [-1, len(slots)])\n\n _, _, _, value_accuracy = get_metrics(predictions, true_predictions, num_turns, mask_extended_2, len(slots))\n\n # Value f1score a\n true_predictions = tf.reshape(labels, [-1, num_slots])\n predictions = tf.reshape(tf.concat(y_pred, axis=2) * mask_extended, [-1, num_slots])\n\n precision, recall, value_f1_score, _ = get_metrics(predictions, true_predictions, num_turns,\n mask_extended, num_slots)\n\n y_ = tf.reshape(y, [-1, num_slots])\n\n # -------------------- Summarise the statistics of training to be viewed in tensorboard-----------------------------\n tf.summary.scalar(\"domain_accuracy\", domain_accuracy)\n tf.summary.scalar(\"slot_accuracy\", slot_accuracy)\n tf.summary.scalar(\"value_accuracy\", value_accuracy)\n tf.summary.scalar(\"value_f1_score\", value_f1_score)\n tf.summary.scalar(\"cross_entropy\", tf.reduce_mean(cross_entropy))\n\n value_f1_score = [precision, recall, value_f1_score]\n\n return user_input, system_input, num_turns, user_utterance_lengths, sys_utterance_lengths, labels, domain_labels, \\\n domain_accuracy, slot_accuracy, value_accuracy, value_f1_score, train_step, keep_prob, predictions, \\\n true_predictions, [y_, y_d]\n\n\ndef get_metrics(predictions, true_predictions, no_turns, mask, num_slots):\n mask = tf.reshape(mask, [-1, num_slots])\n correct_prediction = tf.cast(tf.equal(predictions, true_predictions), \"float32\") * mask\n\n num_positives = tf.reduce_sum(true_predictions)\n classified_positives = tf.reduce_sum(predictions)\n\n true_positives = tf.multiply(predictions, true_predictions)\n num_true_positives = tf.reduce_sum(true_positives)\n\n recall = num_true_positives / num_positives\n precision = num_true_positives / classified_positives\n f_score = (2 * recall * precision) / (recall + precision)\n accuracy = tf.reduce_sum(correct_prediction) / (tf.cast(tf.reduce_sum(no_turns), dtype=\"float32\") * num_slots)\n\n return precision, recall, f_score, accuracy\n\n\n\n# main.py\ndef normalise_word_vectors(word_vectors, norm=1.0):\n \"\"\"\n This method normalises the collection of word vectors provided in the word_vectors dictionary.\n \"\"\"\n for word in word_vectors:\n word_vectors[word] /= math.sqrt(sum(word_vectors[word]**2) + 1e-6)\n word_vectors[word] *= norm\n return word_vectors\n\n\ndef xavier_vector(word, D=300):\n \"\"\"\n Returns a D-dimensional vector for the word.\n\n We hash the word to always get the same vector for the given word.\n \"\"\"\n def hash_string(_s):\n return abs(hash(_s)) % (10 ** 8)\n seed_value = hash_string(word)\n 
np.random.seed(seed_value)\n\n neg_value = - math.sqrt(6)/math.sqrt(D)\n pos_value = math.sqrt(6)/math.sqrt(D)\n\n rsample = np.random.uniform(low=neg_value, high=pos_value, size=(D,))\n norm = np.linalg.norm(rsample)\n rsample_normed = rsample/norm\n\n return rsample_normed\n\n\ndef load_ontology(url, word_vectors):\n '''\n Load the ontology from a file\n :param url: to the ontology\n :param word_vectors: dictionary of the word embeddings [words, vector_dimension]\n :return: list([domain-slot-value]), [no_slots, vector_dimension]\n '''\n global num_slots\n # print(\"\\tMDBT: Loading the ontology....................\")\n data = json.load(open(url, mode='r', encoding='utf8'), object_pairs_hook=OrderedDict)\n slot_values = []\n ontology = []\n slots_values = []\n ontology_vectors = []\n for slots in data:\n [domain, slot] = slots.split('-')\n if domain not in domains or slot == 'name':\n continue\n values = data[slots]\n if \"book\" in slot:\n [slot, value] = slot.split(\" \")\n booking_slots[domain+'-'+value] = values\n values = [value]\n elif slot == \"departure\" or slot == \"destination\":\n values = [\"place\"]\n domain_vec = np.sum(process_text(domain, word_vectors), axis=0)\n if domain not in word_vectors:\n word_vectors[domain.replace(\" \", \"\")] = domain_vec\n slot_vec = np.sum(process_text(slot, word_vectors), axis=0)\n if domain+'-'+slot not in slots_values:\n slots_values.append(domain+'-'+slot)\n if slot not in word_vectors:\n word_vectors[slot.replace(\" \", \"\")] = slot_vec\n slot_values.append(len(values))\n for value in values:\n ontology.append(domain + '-' + slot + '-' + value)\n value_vec = np.sum(process_text(value, word_vectors, print_mode=True), axis=0)\n if value not in word_vectors:\n word_vectors[value.replace(\" \", \"\")] = value_vec\n ontology_vectors.append(np.concatenate((domain_vec, slot_vec, value_vec)))\n\n num_slots = len(slots_values)\n # print(\"\\tMDBT: We have about {} values\".format(len(ontology)))\n # print(\"\\tMDBT: The Full Ontology is:\")\n # print(ontology)\n # print(\"\\tMDBT: The slots in this ontology:\")\n # print(slots_values)\n return ontology, np.asarray(ontology_vectors, dtype='float32'), slot_values\n\n\ndef load_word_vectors(url):\n '''\n Load the word embeddings from the url\n :param url: to the word vectors\n :return: dict of word and vector values\n '''\n word_vectors = {}\n # print(\"Loading the word embeddings....................\")\n # print('abs path: ', os.path.abspath(url))\n with open(url, mode='r', encoding='utf8') as f:\n for line in f:\n line = line.split(\" \", 1)\n key = line[0]\n word_vectors[key] = np.fromstring(line[1], dtype=\"float32\", sep=\" \")\n # print(\"\\tMDBT: The vocabulary contains about {} word embeddings\".format(len(word_vectors)))\n return normalise_word_vectors(word_vectors)\n\n\ndef track_dialogue(data, ontology, predictions, y):\n overall_accuracy_total = 0\n overall_accuracy_corr = 0\n joint_accuracy_total = 0\n joint_accuracy_corr = 0\n global num_slots\n dialogues = []\n idx = 0\n for dialogue in data:\n turn_ids = []\n for key in dialogue.keys():\n if key.isdigit():\n turn_ids.append(int(key))\n elif dialogue[key] and key not in domains:\n continue\n turn_ids.sort()\n turns = []\n previous_terms = []\n for key in turn_ids:\n turn = dialogue[str(key)]\n user_input = turn['user']['text']\n sys_res = turn['system']\n state = turn['user']['belief_state']\n turn_obj = dict()\n turn_obj['user'] = user_input\n turn_obj['system'] = sys_res\n prediction = predictions[idx, :]\n indices = 
np.argsort(prediction)[:-(int(np.sum(prediction)) + 1):-1]\n predicted_terms = [process_booking(ontology[i], user_input, previous_terms) for i in indices]\n previous_terms = deepcopy(predicted_terms)\n turn_obj['prediction'] = [\"{}: {}\".format(predicted_terms[x], y[idx, i]) for x, i in enumerate(indices)]\n turn_obj['True state'] = []\n idx += 1\n unpredicted_labels = 0\n for domain in state:\n if domain not in domains:\n continue\n slots = state[domain]['semi']\n for slot in slots:\n if slot == 'name':\n continue\n value = slots[slot]\n if value != '':\n label = domain + '-' + slot + '-' + value\n turn_obj['True state'].append(label)\n if label in predicted_terms:\n predicted_terms.remove(label)\n else:\n unpredicted_labels += 1\n\n turns.append(turn_obj)\n overall_accuracy_total += num_slots\n overall_accuracy_corr += (num_slots - unpredicted_labels - len(predicted_terms))\n if unpredicted_labels + len(predicted_terms) == 0:\n joint_accuracy_corr += 1\n joint_accuracy_total += 1\n\n dialogues.append(turns)\n return dialogues, overall_accuracy_corr/overall_accuracy_total, joint_accuracy_corr/joint_accuracy_total\n\n\ndef process_booking(ontolog_term, usr_input, previous_terms):\n usr_input = usr_input.lower().split()\n domain, slot, value = ontolog_term.split('-')\n if slot == 'book':\n for term in previous_terms:\n if domain+'-book '+value in term:\n ontolog_term = term\n break\n else:\n if value == 'stay' or value == 'people':\n numbers = [int(s) for s in usr_input if s.isdigit()]\n if len(numbers) == 1:\n ontolog_term = domain + '-' + slot + ' ' + value + '-' + str(numbers[0])\n elif len(numbers) == 2:\n vals = {}\n if usr_input[usr_input.index(str(numbers[0]))+1] in ['people', 'person']:\n vals['people'] = str(numbers[0])\n vals['stay'] = str(numbers[1])\n else:\n vals['people'] = str(numbers[1])\n vals['stay'] = str(numbers[0])\n ontolog_term = domain + '-' + slot + ' ' + value + '-' + vals[value]\n else:\n for val in booking_slots[domain+'-'+value]:\n if val in ' '.join(usr_input):\n ontolog_term = domain + '-' + slot + ' ' + value + '-' + val\n break\n return ontolog_term\n\n\ndef process_history(sessions, word_vectors, ontology):\n '''\n Load the woz3 data and extract feature vectors\n :param data: the data to load\n :param word_vectors: word embeddings\n :param ontology: list of domain-slot-value\n :param url: Is the data coming from a url, default true\n :return: list(num of turns, user_input vectors, system_response vectors, labels)\n '''\n dialogues = []\n actual_dialogues = []\n for dialogue in sessions:\n turn_ids = []\n for key in dialogue.keys():\n if key.isdigit():\n turn_ids.append(int(key))\n elif dialogue[key] and key not in domains:\n continue\n turn_ids.sort()\n num_turns = len(turn_ids)\n user_vecs = []\n sys_vecs = []\n turn_labels = []\n turn_domain_labels = []\n add = False\n good = True\n pre_sys = np.zeros([max_utterance_length, vector_dimension], dtype=\"float32\")\n for key in turn_ids:\n turn = dialogue[str(key)]\n user_v, sys_v, labels, domain_labels = process_turn(turn, word_vectors, ontology)\n if good and (user_v.shape[0] > max_utterance_length or pre_sys.shape[0] > max_utterance_length):\n # cut overlength utterance instead of discarding them\n if user_v.shape[0] > max_utterance_length:\n user_v = user_v[:max_utterance_length]\n if pre_sys.shape[0] > max_utterance_length:\n pre_sys = pre_sys[:max_utterance_length]\n # good = False\n # break\n user_vecs.append(user_v)\n sys_vecs.append(pre_sys)\n turn_labels.append(labels)\n 
turn_domain_labels.append(domain_labels)\n if not add and sum(labels) > -1:\n add = True\n pre_sys = sys_v\n if add and good:\n dialogues.append((num_turns, user_vecs, sys_vecs, turn_labels, turn_domain_labels))\n actual_dialogues.append(dialogue)\n # print(\"\\tMDBT: The data contains about {} dialogues\".format(len(dialogues)))\n return dialogues, actual_dialogues\n\n\ndef load_woz_data(data, word_vectors, ontology, url=True):\n '''\n Load the woz3 data and extract feature vectors\n :param data: the data to load\n :param word_vectors: word embeddings\n :param ontology: list of domain-slot-value\n :param url: Is the data coming from a url, default true\n :return: list(num of turns, user_input vectors, system_response vectors, labels)\n '''\n if url:\n # print(\"Loading data from url {} ....................\".format(data))\n data = json.load(open(data, mode='r', encoding='utf8'))\n\n dialogues = []\n actual_dialogues = []\n for dialogue in data:\n turn_ids = []\n for key in dialogue.keys():\n if key.isdigit():\n turn_ids.append(int(key))\n elif dialogue[key] and key not in domains:\n continue\n turn_ids.sort()\n num_turns = len(turn_ids)\n user_vecs = []\n sys_vecs = []\n turn_labels = []\n turn_domain_labels = []\n add = False\n good = True\n pre_sys = np.zeros([max_utterance_length, vector_dimension], dtype=\"float32\")\n for key in turn_ids:\n turn = dialogue[str(key)]\n user_v, sys_v, labels, domain_labels = process_turn(turn, word_vectors, ontology)\n if good and (user_v.shape[0] > max_utterance_length or pre_sys.shape[0] > max_utterance_length):\n good = False\n break\n user_vecs.append(user_v)\n sys_vecs.append(pre_sys)\n turn_labels.append(labels)\n turn_domain_labels.append(domain_labels)\n if not add and sum(labels) > 0:\n add = True\n pre_sys = sys_v\n if add and good:\n dialogues.append((num_turns, user_vecs, sys_vecs, turn_labels, turn_domain_labels))\n actual_dialogues.append(dialogue)\n # print(\"\\tMDBT: The data contains about {} dialogues\".format(len(dialogues)))\n return dialogues, actual_dialogues\n\n\ndef process_turn(turn, word_vectors, ontology):\n '''\n Process a single turn extracting and processing user text, system response and labels\n :param turn: dict\n :param word_vectors: word embeddings\n :param ontology: list(domain-slot-value)\n :return: ([utterance length, 300], [utterance length, 300], [no_slots])\n '''\n user_input = turn['user']['text']\n sys_res = turn['system']\n state = turn['user']['belief_state']\n user_v = process_text(user_input, word_vectors, ontology)\n sys_v = process_text(sys_res, word_vectors, ontology)\n labels = np.zeros(len(ontology), dtype='float32')\n domain_labels = np.zeros(len(ontology), dtype='float32')\n for domain in state:\n if domain not in domains:\n continue\n slots = state[domain]['semi']\n domain_mention = False\n for slot in slots:\n\n if slot == 'name':\n continue\n value = slots[slot]\n if \"book\" in slot:\n [slot, value] = slot.split(\" \")\n if value != '' and value != 'corsican':\n if slot == \"destination\" or slot == \"departure\":\n value = \"place\"\n elif value == '09;45':\n value = '09:45'\n elif 'alpha-milton' in value:\n value = value.replace('alpha-milton', 'alpha milton')\n elif value == 'east side':\n value = 'east'\n elif value == ' expensive':\n value = 'expensive'\n labels[ontology.index(domain + '-' + slot + '-' + value)] = 1\n domain_mention = True\n if domain_mention:\n for idx, slot in enumerate(ontology):\n if domain in slot:\n domain_labels[idx] = 1\n\n return user_v, sys_v, labels, 
domain_labels\n\n\ndef process_text(text, word_vectors, ontology=None, print_mode=False):\n '''\n Process a line/sentence converting words to feature vectors\n :param text: sentence\n :param word_vectors: word embeddings\n :param ontology: The ontology to do exact matching\n :param print_mode: Log the cases where the word is not in the pre-trained word vectors\n :return: [length of sentence, 300]\n '''\n text = text.replace(\"(\", \"\").replace(\")\", \"\").replace('\"', \"\").replace(u\"’\", \"'\").replace(u\"‘\", \"'\")\n text = text.replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").strip().lower()\n text = text.replace(',', ' ').replace('.', ' ').replace('?', ' ').replace('-', ' ').replace('/', ' / ')\\\n .replace(':', ' ')\n if ontology:\n for slot in ontology:\n [domain, slot, value] = slot.split('-')\n text.replace(domain, domain.replace(\" \", \"\"))\\\n .replace(slot, slot.replace(\" \", \"\"))\\\n .replace(value, value.replace(\" \", \"\"))\n\n words = text.split()\n\n vectors = []\n for word in words:\n word = word.replace(\"'\", \"\").replace(\"!\", \"\")\n if word == \"\":\n continue\n if word not in word_vectors:\n length = len(word)\n for i in range(1, length)[::-1]:\n if word[:i] in word_vectors and word[i:] in word_vectors:\n vec = word_vectors[word[:i]] + word_vectors[word[i:]]\n break\n else:\n vec = xavier_vector(word)\n word_vectors[word] = vec\n if print_mode:\n pass\n # print(\"\\tMDBT: Adding new word: {}\".format(word))\n else:\n vec = word_vectors[word]\n vectors.append(vec)\n return np.asarray(vectors, dtype='float32')\n\n\ndef generate_batch(dialogues, batch_no, batch_size, ontology_size):\n '''\n Generate examples for minibatch training\n :param dialogues: list(num of turns, user_input vectors, system_response vectors, labels)\n :param batch_no: where we are in the training data\n :param batch_size: number of dialogues to generate\n :param ontology_size: no_slots\n :return: list(user_input, system_response, labels, user_sentence_length, system_sentence_length, number of turns)\n '''\n user = np.zeros((batch_size, max_no_turns, max_utterance_length, vector_dimension), dtype='float32')\n sys_res = np.zeros((batch_size, max_no_turns, max_utterance_length, vector_dimension), dtype='float32')\n labels = np.zeros((batch_size, max_no_turns, ontology_size), dtype='float32')\n domain_labels = np.zeros((batch_size, max_no_turns, ontology_size), dtype='float32')\n user_uttr_len = np.zeros((batch_size, max_no_turns), dtype='int32')\n sys_uttr_len = np.zeros((batch_size, max_no_turns), dtype='int32')\n no_turns = np.zeros(batch_size, dtype='int32')\n idx = 0\n for i in range(batch_no*train_batch_size, batch_no*train_batch_size + batch_size):\n (num_turns, user_vecs, sys_vecs, turn_labels, turn_domain_labels) = dialogues[i]\n no_turns[idx] = num_turns\n for j in range(num_turns):\n user_uttr_len[idx, j] = user_vecs[j].shape[0]\n sys_uttr_len[idx, j] = sys_vecs[j].shape[0]\n user[idx, j, :user_uttr_len[idx, j], :] = user_vecs[j]\n sys_res[idx, j, :sys_uttr_len[idx, j], :] = sys_vecs[j]\n labels[idx, j, :] = turn_labels[j]\n domain_labels[idx, j, :] = turn_domain_labels[j]\n idx += 1\n return user, sys_res, labels, domain_labels, user_uttr_len, sys_uttr_len, no_turns\n\n\ndef evaluate_model(sess, model_variables, val_data, summary, batch_id, i):\n\n '''\n Evaluate the model against validation set\n :param sess: training session\n :param model_variables: all model input variables\n :param val_data: validation data\n :param summary: For tensorboard\n :param 
batch_id: where we are in the training data\n :param i: the index of the validation data to load\n :return: evaluation accuracy and the summary\n '''\n\n (user, sys_res, no_turns, user_uttr_len, sys_uttr_len, labels, domain_labels, domain_accuracy,\n slot_accuracy, value_accuracy, value_f1, train_step, keep_prob, _, _, _) = model_variables\n\n batch_user, batch_sys, batch_labels, batch_domain_labels, batch_user_uttr_len, batch_sys_uttr_len, \\\n batch_no_turns = val_data\n\n start_time = time.time()\n\n b_z = train_batch_size\n [precision, recall, value_f1] = value_f1\n [d_acc, s_acc, v_acc, f1_score, pr, re, sm1, sm2] = sess.run([domain_accuracy, slot_accuracy, value_accuracy,\n value_f1, precision, recall] + summary,\n feed_dict={user: batch_user[i:i+b_z, :, :, :],\n sys_res: batch_sys[i:i+b_z, :, :, :],\n labels: batch_labels[i:i+b_z, :, :],\n domain_labels: batch_domain_labels[i:i+b_z, :, :],\n user_uttr_len: batch_user_uttr_len[i:i+b_z, :],\n sys_uttr_len: batch_sys_uttr_len[i:i+b_z, :],\n no_turns: batch_no_turns[i:i+b_z],\n keep_prob: 1.0})\n\n print(\"Batch\", batch_id, \"[Domain Accuracy] = \", d_acc, \"[Slot Accuracy] = \", s_acc, \"[Value Accuracy] = \",\n v_acc, \"[F1 Score] = \", f1_score, \"[Precision] = \", pr, \"[Recall] = \", re,\n \" ----- \", round(time.time() - start_time, 3),\n \"seconds. ---\")\n\n return d_acc, s_acc, v_acc, f1_score, sm1, sm2\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.Tensor",
"torch.autograd.Variable"
],
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"numpy.log",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.zeros",
"torch.transpose",
"torch.mm",
"torch.sum",
"torch.eye",
"torch.multinomial",
"torch.nn.Tanh",
"torch.exp",
"torch.nn.Linear",
"torch.t",
"torch.pow"
],
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.zeros",
"numpy.asarray",
"tensorflow.reduce_sum",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.stack",
"tensorflow.equal",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.add",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.argsort",
"tensorflow.round",
"tensorflow.sequence_mask",
"numpy.sum",
"tensorflow.nn.bias_add",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.nn.softmax",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.sigmoid",
"numpy.linalg.norm",
"tensorflow.expand_dims",
"tensorflow.ones",
"numpy.fromstring",
"tensorflow.variable_scope",
"numpy.random.uniform",
"tensorflow.nn.rnn_cell.BasicRNNCell",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
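A minimal NumPy sketch of the out-of-vocabulary fallback used in the MDBT utilities above (xavier_vector plus the unit-norm step from normalise_word_vectors): hash the word to a seed, draw a Xavier-bounded uniform sample, and L2-normalise it. Only numpy is assumed; the example word is arbitrary, and since Python string hashing is salted across interpreter runs unless PYTHONHASHSEED is fixed, the vector is only deterministic within a single process (the original code shares this property).

import math
import numpy as np

def xavier_vector(word, D=300):
    # Seed NumPy from a hash of the word so the same word maps to the same
    # fallback vector (deterministic within one interpreter process).
    np.random.seed(abs(hash(word)) % (10 ** 8))
    bound = math.sqrt(6) / math.sqrt(D)               # Xavier/Glorot uniform bound
    sample = np.random.uniform(low=-bound, high=bound, size=(D,))
    return sample / np.linalg.norm(sample)            # unit L2 norm, as in normalise_word_vectors

vec = xavier_vector("turkish")                        # arbitrary example word
print(vec.shape, float(np.linalg.norm(vec)))          # (300,) ~1.0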
Ethan-Yang0101/Mini-DeepText-Project | [
"6ed70fae7d00610b942fb9b2526d11ebfd1b48f7"
] | [
"Mini-DeepText-2.0/train.py"
] | [
"\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom TextDataset import TextDataset\nfrom Model.BasicModel.TextCLRModel import TextCLRModel\nfrom Model.BasicModel.TextSLBModel import TextSLBModel\nfrom Model.BasicModel.TextNMTModel import TextNMTModel\nfrom Model.BasicModel.TextDSMModel import TextDSMModel\nfrom Model.Transformer.Transformer import Transformer\nfrom Vectorizer.CLRVectorizer import CLRVectorizer\nfrom Vectorizer.SLBVectorizer import SLBVectorizer\nfrom Vectorizer.NMTVectorizer import NMTVectorizer\nfrom Vectorizer.DSMVectorizer import DSMVectorizer\nfrom Utils.Data import read_json_dataset\nfrom ModelTrainer import ModelTrainer\nfrom Utils.Config import Config\nimport json\nimport sys\nimport os\n\n\ndef get_data_loaders(args, dataset):\n '''通过数据集创建用于训练,验证和测试的数据批生成器'''\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n if os.path.exists(args.vectorizer_file):\n parameters = {'dataset': dataset,\n 'split_ratio': args.split_ratio,\n 'max_seq_length': args.max_seq_length,\n 'task': args.task,\n 'vectorizer_file': args.vectorizer_file}\n dataset = TextDataset.dataset_load_vectorizer(**parameters)\n else:\n parameters = {'dataset': dataset,\n 'split_ratio': args.split_ratio,\n 'max_seq_length': args.max_seq_length,\n 'task': args.task,\n 'cutoff': args.cutoff}\n dataset = TextDataset.dataset_make_vectorizer(**parameters)\n dataset.save_vectorizer(args.vectorizer_file)\n dataset.set_split('train')\n train_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n dataset.set_split('val')\n val_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n dataset.set_split('test')\n test_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n data_loaders = (train_data_loader, val_data_loader, test_data_loader)\n return data_loaders\n\n\ndef get_task_model(args, vectorizer):\n '''根据任务类型获取用于训练的模型类型'''\n model = None\n if args.task == 'classification':\n if args.model_name == 'TextCLRModel':\n model = TextCLRModel(\n num_embeddings=len(vectorizer.source_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n num_classes=len(vectorizer.label_vocab),\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'labeling':\n if args.model_name == 'TextSLBModel':\n model = TextSLBModel(\n num_embeddings=len(vectorizer.source_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'matching':\n if args.model_name == 'TextDSMModel':\n model = TextDSMModel(\n num_embeddings1=len(vectorizer.source_vocab),\n num_embeddings2=len(vectorizer.target_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'translation':\n if args.model_name == 'Transformer':\n model = Transformer(\n source_vocab_size=len(vectorizer.source_vocab),\n target_vocab_size=len(vectorizer.target_vocab),\n source_embed_dim=args.source_embed_dim,\n target_embed_dim=args.target_embed_dim,\n encoder_n_heads=args.encoder_n_heads,\n decoder_n_heads=args.decoder_n_heads,\n encoder_hid_dim=args.encoder_hid_dim,\n decoder_hid_dim=args.decoder_hid_dim,\n 
encoder_n_layers=args.encoder_n_layers,\n decoder_n_layers=args.decoder_n_layers,\n encoder_max_seq_len=args.max_seq_length,\n decoder_max_seq_len=args.max_seq_length\n )\n if args.model_name == 'TextNMTModel':\n model = TextNMTModel(\n source_num_embeddings=len(vectorizer.source_vocab),\n source_embedding_size=args.source_embedding_size,\n target_num_embeddings=len(vectorizer.target_vocab),\n target_embedding_size=args.target_embedding_size,\n encoding_size=args.encoding_size)\n return model\n\n\ndef get_optimizer(args, model):\n '''获取想要使用的优化器'''\n if args.optimizer == 'adam':\n return optim.Adam(model.parameters(), lr=args.learning_rate)\n\n\ndef get_loss_func(args):\n '''根据任务类型获取损失函数'''\n if args.task == 'classification':\n return nn.CrossEntropyLoss()\n if args.task == 'matching':\n return nn.CrossEntropyLoss()\n if args.task == 'labeling':\n return sequence_loss\n if args.task == 'translation':\n return sequence_loss\n\n\ndef sequence_loss(pred, target, mask_index):\n '''用于计算序列模型的损失函数'''\n pred = pred.contiguous().view(-1, pred.size(2))\n target = target.contiguous().view(-1)\n return F.cross_entropy(pred, target, ignore_index=mask_index)\n\n\ndef get_vectorizer(args):\n '''根据任务获取矢量化器'''\n with open(args.vectorizer_file, \"r\") as fp:\n if args.task == 'classification':\n return CLRVectorizer.from_serializable(json.load(fp))\n if args.task == 'matching':\n return DSMVectorizer.from_serializable(json.load(fp))\n if args.task == 'labeling':\n return GENVectorizer.from_serializable(json.load(fp))\n if args.task == 'translation':\n return NMTVectorizer.from_serializable(json.load(fp))\n\n\nif __name__ == '__main__':\n # 获取配置文件信息\n config_filename = sys.argv[1]\n config = Config.from_config_json(config_filename)\n args = config.args\n # 获取数据集\n dataset = read_json_dataset(args.data_filepath, args.max_seq_length)\n # 获取数据批生成器\n data_loaders = get_data_loaders(args, dataset)\n # 获取模型\n vectorizer = get_vectorizer(args)\n model = get_task_model(args, vectorizer)\n # 获取优化器\n optimizer = get_optimizer(args, model)\n # 获取损失函数\n loss_func = get_loss_func(args)\n # 获取训练器\n model_trainer = ModelTrainer(\n args, data_loaders, model, optimizer, loss_func)\n # 训练模型\n model_trainer.train_val_test_model()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
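The sequence_loss helper in train.py above handles padded time steps by flattening predictions and targets and letting ignore_index drop the padding class from the averaged loss. A small PyTorch usage sketch, assuming a padding index of 0 (in the original it comes from the vectorizer's mask_index):

import torch
import torch.nn.functional as F

MASK_IDX = 0  # hypothetical padding index; the original takes it from the vectorizer

def sequence_loss(pred, target, mask_index):
    # pred: [batch, seq_len, num_classes], target: [batch, seq_len]
    pred = pred.contiguous().view(-1, pred.size(2))   # -> [batch*seq_len, num_classes]
    target = target.contiguous().view(-1)             # -> [batch*seq_len]
    # ignore_index removes padded positions from the averaged cross-entropy
    return F.cross_entropy(pred, target, ignore_index=mask_index)

logits = torch.randn(2, 5, 10)                        # toy batch: 2 sequences, 5 steps, 10 classes
labels = torch.randint(1, 10, (2, 5))
labels[:, 3:] = MASK_IDX                              # pretend the last two steps are padding
print(sequence_loss(logits, labels, MASK_IDX).item())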
Rikorose/DeepFilterNet | [
"afe6bfb53efae70207e18df7ed372c2cfe337fee"
] | [
"DeepFilterNet/df/utils.py"
] | [
"import collections\nimport math\nimport os\nimport random\nimport subprocess\nfrom socket import gethostname\nfrom typing import Any, Dict, Set, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom loguru import logger\nfrom torch import Tensor\nfrom torch._six import string_classes\nfrom torch.autograd import Function\nfrom torch.types import Number\n\nfrom df.config import config\nfrom df.model import ModelParams\n\ntry:\n from torchaudio.functional import resample as ta_resample\nexcept ImportError:\n from torchaudio.compliance.kaldi import resample_waveform as ta_resample # type: ignore\n\n\ndef get_resample_params(method: str) -> Dict[str, Any]:\n params = {\n \"sinc_fast\": {\"resampling_method\": \"sinc_interpolation\", \"lowpass_filter_width\": 16},\n \"sinc_best\": {\"resampling_method\": \"sinc_interpolation\", \"lowpass_filter_width\": 64},\n \"kaiser_fast\": {\n \"resampling_method\": \"kaiser_window\",\n \"lowpass_filter_width\": 16,\n \"rolloff\": 0.85,\n \"beta\": 8.555504641634386,\n },\n \"kaiser_best\": {\n \"resampling_method\": \"kaiser_window\",\n \"lowpass_filter_width\": 16,\n \"rolloff\": 0.9475937167399596,\n \"beta\": 14.769656459379492,\n },\n }\n assert method in params.keys(), f\"method must be one of {list(params.keys())}\"\n return params[method]\n\n\ndef resample(audio: Tensor, orig_sr: int, new_sr: int, method=\"sinc_fast\"):\n params = get_resample_params(method)\n return ta_resample(audio, orig_sr, new_sr, **params)\n\n\ndef get_device():\n s = config(\"DEVICE\", default=\"\", section=\"train\")\n if s == \"\":\n if torch.cuda.is_available():\n DEVICE = torch.device(\"cuda:0\")\n else:\n DEVICE = torch.device(\"cpu\")\n else:\n DEVICE = torch.device(s)\n return DEVICE\n\n\ndef as_complex(x: Tensor):\n if torch.is_complex(x):\n return x\n if x.shape[-1] != 2:\n raise ValueError(f\"Last dimension need to be of length 2 (re + im), but got {x.shape}\")\n if x.stride(-1) != 1:\n x = x.contiguous()\n return torch.view_as_complex(x)\n\n\ndef as_real(x: Tensor):\n if torch.is_complex(x):\n return torch.view_as_real(x)\n return x\n\n\nclass angle_re_im(Function):\n \"\"\"Similar to torch.angle but robustify the gradient for zero magnitude.\"\"\"\n\n @staticmethod\n def forward(ctx, re: Tensor, im: Tensor):\n ctx.save_for_backward(re, im)\n return torch.atan2(im, re)\n\n @staticmethod\n def backward(ctx, grad: Tensor) -> Tuple[Tensor, Tensor]:\n re, im = ctx.saved_tensors\n grad_inv = grad / (re.square() + im.square()).clamp_min_(1e-10)\n return -im * grad_inv, re * grad_inv\n\n\nclass angle(Function):\n \"\"\"Similar to torch.angle but robustify the gradient for zero magnitude.\"\"\"\n\n @staticmethod\n def forward(ctx, x: Tensor):\n ctx.save_for_backward(x)\n return torch.atan2(x.imag, x.real)\n\n @staticmethod\n def backward(ctx, grad: Tensor):\n (x,) = ctx.saved_tensors\n grad_inv = grad / (x.real.square() + x.imag.square()).clamp_min_(1e-10)\n return torch.view_as_complex(torch.stack((-x.imag * grad_inv, x.real * grad_inv), dim=-1))\n\n\ndef check_finite_module(obj, name=\"Module\", _raise=True) -> Set[str]:\n out: Set[str] = set()\n if isinstance(obj, torch.nn.Module):\n for name, child in obj.named_children():\n out = out | check_finite_module(child, name)\n for name, param in obj.named_parameters():\n out = out | check_finite_module(param, name)\n for name, buf in obj.named_buffers():\n out = out | check_finite_module(buf, name)\n if _raise and len(out) > 0:\n raise ValueError(f\"{name} not finite during checkpoint writing including: {out}\")\n return 
out\n\n\ndef make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:\n \"\"\"Transforms Tensor to numpy.\n Args:\n x: An instance of torch tensor or caffe blob name\n\n Returns:\n numpy.array: Numpy array\n \"\"\"\n if isinstance(x, np.ndarray):\n return x\n if np.isscalar(x):\n return np.array([x])\n if isinstance(x, Tensor):\n return x.detach().cpu().numpy()\n raise NotImplementedError(\n \"Got {}, but numpy array, scalar, or torch tensor are expected.\".format(type(x))\n )\n\n\ndef get_norm_alpha(log: bool = True) -> float:\n p = ModelParams()\n a_ = _calculate_norm_alpha(sr=p.sr, hop_size=p.hop_size, tau=p.norm_tau)\n precision = 3\n a = 1.0\n while a >= 1.0:\n a = round(a_, precision)\n precision += 1\n if log:\n logger.info(f\"Running with normalization window alpha = '{a}'\")\n return a\n\n\ndef _calculate_norm_alpha(sr: int, hop_size: int, tau: float):\n \"\"\"Exponential decay factor alpha for a given tau (decay window size [s]).\"\"\"\n dt = hop_size / sr\n return math.exp(-dt / tau)\n\n\ndef check_manual_seed(seed: int = None):\n \"\"\"If manual seed is not specified, choose a random one and communicate it to the user.\"\"\"\n seed = seed or random.randint(1, 10000)\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n return seed\n\n\ndef get_git_root():\n git_local_dir = os.path.dirname(os.path.abspath(__file__))\n args = [\"git\", \"-C\", git_local_dir, \"rev-parse\", \"--show-toplevel\"]\n return subprocess.check_output(args).strip().decode()\n\n\ndef get_commit_hash():\n \"\"\"Returns the current git commit.\"\"\"\n try:\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n commit = subprocess.check_output(args).strip().decode()\n except subprocess.CalledProcessError:\n # probably not in git repo\n commit = None\n return commit\n\n\ndef get_host() -> str:\n return gethostname()\n\n\ndef get_branch_name():\n try:\n git_dir = os.path.dirname(os.path.abspath(__file__))\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--abbrev-ref\", \"HEAD\"]\n branch = subprocess.check_output(args).strip().decode()\n except subprocess.CalledProcessError:\n # probably not in git repo\n branch = None\n return branch\n\n\n# from pytorch/ignite:\ndef apply_to_tensor(input_, func):\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\"\"\"\n if isinstance(input_, torch.nn.Module):\n return [apply_to_tensor(c, func) for c in input_.children()]\n elif isinstance(input_, torch.nn.Parameter):\n return func(input_.data)\n elif isinstance(input_, Tensor):\n return func(input_)\n elif isinstance(input_, string_classes):\n return input_\n elif isinstance(input_, collections.Mapping):\n return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}\n elif isinstance(input_, collections.Iterable):\n return [apply_to_tensor(sample, func) for sample in input_]\n elif input_ is None:\n return input_\n else:\n return input_\n\n\ndef detach_hidden(hidden: Any) -> Any:\n \"\"\"Cut backpropagation graph.\n Auxillary function to cut the backpropagation graph by detaching the hidden\n vector.\n \"\"\"\n return apply_to_tensor(hidden, Tensor.detach)\n"
] | [
[
"torch.view_as_real",
"numpy.random.seed",
"torch.manual_seed",
"torch.view_as_complex",
"numpy.isscalar",
"torch.cuda.is_available",
"torch.device",
"numpy.array",
"torch.is_complex",
"torch.atan2",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
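get_norm_alpha/_calculate_norm_alpha in df/utils.py above convert a decay window tau (in seconds) into an exponential smoothing coefficient per STFT hop. A one-function sketch of that formula; the sample rate, hop size, and tau below are illustrative values, not read from ModelParams:

import math

def norm_alpha(sr, hop_size, tau):
    # Each STFT hop advances hop_size / sr seconds; a running mean with time
    # constant tau seconds therefore uses alpha = exp(-dt / tau) per hop.
    dt = hop_size / sr
    return math.exp(-dt / tau)

# Illustrative settings: 48 kHz audio, 480-sample hop, 1 s decay window.
print(norm_alpha(sr=48_000, hop_size=480, tau=1.0))   # ~0.990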
iPieter/kiwi | [
"76b66872fce68873809a0dea112e2ed552ae5b63",
"76b66872fce68873809a0dea112e2ed552ae5b63"
] | [
"examples/sklearn_logistic_regression/train.py",
"examples/hyperparam/search_random.py"
] | [
"import numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\nimport kiwi\nimport kiwi.sklearn\n\nif __name__ == \"__main__\":\n X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1, 1, 0])\n lr = LogisticRegression()\n lr.fit(X, y)\n score = lr.score(X, y)\n print(\"Score: %s\" % score)\n kiwi.log_metric(\"score\", score)\n kiwi.sklearn.log_model(lr, \"model\")\n print(\"Model saved in run %s\" % kiwi.active_run().info.run_uuid)\n",
"\"\"\"\nExample of hyperparameter search in MLflow using simple random search.\n\nThe run method will evaluate random combinations of parameters in a new MLflow run.\n\nThe runs are evaluated based on validation set loss. Test set score is calculated to verify the\nresults.\n\nSeveral runs can be run in parallel.\n\"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport click\n\nimport numpy as np\n\nimport kiwi\nimport kiwi.sklearn\nimport kiwi.tracking\nimport kiwi.projects\nfrom kiwi.tracking.client import MlflowClient\n\n_inf = np.finfo(np.float64).max\n\n\[email protected](help=\"Perform grid search over train (main entry point).\")\[email protected](\"--max-runs\", type=click.INT, default=32,\n help=\"Maximum number of runs to evaluate.\")\[email protected](\"--max-p\", type=click.INT, default=1,\n help=\"Maximum number of parallel runs.\")\[email protected](\"--epochs\", type=click.INT, default=32,\n help=\"Number of epochs\")\[email protected](\"--metric\", type=click.STRING, default=\"rmse\",\n help=\"Metric to optimize on.\")\[email protected](\"--seed\", type=click.INT, default=97531,\n help=\"Seed for the random generator\")\[email protected](\"training_data\")\ndef run(training_data, max_runs, max_p, epochs, metric, seed):\n train_metric = \"train_{}\".format(metric)\n val_metric = \"val_{}\".format(metric)\n test_metric = \"test_{}\".format(metric)\n np.random.seed(seed)\n tracking_client = kiwi.tracking.MlflowClient()\n\n def new_eval(nepochs,\n experiment_id,\n null_train_loss=_inf,\n null_val_loss=_inf,\n null_test_loss=_inf):\n def eval(parms):\n lr, momentum = parms\n with kiwi.start_run(nested=True) as child_run:\n p = kiwi.projects.run(\n run_id=child_run.info.run_id,\n uri=\".\",\n entry_point=\"train\",\n parameters={\n \"training_data\": training_data,\n \"epochs\": str(nepochs),\n \"learning_rate\": str(lr),\n \"momentum\": str(momentum),\n \"seed\": str(seed)},\n experiment_id=experiment_id,\n synchronous=False)\n succeeded = p.wait()\n if succeeded:\n training_run = tracking_client.get_run(p.run_id)\n metrics = training_run.data.metrics\n # cap the loss at the loss of the null model\n train_loss = min(null_train_loss, metrics[train_metric])\n val_loss = min(null_val_loss, metrics[val_metric])\n test_loss = min(null_test_loss, metrics[test_metric])\n else:\n # run failed => return null loss\n tracking_client.set_terminated(p.run_id, \"FAILED\")\n train_loss = null_train_loss\n val_loss = null_val_loss\n test_loss = null_test_loss\n kiwi.log_metrics({\n \"train_{}\".format(metric): train_loss,\n \"val_{}\".format(metric): val_loss,\n \"test_{}\".format(metric): test_loss\n })\n return p.run_id, train_loss, val_loss, test_loss\n\n return eval\n\n with kiwi.start_run() as run:\n experiment_id = run.info.experiment_id\n _, null_train_loss, null_val_loss, null_test_loss = new_eval(0, experiment_id)((0, 0))\n runs = [(np.random.uniform(1e-5, 1e-1), np.random.uniform(0, 1.0)) for _ in range(max_runs)]\n with ThreadPoolExecutor(max_workers=max_p) as executor:\n _ = executor.map(new_eval(epochs,\n experiment_id,\n null_train_loss,\n null_val_loss,\n null_test_loss),\n runs)\n\n # find the best run, log its metrics as the final metrics of this run.\n client = MlflowClient()\n runs = client.search_runs([experiment_id],\n \"tags.mlflow.parentRunId = '{run_id}' \".format(\n run_id=run.info.run_id\n ))\n best_val_train = _inf\n best_val_valid = _inf\n best_val_test = _inf\n best_run = None\n for r in runs:\n if r.data.metrics[\"val_rmse\"] < best_val_valid:\n 
best_run = r\n best_val_train = r.data.metrics[\"train_rmse\"]\n best_val_valid = r.data.metrics[\"val_rmse\"]\n best_val_test = r.data.metrics[\"test_rmse\"]\n kiwi.set_tag(\"best_run\", best_run.info.run_id)\n kiwi.log_metrics({\n \"train_{}\".format(metric): best_val_train,\n \"val_{}\".format(metric): best_val_valid,\n \"test_{}\".format(metric): best_val_test\n })\n\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"numpy.array",
"sklearn.linear_model.LogisticRegression"
],
[
"numpy.random.uniform",
"numpy.random.seed",
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
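search_random.py above draws random (learning_rate, momentum) pairs and evaluates them in parallel with a ThreadPoolExecutor, then keeps the child run with the lowest validation loss. A self-contained sketch of that pattern, with a hypothetical analytic objective standing in for launching and polling MLflow child runs:

from concurrent.futures import ThreadPoolExecutor
import numpy as np

np.random.seed(97531)                                 # same default seed as the CLI above

def evaluate(params):
    # Hypothetical stand-in for launching a training run and reading back val_rmse.
    lr, momentum = params
    return lr, momentum, (lr - 1e-3) ** 2 + (momentum - 0.9) ** 2

candidates = [(np.random.uniform(1e-5, 1e-1), np.random.uniform(0.0, 1.0))
              for _ in range(8)]                      # random (learning_rate, momentum) pairs

with ThreadPoolExecutor(max_workers=4) as pool:       # evaluate up to 4 candidates at once
    results = list(pool.map(evaluate, candidates))

best = min(results, key=lambda r: r[2])               # keep the lowest "validation loss"
print("best lr=%.5f momentum=%.3f loss=%.6f" % best)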
cf-vrgl/pandas | [
"0b68d87a4438a13f14a2ed5af2e432df02eb0b2c"
] | [
"pandas/core/computation/pytables.py"
] | [
"\"\"\" manage PyTables query interface via Expressions \"\"\"\nfrom __future__ import annotations\n\nimport ast\nfrom functools import partial\nfrom typing import Any\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n Timedelta,\n Timestamp,\n)\nfrom pandas.compat.chainmap import DeepChainMap\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas.core.common as com\nfrom pandas.core.computation import (\n expr,\n ops,\n scope as _scope,\n)\nfrom pandas.core.computation.common import ensure_decoded\nfrom pandas.core.computation.expr import BaseExprVisitor\nfrom pandas.core.computation.ops import (\n UndefinedVariableError,\n is_term,\n)\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.base import Index\n\nfrom pandas.io.formats.printing import (\n pprint_thing,\n pprint_thing_encoded,\n)\n\n\nclass PyTablesScope(_scope.Scope):\n __slots__ = (\"queryables\",)\n\n queryables: dict[str, Any]\n\n def __init__(\n self,\n level: int,\n global_dict=None,\n local_dict=None,\n queryables: dict[str, Any] | None = None,\n ):\n super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)\n self.queryables = queryables or {}\n\n\nclass Term(ops.Term):\n env: PyTablesScope\n\n def __new__(cls, name, env, side=None, encoding=None):\n if isinstance(name, str):\n klass = cls\n else:\n klass = Constant\n return object.__new__(klass)\n\n def __init__(self, name, env: PyTablesScope, side=None, encoding=None):\n super().__init__(name, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n # must be a queryables\n if self.side == \"left\":\n # Note: The behavior of __new__ ensures that self.name is a str here\n if self.name not in self.env.queryables:\n raise NameError(f\"name {repr(self.name)} is not defined\")\n return self.name\n\n # resolve the rhs (and allow it to be None)\n try:\n return self.env.resolve(self.name, is_local=False)\n except UndefinedVariableError:\n return self.name\n\n # read-only property overwriting read/write property\n @property # type: ignore[misc]\n def value(self):\n return self._value\n\n\nclass Constant(Term):\n def __init__(self, value, env: PyTablesScope, side=None, encoding=None):\n assert isinstance(env, PyTablesScope), type(env)\n super().__init__(value, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n return self._name\n\n\nclass BinOp(ops.BinOp):\n\n _max_selectors = 31\n\n op: str\n queryables: dict[str, Any]\n condition: str | None\n\n def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):\n super().__init__(op, lhs, rhs)\n self.queryables = queryables\n self.encoding = encoding\n self.condition = None\n\n def _disallow_scalar_only_bool_ops(self):\n pass\n\n def prune(self, klass):\n def pr(left, right):\n \"\"\"create and return a new specialized BinOp from myself\"\"\"\n if left is None:\n return right\n elif right is None:\n return left\n\n k = klass\n if isinstance(left, ConditionBinOp):\n if isinstance(right, ConditionBinOp):\n k = JointConditionBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n elif isinstance(left, FilterBinOp):\n if isinstance(right, FilterBinOp):\n k = JointFilterBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n return k(\n self.op, left, right, queryables=self.queryables, encoding=self.encoding\n ).evaluate()\n\n left, right = self.lhs, self.rhs\n\n if is_term(left) and is_term(right):\n res = pr(left.value, right.value)\n elif not 
is_term(left) and is_term(right):\n res = pr(left.prune(klass), right.value)\n elif is_term(left) and not is_term(right):\n res = pr(left.value, right.prune(klass))\n elif not (is_term(left) or is_term(right)):\n res = pr(left.prune(klass), right.prune(klass))\n\n return res\n\n def conform(self, rhs):\n \"\"\"inplace conform rhs\"\"\"\n if not is_list_like(rhs):\n rhs = [rhs]\n if isinstance(rhs, np.ndarray):\n rhs = rhs.ravel()\n return rhs\n\n @property\n def is_valid(self) -> bool:\n \"\"\"return True if this is a valid field\"\"\"\n return self.lhs in self.queryables\n\n @property\n def is_in_table(self) -> bool:\n \"\"\"\n return True if this is a valid column name for generation (e.g. an\n actual column in the table)\n \"\"\"\n return self.queryables.get(self.lhs) is not None\n\n @property\n def kind(self):\n \"\"\"the kind of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"kind\", None)\n\n @property\n def meta(self):\n \"\"\"the meta of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"meta\", None)\n\n @property\n def metadata(self):\n \"\"\"the metadata of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"metadata\", None)\n\n def generate(self, v) -> str:\n \"\"\"create and return the op string for this TermValue\"\"\"\n val = v.tostring(self.encoding)\n return f\"({self.lhs} {self.op} {val})\"\n\n def convert_value(self, v) -> TermValue:\n \"\"\"\n convert the expression that is in the term to something that is\n accepted by pytables\n \"\"\"\n\n def stringify(value):\n if self.encoding is not None:\n return pprint_thing_encoded(value, encoding=self.encoding)\n return pprint_thing(value)\n\n kind = ensure_decoded(self.kind)\n meta = ensure_decoded(self.meta)\n if kind == \"datetime64\" or kind == \"datetime\":\n if isinstance(v, (int, float)):\n v = stringify(v)\n v = ensure_decoded(v)\n v = Timestamp(v)\n if v.tz is not None:\n v = v.tz_convert(\"UTC\")\n return TermValue(v, v.value, kind)\n elif kind == \"timedelta64\" or kind == \"timedelta\":\n if isinstance(v, str):\n v = Timedelta(v).value\n else:\n v = Timedelta(v, unit=\"s\").value\n return TermValue(int(v), v, kind)\n elif meta == \"category\":\n metadata = extract_array(self.metadata, extract_numpy=True)\n if v not in metadata:\n result = -1\n else:\n # error: Incompatible types in assignment (expression has type\n # \"Union[Any, ndarray]\", variable has type \"int\")\n result = metadata.searchsorted( # type: ignore[assignment]\n v, side=\"left\"\n )\n return TermValue(result, result, \"integer\")\n elif kind == \"integer\":\n v = int(float(v))\n return TermValue(v, v, kind)\n elif kind == \"float\":\n v = float(v)\n return TermValue(v, v, kind)\n elif kind == \"bool\":\n if isinstance(v, str):\n v = not v.strip().lower() in [\n \"false\",\n \"f\",\n \"no\",\n \"n\",\n \"none\",\n \"0\",\n \"[]\",\n \"{}\",\n \"\",\n ]\n else:\n v = bool(v)\n return TermValue(v, v, kind)\n elif isinstance(v, str):\n # string quoting\n return TermValue(v, stringify(v), \"string\")\n else:\n raise TypeError(f\"Cannot compare {v} of type {type(v)} to {kind} column\")\n\n def convert_values(self):\n pass\n\n\nclass FilterBinOp(BinOp):\n filter: tuple[Any, Any, Index] | None = None\n\n def __repr__(self) -> str:\n if self.filter is None:\n return \"Filter: Not Initialized\"\n return pprint_thing(f\"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]\")\n\n def invert(self):\n \"\"\"invert the filter\"\"\"\n if self.filter is not None:\n self.filter = (\n self.filter[0],\n 
self.generate_filter_op(invert=True),\n self.filter[2],\n )\n return self\n\n def format(self):\n \"\"\"return the actual filter format\"\"\"\n return [self.filter]\n\n def evaluate(self):\n\n if not self.is_valid:\n raise ValueError(f\"query term is not valid [{self}]\")\n\n rhs = self.conform(self.rhs)\n values = list(rhs)\n\n if self.is_in_table:\n\n # if too many values to create the expression, use a filter instead\n if self.op in [\"==\", \"!=\"] and len(values) > self._max_selectors:\n\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n return self\n return None\n\n # equality conditions\n if self.op in [\"==\", \"!=\"]:\n\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n else:\n raise TypeError(\n f\"passing a filterable condition to a non-table indexer [{self}]\"\n )\n\n return self\n\n def generate_filter_op(self, invert: bool = False):\n if (self.op == \"!=\" and not invert) or (self.op == \"==\" and invert):\n return lambda axis, vals: ~axis.isin(vals)\n else:\n return lambda axis, vals: axis.isin(vals)\n\n\nclass JointFilterBinOp(FilterBinOp):\n def format(self):\n raise NotImplementedError(\"unable to collapse Joint Filters\")\n\n def evaluate(self):\n return self\n\n\nclass ConditionBinOp(BinOp):\n def __repr__(self) -> str:\n return pprint_thing(f\"[Condition : [{self.condition}]]\")\n\n def invert(self):\n \"\"\"invert the condition\"\"\"\n # if self.condition is not None:\n # self.condition = \"~(%s)\" % self.condition\n # return self\n raise NotImplementedError(\n \"cannot use an invert condition when passing to numexpr\"\n )\n\n def format(self):\n \"\"\"return the actual ne format\"\"\"\n return self.condition\n\n def evaluate(self):\n\n if not self.is_valid:\n raise ValueError(f\"query term is not valid [{self}]\")\n\n # convert values if we are in the table\n if not self.is_in_table:\n return None\n\n rhs = self.conform(self.rhs)\n values = [self.convert_value(v) for v in rhs]\n\n # equality conditions\n if self.op in [\"==\", \"!=\"]:\n\n # too many values to create the expression?\n if len(values) <= self._max_selectors:\n vs = [self.generate(v) for v in values]\n self.condition = f\"({' | '.join(vs)})\"\n\n # use a filter after reading\n else:\n return None\n else:\n self.condition = self.generate(values[0])\n\n return self\n\n\nclass JointConditionBinOp(ConditionBinOp):\n def evaluate(self):\n self.condition = f\"({self.lhs.condition} {self.op} {self.rhs.condition})\"\n return self\n\n\nclass UnaryOp(ops.UnaryOp):\n def prune(self, klass):\n\n if self.op != \"~\":\n raise NotImplementedError(\"UnaryOp only support invert type ops\")\n\n operand = self.operand\n operand = operand.prune(klass)\n\n if operand is not None and (\n issubclass(klass, ConditionBinOp)\n and operand.condition is not None\n or not issubclass(klass, ConditionBinOp)\n and issubclass(klass, FilterBinOp)\n and operand.filter is not None\n ):\n return operand.invert()\n return None\n\n\nclass PyTablesExprVisitor(BaseExprVisitor):\n const_type = Constant\n term_type = Term\n\n def __init__(self, env, engine, parser, **kwargs):\n super().__init__(env, engine, parser)\n for bin_op in self.binary_ops:\n bin_node = self.binary_op_nodes_map[bin_op]\n setattr(\n self,\n f\"visit_{bin_node}\",\n lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),\n )\n\n def visit_UnaryOp(self, node, **kwargs):\n if isinstance(node.op, (ast.Not, ast.Invert)):\n return UnaryOp(\"~\", self.visit(node.operand))\n elif 
isinstance(node.op, ast.USub):\n return self.const_type(-self.visit(node.operand).value, self.env)\n elif isinstance(node.op, ast.UAdd):\n raise NotImplementedError(\"Unary addition not supported\")\n\n def visit_Index(self, node, **kwargs):\n return self.visit(node.value).value\n\n def visit_Assign(self, node, **kwargs):\n cmpr = ast.Compare(\n ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]\n )\n return self.visit(cmpr)\n\n def visit_Subscript(self, node, **kwargs):\n # only allow simple subscripts\n\n value = self.visit(node.value)\n slobj = self.visit(node.slice)\n try:\n value = value.value\n except AttributeError:\n pass\n\n if isinstance(slobj, Term):\n # In py39 np.ndarray lookups with Term containing int raise\n slobj = slobj.value\n\n try:\n return self.const_type(value[slobj], self.env)\n except TypeError as err:\n raise ValueError(\n f\"cannot subscript {repr(value)} with {repr(slobj)}\"\n ) from err\n\n def visit_Attribute(self, node, **kwargs):\n attr = node.attr\n value = node.value\n\n ctx = type(node.ctx)\n if ctx == ast.Load:\n # resolve the value\n resolved = self.visit(value)\n\n # try to get the value to see if we are another expression\n try:\n resolved = resolved.value\n except (AttributeError):\n pass\n\n try:\n return self.term_type(getattr(resolved, attr), self.env)\n except AttributeError:\n\n # something like datetime.datetime where scope is overridden\n if isinstance(value, ast.Name) and value.id == attr:\n return resolved\n\n raise ValueError(f\"Invalid Attribute context {ctx.__name__}\")\n\n def translate_In(self, op):\n return ast.Eq() if isinstance(op, ast.In) else op\n\n def _rewrite_membership_op(self, node, left, right):\n return self.visit(node.op), node.op, left, right\n\n\ndef _validate_where(w):\n \"\"\"\n Validate that the where statement is of the right type.\n\n The type may either be String, Expr, or list-like of Exprs.\n\n Parameters\n ----------\n w : String term expression, Expr, or list-like of Exprs.\n\n Returns\n -------\n where : The original where clause if the check was successful.\n\n Raises\n ------\n TypeError : An invalid data type was passed in for w (e.g. 
dict).\n \"\"\"\n if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):\n raise TypeError(\n \"where must be passed as a string, PyTablesExpr, \"\n \"or list-like of PyTablesExpr\"\n )\n\n return w\n\n\nclass PyTablesExpr(expr.Expr):\n \"\"\"\n Hold a pytables-like expression, comprised of possibly multiple 'terms'.\n\n Parameters\n ----------\n where : string term expression, PyTablesExpr, or list-like of PyTablesExprs\n queryables : a \"kinds\" map (dict of column name -> kind), or None if column\n is non-indexable\n encoding : an encoding that will encode the query terms\n\n Returns\n -------\n a PyTablesExpr object\n\n Examples\n --------\n 'index>=date'\n \"columns=['A', 'D']\"\n 'columns=A'\n 'columns==A'\n \"~(columns=['A','B'])\"\n 'index>df.index[3] & string=\"bar\"'\n '(index>df.index[3] & index<=df.index[6]) | string=\"bar\"'\n \"ts>=Timestamp('2012-02-01')\"\n \"major_axis>=20130101\"\n \"\"\"\n\n _visitor: PyTablesExprVisitor | None\n env: PyTablesScope\n expr: str\n\n def __init__(\n self,\n where,\n queryables: dict[str, Any] | None = None,\n encoding=None,\n scope_level: int = 0,\n ):\n\n where = _validate_where(where)\n\n self.encoding = encoding\n self.condition = None\n self.filter = None\n self.terms = None\n self._visitor = None\n\n # capture the environment if needed\n local_dict: DeepChainMap[Any, Any] = DeepChainMap()\n\n if isinstance(where, PyTablesExpr):\n local_dict = where.env.scope\n _where = where.expr\n\n elif is_list_like(where):\n where = list(where)\n for idx, w in enumerate(where):\n if isinstance(w, PyTablesExpr):\n local_dict = w.env.scope\n else:\n w = _validate_where(w)\n where[idx] = w\n _where = \" & \".join(f\"({w})\" for w in com.flatten(where))\n else:\n # _validate_where ensures we otherwise have a string\n _where = where\n\n self.expr = _where\n self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)\n\n if queryables is not None and isinstance(self.expr, str):\n self.env.queryables.update(queryables)\n self._visitor = PyTablesExprVisitor(\n self.env,\n queryables=queryables,\n parser=\"pytables\",\n engine=\"pytables\",\n encoding=encoding,\n )\n self.terms = self.parse()\n\n def __repr__(self) -> str:\n if self.terms is not None:\n return pprint_thing(self.terms)\n return pprint_thing(self.expr)\n\n def evaluate(self):\n \"\"\"create and return the numexpr condition and filter\"\"\"\n try:\n self.condition = self.terms.prune(ConditionBinOp)\n except AttributeError as err:\n raise ValueError(\n f\"cannot process expression [{self.expr}], [{self}] \"\n \"is not a valid condition\"\n ) from err\n try:\n self.filter = self.terms.prune(FilterBinOp)\n except AttributeError as err:\n raise ValueError(\n f\"cannot process expression [{self.expr}], [{self}] \"\n \"is not a valid filter\"\n ) from err\n\n return self.condition, self.filter\n\n\nclass TermValue:\n \"\"\"hold a term value the we use to construct a condition/filter\"\"\"\n\n def __init__(self, value, converted, kind: str):\n assert isinstance(kind, str), kind\n self.value = value\n self.converted = converted\n self.kind = kind\n\n def tostring(self, encoding) -> str:\n \"\"\"quote the string if not encoded else encode and return\"\"\"\n if self.kind == \"string\":\n if encoding is not None:\n return str(self.converted)\n return f'\"{self.converted}\"'\n elif self.kind == \"float\":\n # python 2 str(float) is not always\n # round-trippable so use repr()\n return repr(self.converted)\n return str(self.converted)\n\n\ndef maybe_expression(s) -> bool:\n \"\"\"loose 
checking if s is a pytables-acceptable expression\"\"\"\n if not isinstance(s, str):\n return False\n ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + (\"=\",)\n\n # make sure we have an op at least\n return any(op in s for op in ops)\n"
] | [
[
"pandas.core.common.flatten",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.tslibs.Timedelta",
"pandas.compat.chainmap.DeepChainMap",
"pandas._libs.tslibs.Timestamp",
"pandas.core.indexes.base.Index",
"pandas.core.computation.ops.is_term",
"pandas.io.formats.printing.pprint_thing_encoded",
"pandas.core.computation.common.ensure_decoded",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.construction.extract_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
goodfree/ActorCloud | [
"9c34b371c23464981323ef9865d9913bde1fe09c"
] | [
"server/app/services/tasks_scheduler/async_tasks/app/excels/devices_import.py"
] | [
"import json\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import Dict, AnyStr\n\nimport pandas as pd\n\nfrom actor_libs.database.async_db import db\nfrom actor_libs.tasks.backend import update_task\nfrom actor_libs.tasks.exceptions import TaskException\nfrom actor_libs.utils import generate_uuid\nfrom ._utils import pg_to_excel\nfrom ._utils import read_excel\nfrom .multi_language import (\n ImportStatus, STATUS_MESSAGE, IMPORT_RENAME_ZH, IMPORT_ERROR_RENAME\n)\nfrom .sql_statements import (\n device_import_sql, dict_code_sql,\n query_tenant_devices_limit_sql,\n)\nfrom .validate import validates_schema\nfrom ..config import project_config\n\n\n__all__ = ['devices_import_task']\n\n\nlogger = logging.getLogger(__name__)\n\n\nasync def devices_import_task(request_dict):\n \"\"\"\n {'taskID', 'language', 'filePath', 'tenantID', 'userIntID'}\n \"\"\"\n\n task_id = request_dict['taskID']\n await _update_task_progress(\n task_id, status=2, progress=10,\n import_status=ImportStatus.UPLOADED\n )\n dict_code = await get_dict_code(request_dict['language'])\n import_records = await read_devices_excels(\n request_dict, dict_code\n )\n if not import_records:\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=15, import_status=ImportStatus.FAILED\n )\n raise TaskException(code=500, error_code='FAILED')\n correct_records, error_records = await handle_import_records(\n import_records, request_dict\n )\n correct_num, error_nums = len(correct_records), len(error_records)\n result_info = {\n 'success': correct_num,\n 'failed': error_nums\n }\n if correct_num > 0:\n await _import_correct_rows(correct_records, correct_num, request_dict)\n if error_records:\n try:\n export_path = await _export_error_rows(\n error_records, dict_code, request_dict\n )\n result_info['excelPath'] = export_path\n except Exception as e:\n logger.error(f\"error_records: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=3,\n progress=100, import_status=ImportStatus.COMPLETED,\n result=result_info,\n )\n\n\nasync def get_dict_code(language: AnyStr) -> Dict:\n dict_code = {}\n query_dict_code = await db.fetch_many(\n dict_code_sql.format(language=language)\n )\n for item in query_dict_code:\n # {code:{label:value}...}\n dict_code[item[0]] = dict(zip(item[2], item[1]))\n return dict_code\n\n\nasync def read_devices_excels(request_dict: Dict, dict_code):\n try:\n rename_dict = IMPORT_RENAME_ZH if request_dict['language'] != 'en' else None\n data_frame = await read_excel(\n request_dict['filePath'], rename_dict=rename_dict,\n replace_dict=dict_code\n )\n data_frame = await _handle_data_frame(data_frame)\n import_records = data_frame.to_dict('records')\n await _update_task_progress(\n request_dict['taskID'], status=2,\n progress=30, import_status=ImportStatus.READING\n )\n except Exception as e:\n logger.error(f\"read_devices_excels: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=35, import_status=ImportStatus.TEMPLATE_ERROR\n )\n raise TaskException(code=500, error_code='TEMPLATE_ERROR')\n return import_records\n\n\nasync def _handle_data_frame(data_frame):\n cover_float = ['longitude', 'latitude']\n data_frame[cover_float] = data_frame[cover_float].astype(float)\n # nan -> None\n data_frame = data_frame.where((pd.notnull(data_frame)), None)\n return data_frame\n\n\nasync def handle_import_records(import_records, request_dict):\n # use schema to validate imported data\n\n correct_records = []\n 
correct_record_append = correct_records.append\n error_records = []\n error_record_append = error_records.append\n try:\n validated_result = await validates_schema(\n import_records, request_dict\n )\n await _update_task_progress(\n request_dict['taskID'], status=2, progress=50,\n import_status=ImportStatus.VALIDATING\n )\n except Exception as e:\n logger.error(f\"validates_schema: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4, progress=55,\n import_status=ImportStatus.ABNORMAL\n )\n raise TaskException(code=500, error_code='ABNORMAL')\n rows_error_msg, devices_attr_info = validated_result\n products_info = devices_attr_info['products_info']\n gateways_info = devices_attr_info['gateways_info']\n\n for row, record in enumerate(import_records):\n if rows_error_msg.get(row):\n record.update(rows_error_msg[row])\n error_record_append(record)\n else:\n product_name = record['product']\n gateway_name = record['gateway']\n if products_info.get(product_name):\n record['productID'] = products_info[product_name]['productID']\n record['cloudProtocol'] = products_info[product_name]['cloudProtocol']\n if gateways_info.get(gateway_name):\n record['gateway'] = gateways_info[gateway_name]['id']\n record = await set_device_default_value(record)\n correct_record_append(record)\n return correct_records, error_records\n\n\nasync def _import_correct_rows(correct_records, correct_num, request_dict):\n is_exceed_limit = await _check_devices_limit(correct_num, request_dict)\n if is_exceed_limit:\n await _update_task_progress(\n request_dict['taskID'], status=4, progress=70,\n import_status=ImportStatus.LIMITED\n )\n raise TaskException(code=500, error_code='LIMITED')\n try:\n await _insert_correct_rows(correct_records, request_dict)\n await _update_task_progress(\n request_dict['taskID'], status=2,\n progress=80, import_status=ImportStatus.IMPORTING\n )\n except Exception as e:\n logger.error(f\"_import_correct_rows: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=85, import_status=ImportStatus.FAILED\n )\n raise TaskException(code=500, error_code='FAILED')\n\n\nasync def _check_devices_limit(correct_num, request_dict) -> bool:\n \"\"\"\n Check if the device limit is exceeded\n :return True if exceed limit otherwise False\n \"\"\"\n\n check_status = False\n query_sql = query_tenant_devices_limit_sql.format(\n tenantID=request_dict['tenantID']\n )\n query_result = await db.fetch_row(query_sql)\n if query_result:\n device_sum, devices_limit = query_result\n if device_sum + correct_num > devices_limit:\n check_status = True\n return check_status\n\n\nasync def _insert_correct_rows(correct_records, request_dict):\n default_columns = [\n \"createAt\", \"deviceName\", \"deviceType\", \"productID\",\n \"authType\", \"upLinkNetwork\", \"deviceID\", \"deviceUsername\", \"token\",\n \"location\", \"latitude\", \"longitude\",\n \"manufacturer\", \"serialNumber\", \"softVersion\", \"hardwareVersion\",\n \"deviceConsoleIP\", \"deviceConsoleUsername\", \"deviceConsolePort\",\n \"mac\", \"upLinkSystem\", \"gateway\", \"parentDevice\",\n \"loraData\", \"lwm2mData\", \"userIntID\", \"tenantID\"\n ]\n create_at = datetime.now()\n async with db.pool.acquire() as conn:\n async with conn.transaction():\n for record in correct_records:\n record['createAt'] = create_at\n record['userIntID'] = request_dict['userIntID']\n record['tenantID'] = request_dict['tenantID']\n miss_columns = set(default_columns) - set(record.keys())\n record.update({c: None for c in miss_columns})\n 
execute_sql = device_import_sql.format(**record)\n execute_sql = execute_sql.replace(\"'None'\", \"NULL\")\n execute_sql = execute_sql.replace(\"'NULL'\", \"NULL\")\n await conn.execute(execute_sql)\n\n\nasync def _export_error_rows(errors_rows, dict_code, request_dict):\n \"\"\" Export processing failure data to excel \"\"\"\n\n column_sort = list(IMPORT_ERROR_RENAME.keys())\n error_dict_code = defaultdict(dict)\n for code, code_value in dict_code.items():\n for code_k, code_v in code_value.items():\n error_dict_code[code][code_v] = code_k\n data_frame = pd.DataFrame(errors_rows)\n data_frame = data_frame[column_sort].replace(error_dict_code)\n if request_dict['language'] != 'en':\n data_frame = data_frame.rename(columns=IMPORT_ERROR_RENAME)\n state_dict = await pg_to_excel(\n export_path=project_config.get('EXPORT_EXCEL_PATH'),\n table_name='ErrorImportDevicesW5',\n export_data=data_frame,\n tenant_uid=request_dict['tenantID'])\n export_path = state_dict.get('excelPath')\n return export_path\n\n\nasync def set_device_default_value(device_info):\n if device_info.get('upLinkSystem') != 3:\n device_info['gateway'] = None\n if device_info.get('upLinkSystem') == 3 and not device_info.get('gateway'):\n device_info['upLinkSystem'] = 1\n device_info['gateway'] = None\n if device_info.get('cloudProtocol') == 3:\n # lwm2m protocol\n if device_info.get('deviceID'):\n imei = device_info['deviceID']\n else:\n imei = generate_uuid(size=15)\n device_info['deviceID'] = imei\n lwm2m_data = {\n 'autoSub': 0,\n 'IMEI': imei,\n 'IMSI': imei\n }\n device_info['lwm2mData'] = json.dumps(lwm2m_data)\n if not device_info.get('deviceID'):\n device_info['deviceID'] = generate_uuid()\n if not device_info.get('deviceUsername'):\n device_info['deviceUsername'] = generate_uuid()\n if not device_info.get('token'):\n device_info['token'] = device_info['deviceUsername']\n if not device_info.get('token'):\n device_info['token'] = device_info['deviceUsername']\n device_info['upLinkNetwork'] = 1\n device_info['deviceType'] = 1 # end_devices\n return device_info\n\n\nasync def _update_task_progress(task_id,\n *,\n status=None,\n progress=None,\n import_status=None,\n result=None):\n if not result:\n result = {}\n result['message'] = STATUS_MESSAGE.get(import_status)\n result['code'] = import_status.value\n update_dict = {\n 'status': status,\n 'progress': progress,\n 'result': result,\n 'taskID': task_id\n }\n await update_task(task_id, update_dict)\n return result\n"
] | [
[
"pandas.notnull",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
micka59200/Python-Baseball | [
"dda463b1ba49e70dab676d1d3e57edc8238d0df6"
] | [
"stats/defense.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom frames import games, info, events\n\nplays = games.query(\"type == 'play' & event != 'NP'\")\nplays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']\n\npa = plays.loc[plays['player'].shift() != plays['player'], ['year', 'game_id', 'inning', 'team', 'player']]\npa = pa.groupby(['year', 'game_id', 'team']).size().reset_index(name='PA')\n\nevents = events.set_index(['year', 'game_id', 'team', 'event_type'])\nevents = events.unstack().fillna(0).reset_index()\nevents.columns = events.columns.droplevel()\nevents.columns = ['year', 'game_id', 'team', 'BB', 'E', 'H', 'HBP', 'HR', 'ROE', 'SO']\nevents = events.rename_axis(None, axis='columns')\nevents_plus_pa = pd.merge(events, pa, how='outer', left_on=['year', 'game_id', 'team'], right_on=['year', 'game_id', 'team'])\ndefense = pd.merge(events_plus_pa, info)\ndefense.loc[:, 'DER'] = 1 - ((defense['H'] + defense['ROE']) / (defense['PA'] - defense['BB'] -defense['SO'] - defense['HBP'] - defense['HR']))\ndefense.loc[:, 'year'] = pd.to_numeric(defense['year'])\nder = defense.loc[defense['year'] >= 1978, ['year', 'defense', 'DER']]\n\nder = der.pivot(index='year', columns='defense', values='DER')\nder.plot(x_compat=True, xticks=range(1978, 2018, 4), rot=45)\n\nplt.show()"
] | [
[
"pandas.merge",
"matplotlib.pyplot.show",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
rpachauri/connect4 | [
"6caf6965afaaff6883193ac295c6ac5b1f4e9c4a",
"6caf6965afaaff6883193ac295c6ac5b1f4e9c4a"
] | [
"connect_four/evaluation/incremental_victor/graph/graph_manager_add_solution_profile.py",
"connect_four/evaluation/victor/evaluator/evaluator_profile.py"
] | [
"import cProfile\n\nimport gym\n\nimport numpy as np\n\nfrom connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager\nfrom connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager\nfrom connect_four.problem import ConnectFourGroupManager\n\nenv = gym.make('connect_four-v0')\nenv.state = np.array([\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 0, 0, 0, ],\n ],\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 1, 0, 0, ],\n [0, 0, 0, 0, 1, 0, 0, ],\n ],\n])\n\n# noinspection SpellCheckingInspection\ncfgm = ConnectFourGroupManager(env_variables=env.env_variables)\nvsm = VictorSolutionManager(env_variables=env.env_variables)\n\nplayer, row, col = 0, 5, 0\n\ngm = GraphManager(player=player, problem_manager=cfgm, solution_manager=vsm)\n\n_, removed_problems = cfgm.move(player=player, row=row, col=col)\nfor problem in removed_problems:\n gm._remove_problem(problem)\n\nremoved_solutions, added_solutions = vsm.move(player=player, row=row, col=col)\nprint(\"len(removed_solutions) = \", len(removed_solutions))\nprint(\"len(added_solutions) = \", len(added_solutions))\n# print(\"number of useful solutions =\", len(self.solution_to_solutions))\nfor solution in removed_solutions:\n gm._remove_solution(solution)\nprint(\"number of solutions that remained =\", len(gm.solution_to_solutions))\n\n\ndef add_solutions():\n for solution in added_solutions:\n gm._add_solution(solution)\n\n print(\"number of solutions after adding =\", len(gm.solution_to_solutions))\n\n\ncProfile.run(\n 'add_solutions()',\n sort=\"cumtime\",\n)\n",
"import gym\nimport numpy as np\n\nimport cProfile\n\nfrom connect_four.evaluation.board import Board\nfrom connect_four.envs.connect_four_env import ConnectFourEnv\n\nenv = gym.make('connect_four-v0')\nConnectFourEnv.M = 6\nConnectFourEnv.N = 7\n\n# The empty 6x7 board has no solution set for Black because White is guaranteed to win.\nenv.state = np.array([\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n ],\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n ],\n])\nboard = Board(env.env_variables)\n\ncProfile.run('evaluator.evaluate(board=board)', sort=\"cumtime\")\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thomasly/slgnn | [
"caa1e7814498da41ad025b4e62c569fe511848ff",
"caa1e7814498da41ad025b4e62c569fe511848ff"
] | [
"slgnn/data_processing/jakfp_dataset.py",
"slgnn/training/train_gin_with_zinc_pretrained_model_covid19_datasets.py"
] | [
"import os\n\nimport pandas as pd\nfrom chemreader.writers import GraphWriter\nfrom chemreader.readers import Smiles\nfrom rdkit.Chem import MolFromSmiles\nfrom slgnn.models.gcn.utils import get_filtered_fingerprint\nfrom tqdm import tqdm\n\n\ndef _is_active(value):\n if value < 1000:\n return 1\n elif value >= 10000:\n return -1\n else:\n return 0\n\n\ndef filter_(path):\n \"\"\" Filter JAK dataset\n \"\"\"\n jak = pd.read_csv(path)\n jak.dropna(subset=[\"Standard Relation\", \"Standard Value\"], inplace=True)\n not_eq = jak[\"Standard Relation\"] != \"'='\"\n lt_10um = jak[\"Standard Value\"] < 100000\n filtered = jak.drop(jak.loc[not_eq & lt_10um].index)\n gt = jak[\"Standard Relation\"] == \"'>'\"\n eq_1um = jak[\"Standard Value\"] >= 1000\n add_back = jak.loc[gt & eq_1um]\n filtered = filtered.append(add_back)\n filtered[\"Activity\"] = filtered[\"Standard Value\"].apply(_is_active)\n out_path = os.path.join(os.path.dirname(path), \"filtered_\" + os.path.basename(path))\n filtered[[\"Smiles\", \"Activity\"]].to_csv(out_path)\n\n\ndef write_graphs(inpath, outpath, prefix=None):\n \"\"\" Convert JAK dataset to graphs\n \"\"\"\n smiles = list()\n fps = list()\n pb = tqdm()\n with open(inpath, \"r\") as inf:\n line = inf.readline()\n while line:\n _, sm, _ = line.strip().split(\",\")\n if MolFromSmiles(sm) is None:\n line = inf.readline()\n continue\n smiles.append(Smiles(sm))\n fps.append(\",\".join(map(str, get_filtered_fingerprint(sm))))\n pb.update(1)\n line = inf.readline()\n writer = GraphWriter(smiles)\n writer.write(outpath, prefix=prefix, graph_labels=fps)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--path\", help=\"Path to the JAK file\")\n args = parser.parse_args()\n filter_(args.path)\n inpath = os.path.join(\n os.path.dirname(args.path), \"filtered_\" + os.path.basename(args.path)\n )\n pre = os.path.basename(args.path).split(\".\")[0] + \"FP\"\n write_graphs(inpath, os.path.join(os.path.dirname(args.path), \"graphs\"), prefix=pre)\n",
"\"\"\" Train GIN model with or without pretrained parameters.\n\"\"\"\nimport os\nimport os.path as osp\nfrom datetime import datetime\nimport random\nimport logging\n\nimport torch\nimport yaml\n\nfrom slgnn.configs.base import Grid, Config\nfrom slgnn.configs.arg_parsers import ModelTrainingArgs\nfrom slgnn.data_processing.covid19_datasets import (\n Amu,\n AmuFP,\n Ellinger,\n EllingerFP,\n Mpro,\n MproFP,\n)\nfrom slgnn.data_processing.utils import AtomFeaturesOneHotTransformer\nfrom slgnn.data_processing.loaders import OversamplingSplitter\nfrom slgnn.models.decoder.model import GINDecoder\nfrom .trainers import EncoderDecoderTrainer, EncoderClassifierTrainer\n\n\nif __name__ == \"__main__\":\n args = ModelTrainingArgs().parse_args()\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n time_stamp = datetime.now().strftime(r\"%Y%m%d_%H%M%S\")\n\n exp_datasets = {\n \"Amu\": [Amu, AmuFP],\n \"Ellinger\": [Ellinger, EllingerFP],\n \"Mpro\": [Mpro, MproFP],\n }\n random_seeds = [0, 5, 193, 84234, 839574]\n # random_seeds = [0, 1]\n pretrained_model = os.path.join(args.pretrained_model)\n\n for ds_name, ds in exp_datasets.items():\n Dataset = ds[0]\n FPDataset = ds[1]\n config_grid = Grid(os.path.join(\"model_configs\", ds_name + \".yml\"))\n for config_idx, config_dict in enumerate(config_grid):\n for seed_idx, seed in enumerate(random_seeds):\n config = Config.from_dict(config_dict)\n torch.manual_seed(seed)\n random.seed(seed)\n dataset = FPDataset(transform=AtomFeaturesOneHotTransformer())\n if config[\"encoder_epochs\"] == 0:\n ifencoder = \"nosecondpretrain\"\n else:\n ifencoder = \"secondpretrain\"\n log_name = \"_\".join(\n [\n config[\"classifier_dataset_name\"],\n config[\"classifier_data_splitter_name\"],\n \"_\".join(map(str, config[\"data_splitting_ratio\"])),\n \"embed{}\".format(config[\"embedding_dim\"]),\n config[\"classifier_loss_name\"],\n \"bs\" + str(config[\"batch_size\"]),\n ifencoder,\n \"freeze\" + \"_\".join(map(str, config[\"frozen_epochs\"])),\n str(config_idx),\n ]\n )\n log_dir = osp.join(\n \"logs\",\n time_stamp,\n ds_name,\n \"ZINC_pretrained_\" + log_name,\n str(seed_idx),\n )\n os.makedirs(log_dir)\n # log configs\n with open(osp.join(log_dir, osp.pardir, \"configs.yml\"), \"w\") as f:\n f.write(yaml.dump(config_dict))\n # init encoder\n dim_encoder_target = config[\"embedding_dim\"]\n dim_decoder_target = dataset[0].y.size(1)\n dim_features = dataset[0].x.size(1)\n dropout = config[\"dropout\"]\n Encoder = config[\"model\"]\n encoder = Encoder(\n dim_features=dim_features,\n dim_target=dim_encoder_target,\n config=config,\n )\n encoder.load_state_dict(torch.load(args.pretrained_model))\n # init decoder and pretrain\n if config[\"encoder_epochs\"] > 0:\n decoder = GINDecoder(\n dim_encoder_target, dim_decoder_target, dropout\n )\n\n dloader = config[\"encoder_data_splitter\"](dataset)\n encoder_trainer = EncoderDecoderTrainer(\n config, encoder, decoder, dloader\n )\n encoder_trainer.metrics = []\n encoder_trainer.freeze_encoder = False\n encoder_trainer.train()\n encoder_trainer.log_results(\n out=log_dir,\n txt_name=\"encoder_losses.txt\",\n pk_name=\"encoder_losses.pk\",\n )\n encoder_trainer.plot_training_metrics(\n log_dir, name=\"encoder_losses.png\"\n )\n for index in range(5):\n encoder_trainer.plot_reconstructions(\n index, log_dir, f\"reconstruction_{index}.png\"\n )\n\n # fine-tune\n cls_dataset = Dataset(transform=AtomFeaturesOneHotTransformer())\n classifier = GINDecoder(\n dim_encoder_target, cls_dataset.num_classes, dropout\n )\n 
cls_dloader = OversamplingSplitter(\n cls_dataset, batch_size=config[\"batch_size\"], random_seed=seed\n )\n cls_trainer = EncoderClassifierTrainer(\n config,\n encoder,\n classifier,\n cls_dloader,\n )\n cls_trainer.train()\n cls_trainer.plot_training_metrics(log_dir)\n cls_trainer.test()\n cls_trainer.log_results(\n out=log_dir,\n txt_name=\"classifier_metrics.txt\",\n pk_name=\"classifier_metrics.pk\",\n )\n\n # no ZINC pretrained model\n config_grid = Grid(os.path.join(\"model_configs\", ds_name + \".yml\"))\n for config_idx, config_dict in enumerate(config_grid):\n for seed_idx, seed in enumerate(random_seeds):\n config = Config.from_dict(config_dict)\n torch.manual_seed(seed)\n random.seed(seed)\n dataset = FPDataset(transform=AtomFeaturesOneHotTransformer())\n if config[\"encoder_epochs\"] == 0:\n ifencoder = \"nosecondpretrain\"\n else:\n ifencoder = \"secondpretrain\"\n log_name = \"_\".join(\n [\n config[\"classifier_dataset_name\"],\n config[\"classifier_data_splitter_name\"],\n \"_\".join(map(str, config[\"data_splitting_ratio\"])),\n \"embed{}\".format(config[\"embedding_dim\"]),\n config[\"classifier_loss_name\"],\n \"bs\" + str(config[\"batch_size\"]),\n ifencoder,\n \"freeze\" + \"_\".join(map(str, config[\"frozen_epochs\"])),\n str(config_idx),\n ]\n )\n log_dir = osp.join(\n \"logs\",\n time_stamp,\n ds_name,\n \"not_ZINC_pretrained_\" + log_name,\n str(seed_idx),\n )\n os.makedirs(log_dir)\n with open(osp.join(log_dir, osp.pardir, \"configs.yml\"), \"w\") as f:\n f.write(yaml.dump(config_dict))\n\n dim_encoder_target = config[\"embedding_dim\"]\n dim_decoder_target = dataset[0].y.size(1)\n dim_features = dataset[0].x.size(1)\n dropout = config[\"dropout\"]\n Encoder = config[\"model\"]\n encoder = Encoder(\n dim_features=dim_features,\n dim_target=dim_encoder_target,\n config=config,\n )\n if config[\"encoder_epochs\"] > 0:\n decoder = GINDecoder(\n dim_encoder_target, dim_decoder_target, dropout\n )\n\n dloader = config[\"encoder_data_splitter\"](dataset)\n encoder_trainer = EncoderDecoderTrainer(\n config, encoder, decoder, dloader\n )\n encoder_trainer.metrics = []\n encoder_trainer.freeze_encoder = False\n encoder_trainer.train()\n encoder_trainer.log_results(\n out=log_dir,\n txt_name=\"encoder_losses.txt\",\n pk_name=\"encoder_losses.pk\",\n )\n encoder_trainer.plot_training_metrics(\n log_dir, name=\"encoder_losses.png\"\n )\n for index in range(5):\n encoder_trainer.plot_reconstructions(\n index, log_dir, f\"reconstruction_{index}.png\"\n )\n\n cls_dataset = Dataset(transform=AtomFeaturesOneHotTransformer())\n classifier = GINDecoder(\n dim_encoder_target, cls_dataset.num_classes, dropout\n )\n cls_dloader = OversamplingSplitter(\n cls_dataset, batch_size=config[\"batch_size\"], random_seed=seed\n )\n cls_trainer = EncoderClassifierTrainer(\n config,\n encoder,\n classifier,\n cls_dloader,\n )\n cls_trainer.train()\n cls_trainer.plot_training_metrics(log_dir)\n cls_trainer.test()\n cls_trainer.log_results(\n out=log_dir,\n txt_name=\"classifier_metrics.txt\",\n pk_name=\"classifier_metrics.pk\",\n )\n"
] | [
[
"pandas.read_csv"
],
[
"torch.manual_seed",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ryuwd/uproot4 | [
"20d8575e941c32559c7b5e62b0ed5f92bc4927d0",
"20d8575e941c32559c7b5e62b0ed5f92bc4927d0",
"20d8575e941c32559c7b5e62b0ed5f92bc4927d0"
] | [
"uproot/const.py",
"tests/test_0228_read-TProfiles.py",
"uproot/behaviors/TProfile3D.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines integer constants used by serialization and deserialization routines.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport numpy\n\n# used in unmarshaling\nkByteCountMask = numpy.int64(0x40000000)\nkByteCountVMask = numpy.int64(0x4000)\nkClassMask = numpy.int64(0x80000000)\nkNewClassTag = numpy.int64(0xFFFFFFFF)\n\nkIsOnHeap = numpy.uint32(0x01000000)\nkIsReferenced = numpy.uint32(1 << 4)\n\nkMapOffset = 2\n\n# not used?\nkNullTag = 0\nkNotDeleted = numpy.uint32(0x02000000)\nkZombie = numpy.uint32(0x04000000)\nkBitMask = numpy.uint32(0x00FFFFFF)\nkDisplacementMask = numpy.uint32(0xFF000000)\n\n############# core/zip/inc/Compression.h\n\nkZLIB = 1\nkLZMA = 2\nkOldCompressionAlgo = 3\nkLZ4 = 4\nkZSTD = 5\nkUndefinedCompressionAlgorithm = 6\n\n############# constants for streamers\n\nkBase = 0\nkChar = 1\nkShort = 2\nkInt = 3\nkLong = 4\nkFloat = 5\nkCounter = 6\nkCharStar = 7\nkDouble = 8\nkDouble32 = 9\nkLegacyChar = 10\nkUChar = 11\nkUShort = 12\nkUInt = 13\nkULong = 14\nkBits = 15\nkLong64 = 16\nkULong64 = 17\nkBool = 18\nkFloat16 = 19\nkOffsetL = 20\nkOffsetP = 40\nkObject = 61\nkAny = 62\nkObjectp = 63\nkObjectP = 64\nkTString = 65\nkTObject = 66\nkTNamed = 67\nkAnyp = 68\nkAnyP = 69\nkAnyPnoVT = 70\nkSTLp = 71\n\nkSkip = 100\nkSkipL = 120\nkSkipP = 140\n\nkConv = 200\nkConvL = 220\nkConvP = 240\n\nkSTL = 300\nkSTLstring = 365\n\nkStreamer = 500\nkStreamLoop = 501\n\n############# constants from core/foundation/inc/ESTLType.h\n\nkNotSTL = 0\nkSTLvector = 1\nkSTLlist = 2\nkSTLdeque = 3\nkSTLmap = 4\nkSTLmultimap = 5\nkSTLset = 6\nkSTLmultiset = 7\nkSTLbitset = 8\nkSTLforwardlist = 9\nkSTLunorderedset = 10\nkSTLunorderedmultiset = 11\nkSTLunorderedmap = 12\nkSTLunorderedmultimap = 13\nkSTLend = 14\nkSTLany = 300\n\n############# IOFeatures\n\nkGenerateOffsetMap = numpy.uint8(1)\n\n############# other\n\nkStreamedMemberWise = numpy.uint16(1 << 14)\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport skhep_testdata\nimport uproot\n\n\ndef test_read_TProfile2D():\n\n file = skhep_testdata.data_path(\"uproot-issue-227a.root\")\n\n with uproot.open(file) as h:\n T = h[\"hprof2d\"]\n\n assert T.kind == \"MEAN\"\n assert_array_equal(T.axis(\"x\").edges(), np.array([1.0, 2.0, 3.0]))\n assert_array_equal(T.axis(\"y\").edges(), np.array([1.0, 2.0, 3.0, 4.0]))\n assert np.sum(T.counts(flow=True)) == 12\n assert_array_equal(T.values().tolist(), [[1.0, 2.0, 0.0], [2.0, 4.0, 6.0]])\n\n\ndef test_read_TProfile3D():\n\n file = skhep_testdata.data_path(\"uproot-issue-227b.root\")\n\n with uproot.open(file) as h:\n T = h[\"hprof3d\"]\n\n assert T.kind == \"MEAN\"\n assert_array_equal(T.axis(\"x\").edges(), np.array([1.0, 2.0, 3.0]))\n assert_array_equal(T.axis(\"y\").edges(), np.array([1.0, 2.0, 3.0, 4.0]))\n assert_array_equal(T.axis(\"z\").edges(), np.array([1.0, 2.0, 3.0, 4.0, 5.0]))\n assert np.sum(T.counts(flow=True)) == 12\n assert_array_equal(\n T.values().tolist(),\n [\n [[2.0, 0.0, 0.0, 0.0], [0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 6.0, 0.0]],\n [[0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0], [0.0, 0.0, 0.0, 0.0]],\n ],\n )\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines the behavior of ``TProfile3D``.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport numpy\n\nimport uproot\nfrom uproot.behaviors.TH1 import boost_metadata, boost_axis_metadata\nimport uproot.behaviors.TProfile\nimport uproot.behaviors.TH3\n\n\nclass TProfile3D(uproot.behaviors.TProfile.Profile):\n \"\"\"\n Behaviors for three-dimensional profiles: ROOT's ``TProfile3D``.\n \"\"\"\n\n no_inherit = (uproot.behaviors.TH3.TH3,)\n\n @property\n def axes(self):\n return (self.member(\"fXaxis\"), self.member(\"fYaxis\"), self.member(\"fZaxis\"))\n\n def axis(self, axis):\n if axis == 0 or axis == -3 or axis == \"x\":\n return self.member(\"fXaxis\")\n elif axis == 1 or axis == -2 or axis == \"y\":\n return self.member(\"fYaxis\")\n elif axis == 2 or axis == -1 or axis == \"z\":\n return self.member(\"fZaxis\")\n else:\n raise ValueError(\n \"axis must be 0 (-3), 1 (-2), 2 (-1) or 'x', 'y', 'z' for a TProfile3D\"\n )\n\n @property\n def weighted(self):\n fBinSumw2 = self.member(\"fBinSumw2\", none_if_missing=True)\n return fBinSumw2 is None or len(fBinSumw2) != len(self.member(\"fNcells\"))\n\n def counts(self, flow=False):\n fBinEntries = numpy.asarray(self.member(\"fBinEntries\"))\n out = uproot.behaviors.TProfile._effective_counts_1d(\n fBinEntries.reshape(-1),\n numpy.asarray(self.member(\"fBinSumw2\")).reshape(-1),\n self.member(\"fNcells\"),\n )\n out = out.reshape(fBinEntries.shape)\n if flow:\n return out\n else:\n return out[1:-1, 1:-1, 1:-1]\n\n def values(self, flow=False):\n if hasattr(self, \"_values\"):\n values = self._values\n else:\n (root_cont,) = self.base(uproot.models.TArray.Model_TArray)\n root_cont = numpy.asarray(root_cont, dtype=numpy.float64)\n values = uproot.behaviors.TProfile._values_1d(\n numpy.asarray(self.member(\"fBinEntries\")).reshape(-1),\n root_cont.reshape(-1),\n )\n xaxis_fNbins = self.member(\"fXaxis\").member(\"fNbins\")\n yaxis_fNbins = self.member(\"fYaxis\").member(\"fNbins\")\n zaxis_fNbins = self.member(\"fZaxis\").member(\"fNbins\")\n values = numpy.transpose(\n values.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)\n )\n self._values = values\n\n if flow:\n return values\n else:\n return values[1:-1, 1:-1, 1:-1]\n\n def _values_errors(self, flow, error_mode):\n attr = \"_errors\" + uproot.behaviors.TProfile._error_mode_str(error_mode)\n if hasattr(self, attr):\n values = self._values\n errors = getattr(self, attr)\n else:\n (root_cont,) = self.base(uproot.models.TArray.Model_TArray)\n root_cont = numpy.asarray(root_cont, dtype=numpy.float64)\n fSumw2 = self.member(\"fSumw2\", none_if_missing=True)\n if fSumw2 is not None:\n fSumw2 = numpy.asarray(fSumw2).reshape(-1)\n values, errors = uproot.behaviors.TProfile._values_errors_1d(\n error_mode,\n numpy.asarray(self.member(\"fBinEntries\")).reshape(-1),\n root_cont.reshape(-1),\n fSumw2,\n self.member(\"fNcells\"),\n numpy.asarray(self.member(\"fBinSumw2\")).reshape(-1),\n )\n xaxis_fNbins = self.member(\"fXaxis\").member(\"fNbins\")\n yaxis_fNbins = self.member(\"fYaxis\").member(\"fNbins\")\n zaxis_fNbins = self.member(\"fZaxis\").member(\"fNbins\")\n values = numpy.transpose(\n values.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)\n )\n errors = numpy.transpose(\n errors.reshape(zaxis_fNbins + 2, yaxis_fNbins + 2, xaxis_fNbins + 2)\n )\n self._values = values\n setattr(self, attr, errors)\n\n if flow:\n return values, errors\n else:\n return 
values[1:-1, 1:-1, 1:-1], errors[1:-1, 1:-1, 1:-1]\n\n def to_boost(self, metadata=boost_metadata, axis_metadata=boost_axis_metadata):\n raise NotImplementedError(\"FIXME @henryiii: this one kinda doesn't exist\")\n"
] | [
[
"numpy.int64",
"numpy.uint8",
"numpy.uint32",
"numpy.uint16"
],
[
"numpy.array"
],
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
piotrsobecki/PCa-CNNs2 | [
"01504db2037c67dc6832c2c8aaf4b3d5e4f2808f"
] | [
"src/prostatex/normalization.py"
] | [
"import numpy\n\n\n# Normalization functions\nclass NormalizationNo():\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n return img\n\n\nclass NormalizationMean(NormalizationNo):\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n if img.std() == 0:\n return img\n return (img - img.mean()) / img.std()\n\n\nclass NormalizationMedian(NormalizationNo):\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n denominator = numpy.median(img) + 2 * img.std()\n if denominator == 0.0:\n return img\n return img / denominator\n\nclass NormalizationFeatureScaling(NormalizationNo):\n\n def __init__(self, vmin=0, vmax=1):\n self.vmin=vmin\n self.vmax=vmax\n\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n OldValue = img\n OldMin = img.min()\n OldMax = img.max()\n NewMax = self.vmax\n NewMin = self.vmin\n OldRange = (OldMax - OldMin)\n NewRange = (NewMax - NewMin)\n if OldRange == 0.0:\n return img\n NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin\n return NewValue\n"
] | [
[
"numpy.median"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jay90099/struct2tensor | [
"47d651757efa27586bf75f991b2174d8173a750b",
"47d651757efa27586bf75f991b2174d8173a750b",
"47d651757efa27586bf75f991b2174d8173a750b"
] | [
"struct2tensor/expression_impl/map_prensor.py",
"struct2tensor/expression_impl/reroot.py",
"struct2tensor/expression_impl/parquet.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Arbitrary operations from sparse and ragged tensors to a leaf field.\n\nThere are two public methods of note right now: map_sparse_tensor\nand map_ragged_tensor.\n\nAssume expr is:\n\n```\nsession: {\n event: {\n val_a: 10\n val_b: 1\n }\n event: {\n val_a: 20\n val_b: 2\n }\n event: {\n }\n event: {\n val_a: 40\n }\n event: {\n val_b: 5\n }\n}\n```\n\nEither of the following alternatives will add val_a and val_b\nto create val_sum.\n\nmap_sparse_tensor converts val_a and val_b to sparse tensors,\nand then add them to produce val_sum.\n\n```\nnew_root = map_prensor.map_sparse_tensor(\n expr,\n path.Path([\"event\"]),\n [path.Path([\"val_a\"]), path.Path([\"val_b\"])],\n lambda x,y: x + y,\n False,\n tf.int32,\n \"val_sum\")\n```\n\nmap_ragged_tensor converts val_a and val_b to ragged tensors,\nand then add them to produce val_sum.\n\n```\nnew_root = map_prensor.map_ragged_tensor(\n expr,\n path.Path([\"event\"]),\n [path.Path([\"val_a\"]), path.Path([\"val_b\"])],\n lambda x,y: x + y,\n False,\n tf.int32,\n \"val_sum\")\n```\n\nThe result of either is:\n\n```\nsession: {\n event: {\n val_a: 10\n val_b: 1\n val_sum: 11\n }\n event: {\n val_a: 20\n val_b: 2\n val_sum: 22\n }\n event: {\n }\n event: {\n val_a: 40\n val_sum: 40\n }\n event: {\n val_b: 5\n val_sum: 5\n }\n}\n```\n\n\"\"\"\n\nfrom typing import Callable, FrozenSet, Optional, Sequence, Tuple\n\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import expression_add\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nfrom struct2tensor.expression_impl import project\nimport tensorflow as tf\n\n\ndef map_sparse_tensor(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.SparseTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> expression.Expression:\n \"\"\"Maps a sparse tensor.\n\n Args:\n root: the root of the expression.\n root_path: the path relative to which the sparse tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of sparse tensors as input and\n returns a sparse tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n A new root expression containing the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n\n return _map_sparse_tensor_impl(root, root_path, paths, operation, is_repeated,\n dtype, new_field_name)[0]\n\n\ndef map_ragged_tensor(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.RaggedTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> expression.Expression:\n \"\"\"Map a ragged tensor.\n\n Args:\n root: 
the root of the expression.\n root_path: the path relative to which the ragged tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of ragged tensors as input and\n returns a ragged tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n A new root expression containing the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n return _map_ragged_tensor_impl(root, root_path, paths, operation, is_repeated,\n dtype, new_field_name)[0]\n\n\nclass _MapPrensorExpression(expression.Expression):\n \"\"\"Maps the values of the given expression.\n\n It maps the value of a sub-tree (i.e. a Prensor) to a single prensor\n LeafNodeTensor. Therefore its sources are all the (known) descendants of\n `origin`: it usually should follow a project(...) to make known descendants\n clear.\n\n _MapPrensorExpression is intended to be a child of the origin. See\n map_prensor_impl for example usage.\n\n \"\"\"\n\n def __init__(self, origin: expression.Expression,\n operation: Callable[[prensor.Prensor, calculate_options\n .Options], prensor.LeafNodeTensor],\n is_repeated: bool, dtype: tf.DType):\n super().__init__(is_repeated, dtype)\n self._origin = origin\n self._operation = operation\n\n def _get_source_paths(self) -> Sequence[path.Path]:\n \"\"\"Returns the source paths in a deterministic order.\"\"\"\n result = [k for k in self._origin.get_known_descendants().keys()]\n result.sort()\n return result\n\n def get_source_expressions(self) -> Sequence[expression.Expression]:\n subtree = self._origin.get_known_descendants()\n source_paths = self._get_source_paths()\n return [subtree[k] for k in source_paths]\n\n def calculate(\n self,\n sources: Sequence[prensor.NodeTensor],\n destinations: Sequence[expression.Expression],\n options: calculate_options.Options,\n side_info: Optional[prensor.Prensor] = None) -> prensor.LeafNodeTensor:\n source_tree = prensor.create_prensor_from_descendant_nodes(\n {k: v for k, v in zip(self._get_source_paths(), sources)})\n return self._operation(source_tree, options)\n\n def calculation_is_identity(self) -> bool:\n return False\n\n def calculation_equal(self, expr: expression.Expression) -> bool:\n return self is expr\n\n def _get_child_impl(self,\n field_name: path.Step) -> Optional[expression.Expression]:\n return None\n\n def known_field_names(self) -> FrozenSet[path.Step]:\n return frozenset()\n\n\ndef _as_leaf_node_no_checks(sparse_tensor: tf.SparseTensor,\n is_repeated: bool) -> prensor.LeafNodeTensor:\n \"\"\"Take a SparseTensor and create a LeafNodeTensor, no checks.\"\"\"\n if is_repeated:\n parent_index = tf.transpose(sparse_tensor.indices)[0]\n else:\n parent_index = tf.reshape(sparse_tensor.indices, [-1])\n return prensor.LeafNodeTensor(parent_index, sparse_tensor.values, is_repeated)\n\n\ndef _as_leaf_node_with_checks(sparse_tensor: tf.SparseTensor, is_repeated: bool,\n required_batch_size: tf.Tensor\n ) -> prensor.LeafNodeTensor:\n \"\"\"Take a SparseTensor and create a LeafNodeTensor, with checks.\"\"\"\n assertions = [\n tf.assert_equal(sparse_tensor.dense_shape[0], required_batch_size)\n ]\n if is_repeated:\n assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 2))\n else:\n assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 1))\n\n with 
tf.control_dependencies(assertions):\n # TODO(b/72947444): Check that the resulting tensor is canonical, that the\n # indices are in lexicographical order, and that the indices fit in the\n # shape. Moreover, maybe we should check if it is repeated that it is a\n # \"ragged array\".\n return _as_leaf_node_no_checks(sparse_tensor, is_repeated)\n\n\ndef _as_leaf_node(sparse_tensor: tf.SparseTensor, is_repeated: bool,\n required_batch_size: tf.Tensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n if options.sparse_checks:\n return _as_leaf_node_with_checks(sparse_tensor, is_repeated,\n required_batch_size)\n else:\n return _as_leaf_node_no_checks(sparse_tensor, is_repeated)\n\n\ndef _map_prensor_impl(\n root: expression.Expression, root_path: path.Path,\n paths_needed: Sequence[path.Path],\n operation: Callable[[prensor.Prensor, calculate_options.Options], prensor\n .LeafNodeTensor], is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Map prensor implementation.\"\"\"\n child_expr = root.get_descendant_or_error(root_path)\n sibling_child_expr = project.project(child_expr, paths_needed)\n new_field_expr = _MapPrensorExpression(sibling_child_expr, operation,\n is_repeated, dtype)\n new_path = root_path.get_child(new_field_name)\n return expression_add.add_paths(root, {new_path: new_field_expr}), new_path\n\n\ndef _map_sparse_tensor_impl(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.SparseTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step\n ) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Helper method for map_sparse_tensor.\"\"\"\n\n def new_op(pren: prensor.Prensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n \"\"\"Op for mapping prensor using the operation.\"\"\"\n sparse_tensor_map = pren.get_sparse_tensors(options)\n sparse_tensors = [sparse_tensor_map[p] for p in paths]\n result_as_tensor = operation(*sparse_tensors)\n result = _as_leaf_node(result_as_tensor, is_repeated,\n sparse_tensors[0].dense_shape[0], options)\n if result.values.dtype != dtype:\n raise ValueError(\"Type unmatched: actual ({})!= expected ({})\".format(\n str(result.values.dtype), str(dtype)))\n return result\n\n return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,\n new_field_name)\n\n\ndef _ragged_as_leaf_node(ragged_tensor: tf.RaggedTensor, is_repeated: bool,\n reference_ragged_tensor: tf.RaggedTensor,\n options: calculate_options.Options\n ) -> prensor.LeafNodeTensor:\n \"\"\"Creates a ragged tensor as a leaf node.\"\"\"\n assertions = []\n size_dim = tf.compat.dimension_at_index(ragged_tensor.shape, 0).value\n reference_size_dim = tf.compat.dimension_at_index(\n reference_ragged_tensor.shape, 0).value\n if (size_dim is not None and reference_size_dim is not None):\n if size_dim != reference_size_dim:\n raise ValueError(\"Returned ragged tensor is not the right size.\")\n elif options.ragged_checks:\n assertions.append(\n tf.assert_equal(ragged_tensor.nrows(), reference_ragged_tensor.nrows()))\n\n if not is_repeated:\n rowids = ragged_tensor.value_rowids()\n if options.ragged_checks:\n assertions.append(tf.compat.v1.assert_positive(rowids[1:] - rowids[:-1]))\n if assertions:\n with tf.control_dependencies(assertions):\n parent_index = ragged_tensor.value_rowids()\n return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,\n is_repeated)\n else:\n parent_index = 
ragged_tensor.value_rowids()\n return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,\n is_repeated)\n\n\ndef _map_ragged_tensor_impl(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.RaggedTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step\n ) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Maps a ragged tensor.\n\n Args:\n root: the root of the expression.\n root_path: the path relative to which the ragged tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of ragged tensors as input and\n returns a ragged tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n An expression/path pair (expr,p) with a new root expression containing\n the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n\n def new_op(tree: prensor.Prensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n \"\"\"Apply operation to tree.\"\"\"\n ragged_tensor_map = tree.get_ragged_tensors(options)\n ragged_tensors = [ragged_tensor_map[p] for p in paths]\n result_as_tensor = operation(*ragged_tensors)\n result = _ragged_as_leaf_node(result_as_tensor, is_repeated,\n ragged_tensors[0], options)\n if result.values.dtype != dtype:\n raise ValueError(\"Type unmatched: actual ({})!= expected ({})\".format(\n str(result.values.dtype), str(dtype)))\n return result\n\n return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,\n new_field_name)\n",
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Reroot to a subtree, maintaining an input proto index.\n\nreroot is similar to get_descendant_or_error. However, this method allows\nyou to call create_proto_index(...) later on, that gives you a reference to the\noriginal proto.\n\n\"\"\"\nfrom typing import FrozenSet, Optional, Sequence\n\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import expression_add\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nimport tensorflow as tf\n\n\ndef reroot(root: expression.Expression,\n source_path: path.Path) -> expression.Expression:\n \"\"\"Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n \"\"\"\n\n new_root = root\n for step in source_path.field_list:\n new_root = _RerootExpression(new_root, step)\n return new_root\n\n\ndef create_proto_index_field(root: expression.Expression,\n new_field_name: path.Step\n ) -> expression.Expression:\n return expression_add.add_paths(\n root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})\n\n\nclass _RerootRootNodeTensor(prensor.RootNodeTensor):\n \"\"\"The reroot root node.\n\n This contains a map from a current index to the original index of a proto.\n \"\"\"\n\n def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):\n super().__init__(size)\n self._input_proto_index = input_proto_index\n\n @property\n def input_proto_index(self):\n return self._input_proto_index\n\n\ndef _get_proto_index_parent_index(node: prensor.RootNodeTensor):\n return tf.range(node.size)\n\n\ndef _get_input_proto_index(node: prensor.RootNodeTensor):\n if isinstance(node, _RerootRootNodeTensor):\n return node.input_proto_index\n return _get_proto_index_parent_index(node)\n\n\nclass _RerootExpression(expression.Expression):\n \"\"\"Reroot to a new path, maintaining a input proto index.\"\"\"\n\n def __init__(self, original_root: expression.Expression,\n field_name: path.Step):\n super().__init__(True, None)\n self._field_name = field_name\n self._original_root = original_root\n self._new_root = original_root.get_child_or_error(field_name)\n if self._new_root.type is not None:\n raise ValueError(\"New root must be a message type: {}\".format(\n str(self._field_name)))\n # TODO(martinz): Check that the \"original root source expression\" has a type\n # in (_RerootExpression, prensor._ProtoRootExpression)\n # To do this, we need a general technique similar to\n # expression_add._is_true_source_expression: however, this should also cover\n # intermediate operations like \"project\".\n # Since this check is not present, if it should have fired, there will be\n # an error when calculate(...) 
is called.\n\n def get_source_expressions(self) -> Sequence[expression.Expression]:\n return [self._original_root, self._new_root]\n\n def calculate(\n self,\n sources: Sequence[prensor.NodeTensor],\n destinations: Sequence[expression.Expression],\n options: calculate_options.Options,\n side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:\n [old_root_value, new_root_value] = sources\n if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(\n new_root_value, prensor.ChildNodeTensor):\n old_input_proto_index = _get_input_proto_index(old_root_value)\n # Notice that the \"gather\" operation is similar to promote.\n return _RerootRootNodeTensor(\n tf.size(new_root_value.parent_index, out_type=tf.int64),\n tf.gather(old_input_proto_index, new_root_value.parent_index))\n raise ValueError(\"Source types incorrect\")\n\n def calculation_is_identity(self) -> bool:\n return False\n\n def calculation_equal(self, expr: expression.Expression) -> bool:\n # Although path can vary, it is not used in the calculation, just to\n return isinstance(expr, _RerootExpression)\n\n def _get_child_impl(self,\n field_name: path.Step) -> Optional[expression.Expression]:\n return self._new_root.get_child(field_name)\n\n def known_field_names(self) -> FrozenSet[path.Step]:\n return self._new_root.known_field_names()\n\n\nclass _InputProtoIndexExpression(expression.Leaf):\n \"\"\"A proto index expression.\"\"\"\n\n def __init__(self, root: expression.Expression):\n \"\"\"Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n \"\"\"\n super().__init__(is_repeated=False, my_type=tf.int64)\n self._root = root\n\n def get_source_expressions(self) -> Sequence[expression.Expression]:\n return [self._root]\n\n def calculate(\n self,\n sources: Sequence[prensor.NodeTensor],\n destinations: Sequence[expression.Expression],\n options: calculate_options.Options,\n side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:\n [root_node] = sources\n # The following check ensures not just that we can calculate the value,\n # but that no \"improper\" reroots were done.\n if isinstance(root_node, prensor.RootNodeTensor):\n return prensor.LeafNodeTensor(\n _get_proto_index_parent_index(root_node),\n _get_input_proto_index(root_node),\n is_repeated=False)\n raise ValueError(\n \"Illegal operation: expected a true root node: got {}\".format(\n str(root_node)))\n\n def calculation_is_identity(self) -> bool:\n return False\n\n def calculation_equal(self, expr: expression.Expression) -> bool:\n # Although path can vary, it is not used in the calculation, just to\n return isinstance(expr, _InputProtoIndexExpression)\n",
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Apache Parquet Dataset.\n\nExample usage:\n\n```\n exp = create_expression_from_parquet_file(filenames)\n docid_project_exp = project.project(exp, [path.Path([\"DocId\"])])\n pqds = parquet_dataset.calculate_parquet_values([docid_project_exp], exp,\n filenames, batch_size)\n\n for prensors in pqds:\n doc_id_prensor = prensors[0]\n```\n\n\"\"\"\n\nimport collections\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom struct2tensor import calculate\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nfrom struct2tensor.expression_impl import map_prensor_to_prensor as mpp\nfrom struct2tensor.expression_impl import placeholder\nfrom struct2tensor.ops import gen_parquet_dataset\nimport tensorflow as tf\n\n\ndef create_expression_from_parquet_file(\n filenames: List[str]) -> placeholder._PlaceholderRootExpression: # pylint: disable=protected-access\n \"\"\"Creates a placeholder expression from a parquet file.\n\n Args:\n filenames: A list of parquet files.\n\n Returns:\n A PlaceholderRootExpression that should be used as the root of an expression\n graph.\n \"\"\"\n\n metadata = pq.ParquetFile(filenames[0]).metadata\n parquet_schema = metadata.schema\n arrow_schema = parquet_schema.to_arrow_schema()\n\n root_schema = mpp.create_schema(\n is_repeated=True,\n children=_create_children_from_arrow_fields(\n [arrow_schema.field_by_name(name) for name in arrow_schema.names]))\n\n # pylint: disable=protected-access\n return placeholder._PlaceholderRootExpression(root_schema)\n\n\ndef calculate_parquet_values(\n expressions: List[expression.Expression],\n root_exp: placeholder._PlaceholderRootExpression, # pylint: disable=protected-access\n filenames: List[str],\n batch_size: int,\n options: Optional[calculate_options.Options] = None):\n \"\"\"Calculates expressions and returns a parquet dataset.\n\n Args:\n expressions: A list of expressions to calculate.\n root_exp: The root placeholder expression to use as the feed dict.\n filenames: A list of parquet files.\n batch_size: The number of messages to batch.\n options: calculate options.\n\n Returns:\n A parquet dataset.\n \"\"\"\n pqds = _ParquetDatasetWithExpression(expressions, root_exp, filenames,\n batch_size, options)\n return pqds.map(pqds._calculate_prensor) # pylint: disable=protected-access\n\n\nclass _RawParquetDataset(tf.compat.v1.data.Dataset):\n \"\"\"A dataset which reads columns from parquet and outputs a vector of tensors.\n\n A ParquetDataset is a Dataset of batches of messages (records).\n Every leaf field field of the messages in each batch has its own values tensor\n and parent indices tensors (which encodes the structural information).\n\n The user has control over which parent indices of which fields in a path to\n read, and is determined by parent_index_paths and path_index.\n\n View 
//struct2tensor/ops/parquet_dataset_op.cc\n for a better understanding of what format the vector of tensors is in.\n \"\"\"\n\n def __init__(self, filenames: List[str], value_paths: List[str],\n value_dtypes: List[tf.DType], parent_index_paths: List[str],\n path_index: List[int], batch_size: int):\n \"\"\"Creates a ParquetDataset.\n\n Args:\n filenames: A list containing the name(s) of the file(s) to be read.\n value_paths: A list of strings of the dotstring path(s) of each leaf\n path(s).\n value_dtypes: value_dtypes[i] is the Tensorflow data type value_paths[i]\n would be of.\n parent_index_paths: A list of strings of the dotstring path(s) of the\n path(s) to be read.\n path_index: A list containing the index of each field to get the parent\n index of. This will have the same length as parent_index_paths.\n batch_size: An int that determines how many messages are parsed into one\n prensor tree in an iteration. If there are fewer than batch_size\n remaining messages, then all remaining messages will be returned.\n\n Raises:\n ValueError: if the column does not exist in the parquet schema.\n ValueError: if the column dtype does not match the value_dtype passed in.\n \"\"\"\n self._filenames = filenames\n self._value_paths = value_paths\n self._value_dtypes = tuple(value_dtypes)\n self._parent_index_paths = parent_index_paths\n self._path_index = path_index\n self._batch_size = batch_size\n\n super().__init__()\n\n def _get_column_path_to_index_mapping(self, metadata_file) -> Dict[str, int]:\n \"\"\"Gets the column index of every column.\n\n Args:\n metadata_file: the file to be used as the metadata. If there is no\n metadata_file, any file from file_names will suffice.\n\n Returns:\n A dictionary mapping path name (str) to column index (int).\n \"\"\"\n metadata = pq.ParquetFile(metadata_file).metadata\n\n path_to_column_index = {\n metadata.schema.column(index).path: index\n for index in range(metadata.num_columns)\n }\n\n return path_to_column_index\n\n def _parquet_to_tf_type(self, parquet_type: str) -> Union[tf.DType, None]:\n \"\"\"Maps tensorflow datatype to a parquet datatype.\n\n Args:\n parquet_type: a string representing the parquet datatype.\n\n Returns:\n the tensorflow datatype equivalent of a parquet datatype.\n \"\"\"\n return {\n \"BOOLEAN\": tf.bool,\n \"INT32\": tf.int32,\n \"INT64\": tf.int64,\n \"FLOAT\": tf.float32,\n \"DOUBLE\": tf.double,\n \"BYTE_ARRAY\": tf.string\n }.get(parquet_type)\n\n def _as_variant_tensor(self):\n return gen_parquet_dataset.parquet_dataset(\n self._filenames,\n value_paths=self._value_paths,\n value_dtypes=self._value_dtypes,\n parent_index_paths=self._parent_index_paths,\n path_index=self._path_index,\n batch_size=self._batch_size)\n\n def _inputs(self):\n return []\n\n @property\n def output_types(self):\n res = []\n column_counter = 0\n prev = self._parent_index_paths[0]\n res.append(tf.int64)\n for i in range(1, len(self._parent_index_paths)):\n curr = self._parent_index_paths[i]\n res.append(tf.int64)\n if curr != prev:\n res.append(self._value_dtypes[column_counter])\n column_counter += 1\n prev = curr\n res.append(tf.int64)\n res.append(self._value_dtypes[column_counter])\n self.output_dtypes = tuple(res)\n return self.output_dtypes\n\n @property\n def output_shapes(self):\n return (tf.TensorShape([]),) + tuple(\n tf.TensorShape([None]) for i in range(1, len(self.output_dtypes)))\n\n @property\n def output_classes(self):\n return tuple(tf.Tensor for i in range(len(self.output_dtypes)))\n\n\nclass ParquetDataset(_RawParquetDataset):\n 
\"\"\"A dataset which reads columns from a parquet file and returns a prensor.\n\n The prensor will have a PrensorTypeSpec, which is created based on\n value_paths.\n\n Note: In tensorflow v1 this dataset will not return a prensor. The output will\n be the same format as _RawParquetDataset's output (a vector of tensors).\n The following is a workaround in v1:\n pq_ds = ParquetDataset(...)\n type_spec = pq_ds.element_spec\n tensors = pq_ds.make_one_shot_iterator().get_next()\n prensor = type_spec.from_components(tensors)\n session.run(prensor)\n \"\"\"\n\n def __init__(self, filenames: List[str], value_paths: List[str],\n batch_size: int):\n \"\"\"Creates a ParquetDataset.\n\n Args:\n filenames: A list containing the name(s) of the file(s) to be read.\n value_paths: A list of strings of the dotstring path(s) of each leaf\n path(s).\n batch_size: An int that determines how many messages are parsed into one\n prensor tree in an iteration. If there are fewer than batch_size\n remaining messages, then all remaining messages will be returned.\n\n Raises:\n ValueError: if the column does not exist in the parquet schema.\n \"\"\"\n self._filenames = filenames\n self._value_paths = value_paths\n self._batch_size = batch_size\n\n for filename in filenames:\n self._validate_file(filename, value_paths)\n\n self._value_dtypes = self._get_column_dtypes(filenames[0], value_paths)\n\n self._parent_index_paths = []\n self._path_index = []\n\n self.element_structure = self._create_prensor_spec()\n self._create_parent_index_paths_and_index_from_type_spec(\n self.element_structure, 0, 0)\n\n super(ParquetDataset,\n self).__init__(filenames, self._value_paths, self._value_dtypes,\n self._parent_index_paths, self._path_index, batch_size)\n\n def _get_column_dtypes(\n self, metadata_file: str,\n value_paths: List[str]) -> List[Union[tf.DType, None]]:\n \"\"\"Returns a list of tensorflow datatypes for each column.\n\n Args:\n metadata_file: the file to be used as the metadata. If there is no\n metadata_file, any file from file_names will suffice.\n value_paths: A list of strings of the dotstring path(s).\n\n Returns:\n A list of tensorflow datatypes for each column. 
This list aligns with\n value_paths.\n \"\"\"\n path_to_column_index = self._get_column_path_to_index_mapping(metadata_file)\n metadata = pq.ParquetFile(metadata_file).metadata\n\n value_dtypes = []\n for column in value_paths:\n col = metadata.schema.column(path_to_column_index[column])\n parquet_type = col.physical_type\n value_dtypes.append(self._parquet_to_tf_type(parquet_type))\n return value_dtypes\n\n def _validate_file(self, filename: str, value_paths: List[str]):\n \"\"\"Checks if each requested path exists in the parquet file.\n\n Args:\n filename: The parquet filename.\n value_paths: A list of strings of the dotstring path(s).\n\n Raises:\n ValueError: if a path does not exist in the parquet file's schema.\n \"\"\"\n metadata = pq.ParquetFile(filename).metadata\n\n paths = {}\n for i in range(metadata.num_columns):\n col = metadata.schema.column(i)\n p = (col.path)\n paths[p] = col.physical_type\n\n for i, p in enumerate(value_paths):\n if p not in paths:\n raise ValueError(\"path \" + p + \" does not exist in the file.\")\n\n def _create_children_spec(\n self, field: pa.lib.Field, index_and_paths: List[Tuple[int,\n List[path.Step]]]\n ) -> Tuple[path.Step, prensor._PrensorTypeSpec]:\n \"\"\"Creates the _PrensorTypeSpec for children and leaves.\n\n Args:\n field: a pyarrow field.\n index_and_paths: a list of tuple(index, list[step]), where index is the\n column index this step belongs to, and list[step] are children steps of\n the passed in step arg. The reason index is needed is because we need to\n keep track of which column this step belongs to, to populate\n parent_index_paths and path_index.\n\n Returns:\n a child or leaf _PrensorTypeSpec.\n \"\"\"\n\n # pylint: disable=protected-access\n curr_steps_as_set = collections.OrderedDict()\n # Construct the dictionary of paths we need.\n if len(index_and_paths) >= 1 and len(index_and_paths[0][1]) >= 1:\n for p in index_and_paths:\n index = p[0]\n p = p[1]\n curr_step = p[0]\n if p:\n if curr_step in curr_steps_as_set:\n curr_steps_as_set[curr_step].append((index, p[1:]))\n else:\n curr_steps_as_set[curr_step] = [(index, p[1:])]\n\n field_type = field.type\n if isinstance(field_type, pa.lib.ListType):\n field_type = field_type.value_type\n is_repeated = True\n else:\n is_repeated = False\n if isinstance(field_type, pa.lib.StructType):\n node_type = prensor._PrensorTypeSpec._NodeType.CHILD\n dtype = tf.int64\n children = [\n self._create_children_spec(field_type[step], curr_steps_as_set[step])\n for step in curr_steps_as_set\n ]\n else:\n node_type = prensor._PrensorTypeSpec._NodeType.LEAF\n dtype = tf.dtypes.as_dtype(field_type)\n children = []\n\n return (field.name,\n prensor._PrensorTypeSpec(is_repeated, node_type, dtype, children))\n\n def _create_prensor_spec(self) -> prensor._PrensorTypeSpec: # pylint: disable=protected-access\n \"\"\"Creates the prensor type spec based on value_paths.\n\n Returns:\n a root _PrensorTypeSpec.\n \"\"\"\n\n metadata = pq.ParquetFile(self._filenames[0]).metadata\n parquet_schema = metadata.schema\n arrow_schema = parquet_schema.to_arrow_schema()\n\n # pylint: disable=protected-access\n # Sort the paths by number of fields.\n paths = [path.create_path(p) for p in self._value_paths]\n mapped = zip(paths, self._value_paths, self._value_dtypes)\n sorted_mapped = sorted(mapped, key=lambda x: len(x[0].field_list))\n paths, self._value_paths, self._value_dtypes = zip(*sorted_mapped)\n\n # Creates an ordered dictionary mapping step to a list of children fields.\n # This will allow us to find paths that 
share a parent.\n curr_steps_as_set = collections.OrderedDict()\n for (i, p) in enumerate(paths):\n step = p.field_list[0]\n if step in curr_steps_as_set:\n curr_steps_as_set[step].append((i, p.field_list[1:]))\n else:\n curr_steps_as_set[step] = [(i, p.field_list[1:])]\n\n return prensor._PrensorTypeSpec(\n None, prensor._PrensorTypeSpec._NodeType.ROOT, tf.int64, [\n self._create_children_spec(\n arrow_schema.field(step), curr_steps_as_set[step])\n for step in curr_steps_as_set\n ])\n\n def _create_parent_index_paths_and_index_from_type_spec(\n self, type_spec, index, level):\n \"\"\"Populates self._parent_index_paths and self.path_index from the typespec.\n\n It traverses the prensor type spec to get index and level. It then uses\n index to get the correct path from self._value_paths.\n\n This assumes that self._value_paths is sorted alphabetically, and thus the\n prensor type spec has the same order of paths as self._value_paths.\n\n Args:\n type_spec: A Prensor type spec.\n index: The index of self._value_paths. It is incremented each time we\n reach a leaf, ie we have a new path.\n level: the step number in a path. It is incremented each time we go to a\n spec's child. It is then decremented when exiting the child spec.\n \"\"\"\n fields = type_spec._children_specs # pylint: disable=protected-access\n\n for field_tuple in fields:\n spec = field_tuple[1]\n self._parent_index_paths.append(self._value_paths[index])\n self._path_index.append(level)\n level += 1\n self._create_parent_index_paths_and_index_from_type_spec(\n spec, index, level)\n level -= 1\n index += 1\n\n @property\n def element_spec(self):\n return self.element_structure\n\n\ndef _create_children_from_arrow_fields(\n fields: pa.lib.Field) -> Dict[str, Dict[Any, Any]]:\n \"\"\"Creates a dictionary of children schema for a pyarrow field.\n\n Args:\n fields: A list of pyarrow fields.\n\n Returns:\n A dictionary of children. Key is field name. 
Value is a dictionary\n representing a schema.\n \"\"\"\n children = {}\n for field in fields:\n field_type = field.type\n if isinstance(field_type, pa.lib.ListType):\n sub_field_type = field_type.value_type\n if isinstance(sub_field_type, pa.lib.StructType):\n children[field.name] = {\n \"is_repeated\":\n True,\n \"children\":\n _create_children_from_arrow_fields(\n [subfield for subfield in sub_field_type])\n }\n elif isinstance(sub_field_type, pa.lib.DataType):\n children[field.name] = {\n \"is_repeated\": True,\n \"dtype\": tf.dtypes.as_dtype(sub_field_type)\n }\n else:\n print(\"this should never be printed\")\n elif isinstance(field_type, pa.lib.StructType):\n children[field.name] = {\n \"is_repeated\":\n False,\n \"children\":\n _create_children_from_arrow_fields(\n [subfield for subfield in field_type])\n }\n else:\n children[field.name] = {\n \"is_repeated\": False,\n \"dtype\": tf.dtypes.as_dtype(field_type)\n }\n return children\n\n\nclass _ParquetDatasetWithExpression(ParquetDataset):\n \"\"\"A dataset which reads columns from a parquet file based on the expressions.\n\n The data read from the parquet file will then have the expression queries\n applied to it, creating a new prensor.\n\n This dataset should not be created by the user, call\n parquet_dataset.calculate_parquet_values() to get this dataset instead.\n \"\"\"\n\n def __init__(self, exprs: List[expression.Expression],\n root_expr: placeholder._PlaceholderRootExpression,\n filenames: List[str], batch_size: int,\n options: Optional[calculate_options.Options]):\n self._exprs = exprs\n self._root_expr = root_expr\n self._filesnames = filenames\n self._batch_size = batch_size\n self._options = options\n\n # pylint: disable=protected-access\n self._subtrees = [x.get_known_descendants() for x in self._exprs]\n self._all_expressions = []\n for tree in self._subtrees:\n self._all_expressions.extend(tree.values())\n\n expression_graph = calculate.OriginalExpressionGraph(self._all_expressions)\n self._canonical_graph = calculate.CanonicalExpressionGraph(expression_graph)\n paths = placeholder.get_placeholder_paths_from_graph(self._canonical_graph)\n\n parquet_paths = [\".\".join(p.field_list) for p in paths]\n\n super(_ParquetDatasetWithExpression,\n self).__init__(filenames, parquet_paths, batch_size)\n\n def _calculate_prensor(self, pren) -> List[prensor.Prensor]:\n \"\"\"Function for applying expression queries to a prensor.\n\n This function should be passed into dataset.map().\n\n Args:\n pren: The prensor that will be used to bind to the root expression.\n\n Returns:\n A list of modified prensor that have the expression queries applied.\n \"\"\"\n self._canonical_graph.calculate_values(\n options=self._options, feed_dict={self._root_expr: pren})\n values = [\n self._canonical_graph.get_value_or_die(x) for x in self._all_expressions\n ]\n\n expr_to_value_map = {\n id(expr): value for expr, value in zip(self._all_expressions, values)\n }\n\n # pylint: disable=protected-access\n return [\n calculate._get_prensor(subtree, expr_to_value_map)\n for subtree in self._subtrees\n ]\n"
] | [
[
"tensorflow.transpose",
"tensorflow.control_dependencies",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.assert_equal",
"tensorflow.compat.v1.assert_positive",
"tensorflow.compat.dimension_at_index"
],
[
"tensorflow.gather",
"tensorflow.size",
"tensorflow.range"
],
[
"tensorflow.TensorShape",
"tensorflow.dtypes.as_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
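The struct2tensor row above ends with the Apache Parquet dataset module, whose docstring outlines the intended call sequence (build a placeholder expression from a file, project the leaf paths of interest, then stream batches of prensors). The following is a minimal usage sketch of that sequence, not part of the dataset row itself; the import locations, the file name `data.parquet`, and the column path `DocId` are assumptions chosen for illustration.

```python
# Minimal sketch of the Parquet workflow described in the module docstring above.
# ASSUMPTIONS: the module locations (`expression_impl.parquet`, `expression_impl.project`),
# the file "data.parquet", and the leaf path "DocId" are illustrative only.
from struct2tensor import path
from struct2tensor.expression_impl import parquet   # assumed module path
from struct2tensor.expression_impl import project   # assumed module path

filenames = ["data.parquet"]

# Placeholder root expression mirroring the parquet file's schema.
exp = parquet.create_expression_from_parquet_file(filenames)

# Keep only the DocId leaf.
docid_exp = project.project(exp, [path.Path(["DocId"])])

# Each dataset element is a list with one prensor per requested expression.
for prensors in parquet.calculate_parquet_values([docid_exp], exp,
                                                 filenames, batch_size=16):
    doc_id_prensor = prensors[0]
```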
sunghern/Auto-Compression | [
"7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e",
"7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e",
"7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e"
] | [
"pruning/cifar10_fbnet/supernet_main_file.py",
"quantization/cifar10/supernet_main_file.py",
"quantization/imagenet/fbnet_building_blocks/quantize.py"
] | [
"import numpy as np\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\nfrom scipy.special import softmax\nimport argparse\n\nfrom general_functions.dataloaders import get_loaders, get_test_loader\nfrom general_functions.utils import get_logger, weights_init, load, create_directories_from_list, \\\n check_tensor_in_list, writh_new_ARCH_to_fbnet_modeldef\nfrom supernet_functions.lookup_table_builder import LookUpTable_HIGH\nfrom supernet_functions.model_supernet import FBNet_Stochastic_SuperNet, SupernetLoss\nfrom supernet_functions.training_functions_supernet import TrainerSupernet\nfrom supernet_functions.config_for_supernet import CONFIG_SUPERNET\nfrom fbnet_building_blocks.fbnet_modeldef import MODEL_ARCH\nimport copy\nimport torch.nn.utils.prune as prune\n\nparser = argparse.ArgumentParser(\"action\")\nparser.add_argument('--train_or_sample', type=str, default='', \\\n help='train means training of the SuperNet, sample means sample from SuperNet\\'s results')\nparser.add_argument('--architecture_name', type=str, default='', \\\n help='Name of an architecture to be sampled')\nparser.add_argument('--hardsampling_bool_value', type=str, default='True', \\\n help='If not False or 0 -> do hardsampling, else - softmax sampling')\nparser.add_argument('--prune', type=str, default='channel', \\\n help='channel or group')\nargs = parser.parse_args()\n\ndef train_supernet():\n manual_seed = 1\n np.random.seed(manual_seed)\n torch.manual_seed(manual_seed)\n torch.cuda.manual_seed_all(manual_seed)\n torch.backends.cudnn.benchmark = True\n\n create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])\n \n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])\n #### DataLoading\n train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],\n CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'],\n logger)\n test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'])\n lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], prune_type=args.prune)\n\n ###MODEL\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = model.apply(weights_init)\n model = nn.DataParallel(model, device_ids=[0])\n for m in model.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n prune.remove(m, 'weight')\n #### Loss, Optimizer and Scheduler\n criterion = SupernetLoss().cuda()\n\n\n thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]\n params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]\n\n w_optimizer = torch.optim.SGD(params=params_except_thetas,\n lr=CONFIG_SUPERNET['optimizer']['w_lr'], \n momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],\n weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])\n \n theta_optimizer = torch.optim.Adam(params=thetas_params,\n lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],\n weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])\n\n last_epoch = -1\n w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,\n T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],\n last_epoch=last_epoch)\n #### Training Loop\n trainer = TrainerSupernet(criterion, w_optimizer, 
theta_optimizer, w_scheduler, logger, writer, True)\n trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n '''\n for layer in model.module.stages_to_search:\n #layer.thetas = nn.Parameter(torch.Tensor([1.0 / 1 for i in range(1)]).cuda())\n print(layer.thetas)\n '''\n f = open(\"result.txt\", \"w\")\n for i, layer in enumerate(model.module.stages_to_search):\n print('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end=\" \")\n f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())]+'\\n')\n f.close()\n print()\n\n# Arguments:\n# hardsampling=True means get operations with the largest weights\n# =False means apply softmax to weights and sample from the distribution\n# unique_name_of_arch - name of architecture. will be written into fbnet_building_blocks/fbnet_modeldef.py\n# and can be used in the training by train_architecture_main_file.py\ndef sample_architecture_from_the_supernet(unique_name_of_arch, hardsampling=True):\n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n \n lookup_table = LookUpTable()\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = nn.DataParallel(model)\n\n load(model, CONFIG_SUPERNET['train_settings']['path_to_save_model'])\n\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n cnt_ops = len(ops_names)\n\n arch_operations=[]\n if hardsampling:\n for layer in model.module.stages_to_search:\n arch_operations.append(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])\n else:\n rng = np.linspace(0, cnt_ops - 1, cnt_ops, dtype=int)\n for layer in model.module.stages_to_search:\n distribution = softmax(layer.thetas.detach().cpu().numpy())\n arch_operations.append(ops_names[np.random.choice(rng, p=distribution)])\n \n logger.info(\"Sampled Architecture: \" + \" - \".join(arch_operations))\n writh_new_ARCH_to_fbnet_modeldef(arch_operations, my_unique_name_for_ARCH=unique_name_of_arch)\n logger.info(\"CONGRATULATIONS! New architecture \" + unique_name_of_arch \\\n + \" was written into fbnet_building_blocks/fbnet_modeldef.py\")\n \nif __name__ == \"__main__\":\n assert args.train_or_sample in ['train', 'sample']\n if args.train_or_sample == 'train':\n train_supernet()\n elif args.train_or_sample == 'sample':\n assert args.architecture_name != '' and args.architecture_name not in MODEL_ARCH\n hardsampling = False if args.hardsampling_bool_value in ['False', '0'] else True\n sample_architecture_from_the_supernet(unique_name_of_arch=args.architecture_name, hardsampling=hardsampling)\n",
"import numpy as np\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\nfrom scipy.special import softmax\nimport argparse\n\nfrom general_functions.dataloaders import get_loaders, get_test_loader\nfrom general_functions.utils import get_logger, weights_init, load, create_directories_from_list, \\\n check_tensor_in_list, writh_new_ARCH_to_fbnet_modeldef\nfrom supernet_functions.lookup_table_builder import LookUpTable, LookUpTable_HIGH\nfrom supernet_functions.model_supernet import FBNet_Stochastic_SuperNet, SupernetLoss\nfrom supernet_functions.training_functions_supernet import TrainerSupernet\nfrom supernet_functions.config_for_supernet import CONFIG_SUPERNET\nfrom fbnet_building_blocks.fbnet_modeldef import MODEL_ARCH\nimport copy\n\nparser = argparse.ArgumentParser(\"action\")\nparser.add_argument('--train_or_sample', type=str, default='', \\\n help='train means training of the SuperNet, sample means sample from SuperNet\\'s results')\nparser.add_argument('--architecture_name', type=str, default='', \\\n help='Name of an architecture to be sampled')\nparser.add_argument('--hardsampling_bool_value', type=str, default='True', \\\n help='If not False or 0 -> do hardsampling, else - softmax sampling')\nparser.add_argument('--high_or_low', type=str, default='high')\nargs = parser.parse_args()\n\ndef train_supernet():\n manual_seed = 1\n np.random.seed(manual_seed)\n torch.manual_seed(manual_seed)\n torch.cuda.manual_seed_all(manual_seed)\n torch.backends.cudnn.benchmark = True\n\n create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])\n \n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])\n #### DataLoading\n train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],\n CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'],\n logger)\n test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'])\n ###TRAIN HIGH_LEVEL\n lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'])\n\n if args.high_or_low == 'high':\n ###MODEL\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = model.apply(weights_init)\n model = nn.DataParallel(model, device_ids=[0])\n model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/pretrained_high.pth'))\n #### Loss, Optimizer and Scheduler\n criterion = SupernetLoss().cuda()\n\n for layer in model.module.stages_to_search:\n layer.thetas = nn.Parameter(torch.Tensor([1.0 / 6 for i in range(6)]).cuda())\n\n thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]\n params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]\n\n w_optimizer = torch.optim.SGD(params=params_except_thetas,\n lr=CONFIG_SUPERNET['optimizer']['w_lr'], \n momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],\n weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])\n \n theta_optimizer = torch.optim.Adam(params=thetas_params,\n lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],\n weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])\n\n last_epoch = -1\n w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,\n T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],\n 
last_epoch=last_epoch)\n #### Training Loop\n trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, True)\n trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n f = open('result.txt', 'w')\n for i, layer in enumerate(model.module.stages_to_search):\n print(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end=\" \")\n f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])\n f.close()\n\n else:\n count = 0\n previous = []\n index = []\n act_update=[]\n weight_update=[]\n while True:\n print(count, \"th Iterations\")\n lookup_table = LookUpTable(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], count=count, act_update=act_update, weight_update=weight_update)\n for i in range(len(weight_update)):\n weight_update[i] = 0\n #if count != 0:\n # lookup_table.index[0] = copy.deepcopy(index)\n ###MODEL\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = nn.DataParallel(model, device_ids=[0])\n #if count == 0:\n # model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/pretrained.pth'))\n #else:\n #model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/best_model.pth'))\n model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/best_model.pth'))\n #model = model.apply(weights_init)\n #### Loss, Optimizer and Scheduler\n criterion = SupernetLoss().cuda()\n\n for layer in model.module.stages_to_search:\n layer.thetas = nn.Parameter(torch.Tensor([1.0 / 3 for i in range(3)]).cuda())\n\n thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]\n params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]\n\n w_optimizer = torch.optim.SGD(params=params_except_thetas,\n lr=CONFIG_SUPERNET['optimizer']['w_lr'], \n momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],\n weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])\n \n theta_optimizer = torch.optim.Adam(params=thetas_params,\n lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],\n weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])\n\n last_epoch = -1\n w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,\n T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],\n last_epoch=last_epoch)\n #### Training Loop\n trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, False)\n trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)\n del index[:]\n with open('index.txt', 'w') as f:\n for idx,layer in enumerate(model.module.stages_to_search):\n ops = np.argmax(layer.thetas.detach().cpu().numpy())\n tmp = lookup_table.index[ops][idx]\n index.append(tmp)\n f.write('%s\\n' % tmp)\n f.close()\n same = 1\n if count != 0:\n for i in range(len(previous)):\n for j in range(len(previous[i])):\n if previous[i][j] not in index[i]:\n same = 0\n if same == 1:\n break\n previous = copy.deepcopy(index)\n count += 1\n\n# Arguments:\n# hardsampling=True means get operations with the largest weights\n# =False means apply softmax to weights and sample from the distribution\n# unique_name_of_arch - name of architecture. 
will be written into fbnet_building_blocks/fbnet_modeldef.py\n# and can be used in the training by train_architecture_main_file.py\ndef sample_architecture_from_the_supernet(unique_name_of_arch, hardsampling=True):\n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n \n lookup_table = LookUpTable()\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = nn.DataParallel(model)\n\n load(model, CONFIG_SUPERNET['train_settings']['path_to_save_model'])\n\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n cnt_ops = len(ops_names)\n\n arch_operations=[]\n if hardsampling:\n for layer in model.module.stages_to_search:\n arch_operations.append(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])\n else:\n rng = np.linspace(0, cnt_ops - 1, cnt_ops, dtype=int)\n for layer in model.module.stages_to_search:\n distribution = softmax(layer.thetas.detach().cpu().numpy())\n arch_operations.append(ops_names[np.random.choice(rng, p=distribution)])\n \n logger.info(\"Sampled Architecture: \" + \" - \".join(arch_operations))\n writh_new_ARCH_to_fbnet_modeldef(arch_operations, my_unique_name_for_ARCH=unique_name_of_arch)\n logger.info(\"CONGRATULATIONS! New architecture \" + unique_name_of_arch \\\n + \" was written into fbnet_building_blocks/fbnet_modeldef.py\")\n \nif __name__ == \"__main__\":\n assert args.train_or_sample in ['train', 'sample']\n if args.train_or_sample == 'train':\n train_supernet()\n elif args.train_or_sample == 'sample':\n assert args.architecture_name != '' and args.architecture_name not in MODEL_ARCH\n hardsampling = False if args.hardsampling_bool_value in ['False', '0'] else True\n sample_architecture_from_the_supernet(unique_name_of_arch=args.architecture_name, hardsampling=hardsampling)\n",
"import torch\nfrom torch.autograd.function import InplaceFunction, Function\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\ndef _mean(p, dim):\n \"\"\"Computes the mean over all dimensions except dim\"\"\"\n if dim is None:\n return p.mean()\n elif dim == 0:\n output_size = (p.size(0),) + (1,) * (p.dim() - 1)\n return p.contiguous().view(p.size(0), -1).mean(dim=1).view(*output_size)\n elif dim == p.dim() - 1:\n output_size = (1,) * (p.dim() - 1) + (p.size(-1),)\n return p.contiguous().view(-1, p.size(-1)).mean(dim=0).view(*output_size)\n else:\n return _mean(p.transpose(0, dim), 0).transpose(0, dim)\n\n\nclass UniformQuantize(InplaceFunction):\n\n @classmethod\n def forward(cls, ctx, input, num_bits=8, min_value=None, max_value=None,\n stochastic=False, inplace=False, num_chunks=None, out_half=False, quantize=False, layer_num=-1, multi=False, index=[], is_act=False):\n if is_act:\n multi=False\n num_chunks = num_chunks = input.shape[\n 0] if num_chunks is None else num_chunks\n if min_value is None or max_value is None:\n B = input.shape[0]\n y = input.view(B // num_chunks, -1)\n if min_value is None:\n min_value = y.min(-1)[0].mean(-1) # C\n #min_value = float(input.view(input.size(0), -1).min(-1)[0].mean())\n if max_value is None:\n #max_value = float(input.view(input.size(0), -1).max(-1)[0].mean())\n max_value = y.max(-1)[0].mean(-1) # C\n ctx.inplace = inplace\n ctx.num_bits = num_bits\n ctx.min_value = min_value\n ctx.max_value = max_value\n ctx.stochastic = stochastic\n \n if ctx.inplace:\n ctx.mark_dirty(input)\n output = input\n else:\n output = input.clone()\n if multi:\n bit_max = 8\n for i in range(bit_max):\n if len(index[layer_num][i]) == 0:\n continue\n else:\n idx = index[layer_num][i]\n min_value = output[idx].min()\n max_value = output[idx].max()\n qmin = 0.\n qmax = 2.**(1+i) - 1.\n scale = (max_value - min_value) / (qmax - qmin)\n scale = max(scale, 1e-8)\n output[idx] = output[idx].add_(-min_value).div_(scale).add_(qmin)\n output[idx] = output[idx].clamp_(qmin, qmax).round_() # quantize\n output[idx] = output[idx].add_(-qmin).mul_(scale).add_(min_value)\n else:\n min_value = output.min()\n max_value = output.max()\n qmin = 0.\n qmax = 2.**num_bits - 1.\n scale = (max_value - min_value) / (qmax - qmin)\n scale = max(scale, 1e-8)\n output = output.add_(-min_value).div_(scale).add_(qmin)\n output = output.clamp_(qmin, qmax).round_() # quantize\n output = output.add_(-qmin).mul_(scale).add_(min_value)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n # straight-through estimator\n grad_input = grad_output\n return grad_input, None, None, None, None, None, None, None, None, None, None, None, None\n\ndef quantize(x, num_bits=8, min_value=None, max_value=None, num_chunks=None, stochastic=False, inplace=False, quantize=False, layer_num=-1, multi=False, index=[], is_act=False):\n return UniformQuantize().apply(x, num_bits, min_value, max_value, stochastic, inplace, num_chunks, False, quantize, layer_num, multi, index, is_act)\n\nclass QuantMeasure(nn.Module):\n \"\"\"docstring for QuantMeasure.\"\"\"\n\n def __init__(self, num_bits=8, quantize=False, momentum=0.1):\n super(QuantMeasure, self).__init__()\n self.register_buffer('running_min', torch.zeros(1))\n self.register_buffer('running_max', torch.zeros(1))\n self.momentum = momentum\n self.num_bits = num_bits\n self.quantize = quantize\n\n def forward(self, input):\n if self.training:\n min_value = input.detach().view(\n input.size(0), -1).min(-1)[0].mean()\n max_value = 
input.detach().view(\n input.size(0), -1).max(-1)[0].mean()\n self.running_min.mul_(self.momentum).add_(\n min_value * (1 - self.momentum))\n self.running_max.mul_(self.momentum).add_(\n max_value * (1 - self.momentum))\n else:\n min_value = self.running_min\n max_value = self.running_max\n return quantize(input, self.num_bits, min_value=float(min_value), max_value=float(max_value), num_chunks=16, quantize=self.quantize, is_act=True)\n\n\nclass QConv2d(nn.Conv2d):\n \"\"\"docstring for QConv2d.\"\"\"\n\n def __init__(self, in_channels, out_channels, num_bits=8, num_bits_weight=None, kernel_size=3, stride=1,\n padding=1, dilation=1, groups=1, bias=True, num_bits_grad=None, layer_num=-1, multi=False, index=[]):\n super(QConv2d, self).__init__(in_channels, out_channels, kernel_size,\n stride, padding, dilation, groups, bias)\n self.num_bits = num_bits\n self.num_bits_weight = num_bits_weight or num_bits\n self.num_bits_grad = num_bits_grad\n self.quantize_input = QuantMeasure(self.num_bits, quantize=True)\n if num_bits_weight != None:\n self.quantize = True\n self.layer_num = layer_num\n self.multi = multi\n self.index = index\n def forward(self, input):\n if self.quantize:\n qinput = self.quantize_input(input)\n #qinput = input\n qweight = quantize(self.weight, num_bits=self.num_bits_weight,\n min_value=float(self.weight.min()),\n max_value=float(self.weight.max()), layer_num=self.layer_num, quantize=self.quantize, multi=self.multi, index=self.index)\n if self.bias is not None:\n qbias = quantize(self.bias, num_bits=self.num_bits_weight,quantize=False)\n else:\n qbias = None\n output = F.conv2d(qinput, qweight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n else:\n output = F.conv2d(input, self.weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n return output\n\n\nclass QLinear(nn.Linear):\n \"\"\"docstring for QConv2d.\"\"\"\n\n def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=None, num_bits_grad=None, biprecision=False, quantize=False, layer_num=-1, multi=False, index=[]):\n super(QLinear, self).__init__(in_features, out_features, bias)\n self.num_bits = num_bits\n self.num_bits_weight = num_bits_weight or num_bits\n self.num_bits_grad = num_bits_grad\n self.quantize_input = QuantMeasure(self.num_bits, quantize=False)\n self.quantize = quantize\n self.layer_num = layer_num\n self.multi = multi\n self.index = index\n def forward(self, input):\n if self.quantize:\n #qinput = self.quantize_input(input)\n qinput = input\n qweight = quantize(self.weight, num_bits=self.num_bits_weight,\n min_value=float(self.weight.min()),\n max_value=float(self.weight.max()), layer_num=self.layer_num, quantize=self.quantize, multi=self.multi, index=self.index)\n if self.bias is not None:\n qbias = quantize(self.bias, num_bits=self.num_bits_weight, quantize=False)\n else:\n qbias = None\n output = F.linear(qinput, qweight, self.bias)\n else:\n output = F.linear(input, self.weight, self.bias)\n return output\n\n"
] | [
[
"torch.optim.Adam",
"numpy.random.seed",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"numpy.linspace",
"torch.manual_seed",
"numpy.random.choice",
"torch.nn.utils.prune.remove",
"torch.cuda.manual_seed_all",
"torch.optim.SGD",
"torch.nn.DataParallel"
],
[
"torch.optim.Adam",
"numpy.random.seed",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"numpy.linspace",
"torch.manual_seed",
"torch.load",
"numpy.random.choice",
"torch.cuda.manual_seed_all",
"torch.optim.SGD",
"torch.nn.DataParallel"
],
[
"torch.nn.functional.conv2d",
"torch.nn.functional.linear",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
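The Auto-Compression row above includes `quantize.py`, whose `UniformQuantize.forward` performs per-tensor min/max fake quantization in its non-`multi` branch. Below is a small, self-contained restatement of that arithmetic as a sketch; the helper name, the sample tensor, and the 4-bit setting are invented for illustration and are not taken from the repository.

```python
# Self-contained sketch of the min/max uniform ("fake") quantization used in
# UniformQuantize.forward above (single bit-width branch). Values are made up.
import torch

def uniform_fake_quantize(x: torch.Tensor, num_bits: int = 8) -> torch.Tensor:
    min_value, max_value = x.min(), x.max()
    qmin, qmax = 0.0, 2.0 ** num_bits - 1.0
    # Guard against a zero range, as the original does with max(scale, 1e-8).
    scale = max(float(max_value - min_value) / (qmax - qmin), 1e-8)
    q = ((x - min_value) / scale + qmin).clamp(qmin, qmax).round()  # quantize
    return (q - qmin) * scale + min_value                           # dequantize

weights = torch.randn(4, 4)
print(uniform_fake_quantize(weights, num_bits=4))
```

During training, the original wraps this arithmetic in an autograd `Function` whose backward pass returns the incoming gradient unchanged, i.e. a straight-through estimator across the rounding step.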
levishai/3DMPPE_POSENET_RELEASE | [
"e364053b5a4e51f4a84eb50abb26026094931d90"
] | [
"main/test.py"
] | [
"import argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\nfrom config import cfg\nimport torch\nfrom base import Tester\nfrom utils.vis import vis_keypoints\nfrom utils.pose_utils import flip\nimport torch.backends.cudnn as cudnn\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=str, dest='gpu_ids')\n parser.add_argument('--test_epoch', type=str, dest='test_epoch')\n args = parser.parse_args()\n\n # test gpus\n if not args.gpu_ids:\n assert 0, \"Please set proper gpu ids\"\n\n if '-' in args.gpu_ids:\n gpus = args.gpu_ids.split('-')\n gpus[0] = int(gpus[0])\n gpus[1] = int(gpus[1]) + 1\n args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))\n \n assert args.test_epoch, 'Test epoch is required.'\n return args\n\ndef main():\n\n args = parse_args()\n cfg.set_args(args.gpu_ids)\n cudnn.fastest = True\n cudnn.benchmark = True\n cudnn.deterministic = False\n cudnn.enabled = True\n\n tester = Tester(args.test_epoch)\n tester._make_batch_generator()\n tester._make_model()\n\n preds = []\n\n with torch.no_grad():\n for itr, input_img in enumerate(tqdm(tester.batch_generator)):\n \n # forward\n coord_out = tester.model(input_img)\n\n if cfg.flip_test:\n flipped_input_img = flip(input_img, dims=3)\n flipped_coord_out = tester.model(flipped_input_img)\n flipped_coord_out[:, :, 0] = cfg.output_shape[1] - flipped_coord_out[:, :, 0] - 1\n for pair in tester.flip_pairs:\n flipped_coord_out[:, pair[0], :], flipped_coord_out[:, pair[1], :] = flipped_coord_out[:, pair[1], :].clone(), flipped_coord_out[:, pair[0], :].clone()\n coord_out = (coord_out + flipped_coord_out)/2.\n\n vis = False\n if vis:\n filename = str(itr)\n tmpimg = input_img[0].cpu().numpy()\n tmpimg = tmpimg * np.array(cfg.pixel_std).reshape(3,1,1) + np.array(cfg.pixel_mean).reshape(3,1,1)\n tmpimg = tmpimg.astype(np.uint8)\n tmpimg = tmpimg[::-1, :, :]\n tmpimg = np.transpose(tmpimg,(1,2,0)).copy()\n tmpkps = np.zeros((3,tester.joint_num))\n tmpkps[:2,:] = coord_out[0,:,:2].cpu().numpy().transpose(1,0) / cfg.output_shape[0] * cfg.input_shape[0]\n tmpkps[2,:] = 1\n tmpimg = vis_keypoints(tmpimg, tmpkps, tester.skeleton)\n cv2.imwrite(filename + '_output.jpg', tmpimg)\n\n coord_out = coord_out.cpu().numpy()\n preds.append(coord_out)\n \n # evaluate\n preds = np.concatenate(preds, axis=0)\n tester._evaluate(preds, cfg.result_dir) \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.concatenate",
"torch.no_grad",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
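The `main/test.py` file above averages each prediction with a flip-tested one: it mirrors the x coordinate within the output heatmap width and swaps left/right joint pairs before averaging. The sketch below restates that merge step on dummy arrays; the shapes, the output width, and the pair indices are placeholders, not values from the repository.

```python
# Self-contained sketch of the flip-test merge performed in main() above.
# Batch size, joint count, output width and flip pairs are placeholders.
import numpy as np

def merge_flip_test(coord, flipped_coord, output_width, flip_pairs):
    """coord, flipped_coord: (batch, num_joints, 3) arrays of (x, y, z)."""
    flipped = flipped_coord.copy()
    flipped[:, :, 0] = output_width - flipped[:, :, 0] - 1   # undo the horizontal flip
    for left, right in flip_pairs:                           # swap symmetric joints
        flipped[:, [left, right], :] = flipped[:, [right, left], :]
    return (coord + flipped) / 2.0

coord = np.random.rand(2, 4, 3) * 64
flipped = np.random.rand(2, 4, 3) * 64
print(merge_flip_test(coord, flipped, output_width=64,
                      flip_pairs=[(0, 1), (2, 3)]).shape)
```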
sanjaymsh/silx | [
"3f9bcda88c074438fdb30cde29fec314d26f471c",
"50c2b4820d4786abcce866645b1d3c138891a25f"
] | [
"silx/math/fit/fittheories.py",
"silx/gui/plot/items/scatter.py"
] | [
"# coding: utf-8\n#/*##########################################################################\n#\n# Copyright (c) 2004-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n########################################################################### */\n\"\"\"This modules provides a set of fit functions and associated\nestimation functions in a format that can be imported into a\n:class:`silx.math.fit.FitManager` instance.\n\nThese functions are well suited for fitting multiple gaussian shaped peaks\ntypically found in spectroscopy data. The estimation functions are designed\nto detect how many peaks are present in the data, and provide an initial\nestimate for their height, their center location and their full-width\nat half maximum (fwhm).\n\nThe limitation of these estimation algorithms is that only gaussians having a\nsimilar fwhm can be detected by the peak search algorithm.\nThis *search fwhm* can be defined by the user, if\nhe knows the characteristics of his data, or can be automatically estimated\nbased on the fwhm of the largest peak in the data.\n\nThe source code of this module can serve as template for defining your own\nfit functions.\n\nThe functions to be imported by :meth:`FitManager.loadtheories` are defined by\na dictionary :const:`THEORY`: with the following structure::\n\n from silx.math.fit.fittheory import FitTheory\n\n THEORY = {\n 'theory_name_1': FitTheory(\n description='Description of theory 1',\n function=fitfunction1,\n parameters=('param name 1', 'param name 2', …),\n estimate=estimation_function1,\n configure=configuration_function1,\n derivative=derivative_function1),\n\n 'theory_name_2': FitTheory(…),\n }\n\n.. note::\n\n Consider using an OrderedDict instead of a regular dictionary, when\n defining your own theory dictionary, if the order matters to you.\n This will likely be the case if you intend to load a selection of\n functions in a GUI such as :class:`silx.gui.fit.FitManager`.\n\nTheory names can be customized (e.g. 
``gauss, lorentz, splitgauss``…).\n\nThe mandatory parameters for :class:`FitTheory` are ``function`` and\n``parameters``.\n\nYou can also define an ``INIT`` function that will be executed by\n:meth:`FitManager.loadtheories`.\n\nSee the documentation of :class:`silx.math.fit.fittheory.FitTheory`\nfor more information.\n\nModule members:\n---------------\n\"\"\"\nimport numpy\nfrom collections import OrderedDict\nimport logging\n\nfrom silx.math.fit import functions\nfrom silx.math.fit.peaks import peak_search, guess_fwhm\nfrom silx.math.fit.filters import strip, savitsky_golay\nfrom silx.math.fit.leastsq import leastsq\nfrom silx.math.fit.fittheory import FitTheory\n\n_logger = logging.getLogger(__name__)\n\n__authors__ = [\"V.A. Sole\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"15/05/2017\"\n\n\nDEFAULT_CONFIG = {\n 'NoConstraintsFlag': False,\n 'PositiveFwhmFlag': True,\n 'PositiveHeightAreaFlag': True,\n 'SameFwhmFlag': False,\n 'QuotedPositionFlag': False, # peak not outside data range\n 'QuotedEtaFlag': False, # force 0 < eta < 1\n # Peak detection\n 'AutoScaling': False,\n 'Yscaling': 1.0,\n 'FwhmPoints': 8,\n 'AutoFwhm': True,\n 'Sensitivity': 2.5,\n 'ForcePeakPresence': True,\n # Hypermet\n 'HypermetTails': 15,\n 'QuotedFwhmFlag': 0,\n 'MaxFwhm2InputRatio': 1.5,\n 'MinFwhm2InputRatio': 0.4,\n # short tail parameters\n 'MinGaussArea4ShortTail': 50000.,\n 'InitialShortTailAreaRatio': 0.050,\n 'MaxShortTailAreaRatio': 0.100,\n 'MinShortTailAreaRatio': 0.0010,\n 'InitialShortTailSlopeRatio': 0.70,\n 'MaxShortTailSlopeRatio': 2.00,\n 'MinShortTailSlopeRatio': 0.50,\n # long tail parameters\n 'MinGaussArea4LongTail': 1000.0,\n 'InitialLongTailAreaRatio': 0.050,\n 'MaxLongTailAreaRatio': 0.300,\n 'MinLongTailAreaRatio': 0.010,\n 'InitialLongTailSlopeRatio': 20.0,\n 'MaxLongTailSlopeRatio': 50.0,\n 'MinLongTailSlopeRatio': 5.0,\n # step tail\n 'MinGaussHeight4StepTail': 5000.,\n 'InitialStepTailHeightRatio': 0.002,\n 'MaxStepTailHeightRatio': 0.0100,\n 'MinStepTailHeightRatio': 0.0001,\n # Hypermet constraints\n # position in range [estimated position +- estimated fwhm/2]\n 'HypermetQuotedPositionFlag': True,\n 'DeltaPositionFwhmUnits': 0.5,\n 'SameSlopeRatioFlag': 1,\n 'SameAreaRatioFlag': 1,\n # Strip bg removal\n 'StripBackgroundFlag': True,\n 'SmoothingFlag': True,\n 'SmoothingWidth': 5,\n 'StripWidth': 2,\n 'StripIterations': 5000,\n 'StripThresholdFactor': 1.0}\n\"\"\"This dictionary defines default configuration parameters that have effects\non fit functions and estimation functions, mainly on fit constraints.\nThis dictionary is accessible as attribute :attr:`FitTheories.config`,\nwhich can be modified by configuration functions defined in\n:const:`CONFIGURE`.\n\"\"\"\n\nCFREE = 0\nCPOSITIVE = 1\nCQUOTED = 2\nCFIXED = 3\nCFACTOR = 4\nCDELTA = 5\nCSUM = 6\nCIGNORED = 7\n\n\nclass FitTheories(object):\n \"\"\"Class wrapping functions from :class:`silx.math.fit.functions`\n and providing estimate functions for all of these fit functions.\"\"\"\n def __init__(self, config=None):\n if config is None:\n self.config = DEFAULT_CONFIG\n else:\n self.config = config\n\n def ahypermet(self, x, *pars):\n \"\"\"\n Wrapping of :func:`silx.math.fit.functions.sum_ahypermet` without\n the tail flags in the function signature.\n\n Depending on the value of `self.config['HypermetTails']`, one can\n activate or deactivate the various terms of the hypermet function.\n\n `self.config['HypermetTails']` must be an integer between 0 and 15.\n It is a set of 4 binary flags, one for activating each 
one of the\n hypermet terms: *gaussian function, short tail, long tail, step*.\n\n For example, 15 can be expressed as ``1111`` in base 2, so a flag of\n 15 means all terms are active.\n \"\"\"\n g_term = self.config['HypermetTails'] & 1\n st_term = (self.config['HypermetTails'] >> 1) & 1\n lt_term = (self.config['HypermetTails'] >> 2) & 1\n step_term = (self.config['HypermetTails'] >> 3) & 1\n return functions.sum_ahypermet(x, *pars,\n gaussian_term=g_term, st_term=st_term,\n lt_term=lt_term, step_term=step_term)\n\n def poly(self, x, *pars):\n \"\"\"Order n polynomial.\n The order of the polynomial is defined by the number of\n coefficients (``*pars``).\n\n \"\"\"\n p = numpy.poly1d(pars)\n return p(x)\n\n @staticmethod\n def estimate_poly(x, y, n=2):\n \"\"\"Estimate polynomial coefficients for a degree n polynomial.\n\n \"\"\"\n pcoeffs = numpy.polyfit(x, y, n)\n constraints = numpy.zeros((n + 1, 3), numpy.float)\n return pcoeffs, constraints\n\n def estimate_quadratic(self, x, y):\n \"\"\"Estimate quadratic coefficients\n\n \"\"\"\n return self.estimate_poly(x, y, n=2)\n\n def estimate_cubic(self, x, y):\n \"\"\"Estimate coefficients for a degree 3 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=3)\n\n def estimate_quartic(self, x, y):\n \"\"\"Estimate coefficients for a degree 4 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=4)\n\n def estimate_quintic(self, x, y):\n \"\"\"Estimate coefficients for a degree 5 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=5)\n\n def strip_bg(self, y):\n \"\"\"Return the strip background of y, using parameters from\n :attr:`config` dictionary (*StripBackgroundFlag, StripWidth,\n StripIterations, StripThresholdFactor*)\"\"\"\n remove_strip_bg = self.config.get('StripBackgroundFlag', False)\n if remove_strip_bg:\n if self.config['SmoothingFlag']:\n y = savitsky_golay(y, self.config['SmoothingWidth'])\n strip_width = self.config['StripWidth']\n strip_niterations = self.config['StripIterations']\n strip_thr_factor = self.config['StripThresholdFactor']\n return strip(y, w=strip_width,\n niterations=strip_niterations,\n factor=strip_thr_factor)\n else:\n return numpy.zeros_like(y)\n\n def guess_yscaling(self, y):\n \"\"\"Estimate scaling for y prior to peak search.\n A smoothing filter is applied to y to estimate the noise level\n (chi-squared)\n\n :param y: Data array\n :return: Scaling factor\n \"\"\"\n # ensure y is an array\n yy = numpy.array(y, copy=False)\n\n # smooth\n convolution_kernel = numpy.ones(shape=(3,)) / 3.\n ysmooth = numpy.convolve(y, convolution_kernel, mode=\"same\")\n\n # remove zeros\n idx_array = numpy.fabs(y) > 0.0\n yy = yy[idx_array]\n ysmooth = ysmooth[idx_array]\n\n # compute scaling factor\n chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))\n if chisq > 0:\n return 1. / chisq\n else:\n return 1.0\n\n def peak_search(self, y, fwhm, sensitivity):\n \"\"\"Search for peaks in y array, after padding the array and\n multiplying its value by a scaling factor.\n\n :param y: 1-D data array\n :param int fwhm: Typical full width at half maximum for peaks,\n in number of points. This parameter is used for to discriminate between\n true peaks and background fluctuations.\n :param float sensitivity: Sensitivity parameter. This is a threshold factor\n for peak detection. 
Only peaks larger than the standard deviation\n of the noise multiplied by this sensitivity parameter are detected.\n :return: List of peak indices\n \"\"\"\n # add padding\n ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float)\n ysearch[0:fwhm] = y[0]\n ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]\n ysearch[fwhm:fwhm + len(y)] = y[:]\n\n scaling = self.guess_yscaling(y) if self.config[\"AutoScaling\"] else self.config[\"Yscaling\"]\n\n if len(ysearch) > 1.5 * fwhm:\n peaks = peak_search(scaling * ysearch,\n fwhm=fwhm, sensitivity=sensitivity)\n return [peak_index - fwhm for peak_index in peaks\n if 0 <= peak_index - fwhm < len(y)]\n else:\n return []\n\n def estimate_height_position_fwhm(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM* of peaks, for gaussian-like\n curves.\n\n This functions finds how many parameters are needed, based on the\n number of peaks detected. Then it estimates the fit parameters\n with a few iterations of fitting gaussian functions.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar = []\n\n bg = self.strip_bg(y)\n\n if self.config['AutoFwhm']:\n search_fwhm = guess_fwhm(y)\n else:\n search_fwhm = int(float(self.config['FwhmPoints']))\n search_sens = float(self.config['Sensitivity'])\n\n if search_fwhm < 3:\n _logger.warning(\"Setting peak fwhm to 3 (lower limit)\")\n search_fwhm = 3\n self.config['FwhmPoints'] = 3\n\n if search_sens < 1:\n _logger.warning(\"Setting peak search sensitivity to 1. \" +\n \"(lower limit to filter out noise peaks)\")\n search_sens = 1\n self.config['Sensitivity'] = 1\n\n npoints = len(y)\n\n # Find indices of peaks in data array\n peaks = self.peak_search(y,\n fwhm=search_fwhm,\n sensitivity=search_sens)\n\n if not len(peaks):\n forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))\n if forcepeak:\n delta = y - bg\n # get index of global maximum\n # (first one if several samples are equal to this value)\n peaks = [numpy.nonzero(delta == delta.max())[0][0]]\n\n # Find index of largest peak in peaks array\n index_largest_peak = 0\n if len(peaks) > 0:\n # estimate fwhm as 5 * sampling interval\n sig = 5 * abs(x[npoints - 1] - x[0]) / npoints\n peakpos = x[int(peaks[0])]\n if abs(peakpos) < 1.0e-16:\n peakpos = 0.0\n param = numpy.array(\n [y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])\n height_largest_peak = param[0]\n peak_index = 1\n for i in peaks[1:]:\n param2 = numpy.array(\n [y[int(i)] - bg[int(i)], x[int(i)], sig])\n param = numpy.concatenate((param, param2))\n if param2[0] > height_largest_peak:\n height_largest_peak = param2[0]\n index_largest_peak = peak_index\n peak_index += 1\n\n # Subtract background\n xw = x\n yw = y - bg\n\n cons = numpy.zeros((len(param), 3), numpy.float)\n\n # peak height must be positive\n cons[0:len(param):3, 0] = CPOSITIVE\n # force peaks to stay around their position\n cons[1:len(param):3, 0] = CQUOTED\n\n # set possible peak range to estimated peak +- guessed fwhm\n if len(xw) > search_fwhm:\n fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])\n cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx\n cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx\n else:\n shape = [max(1, int(x)) for x in (param[1:len(param):3])]\n cons[1:len(param):3, 1] = min(xw) * numpy.ones(\n shape,\n numpy.float)\n cons[1:len(param):3, 
2] = max(xw) * numpy.ones(\n shape,\n numpy.float)\n\n # ensure fwhm is positive\n cons[2:len(param):3, 0] = CPOSITIVE\n\n # run a quick iterative fit (4 iterations) to improve\n # estimations\n fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,\n max_iter=4, constraints=cons.tolist(),\n full_output=True)\n\n # set final constraints based on config parameters\n cons = numpy.zeros((len(fittedpar), 3), numpy.float)\n peak_index = 0\n for i in range(len(peaks)):\n # Setup height area constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveHeightAreaFlag']:\n cons[peak_index, 0] = CPOSITIVE\n cons[peak_index, 1] = 0\n cons[peak_index, 2] = 0\n peak_index += 1\n\n # Setup position constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['QuotedPositionFlag']:\n cons[peak_index, 0] = CQUOTED\n cons[peak_index, 1] = min(x)\n cons[peak_index, 2] = max(x)\n peak_index += 1\n\n # Setup positive FWHM constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveFwhmFlag']:\n cons[peak_index, 0] = CPOSITIVE\n cons[peak_index, 1] = 0\n cons[peak_index, 2] = 0\n if self.config['SameFwhmFlag']:\n if i != index_largest_peak:\n cons[peak_index, 0] = CFACTOR\n cons[peak_index, 1] = 3 * index_largest_peak + 2\n cons[peak_index, 2] = 1.0\n peak_index += 1\n\n return fittedpar, cons\n\n def estimate_agauss(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM* of peaks, for gaussian-like\n curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n converts the height parameters to area under the curve with the\n formula ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2))``\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n for i in range(npeaks):\n height = fittedpar[3 * i]\n fwhm = fittedpar[3 * i + 2]\n # Replace height with area in fittedpar\n fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (\n 2.0 * numpy.sqrt(2 * numpy.log(2)))\n return fittedpar, cons\n\n def estimate_alorentz(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM* of peaks, for Lorentzian\n curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n converts the height parameters to area under the curve with the\n formula ``area = height * fwhm * 0.5 * pi``\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n for i in range(npeaks):\n height = fittedpar[3 * i]\n fwhm = fittedpar[3 * i + 2]\n # Replace height with area in fittedpar\n fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)\n return fittedpar, cons\n\n def estimate_splitgauss(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM1, FWHM2* of peaks, for\n asymmetric gaussian-like curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds a second (identical) estimation of FWHM to the fit parameters\n for each peak, 
and the corresponding constraint.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM1, FWHM2*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n estimated_parameters = []\n estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float)\n for i in range(npeaks):\n for j in range(3):\n estimated_parameters.append(fittedpar[3 * i + j])\n # fwhm2 estimate = fwhm1\n estimated_parameters.append(fittedpar[3 * i + 2])\n # height\n estimated_constraints[4 * i, 0] = cons[3 * i, 0]\n estimated_constraints[4 * i, 1] = cons[3 * i, 1]\n estimated_constraints[4 * i, 2] = cons[3 * i, 2]\n # position\n estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]\n estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]\n estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]\n # fwhm1\n estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]\n estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]\n estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]\n # fwhm2\n estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]\n estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]\n estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]\n if cons[3 * i + 2, 0] == CFACTOR:\n # convert indices of related parameters\n # (this happens if SameFwhmFlag == True)\n estimated_constraints[4 * i + 2, 1] = \\\n int(cons[3 * i + 2, 1] / 3) * 4 + 2\n estimated_constraints[4 * i + 3, 1] = \\\n int(cons[3 * i + 2, 1] / 3) * 4 + 3\n return estimated_parameters, estimated_constraints\n\n def estimate_pvoigt(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM, eta* of peaks, for\n pseudo-Voigt curves.\n\n Pseudo-Voigt are a sum of a gaussian curve *G(x)* and a lorentzian\n curve *L(x)* with the same height, center, fwhm parameters:\n ``y(x) = eta * G(x) + (1-eta) * L(x)``\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds a constant estimation of *eta* (0.5) to the fit parameters\n for each peak, and the corresponding constraint.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM, eta*.\n Constraint for the eta parameter can be set to QUOTED (0.--1.)\n by setting :attr:`config`['QuotedEtaFlag'] to ``True``.\n If this is not the case, the constraint code is set to FREE.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((4 * npeaks, 3), numpy.float)\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 4 * j + 2\n for i in range(npeaks):\n newpar.append(fittedpar[3 * i])\n newpar.append(fittedpar[3 * i + 1])\n newpar.append(fittedpar[3 * i + 2])\n newpar.append(0.5)\n # height\n newcons[4 * i, 0] = cons[3 * i, 0]\n newcons[4 * i, 1] = cons[3 * i, 1]\n newcons[4 * i, 2] = cons[3 * i, 2]\n # position\n newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]\n newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]\n 
newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]\n # fwhm\n newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]\n newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]\n newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]\n # Eta constrains\n newcons[4 * i + 3, 0] = CFREE\n newcons[4 * i + 3, 1] = 0\n newcons[4 * i + 3, 2] = 0\n if self.config['QuotedEtaFlag']:\n newcons[4 * i + 3, 0] = CQUOTED\n newcons[4 * i + 3, 1] = 0.0\n newcons[4 * i + 3, 2] = 1.0\n return newpar, newcons\n\n def estimate_splitpvoigt(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM1, FWHM2, eta* of peaks, for\n asymmetric pseudo-Voigt curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds an identical FWHM2 parameter and a constant estimation of\n *eta* (0.5) to the fit parameters for each peak, and the corresponding\n constraints.\n\n Constraint for the eta parameter can be set to QUOTED (0.--1.)\n by setting :attr:`config`['QuotedEtaFlag'] to ``True``.\n If this is not the case, the constraint code is set to FREE.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM1, FWHM2, eta*.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((5 * npeaks, 3), numpy.float)\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 4 * j + 2\n for i in range(npeaks):\n # height\n newpar.append(fittedpar[3 * i])\n # position\n newpar.append(fittedpar[3 * i + 1])\n # fwhm1\n newpar.append(fittedpar[3 * i + 2])\n # fwhm2 estimate equal to fwhm1\n newpar.append(fittedpar[3 * i + 2])\n # eta\n newpar.append(0.5)\n # constraint codes\n # ----------------\n # height\n newcons[5 * i, 0] = cons[3 * i, 0]\n # position\n newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]\n # fwhm1\n newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]\n # fwhm2\n newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]\n # cons 1\n # ------\n newcons[5 * i, 1] = cons[3 * i, 1]\n newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]\n newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]\n newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]\n # cons 2\n # ------\n newcons[5 * i, 2] = cons[3 * i, 2]\n newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]\n newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]\n newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]\n\n if cons[3 * i + 2, 0] == CFACTOR:\n # fwhm2 connstraint depends on fwhm1\n newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1\n # eta constraints\n newcons[5 * i + 4, 0] = CFREE\n newcons[5 * i + 4, 1] = 0\n newcons[5 * i + 4, 2] = 0\n if self.config['QuotedEtaFlag']:\n newcons[5 * i + 4, 0] = CQUOTED\n newcons[5 * i + 4, 1] = 0.0\n newcons[5 * i + 4, 2] = 1.0\n return newpar, newcons\n\n def estimate_apvoigt(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM1, eta* of peaks, for\n pseudo-Voigt curves.\n\n This functions uses :meth:`estimate_pvoigt`, then converts the height\n parameter to area.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM, eta*.\n \"\"\"\n fittedpar, cons = self.estimate_pvoigt(x, y)\n npeaks = 
len(fittedpar) // 4\n # Assume 50% of the area is determined by the gaussian and 50% by\n # the Lorentzian.\n for i in range(npeaks):\n height = fittedpar[4 * i]\n fwhm = fittedpar[4 * i + 2]\n fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\\\n 0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n ) * numpy.sqrt(2 * numpy.pi)\n return fittedpar, cons\n\n def estimate_ahypermet(self, x, y):\n \"\"\"Estimation of *area, position, fwhm, st_area_r, st_slope_r,\n lt_area_r, lt_slope_r, step_height_r* of peaks, for hypermet curves.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *area, position, fwhm, st_area_r, st_slope_r,\n lt_area_r, lt_slope_r, step_height_r* .\n \"\"\"\n yscaling = self.config.get('Yscaling', 1.0)\n if yscaling == 0:\n yscaling = 1.0\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((8 * npeaks, 3), numpy.float)\n main_peak = 0\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 8 * j + 2\n main_peak = j\n for i in range(npeaks):\n if fittedpar[3 * i] > fittedpar[3 * main_peak]:\n main_peak = i\n\n for i in range(npeaks):\n height = fittedpar[3 * i]\n position = fittedpar[3 * i + 1]\n fwhm = fittedpar[3 * i + 2]\n area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n ) * numpy.sqrt(2 * numpy.pi)\n # the gaussian parameters\n newpar.append(area)\n newpar.append(position)\n newpar.append(fwhm)\n # print \"area, pos , fwhm = \",area,position,fwhm\n # Avoid zero derivatives because of not calculating contribution\n g_term = 1\n st_term = 1\n lt_term = 1\n step_term = 1\n if self.config['HypermetTails'] != 0:\n g_term = self.config['HypermetTails'] & 1\n st_term = (self.config['HypermetTails'] >> 1) & 1\n lt_term = (self.config['HypermetTails'] >> 2) & 1\n step_term = (self.config['HypermetTails'] >> 3) & 1\n if g_term == 0:\n # fix the gaussian parameters\n newcons[8 * i, 0] = CFIXED\n newcons[8 * i + 1, 0] = CFIXED\n newcons[8 * i + 2, 0] = CFIXED\n # the short tail parameters\n if ((area * yscaling) <\n self.config['MinGaussArea4ShortTail']) | \\\n (st_term == 0):\n newpar.append(0.0)\n newpar.append(0.0)\n newcons[8 * i + 3, 0] = CFIXED\n newcons[8 * i + 3, 1] = 0.0\n newcons[8 * i + 3, 2] = 0.0\n newcons[8 * i + 4, 0] = CFIXED\n newcons[8 * i + 4, 1] = 0.0\n newcons[8 * i + 4, 2] = 0.0\n else:\n newpar.append(self.config['InitialShortTailAreaRatio'])\n newpar.append(self.config['InitialShortTailSlopeRatio'])\n newcons[8 * i + 3, 0] = CQUOTED\n newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']\n newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']\n newcons[8 * i + 4, 0] = CQUOTED\n newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']\n newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']\n # the long tail parameters\n if ((area * yscaling) <\n self.config['MinGaussArea4LongTail']) | \\\n (lt_term == 0):\n newpar.append(0.0)\n newpar.append(0.0)\n newcons[8 * i + 5, 0] = CFIXED\n newcons[8 * i + 5, 1] = 0.0\n newcons[8 * i + 5, 2] = 0.0\n newcons[8 * i + 6, 0] = CFIXED\n newcons[8 * i + 6, 1] = 0.0\n newcons[8 * i + 6, 2] = 0.0\n else:\n 
newpar.append(self.config['InitialLongTailAreaRatio'])\n newpar.append(self.config['InitialLongTailSlopeRatio'])\n newcons[8 * i + 5, 0] = CQUOTED\n newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']\n newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']\n newcons[8 * i + 6, 0] = CQUOTED\n newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']\n newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']\n # the step parameters\n if ((height * yscaling) <\n self.config['MinGaussHeight4StepTail']) | \\\n (step_term == 0):\n newpar.append(0.0)\n newcons[8 * i + 7, 0] = CFIXED\n newcons[8 * i + 7, 1] = 0.0\n newcons[8 * i + 7, 2] = 0.0\n else:\n newpar.append(self.config['InitialStepTailHeightRatio'])\n newcons[8 * i + 7, 0] = CQUOTED\n newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']\n newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']\n # if self.config['NoConstraintsFlag'] == 1:\n # newcons=numpy.zeros((8*npeaks, 3),numpy.float)\n if npeaks > 0:\n if g_term:\n if self.config['PositiveHeightAreaFlag']:\n for i in range(npeaks):\n newcons[8 * i, 0] = CPOSITIVE\n if self.config['PositiveFwhmFlag']:\n for i in range(npeaks):\n newcons[8 * i + 2, 0] = CPOSITIVE\n if self.config['SameFwhmFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 2, 0] = CFACTOR\n newcons[8 * i + 2, 1] = 8 * main_peak + 2\n newcons[8 * i + 2, 2] = 1.0\n if self.config['HypermetQuotedPositionFlag']:\n for i in range(npeaks):\n delta = self.config['DeltaPositionFwhmUnits'] * fwhm\n newcons[8 * i + 1, 0] = CQUOTED\n newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta\n newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta\n if self.config['SameSlopeRatioFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 4, 0] = CFACTOR\n newcons[8 * i + 4, 1] = 8 * main_peak + 4\n newcons[8 * i + 4, 2] = 1.0\n newcons[8 * i + 6, 0] = CFACTOR\n newcons[8 * i + 6, 1] = 8 * main_peak + 6\n newcons[8 * i + 6, 2] = 1.0\n if self.config['SameAreaRatioFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 3, 0] = CFACTOR\n newcons[8 * i + 3, 1] = 8 * main_peak + 3\n newcons[8 * i + 3, 2] = 1.0\n newcons[8 * i + 5, 0] = CFACTOR\n newcons[8 * i + 5, 1] = 8 * main_peak + 5\n newcons[8 * i + 5, 2] = 1.0\n return newpar, newcons\n\n def estimate_stepdown(self, x, y):\n \"\"\"Estimation of parameters for stepdown curves.\n\n The functions estimates gaussian parameters for the derivative of\n the data, takes the largest gaussian peak and uses its estimated\n parameters to define the center of the step and its fwhm. 
The\n estimated amplitude returned is simply ``max(y) - min(y)``.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit newconstraints.\n Parameters to be estimated for each stepdown are:\n *height, centroid, fwhm* .\n \"\"\"\n crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]\n cutoff = len(crappyfilter) // 2\n y_deriv = numpy.convolve(y,\n crappyfilter,\n mode=\"valid\")\n\n # make the derivative's peak have the same amplitude as the step\n if max(y_deriv) > 0:\n y_deriv = y_deriv * max(y) / max(y_deriv)\n\n fittedpar, newcons = self.estimate_height_position_fwhm(\n x[cutoff:-cutoff], y_deriv)\n\n data_amplitude = max(y) - min(y)\n\n # use parameters from largest gaussian found\n if len(fittedpar):\n npeaks = len(fittedpar) // 3\n largest_index = 0\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n for i in range(npeaks):\n if fittedpar[3 * i] > largest[0]:\n largest_index = i\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n else:\n # no peak was found\n largest = [data_amplitude, # height\n x[len(x)//2], # center: middle of x range\n self.config[\"FwhmPoints\"] * (x[1] - x[0])] # fwhm: default value\n\n # Setup constrains\n newcons = numpy.zeros((3, 3), numpy.float)\n if not self.config['NoConstraintsFlag']:\n # Setup height constrains\n if self.config['PositiveHeightAreaFlag']:\n newcons[0, 0] = CPOSITIVE\n newcons[0, 1] = 0\n newcons[0, 2] = 0\n\n # Setup position constrains\n if self.config['QuotedPositionFlag']:\n newcons[1, 0] = CQUOTED\n newcons[1, 1] = min(x)\n newcons[1, 2] = max(x)\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n newcons[2, 0] = CPOSITIVE\n newcons[2, 1] = 0\n newcons[2, 2] = 0\n\n return largest, newcons\n\n def estimate_slit(self, x, y):\n \"\"\"Estimation of parameters for slit curves.\n\n The functions estimates stepup and stepdown parameters for the largest\n steps, and uses them for calculating the center (middle between stepup\n and stepdown), the height (maximum amplitude in data), the fwhm\n (distance between the up- and down-step centers) and the beamfwhm\n (average of FWHM for up- and down-step).\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each slit are:\n *height, position, fwhm, beamfwhm* .\n \"\"\"\n largestup, cons = self.estimate_stepup(x, y)\n largestdown, cons = self.estimate_stepdown(x, y)\n fwhm = numpy.fabs(largestdown[1] - largestup[1])\n beamfwhm = 0.5 * (largestup[2] + largestdown[1])\n beamfwhm = min(beamfwhm, fwhm / 10.0)\n beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))\n\n y_minus_bg = y - self.strip_bg(y)\n height = max(y_minus_bg)\n\n i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]\n xx = numpy.take(x, i1)\n position = (xx[0] + xx[-1]) / 2.0\n fwhm = xx[-1] - xx[0]\n largest = [height, position, fwhm, beamfwhm]\n cons = numpy.zeros((4, 3), numpy.float)\n # Setup constrains\n if not self.config['NoConstraintsFlag']:\n # Setup height constrains\n if self.config['PositiveHeightAreaFlag']:\n cons[0, 0] = CPOSITIVE\n cons[0, 1] = 0\n cons[0, 2] = 0\n\n # Setup position constrains\n if self.config['QuotedPositionFlag']:\n cons[1, 0] = CQUOTED\n cons[1, 1] = min(x)\n cons[1, 2] = max(x)\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n 
cons[2, 0] = CPOSITIVE\n cons[2, 1] = 0\n cons[2, 2] = 0\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n cons[3, 0] = CPOSITIVE\n cons[3, 1] = 0\n cons[3, 2] = 0\n return largest, cons\n\n def estimate_stepup(self, x, y):\n \"\"\"Estimation of parameters for a single step up curve.\n\n The functions estimates gaussian parameters for the derivative of\n the data, takes the largest gaussian peak and uses its estimated\n parameters to define the center of the step and its fwhm. The\n estimated amplitude returned is simply ``max(y) - min(y)``.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each stepup are:\n *height, centroid, fwhm* .\n \"\"\"\n crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]\n cutoff = len(crappyfilter) // 2\n y_deriv = numpy.convolve(y, crappyfilter, mode=\"valid\")\n if max(y_deriv) > 0:\n y_deriv = y_deriv * max(y) / max(y_deriv)\n\n fittedpar, cons = self.estimate_height_position_fwhm(\n x[cutoff:-cutoff], y_deriv)\n\n # for height, use the data amplitude after removing the background\n data_amplitude = max(y) - min(y)\n\n # find params of the largest gaussian found\n if len(fittedpar):\n npeaks = len(fittedpar) // 3\n largest_index = 0\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n for i in range(npeaks):\n if fittedpar[3 * i] > largest[0]:\n largest_index = i\n largest = [fittedpar[3 * largest_index],\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n else:\n # no peak was found\n largest = [data_amplitude, # height\n x[len(x)//2], # center: middle of x range\n self.config[\"FwhmPoints\"] * (x[1] - x[0])] # fwhm: default value\n\n newcons = numpy.zeros((3, 3), numpy.float)\n # Setup constrains\n if not self.config['NoConstraintsFlag']:\n # Setup height constraints\n if self.config['PositiveHeightAreaFlag']:\n newcons[0, 0] = CPOSITIVE\n newcons[0, 1] = 0\n newcons[0, 2] = 0\n\n # Setup position constraints\n if self.config['QuotedPositionFlag']:\n newcons[1, 0] = CQUOTED\n newcons[1, 1] = min(x)\n newcons[1, 2] = max(x)\n\n # Setup positive FWHM constraints\n if self.config['PositiveFwhmFlag']:\n newcons[2, 0] = CPOSITIVE\n newcons[2, 1] = 0\n newcons[2, 2] = 0\n\n return largest, newcons\n\n def estimate_periodic_gauss(self, x, y):\n \"\"\"Estimation of parameters for periodic gaussian curves:\n *number of peaks, distance between peaks, height, position of the\n first peak, fwhm*\n\n The functions detects all peaks, then computes the parameters the\n following way:\n\n - *distance*: average of distances between detected peaks\n - *height*: average height of detected peaks\n - *fwhm*: fwhm of the highest peak (in number of samples) if\n field ``'AutoFwhm'`` in :attr:`config` is ``True``, else take\n the default value (field ``'FwhmPoints'`` in :attr:`config`)\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n \"\"\"\n yscaling = self.config.get('Yscaling', 1.0)\n if yscaling == 0:\n yscaling = 1.0\n\n bg = self.strip_bg(y)\n\n if self.config['AutoFwhm']:\n search_fwhm = guess_fwhm(y)\n else:\n search_fwhm = int(float(self.config['FwhmPoints']))\n search_sens = float(self.config['Sensitivity'])\n\n if search_fwhm < 3:\n search_fwhm = 3\n\n if search_sens < 1:\n search_sens = 1\n\n if len(y) > 1.5 * search_fwhm:\n peaks 
= peak_search(yscaling * y, fwhm=search_fwhm,\n sensitivity=search_sens)\n else:\n peaks = []\n npeaks = len(peaks)\n if not npeaks:\n fittedpar = []\n cons = numpy.zeros((len(fittedpar), 3), numpy.float)\n return fittedpar, cons\n\n fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]\n\n # The number of peaks\n fittedpar[0] = npeaks\n\n # The separation between peaks in x units\n delta = 0.0\n height = 0.0\n for i in range(npeaks):\n height += y[int(peaks[i])] - bg[int(peaks[i])]\n if i != npeaks - 1:\n delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])\n\n # delta between peaks\n if npeaks > 1:\n fittedpar[1] = delta / (npeaks - 1)\n\n # starting height\n fittedpar[2] = height / npeaks\n\n # position of the first peak\n fittedpar[3] = x[int(peaks[0])]\n\n # Estimate the fwhm\n fittedpar[4] = search_fwhm\n\n # setup constraints\n cons = numpy.zeros((5, 3), numpy.float)\n cons[0, 0] = CFIXED # the number of gaussians\n if npeaks == 1:\n cons[1, 0] = CFIXED # the delta between peaks\n else:\n cons[1, 0] = CFREE\n j = 2\n # Setup height area constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveHeightAreaFlag']:\n # POSITIVE = 1\n cons[j, 0] = CPOSITIVE\n cons[j, 1] = 0\n cons[j, 2] = 0\n j += 1\n\n # Setup position constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['QuotedPositionFlag']:\n # QUOTED = 2\n cons[j, 0] = CQUOTED\n cons[j, 1] = min(x)\n cons[j, 2] = max(x)\n j += 1\n\n # Setup positive FWHM constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveFwhmFlag']:\n # POSITIVE=1\n cons[j, 0] = CPOSITIVE\n cons[j, 1] = 0\n cons[j, 2] = 0\n j += 1\n return fittedpar, cons\n\n def configure(self, **kw):\n \"\"\"Add new / unknown keyword arguments to :attr:`config`,\n update entries in :attr:`config` if the parameter name is a existing\n key.\n\n :param kw: Dictionary of keyword arguments.\n :return: Configuration dictionary :attr:`config`\n \"\"\"\n if not kw.keys():\n return self.config\n for key in kw.keys():\n notdone = 1\n # take care of lower / upper case problems ...\n for config_key in self.config.keys():\n if config_key.lower() == key.lower():\n self.config[config_key] = kw[key]\n notdone = 0\n if notdone:\n self.config[key] = kw[key]\n return self.config\n\nfitfuns = FitTheories()\n\nTHEORY = OrderedDict((\n ('Gaussians',\n FitTheory(description='Gaussian functions',\n function=functions.sum_gauss,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_height_position_fwhm,\n configure=fitfuns.configure)),\n ('Lorentz',\n FitTheory(description='Lorentzian functions',\n function=functions.sum_lorentz,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_height_position_fwhm,\n configure=fitfuns.configure)),\n ('Area Gaussians',\n FitTheory(description='Gaussian functions (area)',\n function=functions.sum_agauss,\n parameters=('Area', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_agauss,\n configure=fitfuns.configure)),\n ('Area Lorentz',\n FitTheory(description='Lorentzian functions (area)',\n function=functions.sum_alorentz,\n parameters=('Area', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_alorentz,\n configure=fitfuns.configure)),\n ('Pseudo-Voigt Line',\n FitTheory(description='Pseudo-Voigt functions',\n function=functions.sum_pvoigt,\n parameters=('Height', 'Position', 'FWHM', 'Eta'),\n estimate=fitfuns.estimate_pvoigt,\n configure=fitfuns.configure)),\n ('Area Pseudo-Voigt',\n FitTheory(description='Pseudo-Voigt functions (area)',\n function=functions.sum_apvoigt,\n 
parameters=('Area', 'Position', 'FWHM', 'Eta'),\n estimate=fitfuns.estimate_apvoigt,\n configure=fitfuns.configure)),\n ('Split Gaussian',\n FitTheory(description='Asymmetric gaussian functions',\n function=functions.sum_splitgauss,\n parameters=('Height', 'Position', 'LowFWHM',\n 'HighFWHM'),\n estimate=fitfuns.estimate_splitgauss,\n configure=fitfuns.configure)),\n ('Split Lorentz',\n FitTheory(description='Asymmetric lorentzian functions',\n function=functions.sum_splitlorentz,\n parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),\n estimate=fitfuns.estimate_splitgauss,\n configure=fitfuns.configure)),\n ('Split Pseudo-Voigt',\n FitTheory(description='Asymmetric pseudo-Voigt functions',\n function=functions.sum_splitpvoigt,\n parameters=('Height', 'Position', 'LowFWHM',\n 'HighFWHM', 'Eta'),\n estimate=fitfuns.estimate_splitpvoigt,\n configure=fitfuns.configure)),\n ('Step Down',\n FitTheory(description='Step down function',\n function=functions.sum_stepdown,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_stepdown,\n configure=fitfuns.configure)),\n ('Step Up',\n FitTheory(description='Step up function',\n function=functions.sum_stepup,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_stepup,\n configure=fitfuns.configure)),\n ('Slit',\n FitTheory(description='Slit function',\n function=functions.sum_slit,\n parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),\n estimate=fitfuns.estimate_slit,\n configure=fitfuns.configure)),\n ('Atan',\n FitTheory(description='Arctan step up function',\n function=functions.atan_stepup,\n parameters=('Height', 'Position', 'Width'),\n estimate=fitfuns.estimate_stepup,\n configure=fitfuns.configure)),\n ('Hypermet',\n FitTheory(description='Hypermet functions',\n function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet\n parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',\n 'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),\n estimate=fitfuns.estimate_ahypermet,\n configure=fitfuns.configure)),\n # ('Periodic Gaussians',\n # FitTheory(description='Periodic gaussian functions',\n # function=functions.periodic_gauss,\n # parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),\n # estimate=fitfuns.estimate_periodic_gauss,\n # configure=fitfuns.configure))\n ('Degree 2 Polynomial',\n FitTheory(description='Degree 2 polynomial'\n '\\ny = a*x^2 + b*x +c',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c'],\n estimate=fitfuns.estimate_quadratic)),\n ('Degree 3 Polynomial',\n FitTheory(description='Degree 3 polynomial'\n '\\ny = a*x^3 + b*x^2 + c*x + d',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd'],\n estimate=fitfuns.estimate_cubic)),\n ('Degree 4 Polynomial',\n FitTheory(description='Degree 4 polynomial'\n '\\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd', 'e'],\n estimate=fitfuns.estimate_quartic)),\n ('Degree 5 Polynomial',\n FitTheory(description='Degree 5 polynomial'\n '\\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd', 'e', 'f'],\n estimate=fitfuns.estimate_quintic)),\n))\n\"\"\"Dictionary of fit theories: fit functions and their associated estimation\nfunction, parameters list, configuration function and description.\n\"\"\"\n\n\ndef test(a):\n from silx.math.fit import fitmanager\n x = numpy.arange(1000).astype(numpy.float)\n p = [1500, 100., 50.0,\n 1500, 700., 50.0]\n y_synthetic = functions.sum_gauss(x, *p) + 1\n\n fit = fitmanager.FitManager(x, 
y_synthetic)\n fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],\n a.estimate_height_position_fwhm)\n fit.settheory('Gaussians')\n fit.setbackground('Linear')\n\n fit.estimate()\n fit.runfit()\n\n y_fit = fit.gendata()\n\n print(\"Fit parameter names: %s\" % str(fit.get_names()))\n print(\"Theoretical parameters: %s\" % str(numpy.append([1, 0], p)))\n print(\"Fitted parameters: %s\" % str(fit.get_fitted_parameters()))\n\n try:\n from silx.gui import qt\n from silx.gui.plot import plot1D\n app = qt.QApplication([])\n\n # Offset of 1 to see the difference in log scale\n plot1D(x, (y_synthetic + 1, y_fit), \"Input data + 1, Fit\")\n\n app.exec_()\n except ImportError:\n _logger.warning(\"Unable to load qt binding, can't plot results.\")\n\n\nif __name__ == \"__main__\":\n test(fitfuns)\n",
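The docstrings above quote the Gaussian height-to-area conversion used by estimate_agauss, ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2)))``. The sketch below checks that identity numerically with plain numpy; the ``gaussian`` helper is a local stand-in written for this example only, not silx's ``functions.sum_gauss``.

# Minimal, self-contained check of the height-to-area formula (assumption:
# a Gaussian parametrised by height, center and FWHM, as in the docstrings).
import numpy

def gaussian(x, height, center, fwhm):
    # sigma expressed from the FWHM of a Gaussian
    sigma = fwhm / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
    return height * numpy.exp(-0.5 * ((x - center) / sigma) ** 2)

height, center, fwhm = 1500.0, 100.0, 50.0
x = numpy.linspace(center - 20 * fwhm, center + 20 * fwhm, 200001)
dx = x[1] - x[0]

numeric_area = numpy.sum(gaussian(x, height, center, fwhm)) * dx
formula_area = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
    2.0 * numpy.sqrt(2 * numpy.log(2)))

# Both values agree to numerical precision
assert abs(numeric_area - formula_area) / formula_area < 1e-6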
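The ``HypermetTails`` configuration flag described in ``FitTheories.ahypermet`` packs the four hypermet terms into the low bits of an integer (bit 0: gaussian, bit 1: short tail, bit 2: long tail, bit 3: step). The standalone snippet below only illustrates that bit decoding; ``decode_hypermet_tails`` is a name invented for this example and is not part of silx.

# Bit decoding of a HypermetTails-style flag (hypothetical helper).
def decode_hypermet_tails(flag):
    return {
        'gaussian_term': flag & 1,
        'st_term': (flag >> 1) & 1,
        'lt_term': (flag >> 2) & 1,
        'step_term': (flag >> 3) & 1,
    }

# 15 == 0b1111: every term enabled, as stated in the docstring
assert decode_hypermet_tails(15) == {
    'gaussian_term': 1, 'st_term': 1, 'lt_term': 1, 'step_term': 1}
# 5 == 0b0101: gaussian and long tail only
assert decode_hypermet_tails(5) == {
    'gaussian_term': 1, 'st_term': 0, 'lt_term': 1, 'step_term': 0}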
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides the :class:`Scatter` item of the :class:`Plot`.\n\"\"\"\n\nfrom __future__ import division\n\n\n__authors__ = [\"T. Vincent\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"29/03/2017\"\n\n\nfrom collections import namedtuple\nimport logging\nimport threading\nimport numpy\n\nfrom collections import defaultdict\nfrom concurrent.futures import ThreadPoolExecutor, CancelledError\n\nfrom ....utils.proxy import docstring\nfrom ....math.combo import min_max\nfrom ....math.histogram import Histogramnd\nfrom ....utils.weakref import WeakList\nfrom .._utils.delaunay import delaunay\nfrom .core import PointsBase, ColormapMixIn, ScatterVisualizationMixIn\nfrom .axis import Axis\nfrom ._pick import PickingResult\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass _GreedyThreadPoolExecutor(ThreadPoolExecutor):\n \"\"\":class:`ThreadPoolExecutor` with an extra :meth:`submit_greedy` method.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(_GreedyThreadPoolExecutor, self).__init__(*args, **kwargs)\n self.__futures = defaultdict(WeakList)\n self.__lock = threading.RLock()\n\n def submit_greedy(self, queue, fn, *args, **kwargs):\n \"\"\"Same as :meth:`submit` but cancel previous tasks in given queue.\n\n This means that when a new task is submitted for a given queue,\n all other pending tasks of that queue are cancelled.\n\n :param queue: Identifier of the queue. 
This must be hashable.\n :param callable fn: The callable to call with provided extra arguments\n :return: Future corresponding to this task\n :rtype: concurrent.futures.Future\n \"\"\"\n with self.__lock:\n # Cancel previous tasks in given queue\n for future in self.__futures.pop(queue, []):\n if not future.done():\n future.cancel()\n\n future = super(_GreedyThreadPoolExecutor, self).submit(\n fn, *args, **kwargs)\n self.__futures[queue].append(future)\n\n return future\n\n\n# Functions to guess grid shape from coordinates\n\ndef _get_z_line_length(array):\n \"\"\"Return length of line if array is a Z-like 2D regular grid.\n\n :param numpy.ndarray array: The 1D array of coordinates to check\n :return: 0 if no line length could be found,\n else the number of element per line.\n :rtype: int\n \"\"\"\n sign = numpy.sign(numpy.diff(array))\n if len(sign) == 0 or sign[0] == 0: # We don't handle that\n return 0\n # Check this way to account for 0 sign (i.e., diff == 0)\n beginnings = numpy.where(sign == - sign[0])[0] + 1\n if len(beginnings) == 0:\n return 0\n length = beginnings[0]\n if numpy.all(numpy.equal(numpy.diff(beginnings), length)):\n return length\n return 0\n\n\ndef _guess_z_grid_shape(x, y):\n \"\"\"Guess the shape of a grid from (x, y) coordinates.\n\n The grid might contain more elements than x and y,\n as the last line might be partly filled.\n\n :param numpy.ndarray x:\n :paran numpy.ndarray y:\n :returns: (order, (height, width)) of the regular grid,\n or None if could not guess one.\n 'order' is 'row' if X (i.e., column) is the fast dimension, else 'column'.\n :rtype: Union[List(str,int),None]\n \"\"\"\n width = _get_z_line_length(x)\n if width != 0:\n return 'row', (int(numpy.ceil(len(x) / width)), width)\n else:\n height = _get_z_line_length(y)\n if height != 0:\n return 'column', (height, int(numpy.ceil(len(y) / height)))\n return None\n\n\ndef is_monotonic(array):\n \"\"\"Returns whether array is monotonic (increasing or decreasing).\n\n :param numpy.ndarray array: 1D array-like container.\n :returns: 1 if array is monotonically increasing,\n -1 if array is monotonically decreasing,\n 0 if array is not monotonic\n :rtype: int\n \"\"\"\n diff = numpy.diff(numpy.ravel(array))\n with numpy.errstate(invalid='ignore'):\n if numpy.all(diff >= 0):\n return 1\n elif numpy.all(diff <= 0):\n return -1\n else:\n return 0\n\n\ndef _guess_grid(x, y):\n \"\"\"Guess a regular grid from the points.\n\n Result convention is (x, y)\n\n :param numpy.ndarray x: X coordinates of the points\n :param numpy.ndarray y: Y coordinates of the points\n :returns: (order, (height, width)\n order is 'row' or 'column'\n :rtype: Union[List[str,List[int]],None]\n \"\"\"\n x, y = numpy.ravel(x), numpy.ravel(y)\n\n guess = _guess_z_grid_shape(x, y)\n if guess is not None:\n return guess\n\n else:\n # Cannot guess a regular grid\n # Let's assume it's a single line\n order = 'row' # or 'column' doesn't matter for a single line\n y_monotonic = is_monotonic(y)\n if is_monotonic(x) or y_monotonic: # we can guess a line\n x_min, x_max = min_max(x)\n y_min, y_max = min_max(y)\n\n if not y_monotonic or x_max - x_min >= y_max - y_min:\n # x only is monotonic or both are and X varies more\n # line along X\n shape = 1, len(x)\n else:\n # y only is monotonic or both are and Y varies more\n # line along Y\n shape = len(y), 1\n\n else: # Cannot guess a line from the points\n return None\n\n return order, shape\n\n\ndef _quadrilateral_grid_coords(points):\n \"\"\"Compute an irregular grid of quadrilaterals from a set of 
points\n\n The input points are expected to lie on a grid.\n\n :param numpy.ndarray points:\n 3D data set of 2D input coordinates (height, width, 2)\n height and width must be at least 2.\n :return: 3D dataset of 2D coordinates of the grid (height+1, width+1, 2)\n \"\"\"\n assert points.ndim == 3\n assert points.shape[0] >= 2\n assert points.shape[1] >= 2\n assert points.shape[2] == 2\n\n dim0, dim1 = points.shape[:2]\n grid_points = numpy.zeros((dim0 + 1, dim1 + 1, 2), dtype=numpy.float64)\n\n # Compute inner points as mean of 4 neighbours\n neighbour_view = numpy.lib.stride_tricks.as_strided(\n points,\n shape=(dim0 - 1, dim1 - 1, 2, 2, points.shape[2]),\n strides=points.strides[:2] + points.strides[:2] + points.strides[-1:], writeable=False)\n inner_points = numpy.mean(neighbour_view, axis=(2, 3))\n grid_points[1:-1, 1:-1] = inner_points\n\n # Compute 'vertical' sides\n # Alternative: grid_points[1:-1, [0, -1]] = points[:-1, [0, -1]] + points[1:, [0, -1]] - inner_points[:, [0, -1]]\n grid_points[1:-1, [0, -1], 0] = points[:-1, [0, -1], 0] + points[1:, [0, -1], 0] - inner_points[:, [0, -1], 0]\n grid_points[1:-1, [0, -1], 1] = inner_points[:, [0, -1], 1]\n\n # Compute 'horizontal' sides\n grid_points[[0, -1], 1:-1, 0] = inner_points[[0, -1], :, 0]\n grid_points[[0, -1], 1:-1, 1] = points[[0, -1], :-1, 1] + points[[0, -1], 1:, 1] - inner_points[[0, -1], :, 1]\n\n # Compute corners\n d0, d1 = [0, 0, -1, -1], [0, -1, -1, 0]\n grid_points[d0, d1] = 2 * points[d0, d1] - inner_points[d0, d1]\n return grid_points\n\n\ndef _quadrilateral_grid_as_triangles(points):\n \"\"\"Returns the points and indices to make a grid of quadirlaterals\n\n :param numpy.ndarray points:\n 3D array of points (height, width, 2)\n :return: triangle corners (4 * N, 2), triangle indices (2 * N, 3)\n With N = height * width, the number of input points\n \"\"\"\n nbpoints = numpy.prod(points.shape[:2])\n\n grid = _quadrilateral_grid_coords(points)\n coords = numpy.empty((4 * nbpoints, 2), dtype=grid.dtype)\n coords[::4] = grid[:-1, :-1].reshape(-1, 2)\n coords[1::4] = grid[1:, :-1].reshape(-1, 2)\n coords[2::4] = grid[:-1, 1:].reshape(-1, 2)\n coords[3::4] = grid[1:, 1:].reshape(-1, 2)\n\n indices = numpy.empty((2 * nbpoints, 3), dtype=numpy.uint32)\n indices[::2, 0] = numpy.arange(0, 4 * nbpoints, 4)\n indices[::2, 1] = numpy.arange(1, 4 * nbpoints, 4)\n indices[::2, 2] = numpy.arange(2, 4 * nbpoints, 4)\n indices[1::2, 0] = indices[::2, 1]\n indices[1::2, 1] = indices[::2, 2]\n indices[1::2, 2] = numpy.arange(3, 4 * nbpoints, 4)\n\n return coords, indices\n\n\n_RegularGridInfo = namedtuple(\n '_RegularGridInfo', ['bounds', 'origin', 'scale', 'shape', 'order'])\n\n\n_HistogramInfo = namedtuple(\n '_HistogramInfo', ['mean', 'count', 'sum', 'origin', 'scale', 'shape'])\n\n\nclass Scatter(PointsBase, ColormapMixIn, ScatterVisualizationMixIn):\n \"\"\"Description of a scatter\"\"\"\n\n _DEFAULT_SELECTABLE = True\n \"\"\"Default selectable state for scatter plots\"\"\"\n\n _SUPPORTED_SCATTER_VISUALIZATION = (\n ScatterVisualizationMixIn.Visualization.POINTS,\n ScatterVisualizationMixIn.Visualization.SOLID,\n ScatterVisualizationMixIn.Visualization.REGULAR_GRID,\n ScatterVisualizationMixIn.Visualization.IRREGULAR_GRID,\n ScatterVisualizationMixIn.Visualization.BINNED_STATISTIC,\n )\n \"\"\"Overrides supported Visualizations\"\"\"\n\n def __init__(self):\n PointsBase.__init__(self)\n ColormapMixIn.__init__(self)\n ScatterVisualizationMixIn.__init__(self)\n self._value = ()\n self.__alpha = None\n # Cache Delaunay triangulation 
future object\n self.__delaunayFuture = None\n # Cache interpolator future object\n self.__interpolatorFuture = None\n self.__executor = None\n\n # Cache triangles: x, y, indices\n self.__cacheTriangles = None, None, None\n\n # Cache regular grid and histogram info\n self.__cacheRegularGridInfo = None\n self.__cacheHistogramInfo = None\n\n def _updateColormappedData(self):\n \"\"\"Update the colormapped data, to be called when changed\"\"\"\n if self.getVisualization() is self.Visualization.BINNED_STATISTIC:\n histoInfo = self.__getHistogramInfo()\n if histoInfo is None:\n data = None\n else:\n data = getattr(\n histoInfo,\n self.getVisualizationParameter(\n self.VisualizationParameter.BINNED_STATISTIC_FUNCTION))\n else:\n data = self.getValueData(copy=False)\n self._setColormappedData(data, copy=False)\n\n @docstring(ScatterVisualizationMixIn)\n def setVisualization(self, mode):\n previous = self.getVisualization()\n if super().setVisualization(mode):\n if (bool(mode is self.Visualization.BINNED_STATISTIC) ^\n bool(previous is self.Visualization.BINNED_STATISTIC)):\n self._updateColormappedData()\n return True\n else:\n return False\n\n @docstring(ScatterVisualizationMixIn)\n def setVisualizationParameter(self, parameter, value):\n parameter = self.VisualizationParameter.from_value(parameter)\n\n if super(Scatter, self).setVisualizationParameter(parameter, value):\n if parameter in (self.VisualizationParameter.GRID_BOUNDS,\n self.VisualizationParameter.GRID_MAJOR_ORDER,\n self.VisualizationParameter.GRID_SHAPE):\n self.__cacheRegularGridInfo = None\n\n if parameter in (self.VisualizationParameter.BINNED_STATISTIC_SHAPE,\n self.VisualizationParameter.BINNED_STATISTIC_FUNCTION,\n self.VisualizationParameter.DATA_BOUNDS_HINT):\n if parameter in (self.VisualizationParameter.BINNED_STATISTIC_SHAPE,\n self.VisualizationParameter.DATA_BOUNDS_HINT):\n self.__cacheHistogramInfo = None # Clean-up cache\n if self.getVisualization() is self.Visualization.BINNED_STATISTIC:\n self._updateColormappedData()\n return True\n else:\n return False\n\n @docstring(ScatterVisualizationMixIn)\n def getCurrentVisualizationParameter(self, parameter):\n value = self.getVisualizationParameter(parameter)\n if (parameter is self.VisualizationParameter.DATA_BOUNDS_HINT or\n value is not None):\n return value # Value has been set, return it\n\n elif parameter is self.VisualizationParameter.GRID_BOUNDS:\n grid = self.__getRegularGridInfo()\n return None if grid is None else grid.bounds\n \n elif parameter is self.VisualizationParameter.GRID_MAJOR_ORDER:\n grid = self.__getRegularGridInfo()\n return None if grid is None else grid.order\n\n elif parameter is self.VisualizationParameter.GRID_SHAPE:\n grid = self.__getRegularGridInfo()\n return None if grid is None else grid.shape\n\n elif parameter is self.VisualizationParameter.BINNED_STATISTIC_SHAPE:\n info = self.__getHistogramInfo()\n return None if info is None else info.shape\n\n else:\n raise NotImplementedError()\n\n def __getRegularGridInfo(self):\n \"\"\"Get grid info\"\"\"\n if self.__cacheRegularGridInfo is None:\n shape = self.getVisualizationParameter(\n self.VisualizationParameter.GRID_SHAPE)\n order = self.getVisualizationParameter(\n self.VisualizationParameter.GRID_MAJOR_ORDER)\n if shape is None or order is None:\n guess = _guess_grid(self.getXData(copy=False),\n self.getYData(copy=False))\n if guess is None:\n _logger.warning(\n 'Cannot guess a grid: Cannot display as regular grid image')\n return None\n if shape is None:\n shape = guess[1]\n if order is 
None:\n order = guess[0]\n\n nbpoints = len(self.getXData(copy=False))\n if nbpoints > shape[0] * shape[1]:\n # More data points that provided grid shape: enlarge grid\n _logger.warning(\n \"More data points than provided grid shape size: extends grid\")\n dim0, dim1 = shape\n if order == 'row': # keep dim1, enlarge dim0\n dim0 = nbpoints // dim1 + (1 if nbpoints % dim1 else 0)\n else: # keep dim0, enlarge dim1\n dim1 = nbpoints // dim0 + (1 if nbpoints % dim0 else 0)\n shape = dim0, dim1\n\n bounds = self.getVisualizationParameter(\n self.VisualizationParameter.GRID_BOUNDS)\n if bounds is None:\n x, y = self.getXData(copy=False), self.getYData(copy=False)\n min_, max_ = min_max(x)\n xRange = (min_, max_) if (x[0] - min_) < (max_ - x[0]) else (max_, min_)\n min_, max_ = min_max(y)\n yRange = (min_, max_) if (y[0] - min_) < (max_ - y[0]) else (max_, min_)\n bounds = (xRange[0], yRange[0]), (xRange[1], yRange[1])\n\n begin, end = bounds\n scale = ((end[0] - begin[0]) / max(1, shape[1] - 1),\n (end[1] - begin[1]) / max(1, shape[0] - 1))\n if scale[0] == 0 and scale[1] == 0:\n scale = 1., 1.\n elif scale[0] == 0:\n scale = scale[1], scale[1]\n elif scale[1] == 0:\n scale = scale[0], scale[0]\n\n origin = begin[0] - 0.5 * scale[0], begin[1] - 0.5 * scale[1]\n\n self.__cacheRegularGridInfo = _RegularGridInfo(\n bounds=bounds, origin=origin, scale=scale, shape=shape, order=order)\n\n return self.__cacheRegularGridInfo\n\n def __getHistogramInfo(self):\n \"\"\"Get histogram info\"\"\"\n if self.__cacheHistogramInfo is None:\n shape = self.getVisualizationParameter(\n self.VisualizationParameter.BINNED_STATISTIC_SHAPE)\n if shape is None:\n shape = 100, 100 # TODO compute auto shape\n\n x, y, values = self.getData(copy=False)[:3]\n if len(x) == 0: # No histogram\n return None\n\n if not numpy.issubdtype(x.dtype, numpy.floating):\n x = x.astype(numpy.float64)\n if not numpy.issubdtype(y.dtype, numpy.floating):\n y = y.astype(numpy.float64)\n if not numpy.issubdtype(values.dtype, numpy.floating):\n values = values.astype(numpy.float64)\n\n ranges = (tuple(min_max(y, finite=True)),\n tuple(min_max(x, finite=True)))\n rangesHint = self.getVisualizationParameter(\n self.VisualizationParameter.DATA_BOUNDS_HINT)\n if rangesHint is not None:\n ranges = tuple((min(dataMin, hintMin), max(dataMax, hintMax))\n for (dataMin, dataMax), (hintMin, hintMax) in zip(ranges, rangesHint))\n\n points = numpy.transpose(numpy.array((y, x)))\n counts, sums, bin_edges = Histogramnd(\n points,\n histo_range=ranges,\n n_bins=shape,\n weights=values)\n yEdges, xEdges = bin_edges\n origin = xEdges[0], yEdges[0]\n scale = ((xEdges[-1] - xEdges[0]) / (len(xEdges) - 1),\n (yEdges[-1] - yEdges[0]) / (len(yEdges) - 1))\n\n with numpy.errstate(divide='ignore', invalid='ignore'):\n histo = sums / counts\n\n self.__cacheHistogramInfo = _HistogramInfo(\n mean=histo, count=counts, sum=sums,\n origin=origin, scale=scale, shape=shape)\n\n return self.__cacheHistogramInfo\n\n def _addBackendRenderer(self, backend):\n \"\"\"Update backend renderer\"\"\"\n # Filter-out values <= 0\n xFiltered, yFiltered, valueFiltered, xerror, yerror = self.getData(\n copy=False, displayed=True)\n\n # Remove not finite numbers (this includes filtered out x, y <= 0)\n mask = numpy.logical_and(numpy.isfinite(xFiltered), numpy.isfinite(yFiltered))\n xFiltered = xFiltered[mask]\n yFiltered = yFiltered[mask]\n\n if len(xFiltered) == 0:\n return None # No data to display, do not add renderer to backend\n\n visualization = self.getVisualization()\n\n if 
visualization is self.Visualization.BINNED_STATISTIC:\n plot = self.getPlot()\n if (plot is None or\n plot.getXAxis().getScale() != Axis.LINEAR or\n plot.getYAxis().getScale() != Axis.LINEAR):\n # Those visualizations are not available with log scaled axes\n return None\n\n histoInfo = self.__getHistogramInfo()\n if histoInfo is None:\n return None\n data = getattr(histoInfo, self.getVisualizationParameter(\n self.VisualizationParameter.BINNED_STATISTIC_FUNCTION))\n\n return backend.addImage(\n data=data,\n origin=histoInfo.origin,\n scale=histoInfo.scale,\n colormap=self.getColormap(),\n alpha=self.getAlpha())\n\n # Compute colors\n cmap = self.getColormap()\n rgbacolors = cmap.applyToData(self)\n\n if self.__alpha is not None:\n rgbacolors[:, -1] = (rgbacolors[:, -1] * self.__alpha).astype(numpy.uint8)\n\n visualization = self.getVisualization()\n\n if visualization is self.Visualization.POINTS:\n return backend.addCurve(xFiltered, yFiltered,\n color=rgbacolors[mask],\n symbol=self.getSymbol(),\n linewidth=0,\n linestyle=\"\",\n yaxis='left',\n xerror=xerror,\n yerror=yerror,\n fill=False,\n alpha=self.getAlpha(),\n symbolsize=self.getSymbolSize(),\n baseline=None)\n\n else:\n plot = self.getPlot()\n if (plot is None or\n plot.getXAxis().getScale() != Axis.LINEAR or\n plot.getYAxis().getScale() != Axis.LINEAR):\n # Those visualizations are not available with log scaled axes\n return None\n\n if visualization is self.Visualization.SOLID:\n triangulation = self._getDelaunay().result()\n if triangulation is None:\n _logger.warning(\n 'Cannot get a triangulation: Cannot display as solid surface')\n return None\n else:\n triangles = triangulation.simplices.astype(numpy.int32)\n return backend.addTriangles(xFiltered,\n yFiltered,\n triangles,\n color=rgbacolors[mask],\n alpha=self.getAlpha())\n\n elif visualization is self.Visualization.REGULAR_GRID:\n gridInfo = self.__getRegularGridInfo()\n if gridInfo is None:\n return None\n\n dim0, dim1 = gridInfo.shape\n if gridInfo.order == 'column': # transposition needed\n dim0, dim1 = dim1, dim0\n\n if len(rgbacolors) == dim0 * dim1:\n image = rgbacolors.reshape(dim0, dim1, -1)\n else:\n # The points do not fill the whole image\n image = numpy.empty((dim0 * dim1, 4), dtype=rgbacolors.dtype)\n image[:len(rgbacolors)] = rgbacolors\n image[len(rgbacolors):] = 0, 0, 0, 0 # Transparent pixels\n image.shape = dim0, dim1, -1\n\n if gridInfo.order == 'column':\n image = numpy.transpose(image, axes=(1, 0, 2))\n\n return backend.addImage(\n data=image,\n origin=gridInfo.origin,\n scale=gridInfo.scale,\n colormap=None,\n alpha=self.getAlpha())\n\n elif visualization is self.Visualization.IRREGULAR_GRID:\n gridInfo = self.__getRegularGridInfo()\n if gridInfo is None:\n return None\n\n shape = gridInfo.shape\n if shape is None: # No shape, no display\n return None\n\n nbpoints = len(xFiltered)\n if nbpoints == 1:\n # single point, render as a square points\n return backend.addCurve(xFiltered, yFiltered,\n color=rgbacolors[mask],\n symbol='s',\n linewidth=0,\n linestyle=\"\",\n yaxis='left',\n xerror=None,\n yerror=None,\n fill=False,\n alpha=self.getAlpha(),\n symbolsize=7,\n baseline=None)\n\n # Make shape include all points\n gridOrder = gridInfo.order\n if nbpoints != numpy.prod(shape):\n if gridOrder == 'row':\n shape = int(numpy.ceil(nbpoints / shape[1])), shape[1]\n else: # column-major order\n shape = shape[0], int(numpy.ceil(nbpoints / shape[0]))\n\n if shape[0] < 2 or shape[1] < 2: # Single line, at least 2 points\n points = numpy.ones((2, nbpoints, 2), 
dtype=numpy.float64)\n # Use row/column major depending on shape, not on info value\n gridOrder = 'row' if shape[0] == 1 else 'column'\n\n if gridOrder == 'row':\n points[0, :, 0] = xFiltered\n points[0, :, 1] = yFiltered\n else: # column-major order\n points[0, :, 0] = yFiltered\n points[0, :, 1] = xFiltered\n\n # Add a second line that will be clipped in the end\n points[1, :-1] = points[0, :-1] + numpy.cross(\n points[0, 1:] - points[0, :-1], (0., 0., 1.))[:, :2]\n points[1, -1] = points[0, -1] + numpy.cross(\n points[0, -1] - points[0, -2], (0., 0., 1.))[:2]\n\n points.shape = 2, nbpoints, 2 # Use same shape for both orders\n coords, indices = _quadrilateral_grid_as_triangles(points)\n\n elif gridOrder == 'row': # row-major order\n if nbpoints != numpy.prod(shape):\n points = numpy.empty((numpy.prod(shape), 2), dtype=numpy.float64)\n points[:nbpoints, 0] = xFiltered\n points[:nbpoints, 1] = yFiltered\n # Index of last element of last fully filled row\n index = (nbpoints // shape[1]) * shape[1]\n points[nbpoints:, 0] = xFiltered[index - (numpy.prod(shape) - nbpoints):index]\n points[nbpoints:, 1] = yFiltered[-1]\n else:\n points = numpy.transpose((xFiltered, yFiltered))\n points.shape = shape[0], shape[1], 2\n\n else: # column-major order\n if nbpoints != numpy.prod(shape):\n points = numpy.empty((numpy.prod(shape), 2), dtype=numpy.float64)\n points[:nbpoints, 0] = yFiltered\n points[:nbpoints, 1] = xFiltered\n # Index of last element of last fully filled column\n index = (nbpoints // shape[0]) * shape[0]\n points[nbpoints:, 0] = yFiltered[index - (numpy.prod(shape) - nbpoints):index]\n points[nbpoints:, 1] = xFiltered[-1]\n else:\n points = numpy.transpose((yFiltered, xFiltered))\n points.shape = shape[1], shape[0], 2\n\n coords, indices = _quadrilateral_grid_as_triangles(points)\n\n # Remove unused extra triangles\n coords = coords[:4*nbpoints]\n indices = indices[:2*nbpoints]\n\n if gridOrder == 'row':\n x, y = coords[:, 0], coords[:, 1]\n else: # column-major order\n y, x = coords[:, 0], coords[:, 1]\n\n rgbacolors = rgbacolors[mask] # Filter-out not finite points\n gridcolors = numpy.empty(\n (4 * nbpoints, rgbacolors.shape[-1]), dtype=rgbacolors.dtype)\n for first in range(4):\n gridcolors[first::4] = rgbacolors[:nbpoints]\n\n return backend.addTriangles(x,\n y,\n indices,\n color=gridcolors,\n alpha=self.getAlpha())\n\n else:\n _logger.error(\"Unhandled visualization %s\", visualization)\n return None\n\n @docstring(PointsBase)\n def pick(self, x, y):\n result = super(Scatter, self).pick(x, y)\n\n if result is not None:\n visualization = self.getVisualization()\n\n if visualization is self.Visualization.IRREGULAR_GRID:\n # Specific handling of picking for the irregular grid mode\n index = result.getIndices(copy=False)[0] // 4\n result = PickingResult(self, (index,))\n\n elif visualization is self.Visualization.REGULAR_GRID:\n # Specific handling of picking for the regular grid mode\n picked = result.getIndices(copy=False)\n if picked is None:\n return None\n row, column = picked[0][0], picked[1][0]\n\n gridInfo = self.__getRegularGridInfo()\n if gridInfo is None:\n return None\n\n if gridInfo.order == 'row':\n index = row * gridInfo.shape[1] + column\n else:\n index = row + column * gridInfo.shape[0]\n if index >= len(self.getXData(copy=False)): # OK as long as not log scale\n return None # Image can be larger than scatter\n\n result = PickingResult(self, (index,))\n\n elif visualization is self.Visualization.BINNED_STATISTIC:\n picked = result.getIndices(copy=False)\n if picked 
is None or len(picked) == 0 or len(picked[0]) == 0:\n return None\n row, col = picked[0][0], picked[1][0]\n histoInfo = self.__getHistogramInfo()\n if histoInfo is None:\n return None\n sx, sy = histoInfo.scale\n ox, oy = histoInfo.origin\n xdata = self.getXData(copy=False)\n ydata = self.getYData(copy=False)\n indices = numpy.nonzero(numpy.logical_and(\n numpy.logical_and(xdata >= ox + sx * col, xdata < ox + sx * (col + 1)),\n numpy.logical_and(ydata >= oy + sy * row, ydata < oy + sy * (row + 1))))[0]\n result = None if len(indices) == 0 else PickingResult(self, indices)\n\n return result\n\n def __getExecutor(self):\n \"\"\"Returns async greedy executor\n\n :rtype: _GreedyThreadPoolExecutor\n \"\"\"\n if self.__executor is None:\n self.__executor = _GreedyThreadPoolExecutor(max_workers=2)\n return self.__executor\n\n def _getDelaunay(self):\n \"\"\"Returns a :class:`Future` which result is the Delaunay object.\n\n :rtype: concurrent.futures.Future\n \"\"\"\n if self.__delaunayFuture is None or self.__delaunayFuture.cancelled():\n # Need to init a new delaunay\n x, y = self.getData(copy=False)[:2]\n # Remove not finite points\n mask = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))\n\n self.__delaunayFuture = self.__getExecutor().submit_greedy(\n 'delaunay', delaunay, x[mask], y[mask])\n\n return self.__delaunayFuture\n\n @staticmethod\n def __initInterpolator(delaunayFuture, values):\n \"\"\"Returns an interpolator for the given data points\n\n :param concurrent.futures.Future delaunayFuture:\n Future object which result is a Delaunay object\n :param numpy.ndarray values: The data value of valid points.\n :rtype: Union[callable,None]\n \"\"\"\n # Wait for Delaunay to complete\n try:\n triangulation = delaunayFuture.result()\n except CancelledError:\n triangulation = None\n\n if triangulation is None:\n interpolator = None # Error case\n else:\n # Lazy-loading of interpolator\n try:\n from scipy.interpolate import LinearNDInterpolator\n except ImportError:\n LinearNDInterpolator = None\n\n if LinearNDInterpolator is not None:\n interpolator = LinearNDInterpolator(triangulation, values)\n\n # First call takes a while, do it here\n interpolator([(0., 0.)])\n\n else:\n # Fallback using matplotlib interpolator\n import matplotlib.tri\n\n x, y = triangulation.points.T\n tri = matplotlib.tri.Triangulation(\n x, y, triangles=triangulation.simplices)\n mplInterpolator = matplotlib.tri.LinearTriInterpolator(\n tri, values)\n\n # Wrap interpolator to have same API as scipy's one\n def interpolator(points):\n return mplInterpolator(*points.T)\n\n return interpolator\n\n def _getInterpolator(self):\n \"\"\"Returns a :class:`Future` which result is the interpolator.\n\n The interpolator is a callable taking an array Nx2 of points\n as a single argument.\n The :class:`Future` result is None in case the interpolator cannot\n be initialized.\n\n :rtype: concurrent.futures.Future\n \"\"\"\n if (self.__interpolatorFuture is None or\n self.__interpolatorFuture.cancelled()):\n # Need to init a new interpolator\n x, y, values = self.getData(copy=False)[:3]\n # Remove not finite points\n mask = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))\n x, y, values = x[mask], y[mask], values[mask]\n\n self.__interpolatorFuture = self.__getExecutor().submit_greedy(\n 'interpolator',\n self.__initInterpolator, self._getDelaunay(), values)\n return self.__interpolatorFuture\n\n def _logFilterData(self, xPositive, yPositive):\n \"\"\"Filter out values with x or y <= 0 on log axes\n\n :param bool xPositive: 
True to filter arrays according to X coords.\n :param bool yPositive: True to filter arrays according to Y coords.\n :return: The filtered arrays or unchanged object if not filtering needed\n :rtype: (x, y, value, xerror, yerror)\n \"\"\"\n # overloaded from PointsBase to filter also value.\n value = self.getValueData(copy=False)\n\n if xPositive or yPositive:\n clipped = self._getClippingBoolArray(xPositive, yPositive)\n\n if numpy.any(clipped):\n # copy to keep original array and convert to float\n value = numpy.array(value, copy=True, dtype=numpy.float)\n value[clipped] = numpy.nan\n\n x, y, xerror, yerror = PointsBase._logFilterData(self, xPositive, yPositive)\n\n return x, y, value, xerror, yerror\n\n def getValueData(self, copy=True):\n \"\"\"Returns the value assigned to the scatter data points.\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray\n \"\"\"\n return numpy.array(self._value, copy=copy)\n\n def getAlphaData(self, copy=True):\n \"\"\"Returns the alpha (transparency) assigned to the scatter data points.\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray\n \"\"\"\n return numpy.array(self.__alpha, copy=copy)\n\n def getData(self, copy=True, displayed=False):\n \"\"\"Returns the x, y coordinates and the value of the data points\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :param bool displayed: True to only get curve points that are displayed\n in the plot. Default: False.\n Note: If plot has log scale, negative points\n are not displayed.\n :returns: (x, y, value, xerror, yerror)\n :rtype: 5-tuple of numpy.ndarray\n \"\"\"\n if displayed:\n data = self._getCachedData()\n if data is not None:\n assert len(data) == 5\n return data\n\n return (self.getXData(copy),\n self.getYData(copy),\n self.getValueData(copy),\n self.getXErrorData(copy),\n self.getYErrorData(copy))\n\n # reimplemented from PointsBase to handle `value`\n def setData(self, x, y, value, xerror=None, yerror=None, alpha=None, copy=True):\n \"\"\"Set the data of the scatter.\n\n :param numpy.ndarray x: The data corresponding to the x coordinates.\n :param numpy.ndarray y: The data corresponding to the y coordinates.\n :param numpy.ndarray value: The data corresponding to the value of\n the data points.\n :param xerror: Values with the uncertainties on the x values\n :type xerror: A float, or a numpy.ndarray of float32.\n If it is an array, it can either be a 1D array of\n same length as the data or a 2D array with 2 rows\n of same length as the data: row 0 for positive errors,\n row 1 for negative errors.\n :param yerror: Values with the uncertainties on the y values\n :type yerror: A float, or a numpy.ndarray of float32. 
See xerror.\n :param alpha: Values with the transparency (between 0 and 1)\n :type alpha: A float, or a numpy.ndarray of float32 \n :param bool copy: True make a copy of the data (default),\n False to use provided arrays.\n \"\"\"\n value = numpy.array(value, copy=copy)\n assert value.ndim == 1\n assert len(x) == len(value)\n\n # Reset triangulation and interpolator\n if self.__delaunayFuture is not None:\n self.__delaunayFuture.cancel()\n self.__delaunayFuture = None\n if self.__interpolatorFuture is not None:\n self.__interpolatorFuture.cancel()\n self.__interpolatorFuture = None\n\n # Data changed, this needs update\n self.__cacheRegularGridInfo = None\n self.__cacheHistogramInfo = None\n\n self._value = value\n self._updateColormappedData()\n\n if alpha is not None:\n # Make sure alpha is an array of float in [0, 1]\n alpha = numpy.array(alpha, copy=copy)\n assert alpha.ndim == 1\n assert len(x) == len(alpha)\n if alpha.dtype.kind != 'f':\n alpha = alpha.astype(numpy.float32)\n if numpy.any(numpy.logical_or(alpha < 0., alpha > 1.)):\n alpha = numpy.clip(alpha, 0., 1.)\n self.__alpha = alpha\n\n # set x, y, xerror, yerror\n\n # call self._updated + plot._invalidateDataRange()\n PointsBase.setData(self, x, y, xerror, yerror, copy)\n"
] | [
[
"numpy.convolve",
"numpy.polyfit",
"numpy.poly1d",
"numpy.log",
"numpy.take",
"numpy.nonzero",
"numpy.sqrt",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"numpy.append",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.fabs"
],
[
"numpy.issubdtype",
"numpy.lib.stride_tricks.as_strided",
"numpy.all",
"numpy.mean",
"numpy.any",
"numpy.cross",
"numpy.where",
"numpy.clip",
"numpy.arange",
"numpy.ceil",
"numpy.diff",
"numpy.ravel",
"numpy.zeros",
"numpy.logical_or",
"scipy.interpolate.LinearNDInterpolator",
"numpy.transpose",
"numpy.errstate",
"numpy.array",
"numpy.logical_and",
"numpy.isfinite",
"numpy.ones",
"numpy.prod",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jwallnoefer/multisat_qrepeater_sim_archive | [
"69b4c242fb760cf195871f38b3172d4dfd26c01a",
"69b4c242fb760cf195871f38b3172d4dfd26c01a"
] | [
"verificator/Maps.py",
"scenarios/multimemory/multi_memory_variant.py"
] | [
"\"\"\"\nThe maps that model the different processes in the QKD return for input that is diagonal in Bell-basis a diagonal output.\nTo reduce calculations I determined in the scipt \"How many numbers for state\" the effect of the maps on the diagonal elements\n\"\"\"\nimport numpy as np\nimport functools\n\n\"\"\"These are some helper functions. a-d represents the diagonal elements of the first state, e-h the ones of the second state\"\"\"\n\nz_rot = lambda a, b, c, d: np.array([b, a, d, c])\ny_rot = lambda a, b, c, d: np.array([d, c, b, a])\n\nperf_dist = lambda a, b, c, d, e, f, g, h: np.array(\n [a * e, d * h, a * g, d * f, d * e, a * h, d * g, a * f, c * g, b * f, c * e, b * h, b * g, c * f, b * e, c * h])\ndc0 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(\n [ae + af, be + bf, ce + cf, de + df])\ndc1 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(\n [ae + af + ag + ah, be + bf + bg + bh, ce + cf + cg + ch, de + df + dg + dh])\n\"\"\"p is the ideality of the map, q = 1-p\"\"\"\nmixnswap = lambda p, q, a, b, c, d, e, f, g, h: np.array([a * e * p + b * f * p + c * g * p + d * h * p + q / 4,\n a * f * p + b * e * p + c * h * p + d * g * p + q / 4,\n a * g * p + b * h * p + c * e * p + d * f * p + q / 4,\n a * h * p + b * g * p + c * f * p + d * e * p + q / 4])\n\n\ndef dp_sing(t, T, a, b, c, d):\n \"\"\" Calculate the state after dephasing for one memory for time t.\n Parameters\n ----------\n t : float \n time of dephasig\n T : float\n dephasing time of the memory\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal elements of the state after dephasing\n\n \"\"\"\n lam = (1 - np.exp(-t / (2 * T))) / 2\n return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()\n\n\ndef dp_doub(t, T, a, b, c, d):\n \"\"\" Calculate the state after dephasing for time t1 for one memory and t2 for the other memory.\n Parameters\n ----------\n t : float \n time of dephasig\n T : float\n dephasing time of the memories\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal elements of the state after dephasing\n\n \"\"\"\n lam = (1 - np.exp(- t / (2 * T))) / 2\n lam = lam + lam - 2 * lam**2\n return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()\n\n\ndef coupl(em, a, b, c, d):\n \"\"\" Calculate the state after imperfect coupling to the fibre.\n Parameters\n ----------\n em1, em2 : float \n misalignment errors of the stations (0-1)\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal element of the state after coupling\n\n \"\"\"\n p = 1 - em\n q = em\n return (p * np.array([a, b, c, d]) + q * y_rot(a, b, c, d)).tolist()\n\n\[email protected]_cache(maxsize=2048)\ndef distil(lam, pd1, pd2, a, b, c, d, e, f, g, h):\n \"\"\" Calculate the state after imperfect entanglement distillation and dephasing.\n Parameters\n ----------\n lam1, lam2 : float\n idealities of the distillation process of the stations\n pd1, pd2 : float\n probabilities for dark counts in the measurement for the stations\n a-d: float\n diagonal elements of the fist state\n e-h: float\n diagonal elements of the second state\n\n Returns\n -------\n list of diagonal element of the state after dephasing, probability for acceptance of the distillation result\n\n \"\"\"\n p0 = (1 - pd1) * (1 - pd2) # probability for zero dark counts\n # probability for one or two dark counts\n p1 = 0.5 * (pd1 + pd2 - pd1 * pd2)\n mixed = (lam * 
perf_dist(a, b, c, d, e, f, g, h) + (1 - lam) * np.ones((16)) /\n 16).tolist() # mixing the result of the perfect map with abs mixed state\n # state times the accapance probability\n unnormed = p0 * dc0(*mixed) + p1 * dc1(*mixed)\n trace = np.sum(unnormed) # acceptance probability\n normed = (unnormed / trace).tolist() # normalising the state\n return normed, trace\n\n\ndef swap(lam, a, b, c, d, e, f, g, h):\n \"\"\" Calculate the state after imperfect entanglement swapping and dephasing.\n Parameters\n ----------\n lam: float\n idealities of the swapping process of the middle station\n a-d: float\n diagonal elements of the fist state\n e-h: float\n diagonal elements of the second state\n\n Returns\n -------\n list of diagonal element of the state after swapping\n\n \"\"\"\n swapped = mixnswap(lam, 1 - lam, a, b, c, d, e, f, g, h)\n normed = swapped / np.sum(swapped) # normalising the state\n return np.array(normed).tolist()\n",
"import os, sys; sys.path.insert(0, os.path.abspath(\".\"))\nfrom quantum_objects import SchedulingSource, Source, Station\nfrom protocol import TwoLinkProtocol\nfrom world import World\nfrom events import SourceEvent, EntanglementSwappingEvent, EntanglementPurificationEvent\nimport libs.matrix as mat\nimport numpy as np\nfrom libs.aux_functions import apply_single_qubit_map, y_noise_channel, z_noise_channel, w_noise_channel\nfrom warnings import warn\nfrom collections import defaultdict\nfrom noise import NoiseModel, NoiseChannel\nimport pandas as pd\nfrom consts import C, L_ATT\n\n\ndef construct_dephasing_noise_channel(dephasing_time):\n def lambda_dp(t):\n return (1 - np.exp(-t / dephasing_time)) / 2\n\n def dephasing_noise_channel(rho, t):\n return z_noise_channel(rho=rho, epsilon=lambda_dp(t))\n\n return dephasing_noise_channel\n\n\ndef construct_y_noise_channel(epsilon):\n return lambda rho: y_noise_channel(rho=rho, epsilon=epsilon)\n\n\ndef construct_w_noise_channel(epsilon):\n return lambda rho: w_noise_channel(rho=rho, alpha=(1 - epsilon))\n\n\ndef alpha_of_eta(eta, p_d):\n return eta * (1 - p_d) / (1 - (1 - eta) * (1 - p_d)**2)\n\n\nclass MultiMemoryProtocol(TwoLinkProtocol):\n def __init__(self, world, num_memories):\n self.num_memories = num_memories\n super(MultiMemoryProtocol, self).__init__(world=world)\n\n def check(self):\n left_pairs = self._get_left_pairs()\n num_left_pairs = len(left_pairs)\n right_pairs = self._get_right_pairs()\n num_right_pairs = len(right_pairs)\n num_left_pairs_scheduled = len(self._left_pairs_scheduled())\n num_right_pairs_scheduled = len(self._right_pairs_scheduled())\n left_used = num_left_pairs + num_left_pairs_scheduled\n right_used = num_right_pairs + num_right_pairs_scheduled\n\n if left_used < self.num_memories:\n for _ in range(self.num_memories - left_used):\n self.source_A.schedule_event()\n if right_used < self.num_memories:\n for _ in range(self.num_memories - right_used):\n self.source_B.schedule_event()\n\n if num_left_pairs != 0 and num_right_pairs != 0:\n num_swappings = min(num_left_pairs, num_right_pairs)\n for left_pair, right_pair in zip(left_pairs[:num_swappings], right_pairs[:num_swappings]):\n # assert that we do not schedule the same swapping more than once\n try:\n next(filter(lambda event: (isinstance(event, EntanglementSwappingEvent)\n and (left_pair in event.pairs)\n and (right_pair in event.pairs)\n ),\n self.world.event_queue.queue))\n is_already_scheduled = True\n except StopIteration:\n is_already_scheduled = False\n if not is_already_scheduled:\n ent_swap_event = EntanglementSwappingEvent(time=self.world.event_queue.current_time, pairs=[left_pair, right_pair])\n self.world.event_queue.add_event(ent_swap_event)\n\n long_range_pairs = self._get_long_range_pairs()\n if long_range_pairs:\n for long_range_pair in long_range_pairs:\n self._eval_pair(long_range_pair)\n # cleanup\n long_range_pair.qubits[0].destroy()\n long_range_pair.qubits[1].destroy()\n long_range_pair.destroy()\n self.check()\n\n\ndef run(length, max_iter, params, cutoff_time=None, num_memories=1, mode=\"sim\"):\n # unpack the parameters\n try:\n P_LINK = params[\"P_LINK\"]\n except KeyError:\n P_LINK = 1.0\n try:\n T_P = params[\"T_P\"] # preparation time\n except KeyError:\n T_P = 0\n try:\n T_DP = params[\"T_DP\"] # dephasing time\n except KeyError:\n T_DP = 1.0\n try:\n E_MA = params[\"E_MA\"] # misalignment error\n except KeyError:\n E_MA = 0\n try:\n P_D = params[\"P_D\"] # dark count probability\n except KeyError:\n P_D = 0\n try:\n LAMBDA_BSM 
= params[\"LAMBDA_BSM\"]\n except KeyError:\n LAMBDA_BSM = 1\n\n def imperfect_bsm_err_func(four_qubit_state):\n return LAMBDA_BSM * four_qubit_state + (1 - LAMBDA_BSM) * mat.reorder(mat.tensor(mat.ptrace(four_qubit_state, [1, 2]), mat.I(4) / 4), [0, 2, 3, 1])\n\n def time_distribution(source):\n comm_distance = np.max([np.abs(source.position - source.target_stations[0].position), np.abs(source.position - source.target_stations[1].position)])\n comm_time = 2 * comm_distance / C\n eta = P_LINK * np.exp(-comm_distance / L_ATT)\n eta_effective = 1 - (1 - eta) * (1 - P_D)**2\n trial_time = T_P + comm_time # I don't think that paper uses latency time and loading time?\n random_num = np.random.geometric(eta_effective)\n return random_num * trial_time, random_num\n\n def state_generation(source):\n state = np.dot(mat.phiplus, mat.H(mat.phiplus))\n comm_distance = np.max([np.abs(source.position - source.target_stations[0].position), np.abs(source.position - source.target_stations[1].position)])\n storage_time = 2 * comm_distance / C\n for idx, station in enumerate(source.target_stations):\n if station.memory_noise is not None: # dephasing that has accrued while other qubit was travelling\n state = apply_single_qubit_map(map_func=station.memory_noise, qubit_index=idx, rho=state, t=storage_time)\n if station.dark_count_probability is not None: # dark counts are handled here because the information about eta is needed for that\n eta = P_LINK * np.exp(-comm_distance / L_ATT)\n state = apply_single_qubit_map(map_func=w_noise_channel, qubit_index=idx, rho=state, alpha=alpha_of_eta(eta=eta, p_d=station.dark_count_probability))\n return state\n\n misalignment_noise = NoiseChannel(n_qubits=1, channel_function=construct_y_noise_channel(epsilon=E_MA))\n\n world = World()\n station_A = Station(world, position=0, memory_noise=None,\n creation_noise_channel=misalignment_noise,\n dark_count_probability=P_D\n )\n station_B = Station(world, position=length, memory_noise=None,\n creation_noise_channel=misalignment_noise,\n dark_count_probability=P_D\n )\n station_central = Station(world, position=length / 2,\n memory_noise=construct_dephasing_noise_channel(dephasing_time=T_DP),\n memory_cutoff_time=cutoff_time,\n BSM_noise_model=NoiseModel(channel_before=NoiseChannel(n_qubits=4, channel_function=imperfect_bsm_err_func))\n )\n source_A = SchedulingSource(world, position=length / 2, target_stations=[station_A, station_central], time_distribution=time_distribution, state_generation=state_generation)\n source_B = SchedulingSource(world, position=length / 2, target_stations=[station_central, station_B], time_distribution=time_distribution, state_generation=state_generation)\n protocol = MultiMemoryProtocol(world, num_memories=num_memories)\n protocol.setup()\n\n # def step_check():\n # protocol.check()\n # world.print_status()\n #\n # def step_resolve():\n # world.event_queue.resolve_next_event()\n # world.print_status()\n #\n # import code\n # code.interact(local=locals())\n\n while len(protocol.time_list) < max_iter:\n protocol.check()\n world.event_queue.resolve_next_event()\n\n return protocol\n\n\nif __name__ == \"__main__\":\n p = run(length=22000, max_iter=1000, params={\"P_LINK\": 0.01}, num_memories=400, mode=\"sim\")\n # import matplotlib.pyplot as plt\n # plt.scatter(p.time_list, p.fidelity_list)\n # plt.show()\n"
] | [
[
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.ones"
],
[
"numpy.exp",
"numpy.random.geometric",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aayushkafle/implicit_alignment | [
"4835a8a5acc4b30daf7e1c95195f160e76306cd1"
] | [
"ai/domain_adaptation/utils/vis.py"
] | [
"import numpy as np\nfrom ai.domain_adaptation.datasets import image_index\nfrom ai.domain_adaptation.utils import np_utils\nfrom IPython.display import display, Image\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n\ndef load_data_for_vis(prob_path, target_domain_file, dataset_dir):\n domain_info = image_index.parse_domain_file(target_domain_file, dataset_dir)\n yhat_info = np_utils.parse_predictions_from_pickle(prob_path)\n\n return domain_info, yhat_info\n\n\ndef visulize_confidence(prob_path, target_domain_file, dataset_dir, cls_id):\n domain_info, yhat_info = load_data_for_vis(prob_path, target_domain_file, dataset_dir)\n vis_confident_predictions(cls_id, None, domain_info, yhat_info)\n\n\ndef vis_confident_predictions(cls_id, top_k=20, domain_info=None, yhat_info=None):\n sorted_id_indices = np_utils.retrieve_sorted_indices_for_one_cls(cls_id, yhat_info)\n\n for ith, example_id in enumerate(sorted_id_indices):\n filename, label = domain_info.image_path_label_tuples[example_id]\n print(f'{domain_info.label_description_dict[label]}, P {yhat_info.prob[example_id, cls_id]:.3}')\n img = Image(filename=filename, width=150, height=150)\n display(img)\n if top_k is not None and ith > top_k:\n break\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # np.set_printoptions(precision=3)\n\n fig, ax = plt.subplots(figsize=(20, 20))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n # ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.savefig(f'./plots/confusion_matrix{title}.pdf')\n return ax\n"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.arange",
"sklearn.metrics.confusion_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ibadkureshi/tnk-locationallocation | [
"b06abcb7bf8675b13e4c2e4fe419afb5ee11018f"
] | [
"pmedian/views.py"
] | [
"from django.shortcuts import render\nfrom pmedian.tasks import *\nfrom pandas import errors\nfrom pmedapp.common.utilities import *\nimport json\nimport pandas as pd\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.datastructures import MultiValueDictKeyError\nimport glob\nimport os.path\n\n\n@csrf_exempt\ndef extract_csv(request):\n \"\"\"\n Getting a (two-column) csv and returning it as a json\n **Expected a lat/lon csv with headers\n \"\"\"\n if request.method == 'POST' and request.FILES['myfile']:\n\n if not validate_upload(request, '.csv'):\n return HttpResponseBadRequest(\"Data error: Please provide a valid csv file\")\n\n try:\n # expecting csv with headers\n df = pd.read_csv(request.FILES['myfile'])\n if column_numeric(df[df.columns[0]]) and column_numeric(df[df.columns[1]]) and not df.isnull().values.any():\n df.columns = ['latitude', 'longitude']\n return HttpResponse(df.to_json(orient='records'))\n else:\n return HttpResponseBadRequest(\"Data input error: Ensure data is numeric and no missing values exist\")\n\n except errors.EmptyDataError:\n return HttpResponse('CSV file is empty')\n\n else:\n # In case of GET request, just show the form\n return render(request, 'file_upload.html', locals())\n\n\n@csrf_exempt\ndef create_task(request):\n if request.method == 'POST':\n try:\n args = json.loads(request.POST.get('data')) # error checking\n input_df = pd.read_csv(request.FILES['myfile'], header=0)\n task = p_median_calculation_task.delay(input_df.to_json(), args)\n response_data = {'task_id': str(task)}\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except MultiValueDictKeyError:\n return HttpResponseBadRequest(\"Please provide the correct input data\")\n else:\n return HttpResponse(status=405, reason=\"Method not allowed\")\n\n\n@csrf_exempt\ndef get_task(request):\n \"\"\"\n Return the status of a task given it's id\n \"\"\"\n try:\n task_id = request.GET['task-id']\n result = AsyncResult(task_id)\n result_dct = {result.task_id: {\n 'status': result.status, 'date_done': str(result.date_done)}}\n result_dct[result.task_id]['result'] = result.result\n\n try:\n file = glob.glob(\"output/*\"+str(result)+\".json\")[0]\n result_dct['result_location'] = \"http://localhost:8000/pmedian/get-file?filename=\" + file[7:]\n except IndexError:\n result_dct['result_location'] = 'Calculation ongoing'\n\n return HttpResponse(json.dumps(result_dct))\n\n except KeyError:\n return HttpResponseBadRequest(\"Please provide a valid task-id\")\n\n\n@csrf_exempt\ndef get_all_tasks(request):\n \"\"\"\n Get all celery tasks from and return id, status (json)\n \"\"\"\n\n path = \"/tmp/results/celery-task-meta-*\"\n results = (glob.glob(path))\n\n result_array = []\n for result in results:\n asyng_result = AsyncResult(result[len(path) - 1:])\n result_dct = {}\n result_dct['id'] = result[len(path) - 1:]\n result_dct['status'] = asyng_result.status\n result_dct['date_done'] = str(asyng_result.date_done)\n try:\n file = glob.glob(\"output/*\"+str(asyng_result)+\".json\")[0]\n result_dct['result'] = \"http://localhost:8000/pmedian/get-file?filename=\" + file[7:]\n with open(file) as f:\n result_dct['name'] = json.load(f)['name']\n except IndexError:\n result_dct['result'] = 'Calculation ongoing'\n\n result_array.append(result_dct)\n\n return HttpResponse(json.dumps(result_array))\n\n\n@csrf_exempt\ndef get_file(request):\n \"\"\"\n Download output file to disk.\n \"\"\"\n return download_output_file(request)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
brentyi/multimodalfilter | [
"210b0e241120e0fbbeaef5e478bab36ffe1e159d",
"210b0e241120e0fbbeaef5e478bab36ffe1e159d",
"210b0e241120e0fbbeaef5e478bab36ffe1e159d"
] | [
"crossmodal/door_models/layers.py",
"crossmodal/base_models/crossmodal_kf.py",
"crossmodal/push_models/pf.py"
] | [
"import torch\nimport torch.nn as nn\nfrom fannypack.nn import resblocks\n\nstate_dim = 3\ncontrol_dim = 7\nobs_pos_dim = 3\nobs_sensors_dim = 7\n\n\ndef state_layers(units: int) -> nn.Module:\n \"\"\"Create a state encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(state_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef control_layers(units: int) -> nn.Module:\n \"\"\"Create a control command encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(control_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_image_layers(units: int) -> nn.Module:\n \"\"\"Create an image encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n resblocks.Conv2d(channels=32, kernel_size=3),\n nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, padding=1),\n nn.Flatten(), # 32 * 32 * 8\n nn.Linear(8 * 32 * 32, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_pos_layers(units: int) -> nn.Module:\n \"\"\"Create an end effector position encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(obs_pos_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_sensors_layers(units: int) -> nn.Module:\n \"\"\"Create an F/T sensor encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(obs_sensors_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n",
"import abc\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchfilter\nfrom torchfilter import types\n\nfrom .utility import weighted_average\n\n\nclass CrossmodalKalmanFilterWeightModel(nn.Module, abc.ABC):\n \"\"\"Crossmodal weight model.\"\"\"\n\n def __init__(self, modality_count: int, state_dim: int):\n super().__init__()\n\n self.modality_count = modality_count\n self.state_dim = state_dim\n \"\"\"int: Number of modalities.\"\"\"\n\n @abc.abstractmethod\n def forward(self, *, observations: types.ObservationsTorch) -> torch.Tensor:\n \"\"\"Compute log-modality weights.\n\n Args:\n observations (types.ObservationsTorch): Model observations.\n\n Returns:\n torch.Tensor: Computed weights for states. Shape should be\n `(modality_count, N, state_dim)`.\n torch.Tensor: Computed weights for state covariances. Shape should be\n `(modality_count, N, state_dim, state_dim)`.\n \"\"\"\n pass\n\n\nclass CrossmodalKalmanFilter(torchfilter.base.Filter):\n \"\"\"Utility class for merging unimodal kalman filter models via crossmodal weighting.\"\"\"\n\n def __init__(\n self,\n *,\n filter_models: List[torchfilter.filters.VirtualSensorExtendedKalmanFilter],\n crossmodal_weight_model: CrossmodalKalmanFilterWeightModel,\n state_dim: int,\n ):\n super().__init__(state_dim=state_dim)\n\n self.filter_models = nn.ModuleList(filter_models)\n \"\"\" nn.ModuleList: List of measurement models. \"\"\"\n\n self.crossmodal_weight_model = crossmodal_weight_model\n \"\"\" crossmodal.base_models.CrossmodalKalmanFilterWeightModel: Crossmodal\n weight model; should output one weight per measurement model. \"\"\"\n\n self._enabled_models: List[bool] = [True for _ in self.filter_models]\n self.weighted_covariances = None\n\n @property\n def enabled_models(self) -> List[bool]:\n \"\"\"List of enabled unimodal measurement models.\n\n Returns:\n List[bool]: List of booleans, one for each measurement model: set flag to\n False to disable a modality.\n \"\"\"\n return self._enabled_models\n\n @enabled_models.setter\n def enabled_models(self, enabled_models: List[bool]) -> None:\n \"\"\"Setter for the `enabled_models` property.\n\n Args:\n enabled_models (List[bool]): New value.\n \"\"\"\n\n # Input validation\n assert isinstance(enabled_models, list)\n assert len(enabled_models) == len(self.filter_models)\n for x in enabled_models:\n assert type(x) == bool\n\n # Assign value\n self._enabled_models = enabled_models\n\n def forward(\n self,\n *,\n observations: types.ObservationsTorch,\n controls: types.ControlsTorch,\n ) -> types.StatesTorch:\n \"\"\"Kalman filter with crossmodal weights forward pass, single timestep.\n\n Args:\n observations (dict or torch.Tensor): Measurement inputs. Should be\n either a dict of tensors or tensor of size `(N, ...)`.\n controls (dict or torch.Tensor): control inputs. should be either a\n dict of tensors or tensor of shape `(N, ...)`.\n Returns:\n torch.Tensor: Weighted filter state estimation. Shape should be `(N, state_dim)`\n torch.Tensor: Weighted filter state estimation covariance. 
Shape should be `(N, state_dim, state_dim)`.\n \"\"\"\n\n N, _ = controls.shape\n\n unimodal_states, unimodal_covariances = self.calculate_unimodal_states(\n observations, controls\n )\n\n assert unimodal_states.shape == (\n np.sum(self._enabled_models),\n N,\n self.state_dim,\n )\n assert unimodal_covariances.shape == (\n np.sum(self._enabled_models),\n N,\n self.state_dim,\n self.state_dim,\n )\n\n if np.sum(self._enabled_models) < len(self._enabled_models):\n state_weights = torch.from_numpy(\n np.array(self._enabled_models).astype(np.float32)\n )\n state_weights = (\n state_weights.unsqueeze(-1).unsqueeze(-1).repeat(1, N, self.state_dim)\n )\n state_weights = state_weights.to(unimodal_states.device)\n else:\n state_weights = self.crossmodal_weight_model(observations=observations)\n state_weights = state_weights[self._enabled_models]\n # note: my crossmodal weights will look different in output than PF\n assert state_weights.shape == (np.sum(self._enabled_models), N, self.state_dim)\n\n weighted_states, weighted_covariances = self.calculate_weighted_states(\n state_weights, unimodal_states, unimodal_covariances\n )\n\n assert weighted_states.shape == (N, self.state_dim)\n assert weighted_covariances.shape == (N, self.state_dim, self.state_dim)\n\n self.weighted_covariances = weighted_covariances\n\n for f in self.filter_models:\n f.states_prev = weighted_states\n f.states_covariance_prev = weighted_covariances\n\n return weighted_states\n\n def calculate_weighted_states(\n self, state_weights, unimodal_states, unimodal_covariances\n ):\n model_dim, N, state_dim = state_weights.shape\n assert model_dim == np.sum(self._enabled_models)\n assert state_dim == self.state_dim\n\n weighted_states = weighted_average(unimodal_states, state_weights)\n covariance_weights = state_weights.unsqueeze(-1).repeat(\n (1, 1, 1, self.state_dim)\n )\n covariance_weights = covariance_weights * covariance_weights.transpose(-1, -2)\n weighted_covariances = torch.sum(covariance_weights * unimodal_covariances, 0)\n\n return weighted_states, weighted_covariances\n\n def calculate_unimodal_states(self, observations, controls):\n unimodal_states = torch.stack(\n [\n (filter_model(observations=observations, controls=controls))\n for i, filter_model in enumerate(self.filter_models)\n if self._enabled_models[i]\n ]\n )\n\n unimodal_covariances = torch.stack(\n [\n filter_model._belief_covariance\n for i, filter_model in enumerate(self.filter_models)\n if self._enabled_models[i]\n ]\n )\n\n return unimodal_states, unimodal_covariances\n\n @property\n def state_covariance_estimate(self):\n return self.weighted_covariances\n\n def initialize_beliefs(self, *, mean: torch.Tensor, covariance: torch.Tensor):\n \"\"\"Set kalman state prediction and state covariance to mean and covariance.\n\n Args:\n mean (torch.Tensor): Mean of belief. Shape should be\n `(N, state_dim)`.\n covariance (torch.Tensor): Covariance of belief. 
Shape should be\n `(N, state_dim, state_dim)`.\n \"\"\"\n N = mean.shape[0]\n assert mean.shape == (N, self.state_dim)\n assert covariance.shape == (N, self.state_dim, self.state_dim)\n\n for model in self.filter_models:\n model.initialize_beliefs(mean=mean, covariance=covariance)\n\n def measurement_initialize_beliefs(self, observations):\n N = observations[[*observations][0]].shape[0]\n\n model_list = [\n filter_model.virtual_sensor_model(observations=observations)\n for i, filter_model in enumerate(self.filter_models)\n if self._enabled_models[i]\n ]\n\n unimodal_states = torch.stack([x[0] for x in model_list])\n unimodal_scale_trils = torch.stack([x[1] for x in model_list])\n unimodal_covariances = unimodal_scale_trils @ unimodal_scale_trils.transpose(\n -1, -2\n )\n\n state_weights = self.crossmodal_weight_model(observations=observations)\n state_weights = state_weights[self._enabled_models]\n\n weighted_states = weighted_average(unimodal_states, state_weights)\n covariance_multiplier = (\n torch.prod(torch.prod(state_weights, dim=-1), dim=0)\n .unsqueeze(-1)\n .unsqueeze(-1)\n )\n assert covariance_multiplier.shape == (N, 1, 1)\n weighted_covariances = covariance_multiplier * torch.sum(\n unimodal_covariances, dim=0\n )\n\n assert weighted_states.shape == (N, self.state_dim)\n assert weighted_covariances.shape == (N, self.state_dim, self.state_dim)\n\n self.initialize_beliefs(mean=weighted_states, covariance=weighted_covariances)\n\n\nclass CrossmodalVirtualSensorModel(torchfilter.base.VirtualSensorModel):\n \"\"\"Utility class for merging unimodal measurement models via crossmodal weighting.\"\"\"\n\n def __init__(\n self,\n *,\n virtual_sensor_model: List[torchfilter.base.VirtualSensorModel],\n crossmodal_weight_model: CrossmodalKalmanFilterWeightModel,\n state_dim: int,\n ):\n super().__init__(state_dim=state_dim)\n\n self.virtual_sensor_model = nn.ModuleList(virtual_sensor_model)\n \"\"\" nn.ModuleList: List of measurement models. \"\"\"\n\n self.crossmodal_weight_model = crossmodal_weight_model\n \"\"\" crossmodal.base_models.CrossmodalKalmanFilterWeightModel: Crossmodal\n weight model; should output one weight per measurement model. \"\"\"\n\n self._enabled_models: List[bool] = [True for _ in self.virtual_sensor_model]\n\n @property\n def enabled_models(self) -> List[bool]:\n \"\"\"List of enabled unimodal measurement models.\n\n Returns:\n List[bool]: List of booleans, one for each measurement model: set flag to\n False to disable a modality.\n \"\"\"\n return self._enabled_models\n\n @enabled_models.setter\n def enabled_models(self, enabled_models: List[bool]) -> None:\n \"\"\"Setter for the `enabled_models` property.\n\n Args:\n enabled_models (List[bool]): New value.\n \"\"\"\n\n # Input validation\n assert isinstance(enabled_models, list)\n assert len(enabled_models) == len(self.virtual_sensor_model)\n for x in enabled_models:\n assert type(x) == bool\n\n # Assign value\n self._enabled_models = enabled_models\n\n def forward(\n self, *, observations: types.ObservationsTorch\n ) -> Tuple[types.StatesTorch, types.ScaleTrilTorch]:\n \"\"\"Observation model forward pass, over batch size `N`.\n For each member of a batch, we expect one unique observation.\n\n Args:\n observations (dict or torch.Tensor): Measurement inputs. Should be\n either a dict of tensors or tensor of size `(N, ...)`.\n Returns:\n torch.Tensor: Measurement state prediction. Shape should be `(N, state_dim)`\n torch.Tensor: Measurement state prediction covariance. 
Shape should be `(N, state_dim, state_dim)`.\n \"\"\"\n N = observations[[*observations][0]].shape[0]\n\n model_list = [\n (virtual_sensor_model(observations=observations))\n for i, virtual_sensor_model in enumerate(self.virtual_sensor_model)\n if self._enabled_models[i]\n ]\n\n unimodal_states = torch.stack([x[0] for x in model_list])\n unimodal_scale_trils = torch.stack([x[1] for x in model_list])\n unimodal_covariances = unimodal_scale_trils @ unimodal_scale_trils.transpose(\n -1, -2\n )\n\n assert unimodal_states.shape == (\n np.sum(self._enabled_models),\n N,\n self.state_dim,\n )\n assert unimodal_covariances.shape == (\n np.sum(self._enabled_models),\n N,\n self.state_dim,\n self.state_dim,\n )\n\n if np.sum(self._enabled_models) < len(self._enabled_models):\n state_weights = torch.from_numpy(\n np.array(self._enabled_models).astype(np.float32)\n )\n state_weights = (\n state_weights.unsqueeze(-1).unsqueeze(-1).repeat(1, N, self.state_dim)\n )\n state_weights = state_weights.to(unimodal_states.device)\n else:\n state_weights = self.crossmodal_weight_model(observations=observations)\n state_weights = state_weights[self._enabled_models]\n\n # note: my crossmodal weights will look different in output than PF\n assert state_weights.shape == (np.sum(self._enabled_models), N, self.state_dim)\n\n weighted_states = weighted_average(unimodal_states, state_weights)\n covariance_multiplier = (\n torch.prod(torch.prod(state_weights, dim=-1), dim=0)\n .unsqueeze(-1)\n .unsqueeze(-1)\n )\n assert covariance_multiplier.shape == (N, 1, 1)\n weighted_covariances = covariance_multiplier * torch.sum(\n unimodal_covariances, dim=0\n )\n\n assert weighted_states.shape == (N, self.state_dim)\n assert weighted_covariances.shape == (N, self.state_dim, self.state_dim)\n\n return weighted_states, torch.cholesky(weighted_covariances)\n",
"from typing import Set, cast\n\nimport torch\nimport torch.nn as nn\nimport torchfilter\nimport torchfilter.types as types\nfrom fannypack.nn import resblocks\n\nfrom ..tasks import PushTask\nfrom . import layers\nfrom .dynamics import PushDynamicsModel\n\n\nclass PushParticleFilter(torchfilter.filters.ParticleFilter, PushTask.Filter):\n def __init__(self):\n \"\"\"Initializes a particle filter for our door task.\"\"\"\n\n super().__init__(\n dynamics_model=PushDynamicsModel(),\n measurement_model=PushMeasurementModel(),\n num_particles=30,\n )\n\n def train(self, mode: bool = True):\n \"\"\"Adjust particle count based on train vs eval mode.\"\"\"\n self.num_particles = 30 if mode else 300\n super().train(mode)\n\n\nclass PushMeasurementModel(torchfilter.base.ParticleFilterMeasurementModel):\n def __init__(\n self, units: int = 64, modalities: Set[str] = {\"image\", \"pos\", \"sensors\"}\n ):\n \"\"\"Initializes a measurement model for our door task.\"\"\"\n\n super().__init__(state_dim=2)\n\n valid_modalities = {\"image\", \"pos\", \"sensors\"}\n assert len(valid_modalities | modalities) == 3, \"Received invalid modality\"\n assert len(modalities) > 0, \"Received empty modality list\"\n self.modalities = modalities\n\n if \"image\" in modalities:\n self.observation_image_layers = layers.observation_image_layers(\n units, spanning_avg_pool=False\n )\n if \"pos\" in modalities:\n self.observation_pos_layers = layers.observation_pos_layers(units)\n if \"sensors\" in modalities:\n self.observation_sensors_layers = layers.observation_sensors_layers(units)\n\n self.state_layers = layers.state_layers(units)\n\n self.shared_layers = nn.Sequential(\n nn.Linear(units * (1 + len(modalities)), units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n resblocks.Linear(units),\n nn.Linear(units, 1),\n # nn.LogSigmoid()\n )\n\n self.units = units\n\n def forward(\n self, *, states: types.StatesTorch, observations: types.ObservationsTorch\n ) -> types.StatesTorch:\n assert type(observations) == dict\n assert len(states.shape) == 3 # (N, M, state_dim)\n assert states.shape[2] == self.state_dim\n observations = cast(types.TorchDict, observations)\n\n # N := distinct trajectory count\n # M := particle count\n N, M, _ = states.shape\n\n # Construct observations feature vector\n # (N, obs_dim)\n obs = []\n if \"image\" in self.modalities:\n obs.append(\n self.observation_image_layers(observations[\"image\"][:, None, :, :])\n )\n if \"pos\" in self.modalities:\n obs.append(self.observation_pos_layers(observations[\"gripper_pos\"]))\n if \"sensors\" in self.modalities:\n obs.append(self.observation_sensors_layers(observations[\"gripper_sensors\"]))\n observation_features = torch.cat(obs, dim=1)\n\n # (N, obs_features) => (N, M, obs_features)\n observation_features = observation_features[:, None, :].expand(\n N, M, self.units * len(obs)\n )\n assert observation_features.shape == (N, M, self.units * len(obs))\n\n # (N, M, state_dim) => (N, M, units)\n state_features = self.state_layers(states)\n # state_features = self.state_layers(states * torch.tensor([[[1., 0.]]], device=states.device))\n assert state_features.shape == (N, M, self.units)\n\n merged_features = torch.cat((observation_features, state_features), dim=2)\n assert merged_features.shape == (N, M, self.units * (len(obs) + 1))\n\n # (N, M, merged_dim) => (N, M, 1)\n log_likelihoods = self.shared_layers(merged_features)\n assert log_likelihoods.shape == (N, M, 1)\n\n # Return (N, M)\n return torch.squeeze(log_likelihoods, dim=2)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Flatten"
],
[
"torch.nn.ModuleList",
"torch.sum",
"torch.cholesky",
"torch.prod",
"torch.stack",
"numpy.array",
"numpy.sum"
],
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.squeeze",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ojInc/google-research | [
"650580cbf928aa640bf39897c5758ddb71b68a51",
"084c18934c353207662aba0db6db52850029faf2"
] | [
"kws_streaming/train/model_train_eval.py",
"gfsa/datasets/random_python/python_numbers_control_flow.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple speech recognition to spot a limited number of keywords.\n\nIt is based on tensorflow/examples/speech_commands\nThis is a self-contained example script that will train a very basic audio\nrecognition model in TensorFlow. It downloads the necessary training data and\nruns with reasonable defaults to train within a few hours even only using a CPU.\n\nIt is intended as an introduction to using neural networks for audio\nrecognition, and is not a full speech recognition system. This network uses a\nkeyword detection style to spot discrete words from a small vocabulary,\nconsisting of\n\"yes\", \"no\", \"up\", \"down\", \"left\", \"right\", \"on\", \"off\", \"stop\", and \"go\".\n\nTo run the training process, use:\n\nbazel run model_train_eval.py\n\nThis will write out checkpoints to /tmp/speech_commands_train/, and will\ndownload over 1GB of open source training data, so you'll need enough free space\nand a good internet connection. The default data is a collection of thousands of\none-second .wav files, each containing one spoken word. This data set is\ncollected from https://aiyprojects.withgoogle.com/open_speech_recording, please\nconsider contributing to help improve this and other models!\n\nAs training progresses, it will print out its accuracy metrics, which should\nrise above 90% by the end. Once it's complete, it will produce\nKeras, SavedModel, TFLite and graphdef representations.\n\nIf you want to train on your own data, you'll need to create .wavs with your\nrecordings, all at a consistent length, and then arrange them into subfolders\norganized by label. For example, here's a possible file structure:\n\ndata >\n up >\n audio_0.wav\n audio_1.wav\n down >\n audio_2.wav\n audio_3.wav\n other>\n audio_4.wav\n audio_5.wav\n\nYou'll also need to tell the script what labels to look for, using the\n`--wanted_words` argument. 
In this case, 'up,down' might be what you want, and\nthe audio in the 'other' folder would be used to train an 'unknown' category.\n\nTo pull this all together, you'd run:\n\nbazel run tensorflow/examples/speech_commands:train --\n--data_dir /data --wanted_words up,down\n\nAbove script will automatically split data into training/validation and testing.\n\nIf you prefer to split the data on your own, then you should set flag\n\"--split_data 0\" and prepare folders with structure:\n\ndata >\n training >\n up >\n audio_0.wav\n audio_1.wav\n down >\n audio_2.wav\n audio_3.wav\n validation >\n up >\n audio_6.wav\n audio_7.wav\n down >\n audio_8.wav\n audio_9.wav\n testing >\n up >\n audio_12.wav\n audio_13.wav\n down >\n audio_14.wav\n audio_15.wav\n _background_noise_ >\n audio_18.wav\n\nTo pull this all together, you'd run:\n\nbazel run tensorflow/examples/speech_commands:train --\n--data_dir /data --wanted_words up,down --split_data 0\n\n\"\"\"\nimport json\nimport os\nimport sys\nfrom absl import logging\nimport tensorflow.compat.v1 as tf\nfrom kws_streaming.layers import modes\nimport kws_streaming.models.att_mh_rnn as att_mh_rnn\nimport kws_streaming.models.att_rnn as att_rnn\nimport kws_streaming.models.cnn as cnn\nimport kws_streaming.models.crnn as crnn\nimport kws_streaming.models.dnn as dnn\nimport kws_streaming.models.dnn_raw as dnn_raw\nimport kws_streaming.models.ds_cnn as ds_cnn\nimport kws_streaming.models.ds_tc_resnet as ds_tc_resnet\nimport kws_streaming.models.gru as gru\nimport kws_streaming.models.inception as inception\nimport kws_streaming.models.inception_resnet as inception_resnet\nimport kws_streaming.models.lstm as lstm\nimport kws_streaming.models.mobilenet as mobilenet\nimport kws_streaming.models.mobilenet_v2 as mobilenet_v2\nimport kws_streaming.models.svdf as svdf\nimport kws_streaming.models.svdf_resnet as svdf_resnet\nimport kws_streaming.models.tc_resnet as tc_resnet\nfrom kws_streaming.models.utils import parse\nimport kws_streaming.models.xception as xception\nfrom kws_streaming.train import base_parser\nfrom kws_streaming.train import model_flags\nfrom kws_streaming.train import train\nimport kws_streaming.train.test as test\n\nFLAGS = None\n\n\ndef main(_):\n # Update flags\n flags = model_flags.update_flags(FLAGS)\n\n if flags.train:\n # Create model folders where logs and model will be stored\n os.makedirs(flags.train_dir)\n os.mkdir(flags.summaries_dir)\n\n # Model training\n train.train(flags)\n else:\n if not os.path.isdir(flags.train_dir):\n raise ValueError('model is not trained set \"--train 1\" and retrain it')\n\n # write all flags settings into json\n with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:\n json.dump(flags.__dict__, f)\n\n # convert to SavedModel\n test.convert_model_saved(flags, 'non_stream',\n modes.Modes.NON_STREAM_INFERENCE)\n try:\n test.convert_model_saved(flags, 'stream_state_internal',\n modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TF streaming: %s', e)\n\n logging.info('run TF non streaming model accuracy evaluation')\n # with TF\n folder_name = 'tf'\n test.tf_non_stream_model_accuracy(flags, folder_name)\n\n # with TF.\n # We can apply non stream model on stream data, by running inference\n # every 200ms (for example), so that total latency will be similar with\n # streaming model which is executed every 20ms.\n # To measure the impact of sampling on model accuracy,\n # we introduce time_shift_ms during accuracy evaluation.\n # Convert 
milliseconds to samples:\n time_shift_samples = int(\n (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)\n test.tf_non_stream_model_accuracy(\n flags,\n folder_name,\n time_shift_samples,\n accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')\n\n name2opt = {\n '': None,\n 'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],\n }\n\n for opt_name, optimizations in name2opt.items():\n\n if (opt_name and flags.feature_type == 'mfcc_tf' and\n flags.preprocess == 'raw'):\n logging.info('feature type mfcc_tf needs quantization aware training '\n 'for quantization - it is not implemented')\n continue\n\n folder_name = opt_name + 'tflite_non_stream'\n file_name = 'non_stream.tflite'\n mode = modes.Modes.NON_STREAM_INFERENCE\n test.convert_model_tflite(flags, folder_name, mode, file_name,\n optimizations=optimizations)\n test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)\n\n # these models are using bi-rnn, so they are non streamable by default\n # also models using striding or pooling are not supported for streaming now\n non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}\n\n model_is_streamable = True\n if flags.model_name in non_streamable_models:\n model_is_streamable = False\n # below models can use striding in time dimension,\n # but this is currently unsupported\n elif flags.model_name == 'cnn':\n for strides in parse(flags.cnn_strides):\n if strides[0] > 1:\n model_is_streamable = False\n break\n elif flags.model_name == 'ds_cnn':\n if parse(flags.cnn1_strides)[0] > 1:\n model_is_streamable = False\n for strides in parse(flags.dw2_strides):\n if strides[0] > 1:\n model_is_streamable = False\n break\n\n # if model can be streamed, then run conversion/evaluation in streaming mode\n if model_is_streamable:\n # ---------------- TF streaming model accuracy evaluation ----------------\n # Streaming model with external state evaluation using TF with state reset\n if not opt_name:\n logging.info('run TF evalution only without optimization/quantization')\n try:\n folder_name = 'tf'\n test.tf_stream_state_external_model_accuracy(\n flags,\n folder_name,\n accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',\n reset_state=True) # with state reset between test sequences\n\n # Streaming (with external state) evaluation using TF no state reset\n test.tf_stream_state_external_model_accuracy(\n flags,\n folder_name,\n accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',\n reset_state=False) # without state reset\n\n # Streaming (with internal state) evaluation using TF no state reset\n test.tf_stream_state_internal_model_accuracy(flags, folder_name)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TF streaming: %s', e)\n\n logging.info('run TFlite streaming model accuracy evaluation')\n try:\n # convert model to TFlite\n folder_name = opt_name + 'tflite_stream_state_external'\n file_name = 'stream_state_external.tflite'\n mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE\n test.convert_model_tflite(flags, folder_name, mode, file_name,\n optimizations=optimizations)\n\n # Streaming model accuracy evaluation with TFLite with state reset\n test.tflite_stream_state_external_model_accuracy(\n flags,\n folder_name,\n file_name,\n accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',\n reset_state=True)\n\n # Streaming model accuracy evaluation with TFLite without state reset\n test.tflite_stream_state_external_model_accuracy(\n flags,\n folder_name,\n 
file_name,\n accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',\n reset_state=False)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TFLite streaming: %s', e)\n\nif __name__ == '__main__':\n # parser for training/testing data and speach feature flags\n parser = base_parser.base_parser()\n\n # sub parser for model settings\n subparsers = parser.add_subparsers(dest='model_name', help='NN model name')\n\n # DNN model settings\n parser_dnn = subparsers.add_parser('dnn')\n dnn.model_parameters(parser_dnn)\n\n # DNN raw model settings\n parser_dnn_raw = subparsers.add_parser('dnn_raw')\n dnn_raw.model_parameters(parser_dnn_raw)\n\n # LSTM model settings\n parser_lstm = subparsers.add_parser('lstm')\n lstm.model_parameters(parser_lstm)\n\n # GRU model settings\n parser_gru = subparsers.add_parser('gru')\n gru.model_parameters(parser_gru)\n\n # SVDF model settings\n parser_svdf = subparsers.add_parser('svdf')\n svdf.model_parameters(parser_svdf)\n\n # CNN model settings\n parser_cnn = subparsers.add_parser('cnn')\n cnn.model_parameters(parser_cnn)\n\n # CRNN model settings\n parser_crnn = subparsers.add_parser('crnn')\n crnn.model_parameters(parser_crnn)\n\n # ATT MH RNN model settings\n parser_att_mh_rnn = subparsers.add_parser('att_mh_rnn')\n att_mh_rnn.model_parameters(parser_att_mh_rnn)\n\n # ATT RNN model settings\n parser_att_rnn = subparsers.add_parser('att_rnn')\n att_rnn.model_parameters(parser_att_rnn)\n\n # DS_CNN model settings\n parser_ds_cnn = subparsers.add_parser('ds_cnn')\n ds_cnn.model_parameters(parser_ds_cnn)\n\n # TC Resnet model settings\n parser_tc_resnet = subparsers.add_parser('tc_resnet')\n tc_resnet.model_parameters(parser_tc_resnet)\n\n # Mobilenet model settings\n parser_mobilenet = subparsers.add_parser('mobilenet')\n mobilenet.model_parameters(parser_mobilenet)\n\n # Mobilenet V2 model settings\n parser_mobilenet_v2 = subparsers.add_parser('mobilenet_v2')\n mobilenet_v2.model_parameters(parser_mobilenet_v2)\n\n # xception model settings\n parser_xception = subparsers.add_parser('xception')\n xception.model_parameters(parser_xception)\n\n # inception model settings\n parser_inception = subparsers.add_parser('inception')\n inception.model_parameters(parser_inception)\n\n # inception resnet model settings\n parser_inception_resnet = subparsers.add_parser('inception_resnet')\n inception_resnet.model_parameters(parser_inception_resnet)\n\n # svdf resnet model settings\n parser_svdf_resnet = subparsers.add_parser('svdf_resnet')\n svdf_resnet.model_parameters(parser_svdf_resnet)\n\n # ds_tc_resnet model settings\n parser_ds_tc_resnet = subparsers.add_parser('ds_tc_resnet')\n ds_tc_resnet.model_parameters(parser_ds_tc_resnet)\n\n FLAGS, unparsed = parser.parse_known_args()\n if unparsed and tuple(unparsed) != ('--alsologtostderr',):\n raise ValueError('Unknown argument: {}'.format(unparsed))\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Generation templates for Python programs using numbers and control flow.\n\nFor this task, we assume that every variable holds a number.\n\nRandom sampling uses numpy for consistency with top_down_refinement, so that we\ncan control the whole random sampling using a single seed.\n\"\"\"\n\nimport enum\nfrom typing import FrozenSet, Optional\n\nimport dataclasses\nimport gast\nimport numpy as np\n\nfrom gfsa.datasets.random_python import top_down_refinement\n\n# Convenient type aliases\nHole = top_down_refinement.Hole\nASTWithHoles = top_down_refinement.ThingWithHoles\nASTNodeTemplate = top_down_refinement.HoleFillerTemplate\n\n\nclass ASTHoleType(enum.Enum):\n \"\"\"A type of hole for this task.\"\"\"\n NUMBER = \"NUMBER\" # An integer or float expression\n BOOL = \"BOOL\" # A boolean expression\n STMT = \"STMT\" # A single statement or control flow block\n STMTS = \"STMTS\" # Possibly empty list of statements (no jumps)\n STMTS_NONEMPTY = \"STMTS_NONEMPTY\" # Nonempty list of statements (no jumps)\n BLOCK = \"BLOCK\" # Nonempty list of statements, which might end in a jump\n\n\[email protected](frozen=True)\nclass ASTHoleMetadata:\n \"\"\"Context for what is valid inside this hole.\"\"\"\n names_in_scope: FrozenSet[str]\n inside_function: bool\n inside_loop: bool\n op_depth: int\n\n\n_NUMBER_COST = 1\n_BOOL_COST = 1\n_STMT_COST = 1\n_BLOCK_COST = 1\n_STMTS_COST = 0\n_STMTS_NONEMPTY_COST = 1\n\nALL_COSTS = {\n ASTHoleType.NUMBER: _NUMBER_COST,\n ASTHoleType.BOOL: _BOOL_COST,\n ASTHoleType.STMT: _STMT_COST,\n ASTHoleType.BLOCK: _BLOCK_COST,\n ASTHoleType.STMTS: _STMTS_COST,\n ASTHoleType.STMTS_NONEMPTY: _STMTS_NONEMPTY_COST,\n}\n\n\ndef make_name(identifier):\n \"\"\"Returns a gast.Name for the given string identifier.\n\n Convenience function to avoid having to specify all the fields we don't\n care about. 
NotImplemented is used as a sentinel value, since gast usually\n populates that according to context, but we don't bother.\n\n Args:\n identifier: Identifier to use.\n \"\"\"\n return gast.Name(\n id=identifier, ctx=NotImplemented, annotation=None, type_comment=None)\n\n\n##########################\n# Numbers\n##########################\n\n\nclass NameReferenceTemplate(ASTNodeTemplate):\n \"\"\"Reference an existing name.\"\"\"\n fills_type = ASTHoleType.NUMBER\n required_cost = 1\n\n def can_fill(self, hole):\n return bool(hole.metadata.names_in_scope)\n\n def fill(self, hole, rng):\n name = rng.choice(list(hole.metadata.names_in_scope))\n return ASTWithHoles(1, [], lambda: make_name(name))\n\n\nclass ConstIntTemplate(ASTNodeTemplate):\n \"\"\"Use a literal integer between 0 and 100.\"\"\"\n fills_type = ASTHoleType.NUMBER\n required_cost = 1\n\n def fill(self, hole, rng):\n i = rng.randint(0, 100)\n return ASTWithHoles(1, [], lambda: gast.Constant(value=i, kind=None))\n\n\nclass BinOpTemplate(ASTNodeTemplate):\n \"\"\"Mathematical operation on two numbers.\"\"\"\n fills_type = ASTHoleType.NUMBER\n required_cost = 2 + 2 * _NUMBER_COST\n\n def __init__(self, max_depth=None):\n self.max_depth = max_depth\n\n def can_fill(self, hole):\n return self.max_depth is None or hole.metadata.op_depth < self.max_depth\n\n def fill(self, hole, rng):\n op = rng.choice([gast.Add, gast.Sub, gast.Mult, gast.Div])()\n\n def build(left, right):\n return gast.BinOp(left=left, op=op, right=right)\n\n sub_hole = Hole(\n ASTHoleType.NUMBER,\n dataclasses.replace(hole.metadata, op_depth=hole.metadata.op_depth + 1))\n return ASTWithHoles(2, [sub_hole, sub_hole], build)\n\n\nclass FunctionCallTemplate(ASTNodeTemplate):\n \"\"\"Applies a function to some number of arguments.\"\"\"\n fills_type = ASTHoleType.NUMBER\n\n def __init__(self, num_args, names, max_depth=None):\n self.num_args = num_args\n self.max_depth = max_depth\n self.names = names\n\n @property\n def required_cost(self):\n return 2 + self.num_args * _NUMBER_COST\n\n def can_fill(self, hole):\n return self.max_depth is None or hole.metadata.op_depth < self.max_depth\n\n def fill(self, hole, rng):\n name = rng.choice(self.names)\n\n def build(*args):\n return gast.Call(func=make_name(name), args=list(args), keywords=[])\n\n sub_hole = Hole(\n ASTHoleType.NUMBER,\n dataclasses.replace(hole.metadata, op_depth=hole.metadata.op_depth + 1))\n return ASTWithHoles(2, [sub_hole] * self.num_args, build)\n\n\n##########################\n# Booleans\n##########################\n\n\nclass CompareTemplate(ASTNodeTemplate):\n \"\"\"Compare two numbers.\"\"\"\n fills_type = ASTHoleType.BOOL\n required_cost = 2 + 2 * _NUMBER_COST\n\n def fill(self, hole, rng):\n op = rng.choice([gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt,\n gast.GtE])()\n\n def build(left, right):\n return gast.Compare(left=left, ops=[op], comparators=[right])\n\n number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n return ASTWithHoles(2, [number_hole, number_hole], build)\n\n\nclass BoolOpTemplate(ASTNodeTemplate):\n \"\"\"And/or between two booleans.\"\"\"\n fills_type = ASTHoleType.BOOL\n required_cost = 2 + 2 * _BOOL_COST\n\n def __init__(self, max_depth=None):\n self.max_depth = max_depth\n\n def can_fill(self, hole):\n return self.max_depth is None or hole.metadata.op_depth < self.max_depth\n\n def fill(self, hole, rng):\n op = rng.choice([gast.And, gast.Or])()\n\n def build(left, right):\n return gast.BoolOp(op=op, values=[left, right])\n\n bool_hole = Hole(\n ASTHoleType.BOOL,\n 
dataclasses.replace(hole.metadata, op_depth=hole.metadata.op_depth + 1))\n return ASTWithHoles(2, [bool_hole, bool_hole], build)\n\n\nclass ConstBoolTemplate(ASTNodeTemplate):\n \"\"\"Literal true or false.\"\"\"\n fills_type = ASTHoleType.BOOL\n required_cost = 1\n\n def fill(self, hole, rng):\n value = rng.choice([True, False])\n return ASTWithHoles(1, [], lambda: gast.Constant(value=value, kind=None))\n\n\n##########################\n# Atomic statements\n##########################\n\n\nclass AssignExistingTemplate(ASTNodeTemplate):\n \"\"\"Assign to an existing variable.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 2 + _NUMBER_COST\n\n def can_fill(self, hole):\n return bool(hole.metadata.names_in_scope)\n\n def fill(self, hole, rng):\n name = rng.choice(list(hole.metadata.names_in_scope))\n\n def build(v):\n return gast.Assign(targets=[make_name(name)], value=v)\n\n number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n return ASTWithHoles(2, [number_hole], build)\n\n\nclass PassTemplate(ASTNodeTemplate):\n \"\"\"No-op.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 1\n\n def fill(self, hole, rng):\n return ASTWithHoles(1, [], gast.Pass)\n\n\nclass PrintNumberTemplate(ASTNodeTemplate):\n \"\"\"Print out a number.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 3 + _NUMBER_COST\n\n def fill(self, hole, rng):\n\n def build(v):\n return gast.Expr(\n value=gast.Call(func=make_name(\"print\"), args=[v], keywords=[]))\n\n number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n return ASTWithHoles(3, [number_hole], build)\n\n\n##########################\n# Composite statements\n##########################\n\n\nclass IfBlockTemplate(ASTNodeTemplate):\n \"\"\"Construct an if block.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 1 + _BOOL_COST + _BLOCK_COST\n\n def fill(self, hole, rng):\n\n def build(test, body):\n return gast.If(test=test, body=body, orelse=[])\n\n test_hole = Hole(ASTHoleType.BOOL, hole.metadata)\n block_hole = Hole(ASTHoleType.BLOCK, hole.metadata)\n return ASTWithHoles(1, [test_hole, block_hole], build)\n\n\nclass IfElseBlockTemplate(ASTNodeTemplate):\n \"\"\"Construct an if/else block.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 1 + _BOOL_COST + 2 * _BLOCK_COST\n\n def fill(self, hole, rng):\n\n def build(test, body, orelse):\n return gast.If(test=test, body=body, orelse=orelse)\n\n test_hole = Hole(ASTHoleType.BOOL, hole.metadata)\n block_hole = Hole(ASTHoleType.BLOCK, hole.metadata)\n return ASTWithHoles(1, [test_hole, block_hole, block_hole], build)\n\n\nclass ForRangeBlockTemplate(ASTNodeTemplate):\n \"\"\"Construct a for loop with a fresh variable over a range.\"\"\"\n fills_type = ASTHoleType.STMT\n required_cost = 6 + _NUMBER_COST + _BLOCK_COST\n\n def fill(self, hole, rng):\n fresh_name = f\"v{len(hole.metadata.names_in_scope)}\"\n assert fresh_name not in hole.metadata.names_in_scope\n\n def build(maxval, body):\n return gast.For(\n target=make_name(fresh_name),\n iter=gast.Call(\n func=make_name(\"range\"),\n args=[\n gast.Call(func=make_name(\"int\"), args=[maxval], keywords=[])\n ],\n keywords=[]),\n body=body,\n orelse=[],\n type_comment=None)\n\n number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n body_hole = Hole(\n ASTHoleType.BLOCK,\n dataclasses.replace(\n hole.metadata,\n inside_loop=True,\n names_in_scope=hole.metadata.names_in_scope.union((fresh_name,))))\n return ASTWithHoles(6, [number_hole, body_hole], build)\n\n\nclass WhileBlockTemplate(ASTNodeTemplate):\n \"\"\"Construct a while loop.\"\"\"\n 
fills_type = ASTHoleType.STMT\n  required_cost = 1 + _BOOL_COST + _BLOCK_COST\n\n  def fill(self, hole, rng):\n\n    def build(test, body):\n      return gast.While(test=test, body=body, orelse=[])\n\n    test_hole = Hole(ASTHoleType.BOOL, hole.metadata)\n    body_hole = Hole(ASTHoleType.BLOCK,\n                     dataclasses.replace(hole.metadata, inside_loop=True))\n    return ASTWithHoles(1, [test_hole, body_hole], build)\n\n\n##########################\n# Blocks\n##########################\n\n# A block represents a contiguous sequence of statements that might end with\n# a return, break, or continue.\n\n\nclass ReturnNothingTemplate(ASTNodeTemplate):\n  \"\"\"Block that ends with a bare return.\"\"\"\n  fills_type = ASTHoleType.BLOCK\n  required_cost = 1 + _STMTS_COST\n\n  def can_fill(self, hole):\n    return hole.metadata.inside_function\n\n  def fill(self, hole, rng):\n    stmts_hole = Hole(ASTHoleType.STMTS, hole.metadata)\n    return ASTWithHoles(1, [stmts_hole],\n                        lambda stmts: stmts + [gast.Return(value=None)])\n\n\nclass ReturnNumberTemplate(ASTNodeTemplate):\n  \"\"\"Block that ends by returning a number.\"\"\"\n  fills_type = ASTHoleType.BLOCK\n  required_cost = 1 + _NUMBER_COST + _STMTS_COST\n\n  def can_fill(self, hole):\n    return hole.metadata.inside_function\n\n  def fill(self, hole, rng):\n    stmts_hole = Hole(ASTHoleType.STMTS, hole.metadata)\n    number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n    return ASTWithHoles(1, [stmts_hole, number_hole],\n                        lambda stmts, v: stmts + [gast.Return(value=v)])\n\n\nclass BreakTemplate(ASTNodeTemplate):\n  \"\"\"Block that ends by breaking out of the containing loop.\"\"\"\n  fills_type = ASTHoleType.BLOCK\n  required_cost = 1 + _STMTS_COST\n\n  def can_fill(self, hole):\n    return hole.metadata.inside_loop\n\n  def fill(self, hole, rng):\n    stmts_hole = Hole(ASTHoleType.STMTS, hole.metadata)\n    return ASTWithHoles(1, [stmts_hole], lambda stmts: stmts + [gast.Break()])\n\n\nclass ContinueTemplate(ASTNodeTemplate):\n  \"\"\"Block that ends by continuing to the next iteration of the loop.\"\"\"\n  fills_type = ASTHoleType.BLOCK\n  required_cost = 1 + _STMTS_COST\n\n  def can_fill(self, hole):\n    return hole.metadata.inside_loop\n\n  def fill(self, hole, rng):\n    stmts_hole = Hole(ASTHoleType.STMTS, hole.metadata)\n    return ASTWithHoles(1, [stmts_hole],\n                        lambda stmts: stmts + [gast.Continue()])\n\n\nclass FallthroughTemplate(ASTNodeTemplate):\n  \"\"\"Block that ends by falling through to the outer block.\n\n  Note that every block has to contain at least one statement.\n  \"\"\"\n  fills_type = ASTHoleType.BLOCK\n  required_cost = _STMTS_NONEMPTY_COST\n\n  def fill(self, hole, rng):\n    stmts_hole = Hole(ASTHoleType.STMTS_NONEMPTY, hole.metadata)\n    return ASTWithHoles(0, [stmts_hole], lambda stmts: stmts)\n\n\n##########################\n# Nonempty statements\n##########################\n\n# We handle fresh variables as a special case here, because they then are\n# available to the following statements in the block.\n\n\nclass NewAssignTemplate(ASTNodeTemplate):\n  \"\"\"Assign to a new variable, and make it possible to use it later.\"\"\"\n  fills_type = ASTHoleType.STMTS_NONEMPTY\n  required_cost = 2 + _NUMBER_COST + _STMTS_COST\n\n  def fill(self, hole, rng):\n    fresh_name = f\"v{len(hole.metadata.names_in_scope)}\"\n    assert fresh_name not in hole.metadata.names_in_scope\n\n    def build(v, rest):\n      return [gast.Assign(targets=[make_name(fresh_name)], value=v)] + rest\n\n    number_hole = Hole(ASTHoleType.NUMBER, hole.metadata)\n    rest_hole = Hole(\n        ASTHoleType.STMTS,\n        dataclasses.replace(\n            hole.metadata,\n            
names_in_scope=hole.metadata.names_in_scope.union((fresh_name,))))\n return ASTWithHoles(2, [number_hole, rest_hole], build)\n\n\nclass NormalStatementTemplate(ASTNodeTemplate):\n \"\"\"Add a normal statement.\"\"\"\n fills_type = ASTHoleType.STMTS_NONEMPTY\n required_cost = _STMT_COST + _STMTS_COST\n\n def fill(self, hole, rng):\n\n def build(stmt, rest):\n return [stmt] + rest\n\n stmt_hole = Hole(ASTHoleType.STMT, hole.metadata)\n rest_hole = Hole(ASTHoleType.STMTS, hole.metadata)\n return ASTWithHoles(0, [stmt_hole, rest_hole], build)\n\n\n##########################\n# Possibly empty lists\n##########################\n\n\nclass SomeStatementsTemplate(ASTNodeTemplate):\n \"\"\"Insert some statements.\"\"\"\n fills_type = ASTHoleType.STMTS\n required_cost = _STMTS_NONEMPTY_COST\n\n def fill(self, hole, rng):\n stmts_hole = Hole(ASTHoleType.STMTS_NONEMPTY, hole.metadata)\n return ASTWithHoles(0, [stmts_hole], lambda stmts: stmts)\n\n\nclass NoMoreStatementsTemplate(ASTNodeTemplate):\n \"\"\"Don't insert any statements (as a last resort).\"\"\"\n fills_type = ASTHoleType.STMTS\n required_cost = 0\n\n def fill(self, hole, rng):\n return ASTWithHoles(0, [], lambda: [])\n\n\n##########################\n# Sampling distributions\n##########################\n\n# Note regarding hole selection weights:\n# We assign weights so that we avoid being forced into suboptimal choices later.\n# For instance, it's always fine to stop generating statements, so we give\n# adding more statements a low weight, and choose it less often. But it's\n# annoying to be forced to insert \"pass\" everywhere due to lack of space, so we\n# give partially-expanded single statements much more weight.\n\n# This distribution tends to create complex, nested control flow.\nCFG_DISTRIBUTION = top_down_refinement.RefinementDistribution(\n hole_selection_weights={\n ASTHoleType.NUMBER: 3,\n ASTHoleType.BOOL: 10,\n ASTHoleType.STMT: 100,\n ASTHoleType.BLOCK: 10,\n ASTHoleType.STMTS: 1,\n ASTHoleType.STMTS_NONEMPTY: 100,\n },\n weighted_templates=[\n # Numbers\n top_down_refinement.WeightedTemplate(\n NameReferenceTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ConstIntTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(BinOpTemplate(), weight=10),\n # Bools\n top_down_refinement.WeightedTemplate(CompareTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(BoolOpTemplate(), weight=3),\n top_down_refinement.WeightedTemplate(ConstBoolTemplate(), weight=2),\n # Statements\n top_down_refinement.WeightedTemplate(\n AssignExistingTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(PassTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(PrintNumberTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(IfBlockTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(IfElseBlockTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(ForRangeBlockTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(WhileBlockTemplate(), weight=3),\n # Blocks\n top_down_refinement.WeightedTemplate(ReturnNothingTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(ReturnNumberTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(BreakTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ContinueTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(FallthroughTemplate(), weight=30),\n # Nonempty statement sequences\n top_down_refinement.WeightedTemplate(NewAssignTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(\n 
NormalStatementTemplate(), weight=15),\n # Possibly empty statement sequences\n top_down_refinement.WeightedTemplate(\n SomeStatementsTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(\n NoMoreStatementsTemplate(), weight=1, precedence=0),\n ])\n\n# This distribution tends to create complex data flow.\nDATAFLOW_DISTRIBUTION = top_down_refinement.RefinementDistribution(\n hole_selection_weights={\n ASTHoleType.NUMBER: 3,\n ASTHoleType.BOOL: 10,\n ASTHoleType.STMT: 100,\n ASTHoleType.BLOCK: 10,\n ASTHoleType.STMTS: 1,\n ASTHoleType.STMTS_NONEMPTY: 100,\n },\n weighted_templates=[\n # Numbers\n top_down_refinement.WeightedTemplate(\n NameReferenceTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ConstIntTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(\n BinOpTemplate(max_depth=3), weight=7),\n # Bools\n top_down_refinement.WeightedTemplate(CompareTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(\n BoolOpTemplate(max_depth=3), weight=3),\n top_down_refinement.WeightedTemplate(ConstBoolTemplate(), weight=2),\n # Statements\n top_down_refinement.WeightedTemplate(\n AssignExistingTemplate(), weight=20),\n top_down_refinement.WeightedTemplate(PassTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(PrintNumberTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(IfBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(IfElseBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(ForRangeBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(WhileBlockTemplate(), weight=2),\n # Blocks\n top_down_refinement.WeightedTemplate(ReturnNothingTemplate(), weight=3),\n top_down_refinement.WeightedTemplate(ReturnNumberTemplate(), weight=3),\n top_down_refinement.WeightedTemplate(BreakTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ContinueTemplate(), weight=6),\n top_down_refinement.WeightedTemplate(FallthroughTemplate(), weight=40),\n # Nonempty statement sequences\n top_down_refinement.WeightedTemplate(NewAssignTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(\n NormalStatementTemplate(), weight=15),\n # Possibly empty statement sequences\n top_down_refinement.WeightedTemplate(\n SomeStatementsTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(\n NoMoreStatementsTemplate(), weight=1, precedence=0),\n ])\n\n\n# Meta-distribution for perturbed examples\ndef make_dataflow_fns_distribution(\n rng,\n weights_temperature = 0,\n max_depth_expected = 3,\n max_depth_maximum = 3):\n \"\"\"Randomly sample a refinement distribution.\n\n Args:\n rng: Random number generator to use.\n weights_temperature: Dirichlet temperature to use when adjusting weights.\n max_depth_expected: Expected value of maximum expression nesting depth.\n max_depth_maximum: Maximum value of maximum expression nesting depth.\n\n Returns:\n A refinement distribution for examples.\n \"\"\"\n if rng:\n max_depth = rng.binomial(max_depth_maximum,\n max_depth_expected / max_depth_maximum)\n else:\n assert weights_temperature == 0\n assert max_depth_expected == max_depth_maximum\n max_depth = max_depth_maximum\n\n groups = [\n [ # Numbers\n top_down_refinement.WeightedTemplate(\n NameReferenceTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ConstIntTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(\n BinOpTemplate(max_depth=max_depth), weight=5),\n top_down_refinement.WeightedTemplate(\n FunctionCallTemplate(\n num_args=1, names=[\"foo_1\", \"bar_1\"], max_depth=max_depth),\n 
weight=3),\n top_down_refinement.WeightedTemplate(\n FunctionCallTemplate(\n num_args=2, names=[\"foo_2\", \"bar_2\"], max_depth=max_depth),\n weight=2),\n top_down_refinement.WeightedTemplate(\n FunctionCallTemplate(\n num_args=4, names=[\"foo_4\", \"bar_4\"], max_depth=max_depth),\n weight=1),\n ],\n [ # Bools\n top_down_refinement.WeightedTemplate(CompareTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(\n BoolOpTemplate(max_depth=max_depth), weight=3),\n top_down_refinement.WeightedTemplate(ConstBoolTemplate(), weight=2),\n ],\n [ # Statements\n top_down_refinement.WeightedTemplate(\n AssignExistingTemplate(), weight=20),\n top_down_refinement.WeightedTemplate(PassTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(PrintNumberTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(IfBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(IfElseBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(\n ForRangeBlockTemplate(), weight=2),\n top_down_refinement.WeightedTemplate(WhileBlockTemplate(), weight=2),\n ],\n [ # Blocks\n top_down_refinement.WeightedTemplate(\n ReturnNothingTemplate(), weight=3),\n top_down_refinement.WeightedTemplate(\n ReturnNumberTemplate(), weight=3),\n top_down_refinement.WeightedTemplate(BreakTemplate(), weight=10),\n top_down_refinement.WeightedTemplate(ContinueTemplate(), weight=6),\n top_down_refinement.WeightedTemplate(\n FallthroughTemplate(), weight=40),\n ],\n [ # Nonempty statement sequences\n top_down_refinement.WeightedTemplate(NewAssignTemplate(), weight=5),\n top_down_refinement.WeightedTemplate(\n NormalStatementTemplate(), weight=15),\n ]\n ]\n weighted_templates = [\n # Possibly empty statement sequences\n top_down_refinement.WeightedTemplate(SomeStatementsTemplate(), weight=1),\n top_down_refinement.WeightedTemplate(\n NoMoreStatementsTemplate(), weight=1, precedence=0),\n ]\n for group in groups:\n weights = np.array([template.weight for template in group])\n weights = weights / np.sum(weights)\n if rng and weights_temperature > 0:\n weights = np.random.dirichlet(weights / weights_temperature)\n weighted_templates.extend(\n dataclasses.replace(template, weight=weight)\n for template, weight in zip(group, weights))\n\n return top_down_refinement.RefinementDistribution(\n hole_selection_weights={\n ASTHoleType.NUMBER: 3,\n ASTHoleType.BOOL: 10,\n ASTHoleType.STMT: 100,\n ASTHoleType.BLOCK: 10,\n ASTHoleType.STMTS: 1,\n ASTHoleType.STMTS_NONEMPTY: 100,\n },\n weighted_templates=weighted_templates,\n )\n\n\n# Dataflow distribution with function calls.\nDATAFLOW_FNS_DISTRIBUTION = make_dataflow_fns_distribution(rng=None)\n"
] | [
[
"tensorflow.compat.v1.app.run"
],
[
"numpy.array",
"numpy.random.dirichlet",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RyanXLi/OneshotDet | [
"77f629978d9d1739787b08de8cccea81341507bf"
] | [
"maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nfrom maskrcnn_benchmark.modeling import registry\nfrom torch import nn\n\n\[email protected]_BOX_PREDICTOR.register(\"FastRCNNPredictor\")\nclass FastRCNNPredictor(nn.Module):\n def __init__(self, config, in_channels):\n super(FastRCNNPredictor, self).__init__()\n assert in_channels is not None\n\n num_inputs = in_channels\n\n num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n if config.FEW_SHOT.SECOND_STAGE_METHOD == 'rn':\n num_classes = 2\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.cls_score = nn.Linear(num_inputs, num_classes)\n num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes\n self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)\n\n nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)\n nn.init.constant_(self.cls_score.bias, 0)\n\n nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)\n nn.init.constant_(self.bbox_pred.bias, 0)\n\n def forward(self, x):\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n cls_logit = self.cls_score(x)\n bbox_pred = self.bbox_pred(x)\n return cls_logit, bbox_pred\n\n\[email protected]_BOX_PREDICTOR.register(\"FPNPredictor\")\nclass FPNPredictor(nn.Module):\n def __init__(self, cfg, in_channels):\n super(FPNPredictor, self).__init__()\n num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n\n num_bbox_reg_classes = 2\n if cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':\n num_classes = 1 \n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS != 'focal_loss':\n num_classes= 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \\\n not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 1\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \\\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \\\n not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \\\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2 # originally 3, but 2 in new version neg support\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS =='cxe_loss' and cfg.FEW_SHOT.SOFT_LABELING:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['mse_loss','l1_loss']:\n num_classes = 1\n else:\n raise Exception('setting not compatible {} {} {}'.format(\n cfg.FEW_SHOT.SECOND_STAGE_METHOD,\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS,\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON\n ))\n\n if cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['focal_loss', 'mse_loss', 'l1_loss']:\n num_bbox_reg_classes = num_classes+1\n else:\n num_bbox_reg_classes = num_classes\n\n representation_size = in_channels\n\n self.cls_score = nn.Linear(representation_size, num_classes)\n # num_bbox_reg_classes = 2 #if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes\n self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n def forward(self, x):\n if x.ndimension() == 4:\n assert 
list(x.shape[2:]) == [1, 1]\n x = x.view(x.size(0), -1)\n scores = self.cls_score(x)\n bbox_deltas = self.bbox_pred(x)\n\n return scores, bbox_deltas\n\n\ndef make_roi_box_predictor(cfg, in_channels):\n func = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]\n return func(cfg, in_channels)\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jerryzhucs21/spinningup | [
"2992e6a8163d78c3f82a3d92c5235fda0527c398",
"2992e6a8163d78c3f82a3d92c5235fda0527c398",
"2992e6a8163d78c3f82a3d92c5235fda0527c398"
] | [
"spinup/rewards/cvar_utils.py",
"spinup/rewards/broil_rtg_pg_v2.py",
"spinup/algos/pytorch/vpg/vpg.py"
] | [
"import numpy as np\n\ndef relu(x):\n if x > 0:\n return x\n else:\n return 0.0\n\n\ndef cvar_fn_val(sigma, exp_ret_rs, prob_rs, alpha):\n fn_val_relu_part = 0.0\n for i,ret in enumerate(exp_ret_rs):\n fn_val_relu_part += prob_rs[i] * relu(sigma - ret)\n \n fn_val = sigma - 1.0 / (1.0 - alpha) * fn_val_relu_part\n return fn_val\n\ndef cvar_line_search_pg(exp_ret_rs, prob_rs, alpha, num_discretize=1000):\n '''use a line search to approximate sigma'''\n assert(len(exp_ret_rs) == len(prob_rs))\n assert(alpha >= 0 and alpha <= 1)\n assert(np.abs(np.sum(prob_rs) - 1.0) < 0.000001)\n #run simple discrete line search to approximate sigma for now\n\n max_val = -np.inf\n max_sigma = None \n for x in np.linspace(min(exp_ret_rs), max(exp_ret_rs), num_discretize):\n cvar_val = cvar_fn_val(x, exp_ret_rs, prob_rs, alpha)\n #print(x, cvar_val)\n if cvar_val > max_val:\n max_val = cvar_val\n max_sigma = x\n #print(\"updating\")\n \n return max_sigma, max_val\n\n\n\n\n\ndef cvar_enumerate_pg(exp_ret_rs, prob_rs, alpha):\n '''cvar is piecewise linear/concave so the max must be at one of the endpoints!\n we can just iterate over them until we find the smallest one'''\n\n sorted_exp_ret_rs, sorted_prob_rs = zip(*sorted(zip(exp_ret_rs, prob_rs)))\n #print(\"sorted rets\", sorted_exp_ret_rs)\n #print(\"sorted probs\", sorted_prob_rs)\n cum_prob = 0.0\n \n \n max_val = -np.inf\n max_sigma = None \n for ret in sorted_exp_ret_rs:\n cvar_val = cvar_fn_val(ret, exp_ret_rs, prob_rs, alpha)\n #print(x, cvar_val)\n if cvar_val >= max_val:\n max_val = cvar_val\n max_sigma = ret\n #print(\"updating\")\n elif cvar_val < max_val:\n #this function is concave so once it starts decreasing we can stop since we are only interested in maximum\n break\n \n return max_sigma, max_val\n\n\n\n# if __name__ == \"__main__\":\n# #run test to make sure both give same answers.\n# #Note cvar_enumerate_pg is orders of magnitude faster and gives same answer as far as I can tell\n# for i in range(100):\n# seed = np.random.randint(1000)\n# print(seed)\n# np.random.seed(seed)\n# num_rewards = 50\n# exp_rets = 200*np.random.rand(num_rewards) - 100 #[10,40, 80]\n# probs = np.random.rand(num_rewards)#[0.3, 0.3, 0.4]\n# probs /= np.sum(probs)\n# #print(np.sum(probs))\n# alpha = 0.6\n# num_discretize = 10000\n# #print(\"exp rets\", exp_rets)\n# #print(\"probs\", probs)\n# sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)\n# print(\"sigma = \", sigma)\n# print(\"cvar = \", cvar)\n\n# sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)\n# print(\"enum sigma\", sigma_enumerate)\n# print(\"sort cvar\", cvar_enumerate)\n\n# if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - cvar_enumerate) > 0.001:\n# print(\"wrong\")\n# print(abs(sigma_enumerate - sigma))\n# input()\n\n\nif __name__ == \"__main__\":\n #run test to make sure both give same answers.\n #Note cvar_enumerate_pg is orders of magnitude faster and gives same answer as far as I can tell\n num_rewards = 2\n exp_rets = [10, 90]\n probs = [0.05, 0.95]\n probs /= np.sum(probs)\n #print(np.sum(probs))\n alpha = 0.95\n num_discretize = 10000\n #print(\"exp rets\", exp_rets)\n #print(\"probs\", probs)\n sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)\n print(\"sigma = \", sigma)\n print(\"cvar = \", cvar)\n\n sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)\n print(\"enum sigma\", sigma_enumerate)\n print(\"sort cvar\", cvar_enumerate)\n\n if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - 
cvar_enumerate) > 0.001:\n print(\"wrong\")\n print(abs(sigma_enumerate - sigma))\n input()\n",
"import torch\nimport torch.nn as nn\nfrom torch.distributions.categorical import Categorical\nfrom torch.optim import Adam\nimport numpy as np\nimport gym\nfrom gym.spaces import Discrete, Box\n\nfrom spinup.examples.pytorch.broil_rtg_pg_v2.cvar_utils import cvar_enumerate_pg\nfrom spinup.examples.pytorch.broil_rtg_pg_v2.cartpole_reward_utils import CartPoleReward\n\ndef mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):\n # Build a feedforward neural network.\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\ndef reward_to_go(rews):\n n = len(rews)\n rtgs = np.zeros_like(rews)\n for i in reversed(range(n)):\n rtgs[i] = rews[i] + (rtgs[i+1] if i+1 < n else 0)\n return rtgs\n\ndef train(reward_dist, lamda, alpha=0.95, env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,\n epochs=50, batch_size=5000, render=False):\n\n # make environment, check spaces, get obs / act dims\n env = gym.make(env_name)\n assert isinstance(env.observation_space, Box), \\\n \"This example only works for envs with continuous state spaces.\"\n assert isinstance(env.action_space, Discrete), \\\n \"This example only works for envs with discrete action spaces.\"\n\n obs_dim = env.observation_space.shape[0]\n n_acts = env.action_space.n\n\n # make core of policy network\n logits_net = mlp(sizes=[obs_dim]+hidden_sizes+[n_acts])\n\n # make function to compute action distribution\n def get_policy(obs):\n logits = logits_net(obs)\n return Categorical(logits=logits)\n\n # make action selection function (outputs int actions, sampled from policy)\n def get_action(obs):\n return get_policy(obs).sample().item()\n\n # make loss function whose gradient, for the right data, is policy gradient\n def compute_loss(obs, act, weights):\n logp = get_policy(obs).log_prob(act)\n return -(logp * weights).mean()\n\n\n #### compute BROIL policy gradient loss (robust version)\n def compute_broil_weights(batch_rets, batch_rewards_to_go):\n '''batch_returns: list of numpy arrays of size num_rollouts x num_reward_fns\n batch_rewards_to_go: list of rewards to go by reward function over all rollouts,\n size is num_rollouts*ave_rollout_length x num_reward_fns\n '''\n #inputs are lists of numpy arrays\n #need to compute BROIL weights for policy gradient and convert to pytorch\n\n #first find the expected on-policy return for current policy under each reward function in the posterior\n exp_batch_rets = np.mean(batch_rets, axis=0)\n print(exp_batch_rets)\n posterior_reward_weights = reward_dist.posterior\n\n\n #calculate sigma and find the conditional value at risk given the current policy\n sigma, cvar = cvar_enumerate_pg(exp_batch_rets, posterior_reward_weights, alpha)\n print(\"sigma = {}, cvar = {}\".format(sigma, cvar))\n\n #compute BROIL policy gradient weights\n\n total_rollout_steps = len(batch_rewards_to_go)\n broil_weights = np.zeros(total_rollout_steps)\n for i,prob_r in enumerate(posterior_reward_weights):\n if sigma > exp_batch_rets[i]:\n w_r_i = lamda + (1 - lamda) / (1 - alpha)\n else:\n w_r_i = lamda\n broil_weights += prob_r * w_r_i * np.array(batch_rewards_to_go)[:,i]\n\n\n return broil_weights,cvar\n\n\n\n\n\n\n\n\n\n\n # make optimizer\n optimizer = Adam(logits_net.parameters(), lr=lr)\n\n # for training policy\n def train_one_epoch():\n # make some empty lists for logging.\n batch_obs = [] # for observations\n batch_acts = [] # for actions\n batch_rewards_to_go = [] # for 
reward-to-go weighting in policy gradient\n batch_rets = [] # for measuring episode returns\n batch_lens = [] # for measuring episode lengths\n\n # reset episode-specific variables\n obs = env.reset() # first obs comes from starting distribution\n done = False # signal from environment that episode is over\n ep_rews = [] # list for rewards accrued throughout ep\n\n # render first episode of each epoch\n finished_rendering_this_epoch = False\n\n # collect experience by acting in the environment with current policy\n while True:\n\n # rendering\n if (not finished_rendering_this_epoch) and render:\n env.render()\n #print(obs[0])\n\n # save obs\n batch_obs.append(obs.copy())\n\n # act in the environment\n act = get_action(torch.as_tensor(obs, dtype=torch.float32))\n obs, rew, done, _ = env.step(act)\n\n # save action, posterior over reward\n batch_acts.append(act)\n ## old code from normal policy gradient:\n ## ep_rews.append(rew)\n #### New code for BROIL\n rew_dist = reward_dist.get_reward_distribution(obs) #S create reward\n ep_rews.append(rew_dist)\n ####\n\n if done:\n # if episode is over, record info about episode\n ## Old code\n ## ep_ret, ep_len = sum(ep_rews), len(ep_rews)\n #### New code\n ep_ret_dist, ep_len = np.sum(ep_rews, axis=0), len(ep_rews)\n ####\n\n batch_rets.append(ep_ret_dist)\n batch_lens.append(ep_len)\n\n # the weight for each logprob(a_t|s_t) is reward-to-go from t\n #### we are now computing this for every element in the reward function posterior but we can use the same function\n batch_rewards_to_go.extend(reward_to_go(ep_rews))\n\n # reset episode-specific variables\n obs, done, ep_rews = env.reset(), False, []\n\n # won't render again this epoch\n finished_rendering_this_epoch = True\n\n # end experience loop if we have enough of it\n if len(batch_obs) > batch_size:\n break\n\n #### take a single BROIL policy gradient update step\n broil_weights, cvar = compute_broil_weights(batch_rets, batch_rewards_to_go)\n ####\n optimizer.zero_grad()\n batch_loss = compute_loss(obs=torch.as_tensor(batch_obs, dtype=torch.float32),\n act=torch.as_tensor(batch_acts, dtype=torch.int32),\n weights=torch.as_tensor(broil_weights, dtype=torch.float32)\n )\n\n batch_loss.backward()\n optimizer.step()\n return batch_loss, batch_rets, batch_lens, cvar\n\n # training loop\n cvar_list = []\n exp_ret_list = []\n wc_ret_list = []\n for i in range(epochs):\n batch_loss, batch_rets, batch_lens, cvar = train_one_epoch()\n exp_ret = np.dot(np.mean(batch_rets,axis=0),reward_dist.posterior)\n worst_case_return = np.min(np.mean(batch_rets, axis=0))\n cvar_list.append(cvar)\n exp_ret_list.append(exp_ret)\n wc_ret_list.append(worst_case_return)\n print('epoch: %3d \\t loss: %.3f \\t exp return: %.3f \\t cvar: %.3f \\t wc return: %.3f \\t ep_len: %.3f'%\n (i, batch_loss, exp_ret, cvar, worst_case_return, np.mean(batch_lens)))\n\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(cvar_list)\n plt.title(\"conditional value at risk\")\n plt.figure()\n plt.plot(exp_ret_list)\n plt.title(\"expected return\")\n plt.figure()\n plt.plot(wc_ret_list)\n plt.title(\"worst case return\")\n\n plt.show()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--alpha', default=0.95, type=float, help=\"alpha for alpha CVaR\")\n parser.add_argument('--lamda', default = 0.0, type=float, help='blending between exp return 
(lamda=1) and cvar maximization (lamda=0)')\n parser.add_argument('--lr', type=float, default=1e-2)\n parser.add_argument('--epochs', type=int, default=100)\n args = parser.parse_args()\n print('\\nUsing reward-to-go formulation of BROIL policy gradient.\\n')\n #print('\\nUsing only two reward functions in posterior')\n #print(\"R1(s) = +1 (if s <= 0) +2 (if s > 0)\")\n #print(\"R2(s) = +1 (if s <= 0) -10 (if s > 0)\")\n #print(\"Pr(R1) = 0.95\")\n #print(\"Pr(R2) = 0.05\")\n #print(\"Expected reward R(s) = +1 (if s <= 0) +1.4 (if s > 0)\")\n\n #create reward function distribution\n reward_dist = CartPoleReward()\n\n train(reward_dist, args.lamda, args.alpha, env_name=args.env_name, epochs=args.epochs, render=args.render, lr=args.lr)\n",
"import numpy as np\nimport torch\nfrom torch.optim import Adam\nimport gym\nimport time\nimport spinup.algos.pytorch.vpg.core as core\nfrom spinup.utils.logx import EpochLogger\nfrom spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\nfrom spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs\n\n\nclass VPGBuffer:\n \"\"\"\n A buffer for storing trajectories experienced by a VPG agent interacting\n with the environment, and using Generalized Advantage Estimation (GAE-Lambda)\n for calculating the advantages of state-action pairs.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):\n self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)\n self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)\n self.adv_buf = np.zeros(size, dtype=np.float32)\n self.rew_buf = np.zeros(size, dtype=np.float32)\n self.ret_buf = np.zeros(size, dtype=np.float32)\n self.val_buf = np.zeros(size, dtype=np.float32)\n self.logp_buf = np.zeros(size, dtype=np.float32)\n self.gamma, self.lam = gamma, lam\n self.ptr, self.path_start_idx, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, val, logp):\n \"\"\"\n Append one timestep of agent-environment interaction to the buffer.\n \"\"\"\n assert self.ptr < self.max_size # buffer has to have room so you can store\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.logp_buf[self.ptr] = logp\n self.ptr += 1\n\n def finish_path(self, last_val=0):\n \"\"\"\n Call this at the end of a trajectory, or when one gets cut off\n by an epoch ending. This looks back in the buffer to where the\n trajectory started, and uses rewards and value estimates from\n the whole trajectory to compute advantage estimates with GAE-Lambda,\n as well as compute the rewards-to-go for each state, to use as\n the targets for the value function.\n The \"last_val\" argument should be 0 if the trajectory ended\n because the agent reached a terminal state (died), and otherwise\n should be V(s_T), the value function estimated for the last state.\n This allows us to bootstrap the reward-to-go calculation to account\n for timesteps beyond the arbitrary episode horizon (or epoch cutoff).\n \"\"\"\n\n path_slice = slice(self.path_start_idx, self.ptr)\n rews = np.append(self.rew_buf[path_slice], last_val)\n vals = np.append(self.val_buf[path_slice], last_val)\n \n # the next two lines implement GAE-Lambda advantage calculation\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)\n \n # the next line computes rewards-to-go, to be targets for the value function\n self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]\n \n self.path_start_idx = self.ptr\n\n def get(self):\n \"\"\"\n Call this at the end of an epoch to get all of the data from\n the buffer, with advantages appropriately normalized (shifted to have\n mean zero and std one). 
Also, resets some pointers in the buffer.\n \"\"\"\n assert self.ptr == self.max_size # buffer has to be full before you can get\n self.ptr, self.path_start_idx = 0, 0\n # the next two lines implement the advantage normalization trick\n adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)\n self.adv_buf = (self.adv_buf - adv_mean) / adv_std\n data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,\n adv=self.adv_buf, logp=self.logp_buf)\n return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in data.items()}\n\n\n\ndef vpg(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), render=False, seed=0, \n steps_per_epoch=4000, epochs=50, gamma=0.99, pi_lr=3e-4,\n vf_lr=1e-3, train_v_iters=80, lam=0.97, max_ep_len=1000,\n logger_kwargs=dict(), save_freq=10):\n \"\"\"\n Vanilla Policy Gradient \n (with GAE-Lambda for advantage estimation)\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n actor_critic: The constructor method for a PyTorch Module with a \n ``step`` method, an ``act`` method, a ``pi`` module, and a ``v`` \n module. The ``step`` method should accept a batch of observations \n and return:\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``a`` (batch, act_dim) | Numpy array of actions for each \n | observation.\n ``v`` (batch,) | Numpy array of value estimates\n | for the provided observations.\n ``logp_a`` (batch,) | Numpy array of log probs for the\n | actions in ``a``.\n =========== ================ ======================================\n The ``act`` method behaves the same as ``step`` but only returns ``a``.\n The ``pi`` module's forward call should accept a batch of \n observations and optionally a batch of actions, and return:\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``pi`` N/A | Torch Distribution object, containing\n | a batch of distributions describing\n | the policy for the provided observations.\n ``logp_a`` (batch,) | Optional (only returned if batch of\n | actions is given). Tensor containing \n | the log probability, according to \n | the policy, of the provided actions.\n | If actions not given, will contain\n | ``None``.\n =========== ================ ======================================\n The ``v`` module's forward call should accept a batch of observations\n and return:\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``v`` (batch,) | Tensor containing the value estimates\n | for the provided observations. (Critical: \n | make sure to flatten this!)\n =========== ================ ======================================\n ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object \n you provided to VPG.\n seed (int): Seed for random number generators.\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n epochs (int): Number of epochs of interaction (equivalent to\n number of policy updates) to perform.\n gamma (float): Discount factor. 
(Always between 0 and 1.)\n pi_lr (float): Learning rate for policy optimizer.\n vf_lr (float): Learning rate for value function optimizer.\n train_v_iters (int): Number of gradient descent steps to take on \n value function per epoch.\n lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,\n close to 1.)\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n logger_kwargs (dict): Keyword args for EpochLogger.\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n \"\"\"\n\n # Special function to avoid certain slowdowns from PyTorch + MPI combo.\n setup_pytorch_for_mpi()\n\n # Set up logger and save configuration\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Random seed\n seed += 10000 * proc_id()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Instantiate environment\n env = env_fn()\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape\n\n # Create actor-critic module\n ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)\n\n # Sync params across processes\n sync_params(ac)\n\n # Count variables\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t v: %d\\n'%var_counts)\n\n # Set up experience buffer\n local_steps_per_epoch = int(steps_per_epoch / num_procs())\n buf = VPGBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)\n\n # Set up function for computing VPG policy loss\n def compute_loss_pi(data):\n obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']\n\n # Policy loss\n pi, logp = ac.pi(obs, act)\n loss_pi = -(logp * adv).mean()\n\n # Useful extra info\n approx_kl = (logp_old - logp).mean().item()\n ent = pi.entropy().mean().item()\n pi_info = dict(kl=approx_kl, ent=ent)\n\n return loss_pi, pi_info\n\n # Set up function for computing value loss\n def compute_loss_v(data):\n obs, ret = data['obs'], data['ret']\n return ((ac.v(obs) - ret)**2).mean()\n\n # Set up optimizers for policy and value function\n pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)\n vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)\n\n # Set up model saving\n logger.setup_pytorch_saver(ac)\n\n def update():\n data = buf.get()\n\n # Get loss and info values before update\n pi_l_old, pi_info_old = compute_loss_pi(data)\n pi_l_old = pi_l_old.item()\n v_l_old = compute_loss_v(data).item()\n\n # Train policy with a single step of gradient descent\n pi_optimizer.zero_grad()\n loss_pi, pi_info = compute_loss_pi(data)\n loss_pi.backward()\n mpi_avg_grads(ac.pi) # average grads across MPI processes\n pi_optimizer.step()\n\n # Value function learning\n for i in range(train_v_iters):\n vf_optimizer.zero_grad()\n loss_v = compute_loss_v(data)\n loss_v.backward()\n mpi_avg_grads(ac.v) # average grads across MPI processes\n vf_optimizer.step()\n\n # Log changes from update\n kl, ent = pi_info['kl'], pi_info_old['ent']\n logger.store(LossPi=pi_l_old, LossV=v_l_old,\n KL=kl, Entropy=ent,\n DeltaLossPi=(loss_pi.item() - pi_l_old),\n DeltaLossV=(loss_v.item() - v_l_old))\n\n # Prepare for interaction with environment\n start_time = time.time()\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Main loop: collect experience in env and update/log each epoch\n for epoch in range(epochs):\n for t in range(local_steps_per_epoch):\n a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))\n\n next_o, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # save and log\n 
buf.store(o, a, r, v, logp)\n logger.store(VVals=v)\n \n # Update obs (critical!)\n o = next_o\n\n timeout = ep_len == max_ep_len\n terminal = d or timeout\n epoch_ended = t==local_steps_per_epoch-1\n\n if render:\n env.render()\n\n if terminal or epoch_ended:\n if epoch_ended and not(terminal):\n print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)\n # if trajectory didn't reach terminal state, bootstrap value target\n if timeout or epoch_ended:\n _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))\n else:\n v = 0\n buf.finish_path(v)\n if terminal:\n # only save EpRet / EpLen if trajectory finished\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, None)\n\n # Perform VPG update!\n update()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('DeltaLossPi', average_only=True)\n logger.log_tabular('DeltaLossV', average_only=True)\n logger.log_tabular('Entropy', average_only=True)\n logger.log_tabular('KL', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--hid', type=int, default=64)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--cpu', type=int, default=1)\n parser.add_argument('--steps', type=int, default=4000)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--exp_name', type=str, default='vpg')\n parser.add_argument('--render', type=bool, default=False)\n parser.add_argument('--policy_lr', type=float, default=3e-3, help=\"learning rate for policy\")\n args = parser.parse_args()\n\n mpi_fork(args.cpu) # run parallel code with mpi\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n vpg(lambda : gym.make(args.env), actor_critic=core.MLPActorCritic, render=args.render,\n ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma, \n seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,\n pi_lr=args.policy_lr,\n logger_kwargs=logger_kwargs)"
] | [
[
"numpy.sum"
],
[
"torch.nn.Sequential",
"torch.distributions.categorical.Categorical",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"torch.nn.Linear",
"torch.as_tensor",
"numpy.zeros_like",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
],
[
"numpy.random.seed",
"torch.manual_seed",
"numpy.append",
"numpy.zeros",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sbmalik/pytorchx | [
"938ba5855cfb72b0dbce91af8c0a6d0e3943f122",
"938ba5855cfb72b0dbce91af8c0a6d0e3943f122"
] | [
"squeezenet/squeezenet.py",
"mnasnet/inference.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\n\ndef main():\n print('cuda device count: ', torch.cuda.device_count())\n net = torchvision.models.squeezenet1_1(pretrained=True)\n #net.fc = nn.Linear(512, 2)\n net = net.eval()\n net = net.to('cuda:0')\n print(net)\n tmp = torch.ones(2, 3, 227, 227).to('cuda:0')\n out = net(tmp)\n print('squeezenet out:', out.shape)\n torch.save(net, \"squeezenet.pth\")\n\nif __name__ == '__main__':\n main()\n\n",
"import torch\nfrom torch import nn\nimport torchvision\nimport os\nimport struct\nfrom torchsummary import summary\n\ndef main():\n print('cuda device count: ', torch.cuda.device_count())\n net = torch.load('mnasnet.pth')\n net = net.to('cuda:0')\n net = net.eval()\n print('model: ', net)\n #print('state dict: ', net.state_dict().keys())\n tmp = torch.ones(1, 3, 224, 224).to('cuda:0')\n print('input: ', tmp)\n out = net(tmp)\n\n print('output:', out)\n\n summary(net, (3, 224, 224))\n #return\n f = open(\"mnasnet.wts\", 'w')\n f.write(\"{}\\n\".format(len(net.state_dict().keys())))\n for k,v in net.state_dict().items():\n print('key: ', k)\n print('value: ', v.shape)\n vr = v.reshape(-1).cpu().numpy()\n f.write(\"{} {}\".format(k, len(vr)))\n for vv in vr:\n f.write(\" \")\n f.write(struct.pack(\">f\", float(vv)).hex())\n f.write(\"\\n\")\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.cuda.device_count",
"torch.ones",
"torch.save"
],
[
"torch.cuda.device_count",
"torch.ones",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Sage-Bionetworks/GENIE-Sponsored-Projects | [
"e34be3ece96144aa525c7281738736d3c5ef93cb"
] | [
"geniesp/sp_config.py"
] | [
"\"\"\"\nSponsored project configuration classes\n\nUSAGE:\ngit clone https://github.com/cBioPortal/cbioportal.git\npython runSP.py AKT1 ../cbioportal/ --staging\n\"\"\"\nimport os\nimport random\nimport string\n\nimport pandas as pd\nimport synapseclient\n\nfrom . import new_redcap_export_mapping\nfrom . import sp_redcap_export_mapping\n\n\nclass Akt1(sp_redcap_export_mapping.SponsoredProjectRunner):\n \"\"\"\n AKT1 PROCESSES\n - ONE TIMELINE FILE\n - CLINICAL FILE\n OS_MONTHS = death_date_int - mets_disease_date_int\n OS_MONTHS_PRIMARY = death_date_int - primary_dx_date_int \n All dates are converted from days to months (days/30.4)\n Add headers\n REMOVE PATIENTS/SAMPLES THAT DON'T HAVE GENIE SAMPLE IDS\n \"\"\"\n _SPONSORED_PROJECT = \"AKT1\"\n _DATES = [\"death_date_int\",\"follow_up_date_int\",\"primary_dx_date_int\",\"lrr_date_int\",\"mets_disease_date_int\",\"sample_date_int_1\",\n \"sequence_report_date_int_1\",\"sequence_report_date_int_1_static\",\"sample_date_int_2\",\"sample_date_int_2_static\",\n \"sequence_report_date_int_2\",\"sequence_report_date_int_2_static\",\"sequence_report_date_int_3_static\",\n \"OS_MONTHS\",\"OS_MONTHS_PRIMARY\"]\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = \"cancer_study_identifier: genie_akt1\\nstable_id: genie_akt1_sequenced\\ncase_list_category: all_cases_with_mutation_data\\ncase_list_name: Sequenced Tumors\\ncase_list_description: All sequenced samples (%s samples)\\ncase_list_ids: %s\"\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')\n _UNMAPPED_SYN_ID = \"syn11066652\"\n _MAPPED_SYN_ID = \"syn8404878\"\n _CASE_LIST_SYN_ID = \"syn10145838\"\n _SP_SYN_ID = \"syn8363325\"\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn8220815\"\n _SP_REDCAP_EXPORTS_SYNID = \"syn8404875\" #Storage of not found samples\n _NUM_SAMPLE_COLS = 3\n\n def addOSMonths(self, sponsoredProject_mapped_df):\n #Must add new date fields to the DATE variable along with add to the mapping table: syn8220815\n sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['mets_disease_date_int'] \n sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['primary_dx_date_int'] \n return(sponsoredProject_mapped_df)\n\n def createTemporaryGenieId(self, x, tempIdMapping):\n uniqId = x['record_id'] + x['redcap_data_access_group']\n tempIdMap = tempIdMapping['patientId'][tempIdMapping['uniqueId'] == uniqId]\n tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))\n if len(tempIdMap) == 0:\n return(tempId)\n else:\n return(tempIdMap.values[0])\n # if sum(tempIdMapping['uniqueId'] == uniqId) == 0:\n # #syn.store(synapseclient.Table(syn.get(\"syn10164044\"),[[uniqId, tempId, SPONSORED_PROJECT]]))\n # return(tempId)\n # elif pd.np.isnan(temp['tempPatientId'][tempIdMapping['uniqueId'] == uniqId].values[0]):\n\n # else:\n # return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['tempPatientId'].values[0])\n\n def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):\n print(\"RENAMING %s NULL PATIENTS\" % sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()))\n #Create temp patient Id\n allNullPatients = sponsoredProject_mapped_df[['record_id','redcap_data_access_group','genie_patient_id']][sponsoredProject_mapped_df['genie_patient_id'].isnull()]\n temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf), axis =1)\n if 
sponsoredProject_mapped_df['genie_patient_id'].isnull().any():\n sponsoredProject_mapped_df['genie_patient_id'][sponsoredProject_mapped_df['genie_patient_id'].isnull()] = temporaryIds\n assert sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()) ==0, \"Make sure there are no null genie patient Ids\"\n\n sponsoredProject_mapped_df['genie_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','genie_patient_id'), axis=1)\n sponsoredProject_mapped_df.reset_index(inplace=True,drop=True)\n return(sponsoredProject_mapped_df, temporaryIds)\n\n def makeTimeLineDf(self, redCapExportDf, therapyRange = 18):\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_DRUG_CLINTRIAL = []\n THERAPY_DRUG_AZD5363 = []\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n THERAPY_COMBO_YN = []\n THERAPY_COMBO_NUM = []\n #THERAPY NUMBER\n for therapyNumber in range(1,therapyRange):\n therapyCols = [i for i in redCapExportDf if \"therapy%d_\" % therapyNumber in i]\n START_DATE.extend([i for i in therapyCols if \"start_int\" in i])\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([i for i in therapyCols if len(i.split(\"_\")) == 2])\n THERAPY_DRUG_CLINTRIAL.extend([i for i in therapyCols if \"clintrial\" in i])\n THERAPY_DRUG_AZD5363.extend([i for i in therapyCols if \"azd\" in i])\n THERAPY_DRUG_OTHER.extend([i for i in therapyCols if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if \"discontinue\" in i])\n THERAPY_DRUG_REASON.extend([i for i in therapyCols if \"reason\" in i])\n THERAPY_COMBO_YN.extend([i for i in therapyCols if \"combo_yn\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_COMBO_NUM.extend([i for i in therapyCols if \"combo_num\" in i]* len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_TYPE.extend([\"Medical Therapy %d\" % therapyNumber]* len([i for i in therapyCols if \"start_int\" in i]))\n SUBTYPE.extend([\"Chemo/Target/Immuno etc.\"] * len([i for i in therapyCols if \"start_int\" in i]))\n #OVARIAN\n ovarian = [i for i in redCapExportDf if \"ovariansup\" in i]\n ovarian_len = len([i for i in ovarian if \"start_int\" in i])\n START_DATE.extend([i for i in ovarian if \"start_int\" in i])\n STOP_DATE.extend([i for i in ovarian if \"end_int\" in i])\n TREATMENT_TYPE.extend([\"Ovarian Suppression At Primary\"] * ovarian_len)\n SUBTYPE.extend([\"Ovarian Suppression\"] * ovarian_len)\n AGENT.extend(['']*ovarian_len)\n THERAPY_DRUG_CLINTRIAL.extend(['']*ovarian_len)\n THERAPY_DRUG_AZD5363.extend(['']*ovarian_len)\n THERAPY_DRUG_OTHER.extend(['']*ovarian_len)\n THERAPY_DRUG_DISCONTINUE.extend(['']*ovarian_len)\n THERAPY_DRUG_REASON.extend(['']*ovarian_len)\n THERAPY_COMBO_YN.extend(['']*ovarian_len)\n THERAPY_COMBO_NUM.extend(['']*ovarian_len)\n #HORMONE\n hormo = [i for i in redCapExportDf if \"hormo\" in i]\n hormo_len = len([i for i in hormo if \"start_int\" in i])\n START_DATE.extend([i for i in hormo if \"start_int\" in i])\n STOP_DATE.extend([i for i in hormo if \"end_int\" in i])\n THERAPY_DRUG_CLINTRIAL.extend([i for i in hormo if \"clintrial\" in i])\n THERAPY_DRUG_AZD5363.extend(['']*hormo_len)\n THERAPY_DRUG_OTHER.extend([i for i in hormo if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in hormo if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([i for i in hormo if \"reason\" in i])\n AGENT.extend([i for i in hormo if \"reason\" not in i and 
\"discon\" not in i and \"other\" not in i and \"clintrial\" not in i and \"start_int\" not in i and \"end_int\" not in i and \"therapy\" not in i])\n THERAPY_COMBO_YN.extend(['']*hormo_len)\n THERAPY_COMBO_NUM.extend(['']*hormo_len)\n SUBTYPE.extend([\"Hormone Therapy\"] * hormo_len)\n TREATMENT_TYPE.extend([\"Medical Therapy 1\"] * hormo_len)\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n\n #METASTATIC DIAGNOSIS\n metaDiagnosis = pd.DataFrame()\n metaDiagnosis['PATIENT_ID'] = redCapExportDf['genie_patient_id']\n #MET DISEASE IS TIMEPOINT 0\n metaDiagnosis['START_DATE'] = 0\n #metaDiagnosis['START_DATE'] = redCapExportDf['mets_disease_date_int']\n metaDiagnosis['EVENT_TYPE'] = 'STATUS'\n metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'\n metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]\n\n removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_CLINTRIAL+THERAPY_DRUG_AZD5363+THERAPY_DRUG_OTHER+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM\n lengths = set([\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(SUBTYPE),\n len(AGENT),\n len(THERAPY_DRUG_CLINTRIAL),\n len(THERAPY_DRUG_AZD5363),\n len(THERAPY_DRUG_OTHER),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(THERAPY_COMBO_YN),\n len(THERAPY_COMBO_NUM),\n len(EVENT_TYPE)])\n assert len(lengths) == 1,\"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(redCapExportDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = [redCapExportDf['genie_patient_id'][i]]*len(START_DATE)\n #timelineDF['START_DATE'] = redCapExportDf.ix[i][START_DATE].reset_index(drop=True) - redCapExportDf.ix[i]['primary_dx_date_int']\n #timelineDF['STOP_DATE'] = redCapExportDf.ix[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.ix[i]['primary_dx_date_int']\n #MET DISEASE IS TIMEPOINT 0\n timelineDF['START_DATE'] = redCapExportDf.iloc[i][START_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']\n timelineDF['STOP_DATE'] = redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_CLINTRIAL'] = redCapExportDf.iloc[i][THERAPY_DRUG_CLINTRIAL].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_AZD5363'] = redCapExportDf.iloc[i][THERAPY_DRUG_AZD5363].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)\n total = total.append(timelineDF)\n total['STATUS'] = ''\n ordering = total.columns\n total = total.append(metaDiagnosis)\n total = total[ordering]\n return(total,removeCols)\n\n def getSpecimen(self, getTimelineSpecimen):\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']\n specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC - getTimelineSpecimen.METS_DISEASE_DATE_INT\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n 
specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']\n specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n\nclass Erbb2(sp_redcap_export_mapping.SponsoredProjectRunner):\n\n _SPONSORED_PROJECT = \"ERBB2\"\n _DATES = ['follow_up_date_int','date_death_int','primary_dx_date_int','lrr_date_int','date_first_met_int',\n 'sample_date_int_1','seq_report_date_int_1','sample_date_int_2','seq_report_date_int_2','sample_date_int_3',\n 'sequence_report_date_int_3','sample_date_int_4','sequence_report_date_int_4','sample_date_int_5','sequence_report_date_int_5',\n 'sample_date_int_6','seq_report_date_int_6','sample_date_int_7','seq_report_date_int_7','sample_date_int_8',\n 'sequence_report_date_int_8','sample_date_int_9','sequence_report_date_int_9','sample_date_int_10',\n 'sequence_report_date_int_10','date_bso_int','OS_MONTHS','OS_MONTHS_PRIMARY']\n\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = \"cancer_study_identifier: genie_erbb2\\nstable_id: genie_erbb2_sequenced\\ncase_list_category: all_cases_with_mutation_data\\ncase_list_name: Sequenced Tumors\\ncase_list_description: All sequenced samples (%s samples)\\ncase_list_ids: %s\"\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')\n _UNMAPPED_SYN_ID = \"syn8356977\"\n _MAPPED_SYN_ID = \"syn8367692\"\n _CASE_LIST_SYN_ID = \"syn10145925\"\n _SP_SYN_ID = \"syn8363326\"\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn8363731\"\n _SP_REDCAP_EXPORTS_SYNID = \"syn8322425\" #Storage of not found samples\n _NUM_SAMPLE_COLS = 10\n\n def addOSMonths(self, sponsoredProject_mapped_df):\n #Must add new date fields to the DATE variable along with add to the mapping table: syn8220815\n sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['date_first_met_int'] \n sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['primary_dx_date_int'] \n return(sponsoredProject_mapped_df)\n\n def createTemporaryGenieId(self, x, tempIdMapping, patientIdCol):\n \"\"\"\n Create temporary genie id for those that don't have \n \"\"\"\n uniqId = x['record_id_patient_id'] + x['redcap_data_access_group']\n if sum(tempIdMapping['uniqueId'] == uniqId) == 0:\n tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))\n self.syn.store(synapseclient.Table(self.syn.get(\"syn10164044\"),[[uniqId, tempId]]))\n return(tempId)\n else:\n return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['temporaryId'].values[0])\n \n def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):\n #### TIMELINE FILE\n sponsoredProject_mapped_df['redcap_data_access_group'] = [i.upper() for i in sponsoredProject_mapped_df['redcap_data_access_group']]\n allNullPatients = sponsoredProject_mapped_df[['record_id_patient_id','redcap_data_access_group']][sponsoredProject_mapped_df['record_id_patient_id'].isnull()]\n temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf, 'record_id_patient_id'), axis =1)\n if not temporaryIds.empty:\n sponsoredProject_mapped_df['record_id_patient_id'][sponsoredProject_mapped_df['record_id_patient_id'].isnull()] = temporaryIds\n assert sum(sponsoredProject_mapped_df['record_id_patient_id'].isnull()) == 0, \"Make sure there are no null genie patient Ids\"\n sponsoredProject_mapped_df['record_id_patient_id'] = 
sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','record_id_patient_id'), axis=1)\n return(sponsoredProject_mapped_df, temporaryIds)\n\n def makeTimeLineDf(self, redCapExportDf, therapyRange = 16):\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_RESPONSE = []\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n THERAPY_COMBO_YN = []\n THERAPY_COMBO_NUM = []\n ADD_TREATMENT = []\n TREATMENT_SETTING = []\n for therapyNumber in range(1,therapyRange):\n therapyCols = [i for i in redCapExportDf if (\"therapy%d_\" % therapyNumber in i or \"combo_therapy_yn_%d\" %therapyNumber == i or \"add_treatment_%d\" % therapyNumber == i or \"treatment_setting_%d\" % therapyNumber == i)]\n START_DATE.extend([i for i in therapyCols if \"start_int\" in i])\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([i for i in therapyCols if len(i.split(\"_\")) == 2 and \"response\" not in i and \"ctdrug\" not in i])\n THERAPY_DRUG_OTHER.extend([i for i in therapyCols if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([i for i in therapyCols if \"reason\" in i])\n THERAPY_COMBO_YN.extend([i for i in therapyCols if \"combo_therapy_yn\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_COMBO_NUM.extend([i for i in therapyCols if \"combo_num\" in i]* len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_TYPE.extend([\"Medical Therapy %d\" % therapyNumber]* len([i for i in therapyCols if \"start_int\" in i]))\n SUBTYPE.extend([\"Chemo/Target/Immuno etc.\"] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_RESPONSE.extend([i for i in therapyCols if \"response\" in i] *len([i for i in therapyCols if \"start_int\" in i]))\n ADD_TREATMENT.extend([i for i in therapyCols if \"add_treatment\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_SETTING.extend([i for i in therapyCols if \"treatment_setting\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n ADD_TREATMENT.extend(['']*4)\n\n #METASTATIC DIAGNOSIS\n metaDiagnosis = pd.DataFrame()\n #MET DISEASE IS TIMEPOINT 0\n metaDiagnosis['PATIENT_ID'] = redCapExportDf['record_id_patient_id']\n metaDiagnosis['START_DATE'] = 0\n #metaDiagnosis['START_DATE'] = redCapExportDf['date_first_met_int']\n metaDiagnosis['EVENT_TYPE'] = 'STATUS'\n metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'\n metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]\n\n removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_OTHER+THERAPY_RESPONSE+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM+ADD_TREATMENT + TREATMENT_SETTING\n\n lengths = set([\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(SUBTYPE),\n len(AGENT),\n len(THERAPY_RESPONSE),\n len(THERAPY_DRUG_OTHER),\n len(TREATMENT_SETTING),\n len(ADD_TREATMENT),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(THERAPY_COMBO_YN),\n len(THERAPY_COMBO_NUM),\n len(EVENT_TYPE)])\n assert len(lengths) == 1,\"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(redCapExportDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = [redCapExportDf['record_id_patient_id'][i]]*len(START_DATE)\n if not pd.isnull(redCapExportDf.iloc[i]['date_first_met_int']):\n timelineDF['START_DATE'] = [start if 
pd.isnull(start) else int(start) - int(redCapExportDf.iloc[i]['date_first_met_int']) for start in redCapExportDf.iloc[i][START_DATE].reset_index(drop=True)]\n timelineDF['STOP_DATE'] = [end if pd.isnull(end) else int(end) - int(redCapExportDf.iloc[i]['date_first_met_int']) for end in redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True)]\n else:\n timelineDF['START_DATE'] = pd.np.nan\n timelineDF['STOP_DATE'] = pd.np.nan\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)\n total = total.append(timelineDF)\n total['STATUS'] = ''\n ordering = total.columns\n total = total.append(metaDiagnosis)\n total = total[ordering]\n return(total, removeCols)\n\n def getSpecimen(self, getTimelineSpecimen):\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']\n getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.isnull()]\n getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.METS_DISEASE_DATE_INT.isnull()]\n specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.astype(int) - getTimelineSpecimen.METS_DISEASE_DATE_INT.astype(int)\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']\n specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n\nclass Fgfr4(new_redcap_export_mapping.SponsoredProjectRunner):\n\n _DATA_ELEMENT_SYN_ID = \"syn12032922\"\n _SPONSORED_PROJECT = 'FGFR4'\n # No need to define in class\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT, 'case_lists')\n _NUM_COUNTS = 4\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn15572052\"\n _UNLABELLED_SYN_ID = \"syn15341849\"\n _LABELLED_SYN_ID = \"syn15341838\"\n # Storage of not found samples\n _SP_REDCAP_EXPORTS_SYNID = \"syn11812526\"\n _SP_SYN_ID = \"syn14721789\"\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = (\n \"cancer_study_identifier: genie_fgfr4\\n\"\n \"stable_id: genie_fgfr4_sequenced\\n\"\n \"case_list_category: all_cases_with_mutation_data\\n\"\n \"case_list_name: Sequenced Tumors\\n\"\n \"case_list_description: All sequenced samples \"\n \"(%s samples)\\ncase_list_ids: %s\")\n _CASE_LIST_SYN_ID = \"syn14721794\"\n\n # def addOSMonths(self, sponsoredProject_mapped_df):\n # '''\n # Must add new date fields to the DATE variable along with add\n # to the mapping table: syn8220815\n # '''\n # sponsoredProject_mapped_df['OS_MONTHS'] = \\\n # sponsoredProject_mapped_df['death_date_int'] - \\\n # sponsoredProject_mapped_df['date_first_met_int']\n # sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = \\\n # sponsoredProject_mapped_df['death_date_int'] - \\\n # sponsoredProject_mapped_df['primary_dx_date_int']\n # return(sponsoredProject_mapped_df)\n\n def makeTimeLineDf(\n self, treatmentDf, finalPatientDf, therapyRange=5):\n # These variables are 
capitalized to match with the column headers\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_RESPONSE = []\n # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or\n # OTHER (NCIT ID)\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n TREATMENT_SETTING = []\n RXNORM_ID = []\n # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or\n # OTHER\n THERAPY_DRUG_START_ESTIMATED = []\n THERAPY_DRUG_OTHER_NAME = []\n THERAPY_DRUG_END_ESTIMATED = []\n for therapyNumber in range(1, therapyRange):\n therapyCols = [\n i for i in treatmentDf\n if \"therapy_drug%d\" % therapyNumber in i]\n startCols = [i for i in therapyCols if \"start_int\" in i]\n START_DATE.extend(startCols)\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([\n i for i in therapyCols if \"name\" in i and \"other\" not in i])\n RXNORM_ID.extend([\n i for i in therapyCols\n if i == \"therapy_drug%d\" % therapyNumber])\n THERAPY_DRUG_OTHER.extend([\n i for i in therapyCols if \"other\" in i and 'name' not in i])\n THERAPY_DRUG_DISCONTINUE.extend([\n i for i in therapyCols if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([\n i for i in therapyCols if \"reason\" in i])\n THERAPY_DRUG_OTHER_NAME.extend([\n i for i in therapyCols if \"other_name\" in i])\n THERAPY_DRUG_START_ESTIMATED.extend([\n i for i in therapyCols if \"start_estimated\" in i])\n THERAPY_DRUG_END_ESTIMATED.extend([\n i for i in therapyCols if \"end_estimated\" in i])\n # Value\n TREATMENT_TYPE.extend([\n \"Medical Therapy %d\" % therapyNumber] * len(startCols))\n # Value\n SUBTYPE = [\"Chemo/Target/Immuno etc.\"] * len(AGENT)\n TREATMENT_SETTING = ['treatment_setting'] * len(AGENT)\n THERAPY_RESPONSE = ['therapy_response'] * len(AGENT)\n # Value\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n LINE_START = ['line_start_int'] * len(AGENT)\n REGIMEN_NAME = ['regimen_name'] * len(AGENT)\n CLINICAL_TRIAL = ['clinical_trial'] * len(AGENT)\n CENTER = ['redcap_data_access_group'] * len(AGENT)\n\n lengths = [\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(AGENT),\n len(THERAPY_DRUG_OTHER),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(RXNORM_ID),\n len(THERAPY_DRUG_OTHER_NAME),\n len(THERAPY_DRUG_START_ESTIMATED),\n len(THERAPY_DRUG_END_ESTIMATED),\n len(TREATMENT_TYPE)]\n assert len(set(lengths)) == 1, \"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(treatmentDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = \\\n [treatmentDf['patient_id'].iloc[i]]*len(START_DATE)\n timelineDF['START_DATE'] = \\\n treatmentDf.iloc[i][START_DATE].reset_index(drop=True)\n timelineDF['STOP_DATE'] = \\\n treatmentDf.iloc[i][STOP_DATE].reset_index(drop=True)\n\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n # has to be in this order of PATIENT_ID, START, STOP and EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = \\\n treatmentDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['RXNORM_ID'] = \\\n treatmentDf.iloc[i][RXNORM_ID].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = \\\n treatmentDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = treatmentDf.iloc[i][\n THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = \\\n treatmentDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER_NAME'] = 
treatmentDf.iloc[i][\n THERAPY_DRUG_OTHER_NAME].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_START_ESTIMATED'] = treatmentDf.iloc[i][\n THERAPY_DRUG_START_ESTIMATED].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_END_ESTIMATED'] = treatmentDf.iloc[i][\n THERAPY_DRUG_END_ESTIMATED].reset_index(drop=True)\n timelineDF['TREATMENT_SETTING'] = \\\n treatmentDf.iloc[i][TREATMENT_SETTING].reset_index(drop=True)\n timelineDF['THERAPY_RESPONSE'] = \\\n treatmentDf.iloc[i][THERAPY_RESPONSE].reset_index(drop=True)\n timelineDF['LINE_START'] = \\\n treatmentDf.iloc[i][LINE_START].reset_index(drop=True)\n timelineDF['REGIMEN_NAME'] = \\\n treatmentDf.iloc[i][REGIMEN_NAME].reset_index(drop=True)\n timelineDF['CLINICAL_TRIAL'] = \\\n treatmentDf.iloc[i][CLINICAL_TRIAL].reset_index(drop=True)\n timelineDF['CENTER'] = \\\n treatmentDf.iloc[i][CENTER].reset_index(drop=True)\n total = total.append(timelineDF, sort=False)\n # remove all without START dates\n total = total[~total['START_DATE'].isnull()]\n total['SP'] = self._SPONSORED_PROJECT\n total['STATUS'] = ''\n total['START_DATE'] = total['START_DATE'].astype('float')\n total['STOP_DATE'] = total['STOP_DATE'].astype('float')\n total['RXNORM_ID'] = total['RXNORM_ID'].astype('float')\n total['LINE_START'] = total['LINE_START'].astype('float')\n total.drop_duplicates(inplace=True)\n # Anchor point is MET_DX_DATE_INT\n date_met_int = [\n float(finalPatientDf['MET_DX_DATE_INT'][\n finalPatientDf['PATIENT_ID'] == patient].values[0])\n for patient in total['PATIENT_ID']]\n total['START_DATE'] = total['START_DATE'] - date_met_int\n total['STOP_DATE'] = total['STOP_DATE'] - date_met_int\n total['LINE_START'] = total['LINE_START'] - date_met_int\n\n return(total)\n\n def createSpecimenDf(self, sampleDf, patientDf):\n clinicalDf = sampleDf.merge(patientDf, on=\"PATIENT_ID\", how=\"outer\")\n clinicalDf = clinicalDf[~clinicalDf.AGE_AT_SEQ_REPORT.isnull()]\n clinicalDf = \\\n clinicalDf[~clinicalDf.DATE_FIRST_DISTANT_MET_INT.isnull()]\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = clinicalDf['PATIENT_ID']\n specimen['SAMPLE_ID'] = clinicalDf['SAMPLE_ID']\n specimen['START_DATE'] = \\\n clinicalDf.AGE_AT_SEQ_REPORT.astype(int) - \\\n clinicalDf.DATE_FIRST_DISTANT_MET_INT.astype(int)\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n specimen['SAMPLE_NOTES'] = clinicalDf.AGE_AT_SEQ_REPORT\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pragnesh-ai/driverlessai-recipes | [
"97371a2d2cd853cdeeb15037f462af96d81a7c0b",
"97371a2d2cd853cdeeb15037f462af96d81a7c0b",
"97371a2d2cd853cdeeb15037f462af96d81a7c0b"
] | [
"models/mli/model_skopes_rules.py",
"transformers/numeric/boxcox_transformer.py",
"models/timeseries/fb_prophet_parallel.py"
] | [
"\"\"\"Skopes rules \"\"\"\n\nimport uuid\nimport os\nimport datatable as dt\nimport numpy as np\nfrom h2oaicore.models import CustomModel\nfrom sklearn.preprocessing import LabelEncoder\nfrom h2oaicore.systemutils import physical_cores_count\nfrom h2oaicore.systemutils import user_dir, remove, config\nfrom h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug\n\n\nclass SKOPE_RULES(CustomModel):\n _regression = False\n _binary = True\n _multiclass = False\n _display_name = \"SKOPE RULES\"\n _description = \"SKOPE RULES\"\n # using git master because pypi is very out of date (Jan 2020) but need Sept 1-ish master with fix for updated scikit-learn\n _modules_needed_by_name = ['git+https://github.com/scikit-learn-contrib/skope-rules.git']\n\n @staticmethod\n def do_acceptance_test():\n return True\n\n def set_default_params(self, accuracy=None, time_tolerance=None,\n interpretability=None, **kwargs):\n # Fill up parameters we care about\n self.params = dict(random_state=kwargs.get(\"random_state\", 1234),\n max_depth_duplication=None, n_estimators=10,\n precision_min=0.5, recall_min=0.01, max_samples=0.8,\n max_samples_features=1.0, max_depth=3,\n max_features=\"auto\", min_samples_split=2,\n bootstrap=False, bootstrap_features=False)\n\n def mutate_params(self, accuracy=10, **kwargs):\n if accuracy > 8:\n max_depth_duplication = [None, 2, 3]\n n_estimators = [10, 20, 40]\n precision_min = [0.1, 0.2, 0.3]\n recall_min = [0.01, 0.05]\n max_samples = [0.5, 0.8, 1.0]\n max_samples_features = [0.5, 0.8, 1.0]\n max_depth = [3, 4, 5]\n max_features = [\"sqrt\", \"log2\", \"auto\"]\n min_samples_split = [2, 11, 21]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n elif accuracy >= 5:\n max_depth_duplication = [None]\n n_estimators = [10, 20]\n precision_min = [0.1, 0.2, 0.3]\n recall_min = [0.01]\n max_samples = [0.8, 1.0]\n max_samples_features = [1.0]\n max_depth = [3, 4]\n max_features = [\"sqrt\", \"log2\", \"auto\"]\n min_samples_split = [2, 5, 11]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n else:\n max_depth_duplication = [None]\n n_estimators = [10]\n precision_min = [0.1, 0.2]\n recall_min = [0.01]\n max_samples = [0.8, 1.0]\n max_samples_features = [0.8, 1.0]\n max_depth = [3, 4]\n max_features = [\"auto\"]\n min_samples_split = [2]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n\n self.params[\"max_depth_duplication\"] = np.random.choice(max_depth_duplication)\n self.params[\"n_estimators\"] = np.random.choice(n_estimators)\n self.params[\"precision_min\"] = np.random.choice(precision_min)\n self.params[\"recall_min\"] = np.random.choice(recall_min)\n self.params[\"max_samples\"] = np.random.choice(max_samples)\n self.params[\"max_samples_features\"] = np.random.choice(max_samples_features)\n self.params[\"max_depth\"] = np.random.choice(max_depth)\n self.params[\"max_features\"] = np.random.choice(max_features)\n self.params[\"min_samples_split\"] = np.random.choice(min_samples_split)\n self.params[\"bootstrap\"] = np.random.choice(bootstrap)\n self.params[\"bootstrap_features\"] = np.random.choice(bootstrap_features)\n\n def _create_tmp_folder(self, logger):\n # Create a temp folder to store files \n # Set the default value without context available (required to pass acceptance test)\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n # Make a real tmp folder when experiment is available\n if self.context and self.context.experiment_id:\n tmp_folder = 
os.path.join(self.context.experiment_tmp_dir, \"%s_SKOPE_model_folder\" % uuid.uuid4())\n\n # Now let's try to create that folder\n try:\n os.mkdir(tmp_folder)\n except PermissionError:\n # This not occur so log a warning\n loggerwarning(logger, \"SKOPE was denied temp folder creation rights\")\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except FileExistsError:\n # We should never be here since temp dir name is expected to be unique\n loggerwarning(logger, \"SKOPE temp folder already exists\")\n tmp_folder = os.path.join(self.context.experiment_tmp_dir, \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except:\n # Revert to temporary file path\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n\n loggerinfo(logger, \"SKOPE temp folder {}\".format(tmp_folder))\n return tmp_folder\n\n def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):\n\n orig_cols = list(X.names)\n\n import pandas as pd\n import numpy as np\n from skrules import SkopeRules\n from sklearn.preprocessing import OneHotEncoder\n from collections import Counter\n\n # Get the logger if it exists\n logger = None\n if self.context and self.context.experiment_id:\n logger = make_experiment_logger(experiment_id=self.context.experiment_id,\n tmp_dir=self.context.tmp_dir,\n experiment_tmp_dir=self.context.experiment_tmp_dir)\n\n # Set up temp folder\n tmp_folder = self._create_tmp_folder(logger)\n\n # Set up model\n if self.num_classes >= 2:\n lb = LabelEncoder()\n lb.fit(self.labels)\n y = lb.transform(y)\n\n model = SkopeRules(max_depth_duplication=self.params[\"max_depth_duplication\"],\n n_estimators=self.params[\"n_estimators\"],\n precision_min=self.params[\"precision_min\"],\n recall_min=self.params[\"recall_min\"],\n max_samples=self.params[\"max_samples\"],\n max_samples_features=self.params[\"max_samples_features\"],\n max_depth=self.params[\"max_depth\"],\n max_features=self.params[\"max_features\"],\n min_samples_split=self.params[\"min_samples_split\"],\n bootstrap=self.params[\"bootstrap\"],\n bootstrap_features=self.params[\"bootstrap_features\"],\n random_state=self.params[\"random_state\"],\n feature_names=orig_cols)\n else:\n # Skopes doesn't work for regression\n loggerinfo(logger, \"PASS, no skopes model\")\n pass\n\n # Find the datatypes\n X = X.to_pandas()\n X.columns = orig_cols\n\n # Change continuous features to categorical\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # Change all float32 values to float64\n for ii in range(len(X_datatypes)):\n if X_datatypes[ii] == 'float32':\n X = X.astype({orig_cols[ii]: np.float64})\n\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # List the categorical and numerical features\n self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols)) if\n (X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]\n self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]\n\n # Find the levels and mode for each categorical feature\n # for use in the test set\n self.train_levels = {}\n for item in self.X_categorical:\n self.train_levels[item] = list(set(X[item]))\n self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]\n\n # One hot encode the categorical features\n # And replace missing values with a Missing category\n if len(self.X_categorical) > 0:\n loggerinfo(logger, \"PCategorical encode\")\n\n for colname in 
self.X_categorical:\n X[colname] = list(X[colname].fillna(\"Missing\"))\n self.enc = OneHotEncoder(handle_unknown='ignore')\n\n self.enc.fit(X[self.X_categorical])\n self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))\n\n X_enc = self.enc.transform(X[self.X_categorical]).toarray()\n\n X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)\n\n # Replace missing values with a missing value code\n if len(self.X_numeric) > 0:\n\n for colname in self.X_numeric:\n X[colname] = list(X[colname].fillna(-999))\n\n model.fit(np.array(X), np.array(y))\n\n # Find the rule list\n self.rule_list = model.rules_\n\n # Calculate feature importances\n var_imp = []\n for var in orig_cols:\n var_imp.append(sum(int(var in item[0]) for item in self.rule_list))\n\n if max(var_imp) != 0:\n importances = list(np.array(var_imp) / max(var_imp))\n else:\n importances = [1] * len(var_imp)\n\n pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(\n os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)\n\n self.mean_target = np.array(sum(y) / len(y))\n\n # Set model properties\n self.set_model_properties(model=model,\n features=list(X.columns),\n importances=importances,\n iterations=self.params['n_estimators'])\n\n def predict(self, X, **kwargs):\n orig_cols = list(X.names)\n import pandas as pd\n\n X = dt.Frame(X)\n\n # Find datatypes\n X = X.to_pandas()\n\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # Change float 32 values to float 64\n for ii in range(len(X_datatypes)):\n if X_datatypes[ii] == 'float32':\n X = X.astype({orig_cols[ii]: np.float64})\n\n # Replace missing values with a missing category\n # Replace categories that weren't in the training set with the mode\n if len(self.X_categorical) > 0:\n\n for colname in self.X_categorical:\n X[colname] = list(X[colname].fillna(\"Missing\"))\n\n for label in self.X_categorical:\n # Replace anything not in the test set\n train_categories = self.train_levels[label]\n X_label = np.array(X[label])\n mmode = self.train_mode[label]\n X_label[~np.isin(X_label, train_categories)] = mmode\n X[label] = X_label\n\n # Replace missing values with a missing value code \n if len(self.X_numeric) > 0:\n for colname in self.X_numeric:\n X[colname] = list(X[colname].fillna(-999))\n\n # Get model \n model, _, _, _ = self.get_model_properties()\n\n # One hot encode categorical features\n if len(self.X_categorical) > 0:\n X_enc = self.enc.transform(X[self.X_categorical]).toarray()\n X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)\n\n # Make predictions on the test set\n preds = model.score_top_rules(X) / len(self.rule_list)\n preds = np.array(preds)\n epsilon = 10 ** (-3)\n preds[np.isnan(preds)] = self.mean_target\n preds[preds > 1 - epsilon] = 1.0 - epsilon\n preds[preds < 0 + epsilon] = 0.0 + epsilon\n\n return preds\n",
"\"\"\"Box-Cox Transform\"\"\"\nimport math\n\nfrom h2oaicore.transformer_utils import CustomTransformer\nimport datatable as dt\nimport numpy as np\nfrom scipy.stats import boxcox\n\n\nclass BoxCoxTransformer(CustomTransformer):\n _testing_can_skip_failure = False # ensure tested as if shouldn't fail\n\n @staticmethod\n def get_default_properties():\n return dict(col_type=\"numeric\", min_cols=1, max_cols=1, relative_importance=1)\n\n def fit_transform(self, X: dt.Frame, y: np.array = None):\n XX = X.to_pandas().iloc[:, 0].values\n is_na = np.isnan(XX)\n self._offset = -np.nanmin(XX) if np.nanmin(XX) < 0 else 0\n self._offset += 1e-3\n self._lmbda = None\n if not any(~is_na):\n return X\n x = self._offset + XX[~is_na]\n x = np.asarray(x)\n x[x <= 0] = 1e-3\n try:\n self._lmbda = boxcox(x, lmbda=self._lmbda)[1] # compute lambda\n except ValueError as e:\n if 'Data must not be constant' in str(e):\n self._lmbda = None\n return X\n raise\n return self.transform(X)\n\n def transform(self, X: dt.Frame):\n XX = X.to_pandas().iloc[:, 0].values\n is_na = np.isnan(XX) | np.array(XX <= -self._offset)\n if not any(~is_na) or self._lmbda is None:\n return X\n x = self._offset + XX[~is_na]\n x = np.asarray(x)\n x[x <= 0] = 1e-3 # don't worry if not invertible, just ensure can transform and valid transforms are kept valid\n try:\n ret = boxcox(x, lmbda=self._lmbda) # apply transform with pre-computed lambda\n except ValueError as e:\n if 'Data must not be constant' in str(e):\n return X\n raise\n XX[~is_na] = ret\n XX = XX.astype(np.float32)\n XX[XX > 1E30] = 1E30\n XX[XX < -1E30] = -1E30\n XX = dt.Frame(XX)\n # Don't leave inf/-inf\n for i in range(XX.ncols):\n XX.replace([math.inf, -math.inf], None)\n return XX\n",
"\"\"\"Prophet by Facebook for TimeSeries with an example of parameter mutation.\"\"\"\nimport importlib\nimport datatable as dt\nimport numpy as np\nfrom h2oaicore.models import CustomTimeSeriesModel\nfrom h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug\nfrom h2oaicore.systemutils import (\n small_job_pool, save_obj, load_obj, user_dir, remove, config\n)\nfrom h2oaicore.systemutils_more import arch_type\nimport os\nimport pandas as pd\nimport shutil\nimport random\nimport uuid\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\n\nclass suppress_stdout_stderr(object):\n def __init__(self):\n self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n self.save_fds = [os.dup(1), os.dup(2)]\n\n def __enter__(self):\n os.dup2(self.null_fds[0], 1)\n os.dup2(self.null_fds[1], 2)\n\n def __exit__(self, *_):\n os.dup2(self.save_fds[0], 1)\n os.dup2(self.save_fds[1], 2)\n for fd in self.null_fds + self.save_fds:\n os.close(fd)\n\n\n# Parallel implementation requires methods being called from different processes\n# Global methods support this feature\n# We use global methods as a wrapper for member methods of the transformer\ndef MyParallelProphetTransformer_fit_async(*args, **kwargs):\n return FBProphetParallelModel._fit_async(*args, **kwargs)\n\n\ndef MyParallelProphetTransformer_transform_async(*args, **kwargs):\n return FBProphetParallelModel._transform_async(*args, **kwargs)\n\n\nclass FBProphetParallelModel(CustomTimeSeriesModel):\n _regression = True\n _binary = False\n _multiclass = False\n _display_name = \"FB_Prophet_Parallel\"\n _description = \"Facebook Prophet TimeSeries forecasting with multi process support\"\n _parallel_task = True\n _testing_can_skip_failure = False # ensure tested as if shouldn't fail\n\n @staticmethod\n def is_enabled():\n return not (arch_type == \"ppc64le\")\n\n @staticmethod\n def can_use(accuracy, interpretability, **kwargs):\n return False # by default too slow unless only enabled\n\n @staticmethod\n def do_acceptance_test():\n return False\n\n froms3 = True\n if froms3:\n _root_path = \"https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes\"\n _suffix = \"-cp38-cp38-linux_x86_64.whl\"\n _modules_needed_by_name = [\n '%s/setuptools_git-1.2%s' % (_root_path, _suffix),\n '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix),\n '%s/ephem-3.7.7.1%s' % (_root_path, _suffix),\n '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix),\n '%s/pystan-2.19.1.1%s' % (_root_path, _suffix),\n '%s/httpstan-4.5.0-cp38-cp38-manylinux_2_27_x86_64.whl' % _root_path,\n '%s/fbprophet-0.7.1%s' % (_root_path, _suffix),\n ]\n else:\n _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', 'fbprophet==0.7.1']\n\n def set_default_params(self,\n accuracy=None, time_tolerance=None, interpretability=None,\n **kwargs):\n\n \"\"\"\n Parameters available for the model :\n - growth : available market growth strategy in Prophet are linear and logistic\n logistic growth require a cap that saturates the predictions output\n See : https://facebook.github.io/prophet/docs/saturating_forecasts.html#forecasting-growth\n\n - country_holidays : allows Prophet to use built in Holidays\n See mutate_params to check the available countries in the model\n https://facebook.github.io/prophet/docs/seasonality,_holiday_effects,_and_regressors.html#built-in-country-holidays\n\n We can change the way seasonality affects the predictions\n - seasonality_mode : 'additive' (default) or 'multiplicative'\n\n We can 
override Fourier Order for seasonality calculation\n https://facebook.github.io/prophet/docs/seasonality,_holiday_effects,_and_regressors.html#fourier-order-for-seasonalities\n - weekly_seasonality : default is 'auto'\n Can be False or any number that gives the Fourier Order for the seasonality calculation\n - yearly_seasonality : default is 'auto'\n Can be False or any number that gives the Fourier Order for the seasonality calculation\n\n By default only weekly and yearly seasonality are calculated\n However one can ask Prophet to calculate other/specific seasonality\n https://facebook.github.io/prophet/docs/seasonality,_holiday_effects,_and_regressors.html#specifying-custom-seasonalities\n - monthly_seasonality : Either False (no monthly seasonality) or a number which will be the Fourier Order\n for monthly seasonality.\n\n - quarterly_seasonality : Either False (no quarterly seasonality) or a number which will be the Fourier Order\n for quarterly seasonality.\n \"\"\"\n self.params = dict(\n growth=kwargs.get(\"growth\", \"linear\"),\n seasonality_mode=kwargs.get(\"seasonality_mode\", \"additive\"),\n country_holidays=kwargs.get(\"country_holidays\", None),\n weekly_seasonality=kwargs.get(\"weekly_seasonality\", 'auto'),\n monthly_seasonality=kwargs.get(\"monthly_seasonality\", False),\n quarterly_seasonality=kwargs.get(\"quarterly_seasonality\", False),\n yearly_seasonality=kwargs.get(\"yearly_seasonality\", 'auto'),\n )\n\n def mutate_params(self,\n accuracy, time_tolerance, interpretability,\n **kwargs):\n\n logger = None\n if self.context and self.context.experiment_id:\n logger = make_experiment_logger(experiment_id=self.context.experiment_id, tmp_dir=self.context.tmp_dir,\n experiment_tmp_dir=self.context.experiment_tmp_dir)\n\n # Default version is do no mutation\n # Otherwise, change self.params for this model\n holiday_choice = [None, \"US\", \"UK\", \"DE\", \"FRA\"]\n if accuracy >= 8:\n weekly_choice = [False, 'auto', 5, 7, 10, 15]\n yearly_choice = [False, 'auto', 5, 10, 15, 20, 30]\n monthly_choice = [False, 3, 5, 7, 10]\n quarterly_choice = [False, 3, 5, 7, 10]\n elif accuracy >= 5:\n weekly_choice = [False, 'auto', 10, 20]\n yearly_choice = [False, 'auto', 10, 20]\n monthly_choice = [False, 5]\n quarterly_choice = [False, 5]\n else:\n # No alternative seasonality, and no seasonality override for weekly and yearly\n weekly_choice = [False, 'auto']\n yearly_choice = [False, 'auto']\n monthly_choice = [False]\n quarterly_choice = [False]\n\n self.params[\"country_holidays\"] = np.random.choice(holiday_choice)\n self.params[\"seasonality_mode\"] = np.random.choice([\"additive\", \"multiplicative\"])\n self.params[\"weekly_seasonality\"] = np.random.choice(weekly_choice)\n self.params[\"monthly_seasonality\"] = np.random.choice(monthly_choice)\n self.params[\"quarterly_seasonality\"] = np.random.choice(quarterly_choice)\n self.params[\"yearly_seasonality\"] = np.random.choice(yearly_choice)\n self.params[\"growth\"] = np.random.choice([\"linear\", \"logistic\"])\n\n @staticmethod\n def _fit_async(X_path, grp_hash, tmp_folder):\n \"\"\"\n Fits a FB Prophet model for a particular time group\n :param X_path: Path to the data used to fit the FB Prophet model\n :param grp_hash: Time group identifier\n :return: time group identifier and path to the pickled model\n \"\"\"\n np.random.seed(1234)\n random.seed(1234)\n X = load_obj(X_path)\n # Commented for performance, uncomment for debug\n # print(\"prophet - fitting on data of shape: %s for group: %s\" % (str(X.shape), grp_hash))\n 
if X.shape[0] < 20:\n # print(\"prophet - small data work-around for group: %s\" % grp_hash)\n return grp_hash, None\n # Import FB Prophet package\n mod = importlib.import_module('fbprophet')\n Prophet = getattr(mod, \"Prophet\")\n nrows = X[['ds', 'y']].shape[0]\n n_changepoints = max(1, int(nrows * (2 / 3)))\n if n_changepoints < 25:\n model = Prophet(n_changepoints=n_changepoints)\n else:\n model = Prophet()\n\n with suppress_stdout_stderr():\n model.fit(X[['ds', 'y']])\n model_path = os.path.join(tmp_folder, \"fbprophet_model\" + str(uuid.uuid4()))\n save_obj(model, model_path)\n remove(X_path) # remove to indicate success\n return grp_hash, model_path\n\n def _get_n_jobs(self, logger, **kwargs):\n return 4 # self.params_base['n_jobs']\n\n def _clean_tmp_folder(self, logger, tmp_folder):\n try:\n shutil.rmtree(tmp_folder)\n loggerinfo(logger, \"Prophet cleaned up temporary file folder.\")\n except:\n loggerwarning(logger, \"Prophet could not delete the temporary file folder.\")\n\n def _create_tmp_folder(self, logger):\n # Create a temp folder to store files used during multi processing experiment\n # This temp folder will be removed at the end of the process\n # Set the default value without context available (required to pass acceptance test\n tmp_folder = os.path.join(user_dir(), \"%s_prophet_model_folder\" % uuid.uuid4())\n # Make a real tmp folder when experiment is available\n if self.context and self.context.experiment_id:\n tmp_folder = os.path.join(self.context.experiment_tmp_dir, \"%s_prophet_model_folder\" % uuid.uuid4())\n\n # Now let's try to create that folder\n try:\n os.mkdir(tmp_folder)\n except PermissionError:\n # This not occur so log a warning\n loggerwarning(logger, \"Prophet was denied temp folder creation rights\")\n tmp_folder = os.path.join(user_dir(), \"%s_prophet_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except FileExistsError:\n # We should never be here since temp dir name is expected to be unique\n loggerwarning(logger, \"Prophet temp folder already exists\")\n tmp_folder = os.path.join(self.context.experiment_tmp_dir, \"%s_prophet_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except:\n # Revert to temporary file path\n tmp_folder = os.path.join(user_dir(), \"%s_prophet_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n\n loggerinfo(logger, \"Prophet temp folder {}\".format(tmp_folder))\n return tmp_folder\n\n @staticmethod\n def _fit_async(X_path, grp_hash, tmp_folder, params, cap):\n \"\"\"\n Fits a FB Prophet model for a particular time group\n :param X_path: Path to the data used to fit the FB Prophet model\n :param grp_hash: Time group identifier\n :return: time group identifier and path to the pickled model\n \"\"\"\n np.random.seed(1234)\n random.seed(1234)\n X = load_obj(X_path)\n # Commented for performance, uncomment for debug\n # print(\"prophet - fitting on data of shape: %s for group: %s\" % (str(X.shape), grp_hash))\n if X.shape[0] < 20:\n return grp_hash, None\n\n # Import FB Prophet package\n mod = importlib.import_module('fbprophet')\n Prophet = getattr(mod, \"Prophet\")\n\n # Fit current model and prior\n nrows = X[['ds', 'y']].shape[0]\n n_changepoints = max(1, int(nrows * (2 / 3)))\n if n_changepoints < 25:\n model = Prophet(growth=params[\"growth\"], n_changepoints=n_changepoints)\n else:\n model = Prophet(growth=params[\"growth\"])\n # Add params\n if params[\"country_holidays\"] is not None:\n model.add_country_holidays(country_name=params[\"country_holidays\"])\n if params[\"monthly_seasonality\"]:\n 
model.add_seasonality(name='monthly', period=30.5, fourier_order=params[\"monthly_seasonality\"])\n if params[\"quarterly_seasonality\"]:\n model.add_seasonality(name='quarterly', period=92, fourier_order=params[\"quarterly_seasonality\"])\n\n with suppress_stdout_stderr():\n if params[\"growth\"] == \"logistic\":\n X[\"cap\"] = cap\n model.fit(X[['ds', 'y', 'cap']])\n else:\n model.fit(X[['ds', 'y']])\n\n model_path = os.path.join(tmp_folder, \"fbprophet_model\" + str(uuid.uuid4()))\n save_obj(model, model_path)\n remove(X_path) # remove to indicate success\n return grp_hash, model_path\n\n def get_hash(self, key):\n # Create dict key to store the min max scaler\n if isinstance(key, tuple):\n key = list(key)\n elif isinstance(key, list):\n pass\n else:\n # Not tuple, not list\n key = [key]\n grp_hash = '_'.join(map(str, key))\n return grp_hash\n\n def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):\n\n # Get TGC and time column\n self.tgc = self.params_base.get('tgc', None)\n self.time_column = self.params_base.get('time_column', None)\n self.nan_value = np.mean(y)\n self.cap = np.max(y) * 1.5 # TODO Don't like this we should compute a cap from average yearly growth\n self.prior = np.mean(y)\n\n if self.time_column is None:\n self.time_column = self.tgc[0]\n\n # Get the logger if it exists\n logger = None\n if self.context and self.context.experiment_id:\n logger = make_experiment_logger(\n experiment_id=self.context.experiment_id,\n tmp_dir=self.context.tmp_dir,\n experiment_tmp_dir=self.context.experiment_tmp_dir\n )\n\n loggerinfo(logger, \"Start Fitting Prophet Model with params : {}\".format(self.params))\n\n try:\n # Add value of prophet_top_n in recipe_dict variable inside of config.toml file\n # eg1: recipe_dict=\"{'prophet_top_n': 200}\"\n # eg2: recipe_dict=\"{'prophet_top_n':10}\"\n self.top_n = config.recipe_dict['prophet_top_n']\n except KeyError:\n self.top_n = 50\n\n loggerinfo(logger, f\"Prophet will use {self.top_n} groups as well as average target data.\")\n\n # Get temporary folders for multi process communication\n tmp_folder = self._create_tmp_folder(logger)\n\n n_jobs = self._get_n_jobs(logger, **kwargs)\n\n # Reduce X to TGC\n tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))\n X = X[:, self.tgc].to_pandas()\n\n # Fill NaNs or None\n X = X.replace([None, np.nan], 0)\n\n # Add target, Label encoder is only used for Classif. 
which we don't support...\n if self.labels is not None:\n y = LabelEncoder().fit(self.labels).transform(y)\n X['y'] = np.array(y)\n\n self.nan_value = X['y'].mean()\n\n # Change date feature name to match Prophet requirements\n X.rename(columns={self.time_column: \"ds\"}, inplace=True)\n\n # Create a general scale now that will be used for unknown groups at prediction time\n # Can we do smarter than that ?\n general_scaler = MinMaxScaler().fit(X[['y', 'ds']].groupby('ds').median().values)\n\n # Go through groups and standard scale them\n if len(tgc_wo_time) > 0:\n X_groups = X.groupby(tgc_wo_time)\n else:\n X_groups = [([None], X)]\n\n scalers = {}\n scaled_ys = []\n\n print('Number of groups : ', len(X_groups))\n for g in tgc_wo_time:\n print(f'Number of groups in {g} groups : {X[g].unique().shape}')\n\n for key, X_grp in X_groups:\n # Create dict key to store the min max scaler\n grp_hash = self.get_hash(key)\n # Scale target for current group\n scalers[grp_hash] = MinMaxScaler()\n y_skl = scalers[grp_hash].fit_transform(X_grp[['y']].values)\n # Put back in a DataFrame to keep track of original index\n y_skl_df = pd.DataFrame(y_skl, columns=['y'])\n\n y_skl_df.index = X_grp.index\n scaled_ys.append(y_skl_df)\n\n # Set target back in original frame but keep original\n X['y_orig'] = X['y']\n X['y'] = pd.concat(tuple(scaled_ys), axis=0)\n\n # Now Average groups\n X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index()\n\n # Send that to Prophet\n mod = importlib.import_module('fbprophet')\n Prophet = getattr(mod, \"Prophet\")\n nrows = X[['ds', 'y']].shape[0]\n n_changepoints = max(1, int(nrows * (2 / 3)))\n if n_changepoints < 25:\n model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True,\n n_changepoints=n_changepoints)\n else:\n model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True)\n\n if self.params[\"country_holidays\"] is not None:\n model.add_country_holidays(country_name=self.params[\"country_holidays\"])\n if self.params[\"monthly_seasonality\"]:\n model.add_seasonality(name='monthly', period=30.5, fourier_order=self.params[\"monthly_seasonality\"])\n if self.params[\"quarterly_seasonality\"]:\n model.add_seasonality(name='quarterly', period=92, fourier_order=self.params[\"quarterly_seasonality\"])\n\n with suppress_stdout_stderr():\n model.fit(X[['ds', 'y']])\n\n top_groups = None\n if len(tgc_wo_time) > 0:\n if self.top_n > 0:\n top_n_grp = X.groupby(tgc_wo_time).size().sort_values().reset_index()[tgc_wo_time].iloc[\n -self.top_n:].values\n top_groups = [\n '_'.join(map(str, key))\n for key in top_n_grp\n ]\n\n grp_models = {}\n priors = {}\n if top_groups:\n # Prepare for multi processing\n num_tasks = len(top_groups)\n\n def processor(out, res):\n out[res[0]] = res[1]\n\n pool_to_use = small_job_pool\n loggerinfo(logger, f\"Prophet will use {n_jobs} workers for fitting.\")\n\n pool = pool_to_use(\n logger=None, processor=processor,\n num_tasks=num_tasks, max_workers=n_jobs\n )\n #\n # Fit 1 FB Prophet model per time group columns\n nb_groups = len(X_groups)\n\n # Put y back to its unscaled value for top groups\n X['y'] = X['y_orig']\n\n for _i_g, (key, X) in enumerate(X_groups):\n # Just log where we are in the fitting process\n if (_i_g + 1) % max(1, nb_groups // 20) == 0:\n loggerinfo(logger, \"FB Prophet : %d%% of groups fitted\" % (100 * (_i_g + 1) // nb_groups))\n\n X_path = os.path.join(tmp_folder, \"fbprophet_X\" + str(uuid.uuid4()))\n X = X.reset_index(drop=True)\n save_obj(X, X_path)\n\n grp_hash = 
self.get_hash(key)\n\n if grp_hash not in top_groups:\n continue\n\n priors[grp_hash] = X['y'].mean()\n\n args = (X_path, grp_hash, tmp_folder, self.params, self.cap)\n kwargs = {}\n pool.submit_tryget(None, MyParallelProphetTransformer_fit_async,\n args=args, kwargs=kwargs, out=grp_models)\n pool.finish()\n\n for k, v in grp_models.items():\n grp_models[k] = load_obj(v) if v is not None else None\n remove(v)\n\n self._clean_tmp_folder(logger, tmp_folder)\n\n self.set_model_properties(\n model={\n 'avg': model,\n 'group': grp_models,\n 'priors': priors,\n 'topgroups': top_groups,\n 'skl': scalers,\n 'gen_scaler': general_scaler\n },\n features=self.tgc, # Prophet uses time and timegroups\n importances=np.ones(len(self.tgc)),\n iterations=-1 # Does not have iterations\n )\n\n return None\n\n @staticmethod\n def _transform_async(model_path, X_path, nan_value, tmp_folder):\n \"\"\"\n Predicts target for a particular time group\n :param model_path: path to the stored model\n :param X_path: Path to the data used to fit the FB Prophet model\n :param nan_value: Value of target prior, used when no fitted model has been found\n :return: self\n \"\"\"\n model = load_obj(model_path)\n XX_path = os.path.join(tmp_folder, \"fbprophet_XXt\" + str(uuid.uuid4()))\n X = load_obj(X_path)\n # Facebook Prophet returns the predictions ordered by time\n # So we should keep track of the time order for each group so that\n # predictions are ordered the same as the imput frame\n # Keep track of the order\n order = np.argsort(pd.to_datetime(X[\"ds\"]))\n if model is not None:\n # Run prophet\n yhat = model.predict(X)['yhat'].values\n XX = pd.DataFrame(yhat, columns=['yhat'])\n else:\n XX = pd.DataFrame(np.full((X.shape[0], 1), nan_value), columns=['yhat']) # invalid models\n XX.index = X.index[order]\n assert XX.shape[1] == 1\n save_obj(XX, XX_path)\n remove(model_path) # indicates success, no longer need\n remove(X_path) # indicates success, no longer need\n return XX_path\n\n def predict(self, X: dt.Frame, **kwargs):\n \"\"\"\n Uses fitted models (1 per time group) to predict the target\n :param X: Datatable Frame containing the features\n :return: FB Prophet predictions\n \"\"\"\n # Get the logger if it exists\n logger = None\n if self.context and self.context.experiment_id:\n logger = make_experiment_logger(\n experiment_id=self.context.experiment_id,\n tmp_dir=self.context.tmp_dir,\n experiment_tmp_dir=self.context.experiment_tmp_dir\n )\n\n if self.tgc is None or not all([x in X.names for x in self.tgc]):\n loggerdebug(logger, \"Return 0 predictions\")\n return np.ones(X.shape[0]) * self.nan_value\n\n models, _, _, _ = self.get_model_properties()\n\n model = models['avg']\n grp_models = models['group']\n priors = models['priors']\n top_groups = models['topgroups']\n scalers = models['skl']\n general_scaler = models['gen_scaler']\n\n tmp_folder = self._create_tmp_folder(logger)\n\n n_jobs = self._get_n_jobs(logger, **kwargs)\n\n # Reduce X to TGC\n tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))\n X = X[:, self.tgc].to_pandas()\n\n # Fill NaNs or None\n X = X.replace([None, np.nan], 0)\n\n # Change date feature name to match Prophet requirements\n X.rename(columns={self.time_column: \"ds\"}, inplace=True)\n\n if self.params[\"growth\"] == \"logistic\":\n X[\"cap\"] = self.cap\n\n # Predict y using unique dates\n X_time = X[['ds']].groupby('ds').first().reset_index()\n with suppress_stdout_stderr():\n y_avg = model.predict(X_time)[['ds', 'yhat']]\n\n # Prophet transforms the date column to datetime 
so we need to transfrom that to merge back\n X_time.sort_values('ds', inplace=True)\n X_time['yhat'] = y_avg['yhat']\n X_time.sort_index(inplace=True)\n\n # Merge back into original frame on 'ds'\n # pd.merge wipes the index ... so keep it to provide it again\n indices = X.index\n X = pd.merge(\n left=X,\n right=X_time[['ds', 'yhat']],\n on='ds',\n how='left'\n )\n X.index = indices\n\n # Go through groups and recover the scaled target for knowed groups\n if len(tgc_wo_time) > 0:\n X_groups = X.groupby(tgc_wo_time)\n else:\n X_groups = [([None], X)]\n\n inverted_ys = []\n for key, X_grp in X_groups:\n grp_hash = self.get_hash(key)\n\n # Scale target for current group\n if grp_hash in scalers.keys():\n inverted_y = scalers[grp_hash].inverse_transform(X_grp[['yhat']])\n else:\n inverted_y = general_scaler.inverse_transform(X_grp[['yhat']])\n\n # Put back in a DataFrame to keep track of original index\n inverted_df = pd.DataFrame(inverted_y, columns=['yhat'])\n inverted_df.index = X_grp.index\n inverted_ys.append(inverted_df)\n\n XX_general = pd.concat(tuple(inverted_ys), axis=0).sort_index()\n\n if top_groups:\n # Go though the groups and predict only top\n XX_paths = []\n model_paths = []\n\n def processor(out, res):\n out.append(res)\n\n num_tasks = len(top_groups)\n pool_to_use = small_job_pool\n pool = pool_to_use(logger=None, processor=processor, num_tasks=num_tasks, max_workers=n_jobs)\n\n nb_groups = len(X_groups)\n for _i_g, (key, X_grp) in enumerate(X_groups):\n\n # Just log where we are in the fitting process\n if (_i_g + 1) % max(1, nb_groups // 20) == 0:\n loggerinfo(logger, \"FB Prophet : %d%% of groups predicted\" % (100 * (_i_g + 1) // nb_groups))\n\n # Create dict key to store the min max scaler\n grp_hash = self.get_hash(key)\n X_path = os.path.join(tmp_folder, \"fbprophet_Xt\" + str(uuid.uuid4()))\n\n if grp_hash not in top_groups:\n XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) # unseen groups\n XX.index = X_grp.index\n save_obj(XX, X_path)\n XX_paths.append(X_path)\n continue\n\n if grp_models[grp_hash] is None:\n XX = pd.DataFrame(np.full((X_grp.shape[0], 1), np.nan), columns=['yhat']) # unseen groups\n XX.index = X_grp.index\n save_obj(XX, X_path)\n XX_paths.append(X_path)\n continue\n\n model = grp_models[grp_hash]\n model_path = os.path.join(tmp_folder, \"fbprophet_modelt\" + str(uuid.uuid4()))\n save_obj(model, model_path)\n save_obj(X_grp, X_path)\n model_paths.append(model_path)\n\n args = (model_path, X_path, priors[grp_hash], tmp_folder)\n kwargs = {}\n pool.submit_tryget(None, MyParallelProphetTransformer_transform_async, args=args, kwargs=kwargs,\n out=XX_paths)\n\n pool.finish()\n XX_top_groups = pd.concat((load_obj(XX_path) for XX_path in XX_paths), axis=0).sort_index()\n for p in XX_paths + model_paths:\n remove(p)\n\n self._clean_tmp_folder(logger, tmp_folder)\n\n features_df = pd.DataFrame()\n features_df['GrpAvg'] = XX_general['yhat']\n\n if top_groups:\n features_df[f'_Top{self.top_n}Grp'] = XX_top_groups['yhat']\n features_df.loc[\n features_df[f'_Top{self.top_n}Grp'].notnull(), 'GrpAvg'\n ] = features_df.loc[\n features_df[f'_Top{self.top_n}Grp'].notnull(), f'_Top{self.top_n}Grp'\n ]\n\n # Models have to return a numpy array\n return features_df['GrpAvg'].values\n"
] | [
[
"numpy.random.choice",
"numpy.isnan",
"sklearn.preprocessing.OneHotEncoder",
"pandas.DataFrame",
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"numpy.isin"
],
[
"numpy.isnan",
"numpy.asarray",
"scipy.stats.boxcox",
"numpy.nanmin",
"numpy.array"
],
[
"pandas.merge",
"pandas.to_datetime",
"numpy.random.seed",
"numpy.random.choice",
"pandas.DataFrame",
"numpy.setdiff1d",
"numpy.full",
"numpy.max",
"numpy.ones",
"sklearn.preprocessing.LabelEncoder",
"numpy.mean",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
AdrianMastronardi/pandas | [
"67045903306ac4a1cab108177e92df30d99912b4",
"67045903306ac4a1cab108177e92df30d99912b4",
"67045903306ac4a1cab108177e92df30d99912b4",
"67045903306ac4a1cab108177e92df30d99912b4",
"67045903306ac4a1cab108177e92df30d99912b4"
] | [
"pandas/tests/arrays/integer/conftest.py",
"pandas/tests/frame/methods/test_replace.py",
"pandas/tests/arrays/test_array.py",
"pandas/tests/frame/methods/test_quantile.py",
"pandas/tests/exchange/conftest.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas.core.arrays.integer import (\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n)\n\n\[email protected](\n params=[\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n ]\n)\ndef dtype(request):\n \"\"\"Parametrized fixture returning integer 'dtype'\"\"\"\n return request.param()\n\n\[email protected]\ndef data(dtype):\n \"\"\"\n Fixture returning 'data' array with valid and missing values according to\n parametrized integer 'dtype'.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n return pd.array(\n list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100],\n dtype=dtype,\n )\n\n\[email protected]\ndef data_missing(dtype):\n \"\"\"\n Fixture returning array with exactly one NaN and one valid integer,\n according to parametrized integer 'dtype'.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n return pd.array([np.nan, 1], dtype=dtype)\n\n\[email protected](params=[\"data\", \"data_missing\"])\ndef all_data(request, data, data_missing):\n \"\"\"Parametrized fixture returning 'data' or 'data_missing' integer arrays.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n if request.param == \"data\":\n return data\n elif request.param == \"data_missing\":\n return data_missing\n",
"from __future__ import annotations\n\nfrom datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import np_version_under1p20\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\[email protected]\ndef mix_ab() -> dict[str, list[int | str]]:\n return {\"a\": list(range(4)), \"b\": list(\"ab..\")}\n\n\[email protected]\ndef mix_abc() -> dict[str, list[float | str]]:\n return {\"a\": list(range(4)), \"b\": list(\"ab..\"), \"c\": [\"a\", \"b\", np.nan, \"d\"]}\n\n\nclass TestDataFrameReplace:\n def test_replace_inplace(self, datetime_frame, float_string_frame):\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n\n tsframe = datetime_frame.copy()\n return_value = tsframe.replace(np.nan, 0, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))\n\n # mixed type\n mf = float_string_frame\n mf.iloc[5:20, mf.columns.get_loc(\"foo\")] = np.nan\n mf.iloc[-10:, mf.columns.get_loc(\"A\")] = np.nan\n\n result = float_string_frame.replace(np.nan, 0)\n expected = float_string_frame.fillna(value=0)\n tm.assert_frame_equal(result, expected)\n\n tsframe = datetime_frame.copy()\n return_value = tsframe.replace([np.nan], [0], inplace=True)\n assert return_value is None\n tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))\n\n @pytest.mark.parametrize(\n \"to_replace,values,expected\",\n [\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n (\n [r\"\\s*\\.\\s*\", r\"e|f|g\"],\n [np.nan, \"crap\"],\n {\n \"a\": [\"a\", \"b\", np.nan, np.nan],\n \"b\": [\"crap\"] * 3 + [\"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n },\n ),\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n (\n [r\"\\s*(\\.)\\s*\", r\"(e|f|g)\"],\n [r\"\\1\\1\", r\"\\1_crap\"],\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"e_crap\", \"f_crap\", \"g_crap\", \"h\"],\n \"c\": [\"h\", \"e_crap\", \"l\", \"o\"],\n },\n ),\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n (\n [r\"\\s*(\\.)\\s*\", r\"e\"],\n [r\"\\1\\1\", r\"crap\"],\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"crap\", \"f\", \"g\", \"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n },\n ),\n ],\n )\n @pytest.mark.parametrize(\"inplace\", [True, False])\n @pytest.mark.parametrize(\"use_value_regex_args\", [True, False])\n def test_regex_replace_list_obj(\n self, to_replace, values, expected, inplace, use_value_regex_args\n ):\n df = DataFrame({\"a\": list(\"ab..\"), \"b\": list(\"efgh\"), \"c\": list(\"helo\")})\n\n if use_value_regex_args:\n result = df.replace(value=values, regex=to_replace, inplace=inplace)\n else:\n result = df.replace(to_replace, values, regex=True, inplace=inplace)\n\n if inplace:\n assert result is None\n result = df\n\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_list_mixed(self, mix_ab):\n # mixed frame to make sure this doesn't break things\n dfmix = DataFrame(mix_ab)\n\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r\"\\s*\\.\\s*\", r\"a\"]\n values = [np.nan, \"crap\"]\n mix2 = {\"a\": list(range(4)), \"b\": list(\"ab..\"), \"c\": list(\"halo\")}\n dfmix2 = DataFrame(mix2)\n res = dfmix2.replace(to_replace_res, values, regex=True)\n expec = DataFrame(\n {\n \"a\": mix2[\"a\"],\n \"b\": [\"crap\", \"b\", np.nan, np.nan],\n \"c\": 
[\"h\", \"crap\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"(a|b)\"]\n values = [r\"\\1\\1\", r\"\\1_crap\"]\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"a_crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.replace(regex=to_replace_res, value=values)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed_inplace(self, mix_ab):\n dfmix = DataFrame(mix_ab)\n # the same inplace\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r\"\\s*\\.\\s*\", r\"a\"]\n values = [np.nan, \"crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b\", np.nan, np.nan]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"(a|b)\"]\n values = [r\"\\1\\1\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"a_crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(regex=to_replace_res, value=values, inplace=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n def test_regex_replace_dict_mixed(self, mix_abc):\n dfmix = DataFrame(mix_abc)\n\n # dicts\n # single dict {re1: v1}, search the whole frame\n # need test for this...\n\n # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole\n # frame\n res = dfmix.replace({\"b\": r\"\\s*\\.\\s*\"}, {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": r\"\\s*\\.\\s*\"}, {\"b\": np.nan}, inplace=True, regex=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n # list of dicts {re1: re11, 
re2: re12, ..., reN: re1N}, search the\n # whole frame\n res = dfmix.replace({\"b\": r\"\\s*(\\.)\\s*\"}, {\"b\": r\"\\1ty\"}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": r\"\\s*(\\.)\\s*\"}, {\"b\": r\"\\1ty\"}, inplace=True, regex=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", \".ty\", \".ty\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n res = dfmix.replace(regex={\"b\": r\"\\s*(\\.)\\s*\"}, value={\"b\": r\"\\1ty\"})\n res2 = dfmix.copy()\n return_value = res2.replace(\n regex={\"b\": r\"\\s*(\\.)\\s*\"}, value={\"b\": r\"\\1ty\"}, inplace=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", \".ty\", \".ty\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n # scalar -> dict\n # to_replace regex, {value: value}\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [np.nan, \"b\", \".\", \".\"], \"c\": mix_abc[\"c\"]}\n )\n res = dfmix.replace(\"a\", {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\"a\", {\"b\": np.nan}, regex=True, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n res = dfmix.replace(\"a\", {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(regex=\"a\", value={\"b\": np.nan}, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [np.nan, \"b\", \".\", \".\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n def test_regex_replace_dict_nested(self, mix_abc):\n # nested dicts will not work until this is implemented for Series\n dfmix = DataFrame(mix_abc)\n res = dfmix.replace({\"b\": {r\"\\s*\\.\\s*\": np.nan}}, regex=True)\n res2 = dfmix.copy()\n res4 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": {r\"\\s*\\.\\s*\": np.nan}}, inplace=True, regex=True\n )\n assert return_value is None\n res3 = dfmix.replace(regex={\"b\": {r\"\\s*\\.\\s*\": np.nan}})\n return_value = res4.replace(regex={\"b\": {r\"\\s*\\.\\s*\": np.nan}}, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n tm.assert_frame_equal(res4, expec)\n\n def test_regex_replace_dict_nested_non_first_character(self, any_string_dtype):\n # GH 25259\n dtype = any_string_dtype\n df = DataFrame({\"first\": [\"abc\", \"bca\", \"cab\"]}, dtype=dtype)\n expected = DataFrame({\"first\": [\".bc\", \"bc.\", \"c.b\"]}, dtype=dtype)\n result = df.replace({\"a\": \".\"}, regex=True)\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_dict_nested_gh4115(self):\n df = DataFrame({\"Type\": [\"Q\", \"T\", \"Q\", \"Q\", \"T\"], \"tmp\": 2})\n expected = DataFrame({\"Type\": [0, 1, 0, 0, 1], \"tmp\": 2})\n result = df.replace({\"Type\": {\"Q\": 0, \"T\": 1}})\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_list_to_scalar(self, mix_abc):\n df = DataFrame(mix_abc)\n expec = DataFrame(\n {\n \"a\": mix_abc[\"a\"],\n \"b\": np.array([np.nan] * 4),\n \"c\": [np.nan, np.nan, np.nan, \"d\"],\n }\n )\n res = df.replace([r\"\\s*\\.\\s*\", \"a|b\"], np.nan, regex=True)\n res2 = df.copy()\n res3 = 
df.copy()\n return_value = res2.replace(\n [r\"\\s*\\.\\s*\", \"a|b\"], np.nan, regex=True, inplace=True\n )\n assert return_value is None\n return_value = res3.replace(\n regex=[r\"\\s*\\.\\s*\", \"a|b\"], value=np.nan, inplace=True\n )\n assert return_value is None\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_str_to_numeric(self, mix_abc):\n # what happens when you try to replace a numeric value with a regex?\n df = DataFrame(mix_abc)\n res = df.replace(r\"\\s*\\.\\s*\", 0, regex=True)\n res2 = df.copy()\n return_value = res2.replace(r\"\\s*\\.\\s*\", 0, inplace=True, regex=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=r\"\\s*\\.\\s*\", value=0, inplace=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", 0, 0], \"c\": mix_abc[\"c\"]})\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_regex_list_to_numeric(self, mix_abc):\n df = DataFrame(mix_abc)\n res = df.replace([r\"\\s*\\.\\s*\", \"b\"], 0, regex=True)\n res2 = df.copy()\n return_value = res2.replace([r\"\\s*\\.\\s*\", \"b\"], 0, regex=True, inplace=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=[r\"\\s*\\.\\s*\", \"b\"], value=0, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", 0, 0, 0], \"c\": [\"a\", 0, np.nan, \"d\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_series_of_regexes(self, mix_abc):\n df = DataFrame(mix_abc)\n s1 = Series({\"b\": r\"\\s*\\.\\s*\"})\n s2 = Series({\"b\": np.nan})\n res = df.replace(s1, s2, regex=True)\n res2 = df.copy()\n return_value = res2.replace(s1, s2, inplace=True, regex=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=s1, value=s2, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_numeric_to_object_conversion(self, mix_abc):\n df = DataFrame(mix_abc)\n expec = DataFrame({\"a\": [\"a\", 1, 2, 3], \"b\": mix_abc[\"b\"], \"c\": mix_abc[\"c\"]})\n res = df.replace(0, \"a\")\n tm.assert_frame_equal(res, expec)\n assert res.a.dtype == np.object_\n\n @pytest.mark.parametrize(\n \"to_replace\", [{\"\": np.nan, \",\": \"\"}, {\",\": \"\", \"\": np.nan}]\n )\n def test_joint_simple_replace_and_regex_replace(self, to_replace):\n # GH-39338\n df = DataFrame(\n {\n \"col1\": [\"1,000\", \"a\", \"3\"],\n \"col2\": [\"a\", \"\", \"b\"],\n \"col3\": [\"a\", \"b\", \"c\"],\n }\n )\n result = df.replace(regex=to_replace)\n expected = DataFrame(\n {\n \"col1\": [\"1000\", \"a\", \"3\"],\n \"col2\": [\"a\", np.nan, \"b\"],\n \"col3\": [\"a\", \"b\", \"c\"],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"metachar\", [\"[]\", \"()\", r\"\\d\", r\"\\w\", r\"\\s\"])\n def test_replace_regex_metachar(self, metachar):\n df = DataFrame({\"a\": [metachar, \"else\"]})\n result = df.replace({\"a\": {metachar: \"paren\"}})\n expected = DataFrame({\"a\": [\"paren\", \"else\"]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n 
\"data,to_replace,expected\",\n [\n ([\"xax\", \"xbx\"], {\"a\": \"c\", \"b\": \"d\"}, [\"xcx\", \"xdx\"]),\n ([\"d\", \"\", \"\"], {r\"^\\s*$\": pd.NA}, [\"d\", pd.NA, pd.NA]),\n ],\n )\n def test_regex_replace_string_types(\n self, data, to_replace, expected, frame_or_series, any_string_dtype\n ):\n # GH-41333, GH-35977\n dtype = any_string_dtype\n obj = frame_or_series(data, dtype=dtype)\n result = obj.replace(to_replace, regex=True)\n expected = frame_or_series(expected, dtype=dtype)\n\n tm.assert_equal(result, expected)\n\n def test_replace(self, datetime_frame):\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n\n zero_filled = datetime_frame.replace(np.nan, -1e8)\n tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))\n tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame)\n\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n datetime_frame[\"B\"][:5] = -1e8\n\n # empty\n df = DataFrame(index=[\"a\", \"b\"])\n tm.assert_frame_equal(df, df.replace(5, 7))\n\n # GH 11698\n # test for mixed data types.\n df = DataFrame(\n [(\"-\", pd.to_datetime(\"20150101\")), (\"a\", pd.to_datetime(\"20150102\"))]\n )\n df1 = df.replace(\"-\", np.nan)\n expected_df = DataFrame(\n [(np.nan, pd.to_datetime(\"20150101\")), (\"a\", pd.to_datetime(\"20150102\"))]\n )\n tm.assert_frame_equal(df1, expected_df)\n\n def test_replace_list(self):\n obj = {\"a\": list(\"ab..\"), \"b\": list(\"efgh\"), \"c\": list(\"helo\")}\n dfobj = DataFrame(obj)\n\n # lists of regexes and values\n # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]\n to_replace_res = [r\".\", r\"e\"]\n values = [np.nan, \"crap\"]\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame(\n {\n \"a\": [\"a\", \"b\", np.nan, np.nan],\n \"b\": [\"crap\", \"f\", \"g\", \"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]\n to_replace_res = [r\".\", r\"f\"]\n values = [r\"..\", r\"crap\"]\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame(\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"e\", \"crap\", \"g\", \"h\"],\n \"c\": [\"h\", \"e\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n def test_replace_with_empty_list(self, frame_or_series):\n # GH 21977\n ser = Series([[\"a\", \"b\"], [], np.nan, [1]])\n obj = DataFrame({\"col\": ser})\n obj = tm.get_obj(obj, frame_or_series)\n expected = obj\n result = obj.replace([], np.nan)\n tm.assert_equal(result, expected)\n\n # GH 19266\n msg = (\n \"NumPy boolean array indexing assignment cannot assign {size} \"\n \"input values to the 1 output values where the mask is true\"\n )\n with pytest.raises(ValueError, match=msg.format(size=0)):\n obj.replace({np.nan: []})\n with pytest.raises(ValueError, match=msg.format(size=2)):\n obj.replace({np.nan: [\"dummy\", \"alt\"]})\n\n def test_replace_series_dict(self):\n # from GH 3064\n df = DataFrame({\"zero\": {\"a\": 0.0, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 0}})\n result = df.replace(0, {\"zero\": 0.5, \"one\": 1.0})\n expected = DataFrame({\"zero\": {\"a\": 0.5, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 1.0}})\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(0, df.mean())\n tm.assert_frame_equal(result, expected)\n\n # series to series/dict\n df = DataFrame({\"zero\": {\"a\": 0.0, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 0}})\n s = Series({\"zero\": 0.0, \"one\": 2.0})\n result = df.replace(s, {\"zero\": 0.5, \"one\": 1.0})\n 
expected = DataFrame({\"zero\": {\"a\": 0.5, \"b\": 1}, \"one\": {\"a\": 1.0, \"b\": 0.0}})\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(s, df.mean())\n tm.assert_frame_equal(result, expected)\n\n def test_replace_convert(self):\n # gh 3907\n df = DataFrame([[\"foo\", \"bar\", \"bah\"], [\"bar\", \"foo\", \"bah\"]])\n m = {\"foo\": 1, \"bar\": 2, \"bah\": 3}\n rep = df.replace(m)\n expec = Series([np.int64] * 3)\n res = rep.dtypes\n tm.assert_series_equal(expec, res)\n\n def test_replace_mixed(self, float_string_frame):\n mf = float_string_frame\n mf.iloc[5:20, mf.columns.get_loc(\"foo\")] = np.nan\n mf.iloc[-10:, mf.columns.get_loc(\"A\")] = np.nan\n\n result = float_string_frame.replace(np.nan, -18)\n expected = float_string_frame.fillna(value=-18)\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame)\n\n result = float_string_frame.replace(np.nan, -1e8)\n expected = float_string_frame.fillna(value=-1e8)\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame)\n\n def test_replace_mixed_int_block_upcasting(self):\n\n # int block upcasting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0.5, 1], dtype=\"float64\"),\n }\n )\n result = df.replace(0, 0.5)\n tm.assert_frame_equal(result, expected)\n\n return_value = df.replace(0, 0.5, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(df, expected)\n\n def test_replace_mixed_int_block_splitting(self):\n\n # int block splitting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n \"C\": Series([1, 2], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0.5, 1], dtype=\"float64\"),\n \"C\": Series([1, 2], dtype=\"int64\"),\n }\n )\n result = df.replace(0, 0.5)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_mixed2(self):\n\n # to object block upcasting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1, \"foo\"], dtype=\"object\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n result = df.replace(2, \"foo\")\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n {\n \"A\": Series([\"foo\", \"bar\"], dtype=\"object\"),\n \"B\": Series([0, \"foo\"], dtype=\"object\"),\n }\n )\n result = df.replace([1, 2], [\"foo\", \"bar\"])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_mixed3(self):\n # test case from\n df = DataFrame(\n {\"A\": Series([3, 0], dtype=\"int64\"), \"B\": Series([0, 3], dtype=\"int64\")}\n )\n result = df.replace(3, df.mean().to_dict())\n expected = df.copy().astype(\"float64\")\n m = df.mean()\n expected.iloc[0, 0] = m[0]\n expected.iloc[1, 1] = m[1]\n tm.assert_frame_equal(result, expected)\n\n def test_replace_nullable_int_with_string_doesnt_cast(self):\n # GH#25438 don't cast df['a'] to float64\n df = DataFrame({\"a\": [1, 2, 3, np.nan], \"b\": [\"some\", \"strings\", \"here\", \"he\"]})\n df[\"a\"] = df[\"a\"].astype(\"Int64\")\n\n res = df.replace(\"\", np.nan)\n tm.assert_series_equal(res[\"a\"], df[\"a\"])\n\n @pytest.mark.parametrize(\"dtype\", [\"boolean\", \"Int64\", \"Float64\"])\n def 
test_replace_with_nullable_column(self, dtype):\n # GH-44499\n nullable_ser = Series([1, 0, 1], dtype=dtype)\n df = DataFrame({\"A\": [\"A\", \"B\", \"x\"], \"B\": nullable_ser})\n result = df.replace(\"x\", \"X\")\n expected = DataFrame({\"A\": [\"A\", \"B\", \"X\"], \"B\": nullable_ser})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_simple_nested_dict(self):\n df = DataFrame({\"col\": range(1, 5)})\n expected = DataFrame({\"col\": [\"a\", 2, 3, \"b\"]})\n\n result = df.replace({\"col\": {1: \"a\", 4: \"b\"}})\n tm.assert_frame_equal(expected, result)\n\n # in this case, should be the same as the not nested version\n result = df.replace({1: \"a\", 4: \"b\"})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_simple_nested_dict_with_nonexistent_value(self):\n df = DataFrame({\"col\": range(1, 5)})\n expected = DataFrame({\"col\": [\"a\", 2, 3, \"b\"]})\n\n result = df.replace({-1: \"-\", 1: \"a\", 4: \"b\"})\n tm.assert_frame_equal(expected, result)\n\n result = df.replace({\"col\": {-1: \"-\", 1: \"a\", 4: \"b\"}})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_NA_with_None(self):\n # gh-45601\n df = DataFrame({\"value\": [42, None]}).astype({\"value\": \"Int64\"})\n result = df.replace({pd.NA: None})\n expected = DataFrame({\"value\": [42, None]}, dtype=object)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_NAT_with_None(self):\n # gh-45836\n df = DataFrame([pd.NaT, pd.NaT])\n result = df.replace({pd.NaT: None, np.NaN: None})\n expected = DataFrame([None, None])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_None_keeps_categorical(self):\n # gh-46634\n cat_series = Series([\"b\", \"b\", \"b\", \"d\"], dtype=\"category\")\n df = DataFrame(\n {\n \"id\": Series([5, 4, 3, 2], dtype=\"float64\"),\n \"col\": cat_series,\n }\n )\n result = df.replace({3: None})\n\n expected = DataFrame(\n {\n \"id\": Series([5.0, 4.0, None, 2.0], dtype=\"object\"),\n \"col\": cat_series,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_replace_value_is_none(self, datetime_frame):\n orig_value = datetime_frame.iloc[0, 0]\n orig2 = datetime_frame.iloc[1, 0]\n\n datetime_frame.iloc[0, 0] = np.nan\n datetime_frame.iloc[1, 0] = 1\n\n result = datetime_frame.replace(to_replace={np.nan: 0})\n expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T\n tm.assert_frame_equal(result, expected)\n\n result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8})\n tsframe = datetime_frame.copy()\n tsframe.iloc[0, 0] = 0\n tsframe.iloc[1, 0] = -1e8\n expected = tsframe\n tm.assert_frame_equal(expected, result)\n datetime_frame.iloc[0, 0] = orig_value\n datetime_frame.iloc[1, 0] = orig2\n\n def test_replace_for_new_dtypes(self, datetime_frame):\n\n # dtypes\n tsframe = datetime_frame.copy().astype(np.float32)\n tsframe[\"A\"][:5] = np.nan\n tsframe[\"A\"][-5:] = np.nan\n\n zero_filled = tsframe.replace(np.nan, -1e8)\n tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8))\n tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe)\n\n tsframe[\"A\"][:5] = np.nan\n tsframe[\"A\"][-5:] = np.nan\n tsframe[\"B\"][:5] = -1e8\n\n b = tsframe[\"B\"]\n b[b == -1e8] = np.nan\n tsframe[\"B\"] = b\n result = tsframe.fillna(method=\"bfill\")\n tm.assert_frame_equal(result, tsframe.fillna(method=\"bfill\"))\n\n @pytest.mark.parametrize(\n \"frame, to_replace, value, expected\",\n [\n (DataFrame({\"ints\": [1, 2, 3]}), 1, 0, DataFrame({\"ints\": [0, 2, 3]})),\n (\n DataFrame({\"ints\": [1, 2, 3]}, dtype=np.int32),\n 1,\n 0,\n 
DataFrame({\"ints\": [0, 2, 3]}, dtype=np.int32),\n ),\n (\n DataFrame({\"ints\": [1, 2, 3]}, dtype=np.int16),\n 1,\n 0,\n DataFrame({\"ints\": [0, 2, 3]}, dtype=np.int16),\n ),\n (\n DataFrame({\"bools\": [True, False, True]}),\n False,\n True,\n DataFrame({\"bools\": [True, True, True]}),\n ),\n (\n DataFrame({\"complex\": [1j, 2j, 3j]}),\n 1j,\n 0,\n DataFrame({\"complex\": [0j, 2j, 3j]}),\n ),\n (\n DataFrame(\n {\n \"datetime64\": Index(\n [\n datetime(2018, 5, 28),\n datetime(2018, 7, 28),\n datetime(2018, 5, 28),\n ]\n )\n }\n ),\n datetime(2018, 5, 28),\n datetime(2018, 7, 28),\n DataFrame({\"datetime64\": Index([datetime(2018, 7, 28)] * 3)}),\n ),\n # GH 20380\n (\n DataFrame({\"dt\": [datetime(3017, 12, 20)], \"str\": [\"foo\"]}),\n \"foo\",\n \"bar\",\n DataFrame({\"dt\": [datetime(3017, 12, 20)], \"str\": [\"bar\"]}),\n ),\n # GH 36782\n (\n DataFrame({\"dt\": [datetime(2920, 10, 1)]}),\n datetime(2920, 10, 1),\n datetime(2020, 10, 1),\n DataFrame({\"dt\": [datetime(2020, 10, 1)]}),\n ),\n (\n DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [0, np.nan, 2],\n }\n ),\n Timestamp(\"20130102\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n ),\n ),\n # GH 35376\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1.0,\n 5,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1,\n 5,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1.0,\n 5.0,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1,\n 5.0,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n ],\n )\n def test_replace_dtypes(self, frame, to_replace, value, expected):\n result = getattr(frame, \"replace\")(to_replace, value)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_input_formats_listlike(self):\n # both dicts\n to_rep = {\"A\": np.nan, \"B\": 0, \"C\": \"\"}\n values = {\"A\": 0, \"B\": -1, \"C\": \"missing\"}\n df = DataFrame(\n {\"A\": [np.nan, 0, np.inf], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n filled = df.replace(to_rep, values)\n expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n result = df.replace([0, 2, 5], [5, 2, 0])\n expected = DataFrame(\n {\"A\": [np.nan, 5, np.inf], \"B\": [5, 2, 0], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n tm.assert_frame_equal(result, expected)\n\n # scalar to dict\n values = {\"A\": 0, \"B\": -1, \"C\": \"missing\"}\n df = DataFrame(\n {\"A\": [np.nan, 0, np.nan], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n filled = df.replace(np.nan, values)\n expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n # list to list\n to_rep = [np.nan, 0, \"\"]\n values = [-2, -1, \"missing\"]\n result = df.replace(to_rep, values)\n expected = df.copy()\n for i in range(len(to_rep)):\n return_value = expected.replace(to_rep[i], values[i], inplace=True)\n assert return_value is None\n tm.assert_frame_equal(result, expected)\n\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n df.replace(to_rep, values[1:])\n\n def test_replace_input_formats_scalar(self):\n df = DataFrame(\n {\"A\": [np.nan, 0, np.inf], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n\n # dict to scalar\n to_rep = {\"A\": np.nan, \"B\": 0, \"C\": \"\"}\n filled = df.replace(to_rep, 0)\n expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n msg = \"value argument must be scalar, dict, or Series\"\n with pytest.raises(TypeError, match=msg):\n df.replace(to_rep, [np.nan, 0, \"\"])\n\n # list to scalar\n to_rep = [np.nan, 0, \"\"]\n result = df.replace(to_rep, -1)\n expected = df.copy()\n for i in range(len(to_rep)):\n return_value = expected.replace(to_rep[i], -1, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(result, expected)\n\n def test_replace_limit(self):\n # TODO\n pass\n\n def test_replace_dict_no_regex(self):\n answer = Series(\n {\n 0: \"Strongly Agree\",\n 1: \"Agree\",\n 2: \"Neutral\",\n 3: \"Disagree\",\n 4: \"Strongly Disagree\",\n }\n )\n weights = {\n \"Agree\": 4,\n \"Disagree\": 2,\n \"Neutral\": 3,\n \"Strongly Agree\": 5,\n \"Strongly Disagree\": 1,\n }\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_series_no_regex(self):\n answer = Series(\n {\n 0: \"Strongly Agree\",\n 1: \"Agree\",\n 2: \"Neutral\",\n 3: \"Disagree\",\n 4: \"Strongly Disagree\",\n }\n )\n weights = Series(\n {\n \"Agree\": 4,\n \"Disagree\": 2,\n \"Neutral\": 3,\n \"Strongly Agree\": 5,\n \"Strongly Disagree\": 1,\n }\n )\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_dict_tuple_list_ordering_remains_the_same(self):\n df = DataFrame({\"A\": [np.nan, 1]})\n res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})\n res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])\n res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])\n\n expected = DataFrame({\"A\": [0, -1e8]})\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res2, res3)\n tm.assert_frame_equal(res3, expected)\n\n def test_replace_doesnt_replace_without_regex(self):\n df = DataFrame(\n {\n \"fol\": [1, 2, 2, 3],\n \"T_opp\": [\"0\", \"vr\", \"0\", \"0\"],\n \"T_Dir\": [\"0\", \"0\", \"0\", \"bt\"],\n \"T_Enh\": [\"vo\", \"0\", \"0\", \"0\"],\n }\n )\n res = df.replace({r\"\\D\": 1})\n tm.assert_frame_equal(df, res)\n\n def test_replace_bool_with_string(self):\n df = DataFrame({\"a\": [True, False], \"b\": list(\"ab\")})\n result = df.replace(True, \"a\")\n expected = DataFrame({\"a\": [\"a\", False], \"b\": df.b})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_pure_bool_with_string_no_op(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(\"asdf\", \"fdsa\")\n tm.assert_frame_equal(df, result)\n\n def test_replace_bool_with_bool(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(False, True)\n expected = DataFrame(np.ones((2, 2), dtype=bool))\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_dict_with_bool_keys(self):\n df = DataFrame({0: [True, False], 1: [False, True]})\n result = df.replace({\"asdf\": \"asdb\", True: \"yes\"})\n expected = DataFrame({0: [\"yes\", False], 1: [False, \"yes\"]})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_dict_strings_vs_ints(self):\n # 
GH#34789\n df = DataFrame({\"Y0\": [1, 2], \"Y1\": [3, 4]})\n result = df.replace({\"replace_string\": \"test\"})\n\n tm.assert_frame_equal(result, df)\n\n result = df[\"Y0\"].replace({\"replace_string\": \"test\"})\n tm.assert_series_equal(result, df[\"Y0\"])\n\n def test_replace_truthy(self):\n df = DataFrame({\"a\": [True, True]})\n r = df.replace([np.inf, -np.inf], np.nan)\n e = df\n tm.assert_frame_equal(r, e)\n\n def test_nested_dict_overlapping_keys_replace_int(self):\n # GH 27660 keep behaviour consistent for simple dictionary and\n # nested dictionary replacement\n df = DataFrame({\"a\": list(range(1, 5))})\n\n result = df.replace({\"a\": dict(zip(range(1, 5), range(2, 6)))})\n expected = df.replace(dict(zip(range(1, 5), range(2, 6))))\n tm.assert_frame_equal(result, expected)\n\n def test_nested_dict_overlapping_keys_replace_str(self):\n # GH 27660\n a = np.arange(1, 5)\n astr = a.astype(str)\n bstr = np.arange(2, 6).astype(str)\n df = DataFrame({\"a\": astr})\n result = df.replace(dict(zip(astr, bstr)))\n expected = df.replace({\"a\": dict(zip(astr, bstr))})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_swapping_bug(self):\n df = DataFrame({\"a\": [True, False, True]})\n res = df.replace({\"a\": {True: \"Y\", False: \"N\"}})\n expect = DataFrame({\"a\": [\"Y\", \"N\", \"Y\"]})\n tm.assert_frame_equal(res, expect)\n\n df = DataFrame({\"a\": [0, 1, 0]})\n res = df.replace({\"a\": {0: \"Y\", 1: \"N\"}})\n expect = DataFrame({\"a\": [\"Y\", \"N\", \"Y\"]})\n tm.assert_frame_equal(res, expect)\n\n def test_replace_period(self):\n d = {\n \"fname\": {\n \"out_augmented_AUG_2011.json\": pd.Period(year=2011, month=8, freq=\"M\"),\n \"out_augmented_JAN_2011.json\": pd.Period(year=2011, month=1, freq=\"M\"),\n \"out_augmented_MAY_2012.json\": pd.Period(year=2012, month=5, freq=\"M\"),\n \"out_augmented_SUBSIDY_WEEK.json\": pd.Period(\n year=2011, month=4, freq=\"M\"\n ),\n \"out_augmented_AUG_2012.json\": pd.Period(year=2012, month=8, freq=\"M\"),\n \"out_augmented_MAY_2011.json\": pd.Period(year=2011, month=5, freq=\"M\"),\n \"out_augmented_SEP_2013.json\": pd.Period(year=2013, month=9, freq=\"M\"),\n }\n }\n\n df = DataFrame(\n [\n \"out_augmented_AUG_2012.json\",\n \"out_augmented_SEP_2013.json\",\n \"out_augmented_SUBSIDY_WEEK.json\",\n \"out_augmented_MAY_2012.json\",\n \"out_augmented_MAY_2011.json\",\n \"out_augmented_AUG_2011.json\",\n \"out_augmented_JAN_2011.json\",\n ],\n columns=[\"fname\"],\n )\n assert set(df.fname.values) == set(d[\"fname\"].keys())\n\n expected = DataFrame({\"fname\": [d[\"fname\"][k] for k in df.fname.values]})\n assert expected.dtypes[0] == \"Period[M]\"\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetime(self):\n d = {\n \"fname\": {\n \"out_augmented_AUG_2011.json\": Timestamp(\"2011-08\"),\n \"out_augmented_JAN_2011.json\": Timestamp(\"2011-01\"),\n \"out_augmented_MAY_2012.json\": Timestamp(\"2012-05\"),\n \"out_augmented_SUBSIDY_WEEK.json\": Timestamp(\"2011-04\"),\n \"out_augmented_AUG_2012.json\": Timestamp(\"2012-08\"),\n \"out_augmented_MAY_2011.json\": Timestamp(\"2011-05\"),\n \"out_augmented_SEP_2013.json\": Timestamp(\"2013-09\"),\n }\n }\n\n df = DataFrame(\n [\n \"out_augmented_AUG_2012.json\",\n \"out_augmented_SEP_2013.json\",\n \"out_augmented_SUBSIDY_WEEK.json\",\n \"out_augmented_MAY_2012.json\",\n \"out_augmented_MAY_2011.json\",\n \"out_augmented_AUG_2011.json\",\n \"out_augmented_JAN_2011.json\",\n ],\n columns=[\"fname\"],\n )\n assert set(df.fname.values) == 
set(d[\"fname\"].keys())\n expected = DataFrame({\"fname\": [d[\"fname\"][k] for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetimetz(self):\n\n # GH 11326\n # behaving poorly when presented with a datetime64[ns, tz]\n df = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [0, np.nan, 2],\n }\n )\n result = df.replace(np.nan, 1)\n expected = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": Series([0, 1, 2], dtype=\"float64\"),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.fillna(1)\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(0, np.nan)\n expected = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [np.nan, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(\n Timestamp(\"20130102\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n )\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1, 0] = np.nan\n result = result.replace({\"A\": pd.NaT}, Timestamp(\"20130104\", tz=\"US/Eastern\"))\n tm.assert_frame_equal(result, expected)\n\n # coerce to object\n result = df.copy()\n result.iloc[1, 0] = np.nan\n with tm.assert_produces_warning(FutureWarning, match=\"mismatched timezone\"):\n result = result.replace(\n {\"A\": pd.NaT}, Timestamp(\"20130104\", tz=\"US/Pacific\")\n )\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Pacific\"),\n # once deprecation is enforced\n # Timestamp(\"20130104\", tz=\"US/Pacific\").tz_convert(\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1, 0] = np.nan\n result = result.replace({\"A\": np.nan}, Timestamp(\"20130104\"))\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_empty_dictlike(self, mix_abc):\n # GH 15289\n df = DataFrame(mix_abc)\n tm.assert_frame_equal(df, df.replace({}))\n tm.assert_frame_equal(df, df.replace(Series([], dtype=object)))\n\n tm.assert_frame_equal(df, df.replace({\"b\": {}}))\n tm.assert_frame_equal(df, df.replace(Series({\"b\": {}})))\n\n @pytest.mark.parametrize(\n \"to_replace, method, expected\",\n [\n (0, \"bfill\", {\"A\": [1, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]}),\n (\n np.nan,\n \"bfill\",\n {\"A\": [0, 1, 2], \"B\": [5.0, 7.0, 7.0], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\"d\", \"ffill\", {\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]}),\n (\n [0, 2],\n \"bfill\",\n {\"A\": [1, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n [1, 2],\n \"pad\",\n {\"A\": [0, 0, 0], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n (1, 2),\n \"bfill\",\n {\"A\": [0, 2, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n [\"b\", \"c\"],\n \"ffill\",\n {\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"a\", \"a\"]},\n 
),\n ],\n )\n def test_replace_method(self, to_replace, method, expected):\n # GH 19632\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]})\n\n result = df.replace(to_replace=to_replace, value=None, method=method)\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"replace_dict, final_data\",\n [({\"a\": 1, \"b\": 1}, [[3, 3], [2, 2]]), ({\"a\": 1, \"b\": 2}, [[3, 1], [2, 3]])],\n )\n def test_categorical_replace_with_dict(self, replace_dict, final_data):\n # GH 26988\n df = DataFrame([[1, 1], [2, 2]], columns=[\"a\", \"b\"], dtype=\"category\")\n\n final_data = np.array(final_data)\n\n a = pd.Categorical(final_data[:, 0], categories=[3, 2])\n\n ex_cat = [3, 2] if replace_dict[\"b\"] == 1 else [1, 3]\n b = pd.Categorical(final_data[:, 1], categories=ex_cat)\n\n expected = DataFrame({\"a\": a, \"b\": b})\n result = df.replace(replace_dict, 3)\n tm.assert_frame_equal(result, expected)\n msg = (\n r\"Attributes of DataFrame.iloc\\[:, 0\\] \\(column name=\\\"a\\\"\\) are \"\n \"different\"\n )\n with pytest.raises(AssertionError, match=msg):\n # ensure non-inplace call does not affect original\n tm.assert_frame_equal(df, expected)\n return_value = df.replace(replace_dict, 3, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\n \"df, to_replace, exp\",\n [\n (\n {\"col1\": [1, 2, 3], \"col2\": [4, 5, 6]},\n {4: 5, 5: 6, 6: 7},\n {\"col1\": [1, 2, 3], \"col2\": [5, 6, 7]},\n ),\n (\n {\"col1\": [1, 2, 3], \"col2\": [\"4\", \"5\", \"6\"]},\n {\"4\": \"5\", \"5\": \"6\", \"6\": \"7\"},\n {\"col1\": [1, 2, 3], \"col2\": [\"5\", \"6\", \"7\"]},\n ),\n ],\n )\n def test_replace_commutative(self, df, to_replace, exp):\n # GH 16051\n # DataFrame.replace() overwrites when values are non-numeric\n # also added to data frame whilst issue was for series\n\n df = DataFrame(df)\n\n expected = DataFrame(exp)\n result = df.replace(to_replace)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"replacer\",\n [\n Timestamp(\"20170827\"),\n np.int8(1),\n np.int16(1),\n np.float32(1),\n np.float64(1),\n ],\n )\n def test_replace_replacer_dtype(self, request, replacer):\n # GH26632\n if np.isscalar(replacer) and replacer.dtype.itemsize < 8:\n request.node.add_marker(\n pytest.mark.xfail(\n np_version_under1p20, reason=\"np.putmask doesn't coerce dtype\"\n )\n )\n df = DataFrame([\"a\"])\n result = df.replace({\"a\": replacer, \"b\": replacer})\n expected = DataFrame([replacer])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_after_convert_dtypes(self):\n # GH31517\n df = DataFrame({\"grp\": [1, 2, 3, 4, 5]}, dtype=\"Int64\")\n result = df.replace(1, 10)\n expected = DataFrame({\"grp\": [10, 2, 3, 4, 5]}, dtype=\"Int64\")\n tm.assert_frame_equal(result, expected)\n\n def test_replace_invalid_to_replace(self):\n # GH 18634\n # API: replace() should raise an exception if invalid argument is given\n df = DataFrame({\"one\": [\"a\", \"b \", \"c\"], \"two\": [\"d \", \"e \", \"f \"]})\n msg = (\n r\"Expecting 'to_replace' to be either a scalar, array-like, \"\n r\"dict or None, got invalid type.*\"\n )\n with pytest.raises(TypeError, match=msg):\n df.replace(lambda x: x.strip())\n\n @pytest.mark.parametrize(\"dtype\", [\"float\", \"float64\", \"int64\", \"Int64\", \"boolean\"])\n @pytest.mark.parametrize(\"value\", [np.nan, pd.NA])\n def test_replace_no_replacement_dtypes(self, dtype, value):\n # 
https://github.com/pandas-dev/pandas/issues/32988\n df = DataFrame(np.eye(2), dtype=dtype)\n result = df.replace(to_replace=[None, -np.inf, np.inf], value=value)\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize(\"replacement\", [np.nan, 5])\n def test_replace_with_duplicate_columns(self, replacement):\n # GH 24798\n result = DataFrame({\"A\": [1, 2, 3], \"A1\": [4, 5, 6], \"B\": [7, 8, 9]})\n result.columns = list(\"AAB\")\n\n expected = DataFrame(\n {\"A\": [1, 2, 3], \"A1\": [4, 5, 6], \"B\": [replacement, 8, 9]}\n )\n expected.columns = list(\"AAB\")\n\n result[\"B\"] = result[\"B\"].replace(7, replacement)\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"value\", [pd.Period(\"2020-01\"), pd.Interval(0, 5)])\n def test_replace_ea_ignore_float(self, frame_or_series, value):\n # GH#34871\n obj = DataFrame({\"Per\": [value] * 3})\n obj = tm.get_obj(obj, frame_or_series)\n\n expected = obj.copy()\n result = obj.replace(1.0, 0.0)\n tm.assert_equal(expected, result)\n\n def test_replace_value_category_type(self):\n \"\"\"\n Test for #23305: to ensure category dtypes are maintained\n after replace with direct values\n \"\"\"\n\n # create input data\n input_dict = {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [\"a\", \"b\", \"c\", \"d\"],\n \"col3\": [1.5, 2.5, 3.5, 4.5],\n \"col4\": [\"cat1\", \"cat2\", \"cat3\", \"cat4\"],\n \"col5\": [\"obj1\", \"obj2\", \"obj3\", \"obj4\"],\n }\n # explicitly cast columns as category and order them\n input_df = DataFrame(data=input_dict).astype(\n {\"col2\": \"category\", \"col4\": \"category\"}\n )\n input_df[\"col2\"] = input_df[\"col2\"].cat.reorder_categories(\n [\"a\", \"b\", \"c\", \"d\"], ordered=True\n )\n input_df[\"col4\"] = input_df[\"col4\"].cat.reorder_categories(\n [\"cat1\", \"cat2\", \"cat3\", \"cat4\"], ordered=True\n )\n\n # create expected dataframe\n expected_dict = {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [\"a\", \"b\", \"c\", \"z\"],\n \"col3\": [1.5, 2.5, 3.5, 4.5],\n \"col4\": [\"cat1\", \"catX\", \"cat3\", \"cat4\"],\n \"col5\": [\"obj9\", \"obj2\", \"obj3\", \"obj4\"],\n }\n # explicitly cast columns as category and order them\n expected = DataFrame(data=expected_dict).astype(\n {\"col2\": \"category\", \"col4\": \"category\"}\n )\n expected[\"col2\"] = expected[\"col2\"].cat.reorder_categories(\n [\"a\", \"b\", \"c\", \"z\"], ordered=True\n )\n expected[\"col4\"] = expected[\"col4\"].cat.reorder_categories(\n [\"cat1\", \"catX\", \"cat3\", \"cat4\"], ordered=True\n )\n\n # replace values in input dataframe\n input_df = input_df.replace(\"d\", \"z\")\n input_df = input_df.replace(\"obj1\", \"obj9\")\n result = input_df.replace(\"cat2\", \"catX\")\n\n tm.assert_frame_equal(result, expected)\n\n def test_replace_dict_category_type(self):\n \"\"\"\n Test to ensure category dtypes are maintained\n after replace with dict values\n \"\"\"\n # GH#35268, GH#44940\n\n # create input dataframe\n input_dict = {\"col1\": [\"a\"], \"col2\": [\"obj1\"], \"col3\": [\"cat1\"]}\n # explicitly cast columns as category\n input_df = DataFrame(data=input_dict).astype(\n {\"col1\": \"category\", \"col2\": \"category\", \"col3\": \"category\"}\n )\n\n # create expected dataframe\n expected_dict = {\"col1\": [\"z\"], \"col2\": [\"obj9\"], \"col3\": [\"catX\"]}\n # explicitly cast columns as category\n expected = DataFrame(data=expected_dict).astype(\n {\"col1\": \"category\", \"col2\": \"category\", \"col3\": \"category\"}\n )\n\n # replace values in input dataframe using a dict\n result = input_df.replace({\"a\": \"z\", 
\"obj1\": \"obj9\", \"cat1\": \"catX\"})\n\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_compiled_regex(self):\n # https://github.com/pandas-dev/pandas/issues/35680\n df = DataFrame([\"a\", \"b\", \"c\"])\n regex = re.compile(\"^a$\")\n result = df.replace({regex: \"z\"}, regex=True)\n expected = DataFrame([\"z\", \"b\", \"c\"])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_intervals(self):\n # https://github.com/pandas-dev/pandas/issues/35931\n df = DataFrame({\"a\": [pd.Interval(0, 1), pd.Interval(0, 1)]})\n result = df.replace({\"a\": {pd.Interval(0, 1): \"x\"}})\n expected = DataFrame({\"a\": [\"x\", \"x\"]})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_unicode(self):\n # GH: 16784\n columns_values_map = {\"positive\": {\"正面\": 1, \"中立\": 1, \"负面\": 0}}\n df1 = DataFrame({\"positive\": np.ones(3)})\n result = df1.replace(columns_values_map)\n expected = DataFrame({\"positive\": np.ones(3)})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_bytes(self, frame_or_series):\n # GH#38900\n obj = frame_or_series([\"o\"]).astype(\"|S\")\n expected = obj.copy()\n obj = obj.replace({None: np.nan})\n tm.assert_equal(obj, expected)\n\n @pytest.mark.parametrize(\n \"data, to_replace, value, expected\",\n [\n ([1], [1.0], [0], [0]),\n ([1], [1], [0], [0]),\n ([1.0], [1.0], [0], [0.0]),\n ([1.0], [1], [0], [0.0]),\n ],\n )\n @pytest.mark.parametrize(\"box\", [list, tuple, np.array])\n def test_replace_list_with_mixed_type(\n self, data, to_replace, value, expected, box, frame_or_series\n ):\n # GH#40371\n obj = frame_or_series(data)\n expected = frame_or_series(expected)\n result = obj.replace(box(to_replace), value)\n tm.assert_equal(result, expected)\n\n\nclass TestDataFrameReplaceRegex:\n @pytest.mark.parametrize(\n \"data\",\n [\n {\"a\": list(\"ab..\"), \"b\": list(\"efgh\")},\n {\"a\": list(\"ab..\"), \"b\": list(range(4))},\n ],\n )\n @pytest.mark.parametrize(\n \"to_replace,value\", [(r\"\\s*\\.\\s*\", np.nan), (r\"\\s*(\\.)\\s*\", r\"\\1\\1\\1\")]\n )\n @pytest.mark.parametrize(\"compile_regex\", [True, False])\n @pytest.mark.parametrize(\"regex_kwarg\", [True, False])\n @pytest.mark.parametrize(\"inplace\", [True, False])\n def test_regex_replace_scalar(\n self, data, to_replace, value, compile_regex, regex_kwarg, inplace\n ):\n df = DataFrame(data)\n expected = df.copy()\n\n if compile_regex:\n to_replace = re.compile(to_replace)\n\n if regex_kwarg:\n regex = to_replace\n to_replace = None\n else:\n regex = True\n\n result = df.replace(to_replace, value, inplace=inplace, regex=regex)\n\n if inplace:\n assert result is None\n result = df\n\n if value is np.nan:\n expected_replace_val = np.nan\n else:\n expected_replace_val = \"...\"\n\n expected.loc[expected[\"a\"] == \".\", \"a\"] = expected_replace_val\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"regex\", [False, True])\n def test_replace_regex_dtype_frame(self, regex):\n # GH-48644\n df1 = DataFrame({\"A\": [\"0\"], \"B\": [\"0\"]})\n expected_df1 = DataFrame({\"A\": [1], \"B\": [1]})\n result_df1 = df1.replace(to_replace=\"0\", value=1, regex=regex)\n tm.assert_frame_equal(result_df1, expected_df1)\n\n df2 = DataFrame({\"A\": [\"0\"], \"B\": [\"1\"]})\n expected_df2 = DataFrame({\"A\": [1], \"B\": [\"1\"]})\n result_df2 = df2.replace(to_replace=\"0\", value=1, regex=regex)\n tm.assert_frame_equal(result_df2, expected_df2)\n\n def test_replace_with_value_also_being_replaced(self):\n # GH46306\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [1, 0, 
2]})\n result = df.replace({0: 1, 1: np.nan})\n expected = DataFrame({\"A\": [1, np.nan, 2], \"B\": [np.nan, 1, 2]})\n tm.assert_frame_equal(result, expected)\n",
"import datetime\nimport decimal\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas.core.dtypes.base import _registry as registry\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.extensions import register_extension_dtype\nfrom pandas.arrays import (\n BooleanArray,\n DatetimeArray,\n FloatingArray,\n IntegerArray,\n IntervalArray,\n SparseArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays import (\n PandasArray,\n period_array,\n)\nfrom pandas.tests.extension.decimal import (\n DecimalArray,\n DecimalDtype,\n to_decimal,\n)\n\n\[email protected](\n \"data, dtype, expected\",\n [\n # Basic NumPy defaults.\n ([1, 2], None, IntegerArray._from_sequence([1, 2])),\n ([1, 2], object, PandasArray(np.array([1, 2], dtype=object))),\n (\n [1, 2],\n np.dtype(\"float32\"),\n PandasArray(np.array([1.0, 2.0], dtype=np.dtype(\"float32\"))),\n ),\n (np.array([1, 2], dtype=\"int64\"), None, IntegerArray._from_sequence([1, 2])),\n (\n np.array([1.0, 2.0], dtype=\"float64\"),\n None,\n FloatingArray._from_sequence([1.0, 2.0]),\n ),\n # String alias passes through to NumPy\n ([1, 2], \"float32\", PandasArray(np.array([1, 2], dtype=\"float32\"))),\n ([1, 2], \"int64\", PandasArray(np.array([1, 2], dtype=np.int64))),\n # GH#44715 FloatingArray does not support float16, so fall back to PandasArray\n (\n np.array([1, 2], dtype=np.float16),\n None,\n PandasArray(np.array([1, 2], dtype=np.float16)),\n ),\n # idempotency with e.g. pd.array(pd.array([1, 2], dtype=\"int64\"))\n (\n PandasArray(np.array([1, 2], dtype=np.int32)),\n None,\n PandasArray(np.array([1, 2], dtype=np.int32)),\n ),\n # Period alias\n (\n [pd.Period(\"2000\", \"D\"), pd.Period(\"2001\", \"D\")],\n \"Period[D]\",\n period_array([\"2000\", \"2001\"], freq=\"D\"),\n ),\n # Period dtype\n (\n [pd.Period(\"2000\", \"D\")],\n pd.PeriodDtype(\"D\"),\n period_array([\"2000\"], freq=\"D\"),\n ),\n # Datetime (naive)\n (\n [1, 2],\n np.dtype(\"datetime64[ns]\"),\n DatetimeArray._from_sequence(np.array([1, 2], dtype=\"datetime64[ns]\")),\n ),\n (\n np.array([1, 2], dtype=\"datetime64[ns]\"),\n None,\n DatetimeArray._from_sequence(np.array([1, 2], dtype=\"datetime64[ns]\")),\n ),\n (\n pd.DatetimeIndex([\"2000\", \"2001\"]),\n np.dtype(\"datetime64[ns]\"),\n DatetimeArray._from_sequence([\"2000\", \"2001\"]),\n ),\n (\n pd.DatetimeIndex([\"2000\", \"2001\"]),\n None,\n DatetimeArray._from_sequence([\"2000\", \"2001\"]),\n ),\n (\n [\"2000\", \"2001\"],\n np.dtype(\"datetime64[ns]\"),\n DatetimeArray._from_sequence([\"2000\", \"2001\"]),\n ),\n # Datetime (tz-aware)\n (\n [\"2000\", \"2001\"],\n pd.DatetimeTZDtype(tz=\"CET\"),\n DatetimeArray._from_sequence(\n [\"2000\", \"2001\"], dtype=pd.DatetimeTZDtype(tz=\"CET\")\n ),\n ),\n # Timedelta\n (\n [\"1H\", \"2H\"],\n np.dtype(\"timedelta64[ns]\"),\n TimedeltaArray._from_sequence([\"1H\", \"2H\"]),\n ),\n (\n pd.TimedeltaIndex([\"1H\", \"2H\"]),\n np.dtype(\"timedelta64[ns]\"),\n TimedeltaArray._from_sequence([\"1H\", \"2H\"]),\n ),\n (\n pd.TimedeltaIndex([\"1H\", \"2H\"]),\n None,\n TimedeltaArray._from_sequence([\"1H\", \"2H\"]),\n ),\n # Category\n ([\"a\", \"b\"], \"category\", pd.Categorical([\"a\", \"b\"])),\n (\n [\"a\", \"b\"],\n pd.CategoricalDtype(None, ordered=True),\n pd.Categorical([\"a\", \"b\"], ordered=True),\n ),\n # Interval\n (\n [pd.Interval(1, 2, \"right\"), pd.Interval(3, 4, \"right\")],\n \"interval\",\n IntervalArray.from_tuples([(1, 2), (3, 4)], \"right\"),\n ),\n # Sparse\n ([0, 1], \"Sparse[int64]\", SparseArray([0, 1], dtype=\"int64\")),\n # 
IntegerNA\n ([1, None], \"Int16\", pd.array([1, None], dtype=\"Int16\")),\n (pd.Series([1, 2]), None, PandasArray(np.array([1, 2], dtype=np.int64))),\n # String\n (\n [\"a\", None],\n \"string\",\n pd.StringDtype().construct_array_type()._from_sequence([\"a\", None]),\n ),\n (\n [\"a\", None],\n pd.StringDtype(),\n pd.StringDtype().construct_array_type()._from_sequence([\"a\", None]),\n ),\n # Boolean\n ([True, None], \"boolean\", BooleanArray._from_sequence([True, None])),\n ([True, None], pd.BooleanDtype(), BooleanArray._from_sequence([True, None])),\n # Index\n (pd.Index([1, 2]), None, PandasArray(np.array([1, 2], dtype=np.int64))),\n # Series[EA] returns the EA\n (\n pd.Series(pd.Categorical([\"a\", \"b\"], categories=[\"a\", \"b\", \"c\"])),\n None,\n pd.Categorical([\"a\", \"b\"], categories=[\"a\", \"b\", \"c\"]),\n ),\n # \"3rd party\" EAs work\n ([decimal.Decimal(0), decimal.Decimal(1)], \"decimal\", to_decimal([0, 1])),\n # pass an ExtensionArray, but a different dtype\n (\n period_array([\"2000\", \"2001\"], freq=\"D\"),\n \"category\",\n pd.Categorical([pd.Period(\"2000\", \"D\"), pd.Period(\"2001\", \"D\")]),\n ),\n ],\n)\ndef test_array(data, dtype, expected):\n result = pd.array(data, dtype=dtype)\n tm.assert_equal(result, expected)\n\n\ndef test_array_copy():\n a = np.array([1, 2])\n # default is to copy\n b = pd.array(a, dtype=a.dtype)\n assert not tm.shares_memory(a, b)\n\n # copy=True\n b = pd.array(a, dtype=a.dtype, copy=True)\n assert not tm.shares_memory(a, b)\n\n # copy=False\n b = pd.array(a, dtype=a.dtype, copy=False)\n assert tm.shares_memory(a, b)\n\n\ncet = pytz.timezone(\"CET\")\n\n\[email protected](\n \"data, expected\",\n [\n # period\n (\n [pd.Period(\"2000\", \"D\"), pd.Period(\"2001\", \"D\")],\n period_array([\"2000\", \"2001\"], freq=\"D\"),\n ),\n # interval\n (\n [pd.Interval(0, 1, \"right\"), pd.Interval(1, 2, \"right\")],\n IntervalArray.from_breaks([0, 1, 2], \"right\"),\n ),\n # datetime\n (\n [pd.Timestamp(\"2000\"), pd.Timestamp(\"2001\")],\n DatetimeArray._from_sequence([\"2000\", \"2001\"]),\n ),\n (\n [datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)],\n DatetimeArray._from_sequence([\"2000\", \"2001\"]),\n ),\n (\n np.array([1, 2], dtype=\"M8[ns]\"),\n DatetimeArray(np.array([1, 2], dtype=\"M8[ns]\")),\n ),\n (\n np.array([1, 2], dtype=\"M8[us]\"),\n DatetimeArray(np.array([1000, 2000], dtype=\"M8[ns]\")),\n ),\n # datetimetz\n (\n [pd.Timestamp(\"2000\", tz=\"CET\"), pd.Timestamp(\"2001\", tz=\"CET\")],\n DatetimeArray._from_sequence(\n [\"2000\", \"2001\"], dtype=pd.DatetimeTZDtype(tz=\"CET\")\n ),\n ),\n (\n [\n datetime.datetime(2000, 1, 1, tzinfo=cet),\n datetime.datetime(2001, 1, 1, tzinfo=cet),\n ],\n DatetimeArray._from_sequence(\n [\"2000\", \"2001\"], dtype=pd.DatetimeTZDtype(tz=cet)\n ),\n ),\n # timedelta\n (\n [pd.Timedelta(\"1H\"), pd.Timedelta(\"2H\")],\n TimedeltaArray._from_sequence([\"1H\", \"2H\"]),\n ),\n (\n np.array([1, 2], dtype=\"m8[ns]\"),\n TimedeltaArray(np.array([1, 2], dtype=\"m8[ns]\")),\n ),\n (\n np.array([1, 2], dtype=\"m8[us]\"),\n TimedeltaArray(np.array([1000, 2000], dtype=\"m8[ns]\")),\n ),\n # integer\n ([1, 2], IntegerArray._from_sequence([1, 2])),\n ([1, None], IntegerArray._from_sequence([1, None])),\n ([1, pd.NA], IntegerArray._from_sequence([1, pd.NA])),\n ([1, np.nan], IntegerArray._from_sequence([1, np.nan])),\n # float\n ([0.1, 0.2], FloatingArray._from_sequence([0.1, 0.2])),\n ([0.1, None], FloatingArray._from_sequence([0.1, pd.NA])),\n ([0.1, np.nan], 
FloatingArray._from_sequence([0.1, pd.NA])),\n ([0.1, pd.NA], FloatingArray._from_sequence([0.1, pd.NA])),\n # integer-like float\n ([1.0, 2.0], FloatingArray._from_sequence([1.0, 2.0])),\n ([1.0, None], FloatingArray._from_sequence([1.0, pd.NA])),\n ([1.0, np.nan], FloatingArray._from_sequence([1.0, pd.NA])),\n ([1.0, pd.NA], FloatingArray._from_sequence([1.0, pd.NA])),\n # mixed-integer-float\n ([1, 2.0], FloatingArray._from_sequence([1.0, 2.0])),\n ([1, np.nan, 2.0], FloatingArray._from_sequence([1.0, None, 2.0])),\n # string\n (\n [\"a\", \"b\"],\n pd.StringDtype().construct_array_type()._from_sequence([\"a\", \"b\"]),\n ),\n (\n [\"a\", None],\n pd.StringDtype().construct_array_type()._from_sequence([\"a\", None]),\n ),\n # Boolean\n ([True, False], BooleanArray._from_sequence([True, False])),\n ([True, None], BooleanArray._from_sequence([True, None])),\n ],\n)\ndef test_array_inference(data, expected):\n result = pd.array(data)\n tm.assert_equal(result, expected)\n\n\[email protected](\n \"data\",\n [\n # mix of frequencies\n [pd.Period(\"2000\", \"D\"), pd.Period(\"2001\", \"A\")],\n # mix of closed\n [pd.Interval(0, 1, \"left\"), pd.Interval(1, 2, \"right\")],\n # Mix of timezones\n [pd.Timestamp(\"2000\", tz=\"CET\"), pd.Timestamp(\"2000\", tz=\"UTC\")],\n # Mix of tz-aware and tz-naive\n [pd.Timestamp(\"2000\", tz=\"CET\"), pd.Timestamp(\"2000\")],\n np.array([pd.Timestamp(\"2000\"), pd.Timestamp(\"2000\", tz=\"CET\")]),\n ],\n)\ndef test_array_inference_fails(data):\n result = pd.array(data)\n expected = PandasArray(np.array(data, dtype=object))\n tm.assert_extension_array_equal(result, expected)\n\n\[email protected](\"data\", [np.array(0)])\ndef test_nd_raises(data):\n with pytest.raises(ValueError, match=\"PandasArray must be 1-dimensional\"):\n pd.array(data, dtype=\"int64\")\n\n\ndef test_scalar_raises():\n with pytest.raises(ValueError, match=\"Cannot pass scalar '1'\"):\n pd.array(1)\n\n\ndef test_bounds_check():\n # GH21796\n with pytest.raises(\n TypeError, match=r\"cannot safely cast non-equivalent int(32|64) to uint16\"\n ):\n pd.array([-1, 2, 3], dtype=\"UInt16\")\n\n\n# ---------------------------------------------------------------------------\n# A couple dummy classes to ensure that Series and Indexes are unboxed before\n# getting to the EA classes.\n\n\n@register_extension_dtype\nclass DecimalDtype2(DecimalDtype):\n name = \"decimal2\"\n\n @classmethod\n def construct_array_type(cls):\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n return DecimalArray2\n\n\nclass DecimalArray2(DecimalArray):\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n if isinstance(scalars, (pd.Series, pd.Index)):\n raise TypeError(\"scalars should not be of type pd.Series or pd.Index\")\n\n return super()._from_sequence(scalars, dtype=dtype, copy=copy)\n\n\ndef test_array_unboxes(index_or_series):\n box = index_or_series\n\n data = box([decimal.Decimal(\"1\"), decimal.Decimal(\"2\")])\n # make sure it works\n with pytest.raises(\n TypeError, match=\"scalars should not be of type pd.Series or pd.Index\"\n ):\n DecimalArray2._from_sequence(data)\n\n result = pd.array(data, dtype=\"decimal2\")\n expected = DecimalArray2._from_sequence(data.values)\n tm.assert_equal(result, expected)\n\n\[email protected]\ndef registry_without_decimal():\n \"\"\"Fixture yielding 'registry' with no DecimalDtype entries\"\"\"\n idx = registry.dtypes.index(DecimalDtype)\n registry.dtypes.pop(idx)\n yield\n 
registry.dtypes.append(DecimalDtype)\n\n\ndef test_array_not_registered(registry_without_decimal):\n # check we aren't on it\n assert registry.find(\"decimal\") is None\n data = [decimal.Decimal(\"1\"), decimal.Decimal(\"2\")]\n\n result = pd.array(data, dtype=DecimalDtype)\n expected = DecimalArray._from_sequence(data)\n tm.assert_equal(result, expected)\n",
"import numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_percentile_argname\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameQuantile:\n @pytest.mark.parametrize(\n \"non_num_col\",\n [\n pd.date_range(\"2014-01-01\", periods=3, freq=\"m\"),\n [\"a\", \"b\", \"c\"],\n [DataFrame, Series, Timestamp],\n ],\n )\n def test_numeric_only_default_false_warning(self, non_num_col):\n # GH #7308\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]})\n df[\"C\"] = non_num_col\n\n expected = Series(\n [2.0, 3.0],\n index=[\"A\", \"B\"],\n name=0.5,\n )\n with tm.assert_produces_warning(FutureWarning, match=\"numeric_only\"):\n result = df.quantile(0.5)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"df,expected\",\n [\n [\n DataFrame(\n {\n 0: Series(pd.arrays.SparseArray([1, 2])),\n 1: Series(pd.arrays.SparseArray([3, 4])),\n }\n ),\n Series([1.5, 3.5], name=0.5),\n ],\n [\n DataFrame(Series([0.0, None, 1.0, 2.0], dtype=\"Sparse[float]\")),\n Series([1.0], name=0.5),\n ],\n ],\n )\n def test_quantile_sparse(self, df, expected):\n # GH#17198\n # GH#24600\n result = df.quantile()\n\n tm.assert_series_equal(result, expected)\n\n def test_quantile(self, datetime_frame):\n from numpy import percentile\n\n df = datetime_frame\n q = df.quantile(0.1, axis=0, numeric_only=True)\n assert q[\"A\"] == percentile(df[\"A\"], 10)\n tm.assert_index_equal(q.index, df.columns)\n\n q = df.quantile(0.9, axis=1, numeric_only=True)\n assert q[\"2000-01-17\"] == percentile(df.loc[\"2000-01-17\"], 90)\n tm.assert_index_equal(q.index, df.index)\n\n # test degenerate case\n q = DataFrame({\"x\": [], \"y\": []}).quantile(0.1, axis=0, numeric_only=True)\n assert np.isnan(q[\"x\"]) and np.isnan(q[\"y\"])\n\n # non-numeric exclusion\n df = DataFrame({\"col1\": [\"A\", \"A\", \"B\", \"B\"], \"col2\": [1, 2, 3, 4]})\n rs = df.quantile(0.5, numeric_only=True)\n with tm.assert_produces_warning(FutureWarning, match=\"Select only valid\"):\n xp = df.median().rename(0.5)\n tm.assert_series_equal(rs, xp)\n\n # axis\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n result = df.quantile(0.5, axis=1)\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)\n tm.assert_series_equal(result, expected)\n\n result = df.quantile([0.5, 0.75], axis=1)\n expected = DataFrame(\n {1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75]\n )\n tm.assert_frame_equal(result, expected, check_index_type=True)\n\n # We may want to break API in the future to change this\n # so that we exclude non-numeric along the same axis\n # See GH #7312\n df = DataFrame([[1, 2, 3], [\"a\", \"b\", 4]])\n result = df.quantile(0.5, axis=1, numeric_only=True)\n expected = Series([3.0, 4.0], index=[0, 1], name=0.5)\n tm.assert_series_equal(result, expected)\n\n def test_quantile_date_range(self):\n # GH 2460\n\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=\"US/Pacific\")\n ser = Series(dti)\n df = DataFrame(ser)\n\n result = df.quantile(numeric_only=False)\n expected = Series(\n [\"2016-01-02 00:00:00\"], name=0.5, dtype=\"datetime64[ns, US/Pacific]\"\n )\n\n tm.assert_series_equal(result, expected)\n\n def test_quantile_axis_mixed(self):\n\n # mixed on axis=1\n df = DataFrame(\n {\n \"A\": [1, 2, 3],\n \"B\": [2.0, 3.0, 4.0],\n \"C\": pd.date_range(\"20130101\", periods=3),\n \"D\": [\"foo\", \"bar\", \"baz\"],\n }\n )\n result = df.quantile(0.5, axis=1, numeric_only=True)\n expected = 
Series([1.5, 2.5, 3.5], name=0.5)\n tm.assert_series_equal(result, expected)\n\n # must raise\n msg = \"'<' not supported between instances of 'Timestamp' and 'float'\"\n with pytest.raises(TypeError, match=msg):\n df.quantile(0.5, axis=1, numeric_only=False)\n\n def test_quantile_axis_parameter(self):\n # GH 9543/9544\n\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n\n result = df.quantile(0.5, axis=0)\n\n expected = Series([2.0, 3.0], index=[\"A\", \"B\"], name=0.5)\n tm.assert_series_equal(result, expected)\n\n expected = df.quantile(0.5, axis=\"index\")\n tm.assert_series_equal(result, expected)\n\n result = df.quantile(0.5, axis=1)\n\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)\n tm.assert_series_equal(result, expected)\n\n result = df.quantile(0.5, axis=\"columns\")\n tm.assert_series_equal(result, expected)\n\n msg = \"No axis named -1 for object type DataFrame\"\n with pytest.raises(ValueError, match=msg):\n df.quantile(0.1, axis=-1)\n msg = \"No axis named column for object type DataFrame\"\n with pytest.raises(ValueError, match=msg):\n df.quantile(0.1, axis=\"column\")\n\n def test_quantile_interpolation(self):\n # see gh-10174\n\n # interpolation method other than default linear\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n result = df.quantile(0.5, axis=1, interpolation=\"nearest\")\n expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)\n tm.assert_series_equal(result, expected)\n\n # cross-check interpolation=nearest results in original dtype\n exp = np.percentile(\n np.array([[1, 2, 3], [2, 3, 4]]),\n 0.5,\n axis=0,\n **{np_percentile_argname: \"nearest\"},\n )\n expected = Series(exp, index=[1, 2, 3], name=0.5, dtype=\"int64\")\n tm.assert_series_equal(result, expected)\n\n # float\n df = DataFrame({\"A\": [1.0, 2.0, 3.0], \"B\": [2.0, 3.0, 4.0]}, index=[1, 2, 3])\n result = df.quantile(0.5, axis=1, interpolation=\"nearest\")\n expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5)\n tm.assert_series_equal(result, expected)\n exp = np.percentile(\n np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]),\n 0.5,\n axis=0,\n **{np_percentile_argname: \"nearest\"},\n )\n expected = Series(exp, index=[1, 2, 3], name=0.5, dtype=\"float64\")\n tm.assert_series_equal(result, expected)\n\n # axis\n result = df.quantile([0.5, 0.75], axis=1, interpolation=\"lower\")\n expected = DataFrame(\n {1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75]\n )\n tm.assert_frame_equal(result, expected)\n\n # test degenerate case\n df = DataFrame({\"x\": [], \"y\": []})\n q = df.quantile(0.1, axis=0, interpolation=\"higher\")\n assert np.isnan(q[\"x\"]) and np.isnan(q[\"y\"])\n\n # multi\n df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=[\"a\", \"b\", \"c\"])\n result = df.quantile([0.25, 0.5], interpolation=\"midpoint\")\n\n # https://github.com/numpy/numpy/issues/7163\n expected = DataFrame(\n [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],\n index=[0.25, 0.5],\n columns=[\"a\", \"b\", \"c\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_quantile_interpolation_datetime(self, datetime_frame):\n # see gh-10174\n\n # interpolation = linear (default case)\n df = datetime_frame\n q = df.quantile(0.1, axis=0, numeric_only=True, interpolation=\"linear\")\n assert q[\"A\"] == np.percentile(df[\"A\"], 10)\n\n def test_quantile_interpolation_int(self, int_frame):\n # see gh-10174\n\n df = int_frame\n # interpolation = linear (default case)\n q = df.quantile(0.1)\n assert q[\"A\"] == np.percentile(df[\"A\"], 
10)\n\n # test with and without interpolation keyword\n q1 = df.quantile(0.1, axis=0, interpolation=\"linear\")\n assert q1[\"A\"] == np.percentile(df[\"A\"], 10)\n tm.assert_series_equal(q, q1)\n\n def test_quantile_multi(self):\n df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=[\"a\", \"b\", \"c\"])\n result = df.quantile([0.25, 0.5])\n expected = DataFrame(\n [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],\n index=[0.25, 0.5],\n columns=[\"a\", \"b\", \"c\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.quantile([0.25, 0.5], axis=1)\n expected = DataFrame(\n [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=[0, 1, 2]\n )\n\n # empty\n result = DataFrame({\"x\": [], \"y\": []}).quantile([0.1, 0.9], axis=0)\n expected = DataFrame(\n {\"x\": [np.nan, np.nan], \"y\": [np.nan, np.nan]}, index=[0.1, 0.9]\n )\n tm.assert_frame_equal(result, expected)\n\n def test_quantile_datetime(self):\n df = DataFrame({\"a\": pd.to_datetime([\"2010\", \"2011\"]), \"b\": [0, 5]})\n\n # exclude datetime\n result = df.quantile(0.5, numeric_only=True)\n expected = Series([2.5], index=[\"b\"])\n\n # datetime\n result = df.quantile(0.5, numeric_only=False)\n expected = Series(\n [Timestamp(\"2010-07-02 12:00:00\"), 2.5], index=[\"a\", \"b\"], name=0.5\n )\n tm.assert_series_equal(result, expected)\n\n # datetime w/ multi\n result = df.quantile([0.5], numeric_only=False)\n expected = DataFrame(\n [[Timestamp(\"2010-07-02 12:00:00\"), 2.5]], index=[0.5], columns=[\"a\", \"b\"]\n )\n tm.assert_frame_equal(result, expected)\n\n # axis = 1\n df[\"c\"] = pd.to_datetime([\"2011\", \"2012\"])\n result = df[[\"a\", \"c\"]].quantile(0.5, axis=1, numeric_only=False)\n expected = Series(\n [Timestamp(\"2010-07-02 12:00:00\"), Timestamp(\"2011-07-02 12:00:00\")],\n index=[0, 1],\n name=0.5,\n )\n tm.assert_series_equal(result, expected)\n\n result = df[[\"a\", \"c\"]].quantile([0.5], axis=1, numeric_only=False)\n expected = DataFrame(\n [[Timestamp(\"2010-07-02 12:00:00\"), Timestamp(\"2011-07-02 12:00:00\")]],\n index=[0.5],\n columns=[0, 1],\n )\n tm.assert_frame_equal(result, expected)\n\n # empty when numeric_only=True\n result = df[[\"a\", \"c\"]].quantile(0.5, numeric_only=True)\n expected = Series([], index=[], dtype=np.float64, name=0.5)\n tm.assert_series_equal(result, expected)\n\n result = df[[\"a\", \"c\"]].quantile([0.5], numeric_only=True)\n expected = DataFrame(index=[0.5])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n \"datetime64[ns]\",\n \"datetime64[ns, US/Pacific]\",\n \"timedelta64[ns]\",\n \"Period[D]\",\n ],\n )\n def test_quantile_dt64_empty(self, dtype):\n # GH#41544\n df = DataFrame(columns=[\"a\", \"b\"], dtype=dtype)\n\n res = df.quantile(0.5, axis=1, numeric_only=False)\n expected = Series([], index=[], name=0.5, dtype=dtype)\n tm.assert_series_equal(res, expected)\n\n # no columns in result, so no dtype preservation\n res = df.quantile([0.5], axis=1, numeric_only=False)\n expected = DataFrame(index=[0.5])\n tm.assert_frame_equal(res, expected)\n\n def test_quantile_invalid(self, datetime_frame):\n msg = \"percentiles should all be in the interval \\\\[0, 1\\\\]\"\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with pytest.raises(ValueError, match=msg):\n datetime_frame.quantile(invalid)\n\n def test_quantile_box(self):\n df = DataFrame(\n {\n \"A\": [\n Timestamp(\"2011-01-01\"),\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-03\"),\n ],\n \"B\": [\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n 
Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n ],\n \"C\": [\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"3 days\"),\n ],\n }\n )\n\n res = df.quantile(0.5, numeric_only=False)\n\n exp = Series(\n [\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timedelta(\"2 days\"),\n ],\n name=0.5,\n index=[\"A\", \"B\", \"C\"],\n )\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5], numeric_only=False)\n exp = DataFrame(\n [\n [\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timedelta(\"2 days\"),\n ]\n ],\n index=[0.5],\n columns=[\"A\", \"B\", \"C\"],\n )\n tm.assert_frame_equal(res, exp)\n\n # DatetimeLikeBlock may be consolidated and contain NaT in different loc\n df = DataFrame(\n {\n \"A\": [\n Timestamp(\"2011-01-01\"),\n pd.NaT,\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-03\"),\n ],\n \"a\": [\n Timestamp(\"2011-01-01\"),\n Timestamp(\"2011-01-02\"),\n pd.NaT,\n Timestamp(\"2011-01-03\"),\n ],\n \"B\": [\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n ],\n \"b\": [\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n ],\n \"C\": [\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"3 days\"),\n pd.NaT,\n ],\n \"c\": [\n pd.NaT,\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"3 days\"),\n ],\n },\n columns=list(\"AaBbCc\"),\n )\n\n res = df.quantile(0.5, numeric_only=False)\n exp = Series(\n [\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"2 days\"),\n ],\n name=0.5,\n index=list(\"AaBbCc\"),\n )\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5], numeric_only=False)\n exp = DataFrame(\n [\n [\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"2 days\"),\n ]\n ],\n index=[0.5],\n columns=list(\"AaBbCc\"),\n )\n tm.assert_frame_equal(res, exp)\n\n def test_quantile_nan(self):\n\n # GH 14357 - float block where some cols have missing values\n df = DataFrame({\"a\": np.arange(1, 6.0), \"b\": np.arange(1, 6.0)})\n df.iloc[-1, 1] = np.nan\n\n res = df.quantile(0.5)\n exp = Series([3.0, 2.5], index=[\"a\", \"b\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5, 0.75])\n exp = DataFrame({\"a\": [3.0, 4.0], \"b\": [2.5, 3.25]}, index=[0.5, 0.75])\n tm.assert_frame_equal(res, exp)\n\n res = df.quantile(0.5, axis=1)\n exp = Series(np.arange(1.0, 6.0), name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5, 0.75], axis=1)\n exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])\n tm.assert_frame_equal(res, exp)\n\n # full-nan column\n df[\"b\"] = np.nan\n\n res = df.quantile(0.5)\n exp = Series([3.0, np.nan], index=[\"a\", \"b\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5, 0.75])\n exp = DataFrame({\"a\": [3.0, 4.0], \"b\": [np.nan, np.nan]}, index=[0.5, 0.75])\n tm.assert_frame_equal(res, exp)\n\n def test_quantile_nat(self):\n\n # full NaT column\n df = DataFrame({\"a\": [pd.NaT, pd.NaT, pd.NaT]})\n\n res 
= df.quantile(0.5, numeric_only=False)\n exp = Series([pd.NaT], index=[\"a\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5], numeric_only=False)\n exp = DataFrame({\"a\": [pd.NaT]}, index=[0.5])\n tm.assert_frame_equal(res, exp)\n\n # mixed non-null / full null column\n df = DataFrame(\n {\n \"a\": [\n Timestamp(\"2012-01-01\"),\n Timestamp(\"2012-01-02\"),\n Timestamp(\"2012-01-03\"),\n ],\n \"b\": [pd.NaT, pd.NaT, pd.NaT],\n }\n )\n\n res = df.quantile(0.5, numeric_only=False)\n exp = Series([Timestamp(\"2012-01-02\"), pd.NaT], index=[\"a\", \"b\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5], numeric_only=False)\n exp = DataFrame(\n [[Timestamp(\"2012-01-02\"), pd.NaT]], index=[0.5], columns=[\"a\", \"b\"]\n )\n tm.assert_frame_equal(res, exp)\n\n def test_quantile_empty_no_rows_floats(self):\n\n # floats\n df = DataFrame(columns=[\"a\", \"b\"], dtype=\"float64\")\n\n res = df.quantile(0.5)\n exp = Series([np.nan, np.nan], index=[\"a\", \"b\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5])\n exp = DataFrame([[np.nan, np.nan]], columns=[\"a\", \"b\"], index=[0.5])\n tm.assert_frame_equal(res, exp)\n\n res = df.quantile(0.5, axis=1)\n exp = Series([], index=[], dtype=\"float64\", name=0.5)\n tm.assert_series_equal(res, exp)\n\n res = df.quantile([0.5], axis=1)\n exp = DataFrame(columns=[], index=[0.5])\n tm.assert_frame_equal(res, exp)\n\n def test_quantile_empty_no_rows_ints(self):\n # ints\n df = DataFrame(columns=[\"a\", \"b\"], dtype=\"int64\")\n\n res = df.quantile(0.5)\n exp = Series([np.nan, np.nan], index=[\"a\", \"b\"], name=0.5)\n tm.assert_series_equal(res, exp)\n\n def test_quantile_empty_no_rows_dt64(self):\n # datetimes\n df = DataFrame(columns=[\"a\", \"b\"], dtype=\"datetime64[ns]\")\n\n res = df.quantile(0.5, numeric_only=False)\n exp = Series(\n [pd.NaT, pd.NaT], index=[\"a\", \"b\"], dtype=\"datetime64[ns]\", name=0.5\n )\n tm.assert_series_equal(res, exp)\n\n # Mixed dt64/dt64tz\n df[\"a\"] = df[\"a\"].dt.tz_localize(\"US/Central\")\n res = df.quantile(0.5, numeric_only=False)\n exp = exp.astype(object)\n tm.assert_series_equal(res, exp)\n\n # both dt64tz\n df[\"b\"] = df[\"b\"].dt.tz_localize(\"US/Central\")\n res = df.quantile(0.5, numeric_only=False)\n exp = exp.astype(df[\"b\"].dtype)\n tm.assert_series_equal(res, exp)\n\n def test_quantile_empty_no_columns(self):\n # GH#23925 _get_numeric_data may drop all columns\n df = DataFrame(pd.date_range(\"1/1/18\", periods=5))\n df.columns.name = \"captain tightpants\"\n result = df.quantile(0.5, numeric_only=True)\n expected = Series([], index=[], name=0.5, dtype=np.float64)\n expected.index.name = \"captain tightpants\"\n tm.assert_series_equal(result, expected)\n\n result = df.quantile([0.5], numeric_only=True)\n expected = DataFrame([], index=[0.5], columns=[])\n expected.columns.name = \"captain tightpants\"\n tm.assert_frame_equal(result, expected)\n\n def test_quantile_item_cache(self, using_array_manager):\n # previous behavior incorrect retained an invalid _item_cache entry\n df = DataFrame(np.random.randn(4, 3), columns=[\"A\", \"B\", \"C\"])\n df[\"D\"] = df[\"A\"] * 2\n ser = df[\"A\"]\n if not using_array_manager:\n assert len(df._mgr.blocks) == 2\n\n df.quantile(numeric_only=False)\n ser.values[0] = 99\n\n assert df.iloc[0, 0] == df[\"A\"][0]\n\n\nclass TestQuantileExtensionDtype:\n # TODO: tests for axis=1?\n # TODO: empty case?\n\n @pytest.fixture(\n params=[\n pytest.param(\n pd.IntervalIndex.from_breaks(range(10)),\n 
marks=pytest.mark.xfail(reason=\"raises when trying to add Intervals\"),\n ),\n pd.period_range(\"2016-01-01\", periods=9, freq=\"D\"),\n pd.date_range(\"2016-01-01\", periods=9, tz=\"US/Pacific\"),\n pd.timedelta_range(\"1 Day\", periods=9),\n pd.array(np.arange(9), dtype=\"Int64\"),\n pd.array(np.arange(9), dtype=\"Float64\"),\n ],\n ids=lambda x: str(x.dtype),\n )\n def index(self, request):\n # NB: not actually an Index object\n idx = request.param\n idx.name = \"A\"\n return idx\n\n @pytest.fixture\n def obj(self, index, frame_or_series):\n # bc index is not always an Index (yet), we need to re-patch .name\n obj = frame_or_series(index).copy()\n\n if frame_or_series is Series:\n obj.name = \"A\"\n else:\n obj.columns = [\"A\"]\n return obj\n\n def compute_quantile(self, obj, qs):\n if isinstance(obj, Series):\n result = obj.quantile(qs)\n else:\n result = obj.quantile(qs, numeric_only=False)\n return result\n\n def test_quantile_ea(self, obj, index):\n\n # result should be invariant to shuffling\n indexer = np.arange(len(index), dtype=np.intp)\n np.random.shuffle(indexer)\n obj = obj.iloc[indexer]\n\n qs = [0.5, 0, 1]\n result = self.compute_quantile(obj, qs)\n\n exp_dtype = index.dtype\n if index.dtype == \"Int64\":\n # match non-nullable casting behavior\n exp_dtype = \"Float64\"\n\n # expected here assumes len(index) == 9\n expected = Series(\n [index[4], index[0], index[-1]], dtype=exp_dtype, index=qs, name=\"A\"\n )\n expected = type(obj)(expected)\n\n tm.assert_equal(result, expected)\n\n def test_quantile_ea_with_na(self, obj, index):\n\n obj.iloc[0] = index._na_value\n obj.iloc[-1] = index._na_value\n\n # result should be invariant to shuffling\n indexer = np.arange(len(index), dtype=np.intp)\n np.random.shuffle(indexer)\n obj = obj.iloc[indexer]\n\n qs = [0.5, 0, 1]\n result = self.compute_quantile(obj, qs)\n\n # expected here assumes len(index) == 9\n expected = Series(\n [index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name=\"A\"\n )\n expected = type(obj)(expected)\n tm.assert_equal(result, expected)\n\n # TODO(GH#39763): filtering can be removed after GH#39763 is fixed\n @pytest.mark.filterwarnings(\"ignore:Using .astype to convert:FutureWarning\")\n def test_quantile_ea_all_na(self, obj, index):\n obj.iloc[:] = index._na_value\n\n # TODO(ArrayManager): this casting should be unnecessary after GH#39763 is fixed\n obj = obj.astype(index.dtype)\n assert np.all(obj.dtypes == index.dtype)\n\n # result should be invariant to shuffling\n indexer = np.arange(len(index), dtype=np.intp)\n np.random.shuffle(indexer)\n obj = obj.iloc[indexer]\n\n qs = [0.5, 0, 1]\n result = self.compute_quantile(obj, qs)\n\n expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value)\n expected = Series(expected, index=qs, name=\"A\")\n if expected.dtype == \"Int64\":\n expected = expected.astype(\"Float64\")\n expected = type(obj)(expected)\n tm.assert_equal(result, expected)\n\n def test_quantile_ea_scalar(self, obj, index):\n # scalar qs\n\n # result should be invariant to shuffling\n indexer = np.arange(len(index), dtype=np.intp)\n np.random.shuffle(indexer)\n obj = obj.iloc[indexer]\n\n qs = 0.5\n result = self.compute_quantile(obj, qs)\n\n exp_dtype = index.dtype\n if index.dtype == \"Int64\":\n exp_dtype = \"Float64\"\n\n expected = Series({\"A\": index[4]}, dtype=exp_dtype, name=0.5)\n if isinstance(obj, Series):\n expected = expected[\"A\"]\n assert result == expected\n else:\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n 
\"dtype, expected_data, expected_index, axis\",\n [\n [\"float64\", [], [], 1],\n [\"int64\", [], [], 1],\n [\"float64\", [np.nan, np.nan], [\"a\", \"b\"], 0],\n [\"int64\", [np.nan, np.nan], [\"a\", \"b\"], 0],\n ],\n )\n def test_empty_numeric(self, dtype, expected_data, expected_index, axis):\n # GH 14564\n df = DataFrame(columns=[\"a\", \"b\"], dtype=dtype)\n result = df.quantile(0.5, axis=axis)\n expected = Series(\n expected_data, name=0.5, index=Index(expected_index), dtype=\"float64\"\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype, expected_data, expected_index, axis, expected_dtype\",\n [\n [\"datetime64[ns]\", [], [], 1, \"datetime64[ns]\"],\n [\"datetime64[ns]\", [pd.NaT, pd.NaT], [\"a\", \"b\"], 0, \"datetime64[ns]\"],\n ],\n )\n def test_empty_datelike(\n self, dtype, expected_data, expected_index, axis, expected_dtype\n ):\n # GH 14564\n df = DataFrame(columns=[\"a\", \"b\"], dtype=dtype)\n result = df.quantile(0.5, axis=axis, numeric_only=False)\n expected = Series(\n expected_data, name=0.5, index=Index(expected_index), dtype=expected_dtype\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"expected_data, expected_index, axis\",\n [\n [[np.nan, np.nan], range(2), 1],\n [[], [], 0],\n ],\n )\n def test_datelike_numeric_only(self, expected_data, expected_index, axis):\n # GH 14564\n df = DataFrame(\n {\n \"a\": pd.to_datetime([\"2010\", \"2011\"]),\n \"b\": [0, 5],\n \"c\": pd.to_datetime([\"2011\", \"2012\"]),\n }\n )\n result = df[[\"a\", \"c\"]].quantile(0.5, axis=axis, numeric_only=True)\n expected = Series(\n expected_data, name=0.5, index=Index(expected_index), dtype=np.float64\n )\n tm.assert_series_equal(result, expected)\n",
"import pytest\n\nimport pandas as pd\n\n\[email protected](scope=\"package\")\ndef df_from_dict():\n def maker(dct, is_categorical=False):\n df = pd.DataFrame(dct)\n return df.astype(\"category\") if is_categorical else df\n\n return maker\n"
] | [
[
"pandas.array"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal",
"pandas._testing.get_obj",
"numpy.arange",
"numpy.eye",
"numpy.int8",
"numpy.float32",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_produces_warning",
"pandas.Categorical",
"numpy.random.rand",
"pandas.Interval",
"pandas.date_range",
"numpy.array",
"pandas._testing.assert_equal",
"numpy.ones",
"numpy.int16",
"numpy.float64",
"numpy.isscalar",
"pandas.Period",
"pandas.Timestamp"
],
[
"pandas.Series",
"pandas.tests.extension.decimal.to_decimal",
"numpy.dtype",
"pandas.core.dtypes.base._registry.dtypes.append",
"pandas.arrays.FloatingArray._from_sequence",
"pandas.arrays.IntervalArray.from_tuples",
"pandas.CategoricalDtype",
"pandas.arrays.TimedeltaArray._from_sequence",
"pandas.arrays.IntervalArray.from_breaks",
"pandas.StringDtype",
"pandas.Index",
"pandas.core.dtypes.base._registry.dtypes.index",
"pandas._testing.assert_extension_array_equal",
"pandas.DatetimeIndex",
"pandas.PeriodDtype",
"pandas.DatetimeTZDtype",
"pandas._testing.shares_memory",
"pandas.Categorical",
"pandas.arrays.BooleanArray._from_sequence",
"pandas.array",
"pandas.Timedelta",
"pandas.core.dtypes.base._registry.find",
"pandas.arrays.IntegerArray._from_sequence",
"pandas.core.arrays.period_array",
"pandas.Interval",
"pandas.core.dtypes.base._registry.dtypes.pop",
"numpy.array",
"pandas._testing.assert_equal",
"pandas.TimedeltaIndex",
"pandas.arrays.SparseArray",
"pandas.arrays.DatetimeArray._from_sequence",
"pandas.tests.extension.decimal.DecimalArray._from_sequence",
"pandas.BooleanDtype",
"pandas.Period",
"pandas.Timestamp"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame",
"numpy.all",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.arange",
"pandas.Index",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"numpy.isnan",
"pandas.Timedelta",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas._testing.assert_equal",
"pandas.period_range",
"pandas.arrays.SparseArray",
"numpy.random.shuffle",
"numpy.percentile",
"pandas.Timestamp"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gaoxuesong/sonnet | [
"40995a58744bbadc2e875c5c87e744896bdc4249"
] | [
"sonnet/python/modules/layer_norm.py"
] | [
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Layer normalization module for Sonnet.\n\nThis contains the module LayerNorm, which performs layer normalization on\nits inputs.\n\nOriginal paper: https://arxiv.org/abs/1607.06450.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom sonnet.python.modules import base\nfrom sonnet.python.modules import util\n\nimport tensorflow as tf\n\n\nclass LayerNorm(base.AbstractModule):\n \"\"\"Layer normalization module.\n\n Implementation based on:\n https://arxiv.org/abs/1607.06450\n\n This module transforms input x into:\n\n outputs = gamma * (x - mu) / sigma + beta\n\n where mu and sigma are respectively the mean and standard deviation of x.\n Gamma and beta are trainable parameters for scaling and shifting respectively.\n\n \"\"\"\n\n GAMMA = \"gamma\" # Layer norm scaling.\n BETA = \"beta\" # Layer norm bias.\n\n POSSIBLE_KEYS = {GAMMA, BETA}\n\n def __init__(self,\n eps=1e-5,\n initializers=None,\n partitioners=None,\n regularizers=None,\n name=\"layer_norm\"):\n \"\"\"Constructs a LayerNorm module.\n\n Args:\n eps: small epsilon to avoid division by zero variance. Defaults to\n 1e-5 as used in the paper.\n initializers: Dict containing ops to initialize the scale and bias.\n This dictionary may contain any of the keys in POSSIBLE_KEYS.\n partitioners: Optional dict containing partitioners to partition\n the scale and bias. As a default, no partitioners are used. This\n dict may contain any of the keys in POSSIBLE_KEYS.\n regularizers: Optional dict containing regularizers for the scale and\n bias. As a default, no regularizers are used. 
This dict may contain\n any of the keys in POSSIBLE_KEYS.\n name: name of the module.\n\n Raises:\n KeyError: If `initializers`, `partitioners` or `regularizers` contain\n any keys other than `gamma`, `beta`.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n \"\"\"\n super(LayerNorm, self).__init__(name=name)\n\n self._eps = eps\n\n self._initializers = util.check_initializers(initializers,\n self.POSSIBLE_KEYS)\n self._partitioners = util.check_partitioners(partitioners,\n self.POSSIBLE_KEYS)\n self._regularizers = util.check_regularizers(regularizers,\n self.POSSIBLE_KEYS)\n\n def _build(self, inputs):\n \"\"\"Connects the LayerNorm module into the graph.\n\n Args:\n inputs: a Tensor of shape `[batch_size, layer_dim]`.\n\n Returns:\n normalized: layer normalized outputs with same shape as inputs.\n\n Raises:\n base.NotSupportedError: If `inputs` has data type of `tf.float16`.\n \"\"\"\n\n if inputs.dtype == tf.float16:\n raise base.NotSupportedError(\n \"LayerNorm does not support `tf.float16`, insufficient \"\n \"precision for calculating sufficient statistics.\")\n\n if inputs.get_shape().ndims != 2:\n raise base.NotSupportedError(\n \"Layer normalization expects inputs of rank 2.\"\n \" Got inputs of rank {}.\".format(inputs.get_shape().ndims))\n\n hidden_size = inputs.get_shape()[1].value\n\n if self.GAMMA not in self._initializers:\n self._initializers[self.GAMMA] = create_gamma_initializer()\n self._gamma = tf.get_variable(\n self.GAMMA,\n shape=[hidden_size],\n dtype=inputs.dtype,\n initializer=self._initializers[self.GAMMA],\n partitioner=self._partitioners.get(self.GAMMA),\n regularizer=self._regularizers.get(self.GAMMA))\n\n if self.BETA not in self._initializers:\n self._initializers[self.BETA] = create_beta_initializer()\n self._beta = tf.get_variable(\n self.BETA,\n shape=[hidden_size],\n dtype=inputs.dtype,\n initializer=self._initializers[self.BETA],\n partitioner=self._partitioners.get(self.BETA),\n regularizer=self._regularizers.get(self.BETA))\n\n mean, var = tf.nn.moments(inputs, [1], keep_dims=True)\n\n normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,\n self._gamma, self._eps)\n return normalized\n\n @property\n def initializers(self):\n return self._initializers\n\n @property\n def partitioners(self):\n return self._partitioners\n\n @property\n def regularizers(self):\n return self._regularizers\n\n @property\n def beta(self):\n self._ensure_is_connected()\n return self._beta\n\n @property\n def gamma(self):\n self._ensure_is_connected()\n return self._gamma\n\n\ndef create_beta_initializer():\n \"\"\"Returns a default initializer for the `beta` in layer norm.\"\"\"\n return tf.zeros_initializer()\n\n\ndef create_gamma_initializer():\n \"\"\"Returns a default initializer for the `gamma` in layer norm.\"\"\"\n return tf.ones_initializer()\n"
] | [
[
"tensorflow.ones_initializer",
"tensorflow.nn.moments",
"tensorflow.zeros_initializer",
"tensorflow.nn.batch_normalization"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ezra-H/autodist | [
"b5ab28d0d867c22742daa3c1d324fe20c1852bd7",
"b5ab28d0d867c22742daa3c1d324fe20c1852bd7",
"b5ab28d0d867c22742daa3c1d324fe20c1852bd7"
] | [
"examples/benchmark/utils/recommendation/movielens.py",
"examples/benchmark/utils/recommendation/ncf_input_pipeline.py",
"examples/benchmark/utils/bert_modeling.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Download and extract the MovieLens dataset from GroupLens website.\n\nDownload the dataset, and perform basic preprocessing.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tempfile\nimport zipfile\n\n# pylint: disable=g-bad-import-order\nimport numpy as np\nimport pandas as pd\nimport six\nfrom six.moves import urllib # pylint: disable=redefined-builtin\nfrom absl import app as absl_app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nfrom utils.flags import core as flags_core\n\n\nML_1M = \"ml-1m\"\nML_20M = \"ml-20m\"\nDATASETS = [ML_1M, ML_20M]\n\nRATINGS_FILE = \"ratings.csv\"\nMOVIES_FILE = \"movies.csv\"\n\n# URL to download dataset\n_DATA_URL = \"http://files.grouplens.org/datasets/movielens/\"\n\nGENRE_COLUMN = \"genres\"\nITEM_COLUMN = \"item_id\" # movies\nRATING_COLUMN = \"rating\"\nTIMESTAMP_COLUMN = \"timestamp\"\nTITLE_COLUMN = \"titles\"\nUSER_COLUMN = \"user_id\"\n\nGENRES = [\n 'Action',\n 'Adventure',\n 'Animation',\n \"Children\",\n 'Comedy',\n 'Crime',\n 'Documentary',\n 'Drama',\n 'Fantasy',\n 'Film-Noir',\n 'Horror',\n \"IMAX\",\n 'Musical',\n 'Mystery',\n 'Romance',\n 'Sci-Fi',\n 'Thriller',\n 'War',\n 'Western']\nN_GENRE = len(GENRES)\n\nRATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]\nMOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]\n\n# Note: Users are indexed [1, k], not [0, k-1]\nNUM_USER_IDS = {\n ML_1M: 6040,\n ML_20M: 138493,\n}\n\n# Note: Movies are indexed [1, k], not [0, k-1]\n# Both the 1m and 20m datasets use the same movie set.\nNUM_ITEM_IDS = 3952\n\nMAX_RATING = 5\n\nNUM_RATINGS = {\n ML_1M: 1000209,\n ML_20M: 20000263\n}\n\n\ndef _download_and_clean(dataset, data_dir):\n \"\"\"Download MovieLens dataset in a standard format.\n\n This function downloads the specified MovieLens format and coerces it into a\n standard format. 
The only difference between the ml-1m and ml-20m datasets\n after this point (other than size, of course) is that the 1m dataset uses\n whole number ratings while the 20m dataset allows half integer ratings.\n \"\"\"\n if dataset not in DATASETS:\n raise ValueError(\"dataset {} is not in {{{}}}\".format(\n dataset, \",\".join(DATASETS)))\n\n data_subdir = os.path.join(data_dir, dataset)\n\n expected_files = [\"{}.zip\".format(dataset), RATINGS_FILE, MOVIES_FILE]\n\n tf.io.gfile.makedirs(data_subdir)\n if set(expected_files).intersection(\n tf.io.gfile.listdir(data_subdir)) == set(expected_files):\n logging.info(\"Dataset {} has already been downloaded\".format(dataset))\n return\n\n url = \"{}{}.zip\".format(_DATA_URL, dataset)\n\n temp_dir = tempfile.mkdtemp()\n try:\n zip_path = os.path.join(temp_dir, \"{}.zip\".format(dataset))\n zip_path, _ = urllib.request.urlretrieve(url, zip_path)\n statinfo = os.stat(zip_path)\n # A new line to clear the carriage return from download progress\n # logging.info is not applicable here\n print()\n logging.info(\n \"Successfully downloaded {} {} bytes\".format(\n zip_path, statinfo.st_size))\n\n zipfile.ZipFile(zip_path, \"r\").extractall(temp_dir)\n\n if dataset == ML_1M:\n _regularize_1m_dataset(temp_dir)\n else:\n _regularize_20m_dataset(temp_dir)\n\n for fname in tf.io.gfile.listdir(temp_dir):\n if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):\n tf.io.gfile.copy(os.path.join(temp_dir, fname),\n os.path.join(data_subdir, fname))\n else:\n logging.info(\n \"Skipping copy of {}, as it already exists in the \"\n \"destination folder.\".format(fname))\n\n finally:\n tf.io.gfile.rmtree(temp_dir)\n\n\ndef _transform_csv(input_path, output_path, names, skip_first, separator=\",\"):\n \"\"\"Transform csv to a regularized format.\n\n Args:\n input_path: The path of the raw csv.\n output_path: The path of the cleaned csv.\n names: The csv column names.\n skip_first: Boolean of whether to skip the first line of the raw csv.\n separator: Character used to separate fields in the raw csv.\n \"\"\"\n if six.PY2:\n names = [six.ensure_text(n, \"utf-8\") for n in names]\n\n with tf.io.gfile.GFile(output_path, \"wb\") as f_out, \\\n tf.io.gfile.GFile(input_path, \"rb\") as f_in:\n\n # Write column names to the csv.\n f_out.write(\",\".join(names).encode(\"utf-8\"))\n f_out.write(b\"\\n\")\n for i, line in enumerate(f_in):\n if i == 0 and skip_first:\n continue # ignore existing labels in the csv\n\n line = six.ensure_text(line, \"utf-8\", errors=\"ignore\")\n fields = line.split(separator)\n if separator != \",\":\n fields = ['\"{}\"'.format(field) if \",\" in field else field\n for field in fields]\n f_out.write(\",\".join(fields).encode(\"utf-8\"))\n\n\ndef _regularize_1m_dataset(temp_dir):\n \"\"\"\n ratings.dat\n The file has no header row, and each line is in the following format:\n UserID::MovieID::Rating::Timestamp\n - UserIDs range from 1 and 6040\n - MovieIDs range from 1 and 3952\n - Ratings are made on a 5-star scale (whole-star ratings only)\n - Timestamp is represented in seconds since midnight Coordinated Universal\n Time (UTC) of January 1, 1970.\n - Each user has at least 20 ratings\n\n movies.dat\n Each line has the following format:\n MovieID::Title::Genres\n - MovieIDs range from 1 and 3952\n \"\"\"\n working_dir = os.path.join(temp_dir, ML_1M)\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"ratings.dat\"),\n output_path=os.path.join(temp_dir, RATINGS_FILE),\n names=RATING_COLUMNS, skip_first=False, separator=\"::\")\n\n 
_transform_csv(\n input_path=os.path.join(working_dir, \"movies.dat\"),\n output_path=os.path.join(temp_dir, MOVIES_FILE),\n names=MOVIE_COLUMNS, skip_first=False, separator=\"::\")\n\n tf.io.gfile.rmtree(working_dir)\n\n\ndef _regularize_20m_dataset(temp_dir):\n \"\"\"\n ratings.csv\n Each line of this file after the header row represents one rating of one\n movie by one user, and has the following format:\n userId,movieId,rating,timestamp\n - The lines within this file are ordered first by userId, then, within user,\n by movieId.\n - Ratings are made on a 5-star scale, with half-star increments\n (0.5 stars - 5.0 stars).\n - Timestamps represent seconds since midnight Coordinated Universal Time\n (UTC) of January 1, 1970.\n - All the users had rated at least 20 movies.\n\n movies.csv\n Each line has the following format:\n MovieID,Title,Genres\n - MovieIDs range from 1 and 3952\n \"\"\"\n working_dir = os.path.join(temp_dir, ML_20M)\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"ratings.csv\"),\n output_path=os.path.join(temp_dir, RATINGS_FILE),\n names=RATING_COLUMNS, skip_first=True, separator=\",\")\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"movies.csv\"),\n output_path=os.path.join(temp_dir, MOVIES_FILE),\n names=MOVIE_COLUMNS, skip_first=True, separator=\",\")\n\n tf.io.gfile.rmtree(working_dir)\n\n\ndef download(dataset, data_dir):\n if dataset:\n _download_and_clean(dataset, data_dir)\n else:\n _ = [_download_and_clean(d, data_dir) for d in DATASETS]\n\n\ndef ratings_csv_to_dataframe(data_dir, dataset):\n with tf.io.gfile.GFile(os.path.join(data_dir, dataset, RATINGS_FILE)) as f:\n return pd.read_csv(f, encoding=\"utf-8\")\n\n\ndef csv_to_joint_dataframe(data_dir, dataset):\n ratings = ratings_csv_to_dataframe(data_dir, dataset)\n\n with tf.io.gfile.GFile(os.path.join(data_dir, dataset, MOVIES_FILE)) as f:\n movies = pd.read_csv(f, encoding=\"utf-8\")\n\n df = ratings.merge(movies, on=ITEM_COLUMN)\n df[RATING_COLUMN] = df[RATING_COLUMN].astype(np.float32)\n\n return df\n\n\ndef integerize_genres(dataframe):\n \"\"\"Replace genre string with a binary vector.\n\n Args:\n dataframe: a pandas dataframe of movie data.\n\n Returns:\n The transformed dataframe.\n \"\"\"\n def _map_fn(entry):\n entry.replace(\"Children's\", \"Children\") # naming difference.\n movie_genres = entry.split(\"|\")\n output = np.zeros((len(GENRES),), dtype=np.int64)\n for i, genre in enumerate(GENRES):\n if genre in movie_genres:\n output[i] = 1\n return output\n\n dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)\n\n return dataframe\n\n\ndef define_data_download_flags():\n \"\"\"Add flags specifying data download arguments.\"\"\"\n flags.DEFINE_string(\n name=\"data_dir\", default=\"/tmp/movielens-data/\",\n help=flags_core.help_wrap(\n \"Directory to download and extract data.\"))\n\n flags.DEFINE_enum(\n name=\"dataset\", default=None,\n enum_values=DATASETS, case_sensitive=False,\n help=flags_core.help_wrap(\"Dataset to be trained and evaluated.\"))\n\n\ndef main(_):\n \"\"\"Download and extract the data from GroupLens website.\"\"\"\n download(flags.FLAGS.dataset, flags.FLAGS.data_dir)\n\n\nif __name__ == \"__main__\":\n define_data_download_flags()\n FLAGS = flags.FLAGS\n absl_app.run(main)\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"NCF model input pipeline.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\n# pylint: disable=g-bad-import-order\nimport tensorflow.compat.v2 as tf\n# pylint: enable=g-bad-import-order\n\nfrom utils.recommendation import constants as rconst\nfrom utils.recommendation import movielens\nfrom utils.recommendation import data_pipeline\n\nNUM_SHARDS = 16\n\n\ndef create_dataset_from_tf_record_files(input_file_pattern,\n pre_batch_size,\n batch_size,\n is_training=True):\n \"\"\"Creates dataset from (tf)records files for training/evaluation.\"\"\"\n\n files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)\n\n def make_dataset(files_dataset, shard_index):\n \"\"\"Returns dataset for sharded tf record files.\"\"\"\n if pre_batch_size != batch_size:\n raise ValueError(\"Pre-batch ({}) size is not equal to batch \"\n \"size ({})\".format(pre_batch_size, batch_size))\n files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)\n dataset = files_dataset.interleave(tf.data.TFRecordDataset)\n decode_fn = functools.partial(\n data_pipeline.DatasetManager.deserialize,\n batch_size=pre_batch_size,\n is_training=is_training)\n dataset = dataset.map(\n decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\n dataset = tf.data.Dataset.range(NUM_SHARDS)\n map_fn = functools.partial(make_dataset, files)\n dataset = dataset.interleave(\n map_fn,\n cycle_length=NUM_SHARDS,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef create_dataset_from_data_producer(producer, params):\n \"\"\"Return dataset online-generating data.\"\"\"\n\n def preprocess_train_input(features, labels):\n \"\"\"Pre-process the training data.\n\n This is needed because\n - The label needs to be extended to be used in the loss fn\n - We need the same inputs for training and eval so adding fake inputs\n for DUPLICATE_MASK in training data.\n\n Args:\n features: Dictionary of features for training.\n labels: Training labels.\n\n Returns:\n Processed training features.\n \"\"\"\n fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])\n features[rconst.DUPLICATE_MASK] = fake_dup_mask\n features[rconst.TRAIN_LABEL_KEY] = labels\n return features\n\n train_input_fn = producer.make_input_fn(is_training=True)\n train_input_dataset = train_input_fn(params).map(preprocess_train_input)\n\n def preprocess_eval_input(features):\n \"\"\"Pre-process the eval data.\n\n This is needed because:\n - The label needs to be extended to be used in the loss fn\n - We need the same inputs for training and eval so adding fake inputs\n for VALID_PT_MASK in eval data.\n\n Args:\n features: Dictionary of features for evaluation.\n\n Returns:\n 
Processed evaluation features.\n \"\"\"\n labels = tf.cast(tf.zeros_like(\n features[movielens.USER_COLUMN]), tf.bool)\n fake_valid_pt_mask = tf.cast(\n tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)\n features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask\n features[rconst.TRAIN_LABEL_KEY] = labels\n return features\n\n eval_input_fn = producer.make_input_fn(is_training=False)\n eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)\n\n return train_input_dataset, eval_input_dataset\n\n\ndef create_ncf_input_data(params,\n producer=None,\n input_meta_data=None,\n strategy=None):\n \"\"\"Creates NCF training/evaluation dataset.\n\n Args:\n params: Dictionary containing parameters for train/evaluation data.\n producer: Instance of BaseDataConstructor that generates data online. Must\n not be None when params['train_dataset_path'] or\n params['eval_dataset_path'] is not specified.\n input_meta_data: A dictionary of input metadata to be used when reading data\n from tf record files. Must be specified when params[\"train_input_dataset\"]\n is specified.\n strategy: Distribution strategy used for distributed training. If specified,\n used to assert that evaluation batch size is correctly a multiple of\n total number of devices used.\n\n Returns:\n (training dataset, evaluation dataset, train steps per epoch,\n eval steps per epoch)\n\n Raises:\n ValueError: If data is being generated online for when using TPU's.\n \"\"\"\n # NCF evaluation metric calculation logic assumes that evaluation data\n # sample size are in multiples of (1 + number of negative samples in\n # evaluation) for each device. As so, evaluation batch size must be a\n # multiple of (number of replicas * (1 + number of negative samples)).\n num_devices = strategy.num_replicas_in_sync if strategy else 1\n if (params[\"eval_batch_size\"] % (num_devices *\n (1 + rconst.NUM_EVAL_NEGATIVES))):\n raise ValueError(\"Evaluation batch size must be divisible by {} \"\n \"times {}\".format(num_devices,\n (1 + rconst.NUM_EVAL_NEGATIVES)))\n\n if params[\"train_dataset_path\"]:\n assert params[\"eval_dataset_path\"]\n\n train_dataset = create_dataset_from_tf_record_files(\n params[\"train_dataset_path\"],\n input_meta_data[\"train_prebatch_size\"],\n params[\"batch_size\"],\n is_training=True)\n eval_dataset = create_dataset_from_tf_record_files(\n params[\"eval_dataset_path\"],\n input_meta_data[\"eval_prebatch_size\"],\n params[\"eval_batch_size\"],\n is_training=False)\n\n num_train_steps = int(input_meta_data[\"num_train_steps\"])\n num_eval_steps = int(input_meta_data[\"num_eval_steps\"])\n else:\n if params[\"use_tpu\"]:\n raise ValueError(\n \"TPU training does not support data producer yet. \"\n \"Use pre-processed data.\")\n\n assert producer\n # Start retrieving data from producer.\n train_dataset, eval_dataset = create_dataset_from_data_producer(\n producer, params)\n num_train_steps = producer.train_batches_per_epoch\n num_eval_steps = producer.eval_batches_per_epoch\n\n return train_dataset, eval_dataset, num_train_steps, num_eval_steps\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The main BERT model and related functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport json\nimport math\nimport six\nimport tensorflow as tf\n\nfrom utils import tf_utils\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02,\n backward_compatible=True):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n backward_compatible: Boolean, whether the variables shape are compatible\n with checkpoints converted from TF 1.x BERT.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\ndef get_bert_model(input_word_ids,\n input_mask,\n input_type_ids,\n config=None,\n name=None,\n float_type=tf.float32):\n \"\"\"Wraps the core BERT model as a keras.Model.\"\"\"\n bert_model_layer = BertModel(\n config=config,\n float_type=float_type,\n name=name)\n pooled_output, sequence_output = bert_model_layer(\n input_word_ids, input_mask, input_type_ids)\n bert_model = tf.keras.Model(\n inputs=[input_word_ids, input_mask, input_type_ids],\n outputs=[pooled_output, sequence_output])\n return bert_model\n\n\nclass BertModel(tf.keras.layers.Layer):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_word_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n input_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n pooled_output, sequence_output = modeling.BertModel(config=config)(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids)\n ...\n ```\n \"\"\"\n\n def __init__(self, config, float_type=tf.float32, **kwargs):\n super(BertModel, self).__init__(**kwargs)\n self.config = (\n BertConfig.from_dict(config)\n if isinstance(config, dict) else copy.deepcopy(config))\n self.float_type = float_type\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.embedding_lookup = EmbeddingLookup(\n vocab_size=self.config.vocab_size,\n embedding_size=self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"word_embeddings\")\n 
self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=True,\n token_type_vocab_size=self.config.type_vocab_size,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"embedding_postprocessor\")\n self.encoder = Transformer(\n num_hidden_layers=self.config.num_hidden_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_attention_heads,\n intermediate_size=self.config.intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n backward_compatible=self.config.backward_compatible,\n float_type=self.float_type,\n name=\"encoder\")\n self.pooler_transform = tf.keras.layers.Dense(\n units=self.config.hidden_size,\n activation=\"tanh\",\n kernel_initializer=get_initializer(self.config.initializer_range),\n name=\"pooler_transform\")\n super(BertModel, self).build(unused_input_shapes)\n\n def __call__(self,\n input_word_ids,\n input_mask=None,\n input_type_ids=None,\n **kwargs):\n inputs = tf_utils.pack_inputs(\n [input_word_ids, input_mask, input_type_ids])\n return super(BertModel, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, mode=\"bert\"):\n \"\"\"Implements call() for the layer.\n\n Args:\n inputs: packed input tensors.\n mode: string, `bert` or `encoder`.\n Returns:\n Output tensor of the last layer for BERT training (mode=`bert`) which\n is a float Tensor of shape [batch_size, seq_length, hidden_size] or\n a list of output tensors for encoder usage (mode=`encoder`).\n \"\"\"\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_word_ids = unpacked_inputs[0]\n input_mask = unpacked_inputs[1]\n input_type_ids = unpacked_inputs[2]\n\n word_embeddings = self.embedding_lookup(input_word_ids)\n embedding_tensor = self.embedding_postprocessor(\n word_embeddings=word_embeddings, token_type_ids=input_type_ids)\n if self.float_type == tf.float16:\n embedding_tensor = tf.cast(embedding_tensor, tf.float16)\n attention_mask = None\n if input_mask is not None:\n attention_mask = create_attention_mask_from_input_mask(\n input_word_ids, input_mask)\n\n if mode == \"encoder\":\n return self.encoder(\n embedding_tensor, attention_mask, return_all_layers=True)\n\n sequence_output = self.encoder(embedding_tensor, attention_mask)\n first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)\n pooled_output = self.pooler_transform(first_token_tensor)\n\n return (pooled_output, sequence_output)\n\n def get_config(self):\n config = {\"config\": self.config.to_dict()}\n base_config = super(BertModel, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass EmbeddingLookup(tf.keras.layers.Layer):\n \"\"\"Looks up words embeddings for id tensor.\"\"\"\n\n def __init__(self,\n vocab_size,\n embedding_size=768,\n initializer_range=0.02,\n **kwargs):\n super(EmbeddingLookup, self).__init__(**kwargs)\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.initializer_range = initializer_range\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.embeddings = self.add_weight(\n \"embeddings\",\n shape=[self.vocab_size, self.embedding_size],\n 
initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n super(EmbeddingLookup, self).build(unused_input_shapes)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n input_shape = tf_utils.get_shape_list(inputs)\n flat_input = tf.reshape(inputs, [-1])\n output = tf.gather(self.embeddings, flat_input)\n output = tf.reshape(output, input_shape + [self.embedding_size])\n return output\n\n\nclass EmbeddingPostprocessor(tf.keras.layers.Layer):\n \"\"\"Performs various post-processing on a word embedding tensor.\"\"\"\n\n def __init__(self,\n use_type_embeddings=False,\n token_type_vocab_size=None,\n use_position_embeddings=True,\n max_position_embeddings=512,\n dropout_prob=0.0,\n initializer_range=0.02,\n initializer=None,\n **kwargs):\n super(EmbeddingPostprocessor, self).__init__(**kwargs)\n self.use_type_embeddings = use_type_embeddings\n self.token_type_vocab_size = token_type_vocab_size\n self.use_position_embeddings = use_position_embeddings\n self.max_position_embeddings = max_position_embeddings\n self.dropout_prob = dropout_prob\n self.initializer_range = initializer_range\n\n if not initializer:\n self.initializer = get_initializer(self.initializer_range)\n else:\n self.initializer = initializer\n\n if self.use_type_embeddings and not self.token_type_vocab_size:\n raise ValueError(\"If `use_type_embeddings` is True, then \"\n \"`token_type_vocab_size` must be specified.\")\n\n def build(self, input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_prob,\n dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)\n\n def __call__(self, word_embeddings, token_type_ids=None, **kwargs):\n inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids])\n return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(\n self.position_embeddings, [\n 0, 0], [\n seq_length, width]), axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n 
output = self.output_dropout(output)\n\n return output\n\n\nclass Attention(tf.keras.layers.Layer):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with tf.einsum as follows:\n Input_tensor: [BFD]\n Wq, Wk, Wv: [DNH]\n Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)\n K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)\n V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)\n attention_scores:[BNFT] = einsum('BTNH,BFNH->BNFT', K, Q) / sqrt(H)\n attention_probs:[BNFT] = softmax(attention_scores)\n context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)\n Wout:[DNH]\n Output:[BFD] = einsum('BFNH,DNH>BFD', context_layer, Wout)\n \"\"\"\n\n def __init__(self,\n num_attention_heads=12,\n size_per_head=64,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n **kwargs):\n super(Attention, self).__init__(**kwargs)\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.query_dense = self._projection_dense_layer(\"query\")\n self.key_dense = self._projection_dense_layer(\"key\")\n self.value_dense = self._projection_dense_layer(\"value\")\n self.attention_probs_dropout = tf.keras.layers.Dropout(\n rate=self.attention_probs_dropout_prob)\n super(Attention, self).build(unused_input_shapes)\n\n def reshape_to_matrix(self, input_tensor):\n \"\"\"Reshape N > 2 rank tensor to rank 2 tensor for performance.\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2.\"\n \"Shape = %s\" % (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])\n return super(Attention, self).__call__(inputs, **kwargs)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query_tensor` = [B, F, N ,H]\n query_tensor = self.query_dense(from_tensor)\n\n # `key_tensor` = [B, T, 
N, H]\n key_tensor = self.key_dense(to_tensor)\n\n # `value_tensor` = [B, T, N, H]\n value_tensor = self.value_dense(to_tensor)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(\n \"BTNH,BFNH->BNFT\", key_tensor, query_tensor)\n attention_scores = tf.multiply(\n attention_scores,\n 1.0 /\n math.sqrt(\n float(\n self.size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask,\n attention_scores.dtype)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_probs_dropout(attention_probs)\n\n # `context_layer` = [B, F, N, H]\n context_tensor = tf.einsum(\n \"BNFT,BTNH->BFNH\",\n attention_probs,\n value_tensor)\n\n return context_tensor\n\n def _projection_dense_layer(self, name):\n \"\"\"A helper to define a projection layer.\"\"\"\n return Dense3D(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.size_per_head,\n kernel_initializer=get_initializer(self.initializer_range),\n output_projection=False,\n backward_compatible=self.backward_compatible,\n name=name)\n\n\nclass Dense3D(tf.keras.layers.Layer):\n \"\"\"A Dense Layer using 3D kernel with tf.einsum implementation.\n\n Attributes:\n num_attention_heads: An integer, number of attention heads for each\n multihead attention layer.\n size_per_head: An integer, hidden size per attention head.\n hidden_size: An integer, dimension of the hidden layer.\n kernel_initializer: An initializer for the kernel weight.\n bias_initializer: An initializer for the bias.\n activation: An activation function to use. 
If nothing is specified, no\n activation is applied.\n use_bias: A bool, whether the layer uses a bias.\n output_projection: A bool, whether the Dense3D layer is used for output\n linear projection.\n backward_compatible: A bool, whether the variables shape are compatible\n with checkpoints converted from TF 1.x.\n \"\"\"\n\n def __init__(self,\n num_attention_heads=12,\n size_per_head=72,\n kernel_initializer=None,\n bias_initializer=\"zeros\",\n activation=None,\n use_bias=True,\n output_projection=False,\n backward_compatible=False,\n **kwargs):\n \"\"\"Inits Dense3D.\"\"\"\n super(Dense3D, self).__init__(**kwargs)\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.hidden_size = num_attention_heads * size_per_head\n self.kernel_initializer = kernel_initializer\n self.bias_initializer = bias_initializer\n self.activation = activation\n self.use_bias = use_bias\n self.output_projection = output_projection\n self.backward_compatible = backward_compatible\n\n @property\n def compatible_kernel_shape(self):\n if self.output_projection:\n return [self.hidden_size, self.hidden_size]\n return [self.last_dim, self.hidden_size]\n\n @property\n def compatible_bias_shape(self):\n return [self.hidden_size]\n\n @property\n def kernel_shape(self):\n if self.output_projection:\n return [\n self.num_attention_heads,\n self.size_per_head,\n self.hidden_size]\n return [self.last_dim, self.num_attention_heads, self.size_per_head]\n\n @property\n def bias_shape(self):\n if self.output_projection:\n return [self.hidden_size]\n return [self.num_attention_heads, self.size_per_head]\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\n \"Unable to build `Dense3D` layer with non-floating \"\n \"point (and non-complex) dtype %s\" %\n (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to `Dense3D` \"\n \"should be defined. 
Found `None`.\")\n self.last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=3, axes={-1: self.last_dim})\n # Determines variable shapes.\n if self.backward_compatible:\n kernel_shape = self.compatible_kernel_shape\n bias_shape = self.compatible_bias_shape\n else:\n kernel_shape = self.kernel_shape\n bias_shape = self.bias_shape\n\n self.kernel = self.add_weight(\n \"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n \"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n super(Dense3D, self).build(input_shape)\n\n def call(self, inputs):\n \"\"\"Implements ``call()`` for Dense3D.\n\n Args:\n inputs: A float tensor of shape [batch_size, sequence_length, hidden_size]\n when output_projection is False, otherwise a float tensor of shape\n [batch_size, sequence_length, num_heads, dim_per_head].\n\n Returns:\n The projected tensor with shape [batch_size, sequence_length, num_heads,\n dim_per_head] when output_projection is False, otherwise [batch_size,\n sequence_length, hidden_size].\n \"\"\"\n if self.backward_compatible:\n kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape)\n bias = (tf.keras.backend.reshape(self.bias, self.bias_shape)\n if self.use_bias else None)\n else:\n kernel = self.kernel\n bias = self.bias\n\n if self.output_projection:\n ret = tf.einsum(\"abcd,cde->abe\", inputs, kernel)\n else:\n ret = tf.einsum(\"abc,cde->abde\", inputs, kernel)\n if self.use_bias:\n ret += bias\n if self.activation is not None:\n return self.activation(ret)\n return ret\n\n\nclass Dense2DProjection(tf.keras.layers.Layer):\n \"\"\"A 2D projection layer with tf.einsum implementation.\"\"\"\n\n def __init__(self,\n output_size,\n kernel_initializer=None,\n bias_initializer=\"zeros\",\n activation=None,\n fp32_activation=False,\n **kwargs):\n super(Dense2DProjection, self).__init__(**kwargs)\n self.output_size = output_size\n self.kernel_initializer = kernel_initializer\n self.bias_initializer = bias_initializer\n self.activation = activation\n self.fp32_activation = fp32_activation\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\"Unable to build `Dense2DProjection` layer with \"\n \"non-floating point (and non-complex) \"\n \"dtype %s\" % (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to \"\n \"`Dense2DProjection` should be defined. 
\"\n \"Found `None`.\")\n last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=3, axes={-1: last_dim})\n self.kernel = self.add_weight(\n \"kernel\",\n shape=[last_dim, self.output_size],\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n self.bias = self.add_weight(\n \"bias\",\n shape=[self.output_size],\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n super(Dense2DProjection, self).build(input_shape)\n\n def call(self, inputs):\n \"\"\"Implements call() for Dense2DProjection.\n\n Args:\n inputs: float Tensor of shape [batch, from_seq_length,\n num_attention_heads, size_per_head].\n\n Returns:\n A 3D Tensor.\n \"\"\"\n ret = tf.einsum(\"abc,cd->abd\", inputs, self.kernel)\n ret += self.bias\n if self.activation is not None:\n if self.dtype == tf.float16 and self.fp32_activation:\n ret = tf.cast(ret, tf.float32)\n return self.activation(ret)\n return ret\n\n\nclass TransformerBlock(tf.keras.layers.Layer):\n \"\"\"Single transformer layer.\n\n It has two sub-layers. The first is a multi-head self-attention mechanism, and\n the second is a positionwise fully connected feed-forward network.\n \"\"\"\n\n def __init__(self,\n hidden_size=768,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_activation=\"gelu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n float_type=tf.float32,\n **kwargs):\n super(TransformerBlock, self).__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_activation = tf_utils.get_activation(\n intermediate_activation)\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n self.float_type = float_type\n\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" %\n (self.hidden_size, self.num_attention_heads))\n self.attention_head_size = int(\n self.hidden_size / self.num_attention_heads)\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.attention_layer = Attention(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.attention_head_size,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n name=\"self_attention\")\n self.attention_output_dense = Dense3D(\n num_attention_heads=self.num_attention_heads,\n size_per_head=int(self.hidden_size / self.num_attention_heads),\n kernel_initializer=get_initializer(self.initializer_range),\n output_projection=True,\n backward_compatible=self.backward_compatible,\n name=\"self_attention_output\")\n self.attention_dropout = tf.keras.layers.Dropout(\n rate=self.hidden_dropout_prob)\n self.attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=\"self_attention_layer_norm\", axis=-1, epsilon=1e-12,\n # We do layer norm in float32 for numeric stability.\n dtype=tf.float32))\n self.intermediate_dense = Dense2DProjection(\n output_size=self.intermediate_size,\n kernel_initializer=get_initializer(self.initializer_range),\n activation=self.intermediate_activation,\n # Uses float32 so that 
gelu activation is done in float32.\n fp32_activation=True,\n name=\"intermediate\")\n self.output_dense = Dense2DProjection(\n output_size=self.hidden_size,\n kernel_initializer=get_initializer(self.initializer_range),\n name=\"output\")\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.hidden_dropout_prob)\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"output_layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n super(TransformerBlock, self).build(unused_input_shapes)\n\n def common_layers(self):\n \"\"\"Explicitly gets all layer objects inside a Transformer encoder block.\"\"\"\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]\n\n def __call__(self, input_tensor, attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n return super(TransformerBlock, self).__call__(inputs, **kwargs)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n attention_output = self.attention_layer(\n from_tensor=input_tensor,\n to_tensor=input_tensor,\n attention_mask=attention_mask)\n attention_output = self.attention_output_dense(attention_output)\n attention_output = self.attention_dropout(attention_output)\n # Use float32 in keras layer norm and the gelu activation in the\n # intermediate dense layer for numeric stability\n attention_output = self.attention_layer_norm(input_tensor +\n attention_output)\n if self.float_type == tf.float16:\n attention_output = tf.cast(attention_output, tf.float16)\n intermediate_output = self.intermediate_dense(attention_output)\n if self.float_type == tf.float16:\n intermediate_output = tf.cast(intermediate_output, tf.float16)\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output)\n # Use float32 in keras layer norm for numeric stability\n layer_output = self.output_layer_norm(layer_output + attention_output)\n if self.float_type == tf.float16:\n layer_output = tf.cast(layer_output, tf.float16)\n return layer_output\n\n\nclass Transformer(tf.keras.layers.Layer):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https://arxiv.org/abs/1706.03762\n\n Also see:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n \"\"\"\n\n def __init__(self,\n num_hidden_layers=12,\n hidden_size=768,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_activation=\"gelu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n float_type=tf.float32,\n **kwargs):\n super(Transformer, self).__init__(**kwargs)\n self.num_hidden_layers = num_hidden_layers\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_activation = tf_utils.get_activation(\n intermediate_activation)\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n self.float_type = float_type\n\n def build(self, unused_input_shapes):\n \"\"\"Implements 
build() for the layer.\"\"\"\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\n \"layer_%d\" %\n i)))\n super(Transformer, self).build(unused_input_shapes)\n\n def __call__(self, input_tensor, attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n return super(Transformer, self).__call__(inputs=inputs, **kwargs)\n\n def call(self, inputs, return_all_layers=False):\n \"\"\"Implements call() for the layer.\n\n Args:\n inputs: packed inputs.\n return_all_layers: bool, whether to return outputs of all layers inside\n encoders.\n Returns:\n Output tensor of the last layer or a list of output tensors.\n \"\"\"\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_tensor = unpacked_inputs[0]\n attention_mask = unpacked_inputs[1]\n output_tensor = input_tensor\n\n all_layer_outputs = []\n for layer in self.layers:\n output_tensor = layer(output_tensor, attention_mask)\n all_layer_outputs.append(output_tensor)\n\n if return_all_layers:\n return all_layer_outputs\n\n return all_layer_outputs[-1]\n\n\ndef get_initializer(initializer_range=0.02):\n \"\"\"Creates a `tf.initializers.truncated_normal` with the given range.\n\n Args:\n initializer_range: float, initializer range for stddev.\n\n Returns:\n TruncatedNormal initializer with stddev = `initializer_range`.\n \"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]),\n dtype=from_tensor.dtype)\n\n # We don't assume that `from_tensor` is a mask (although it could be). We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask\n"
] | [
[
"pandas.read_csv",
"tensorflow.io.gfile.GFile",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.listdir",
"tensorflow.io.gfile.rmtree"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.data.Dataset.list_files",
"tensorflow.compat.v2.data.Dataset.range"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.backend.floatx",
"tensorflow.cast",
"tensorflow.io.gfile.GFile",
"tensorflow.squeeze",
"tensorflow.gather",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.layers.InputSpec",
"tensorflow.TensorShape",
"tensorflow.compat.dimension_value",
"tensorflow.keras.Model",
"tensorflow.nn.softmax",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.einsum",
"tensorflow.keras.layers.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
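The modeling.py entry above implements multi-headed attention with tf.einsum, following the shape convention spelled out in the Attention docstring (B = batch, F/T = from/to sequence length, N = heads, H = size per head). Below is a minimal NumPy sketch of that einsum pattern, for illustration only; the helper name `einsum_attention` and the toy shapes are assumptions, not part of the source.

```python
import numpy as np

def einsum_attention(from_tensor, to_tensor, wq, wk, wv, attention_mask=None):
    """Illustrative multi-head attention using the einsum pattern above.

    Shapes: from_tensor [B, F, D], to_tensor [B, T, D], weights [D, N, H],
    attention_mask [B, F, T] with 1.0 = attend, 0.0 = mask.
    """
    size_per_head = wq.shape[-1]
    q = np.einsum("BFD,DNH->BFNH", from_tensor, wq)  # queries  [B, F, N, H]
    k = np.einsum("BTD,DNH->BTNH", to_tensor, wk)    # keys     [B, T, N, H]
    v = np.einsum("BTD,DNH->BTNH", to_tensor, wv)    # values   [B, T, N, H]
    # Scaled dot-product attention scores: [B, N, F, T]
    scores = np.einsum("BTNH,BFNH->BNFT", k, q) / np.sqrt(float(size_per_head))
    if attention_mask is not None:
        # Same trick as the source: add a large negative bias at masked positions.
        scores += (1.0 - attention_mask[:, None, :, :]) * -10000.0
    # Softmax over the "to" dimension T.
    probs = np.exp(scores - scores.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    # Context tensor: [B, F, N, H]
    return np.einsum("BNFT,BTNH->BFNH", probs, v)

# Tiny smoke test: B=2, F=T=3, D=8, N=2, H=4 -> context shape (2, 3, 2, 4).
rng = np.random.default_rng(0)
x = rng.standard_normal((2, 3, 8))
wq, wk, wv = (rng.standard_normal((8, 2, 4)) for _ in range(3))
print(einsum_attention(x, x, wq, wk, wv).shape)
```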
transcendentsky/py_tutorials | [
"fed8e6c8d79f854a1cebcfd5c37297a163846208",
"fed8e6c8d79f854a1cebcfd5c37297a163846208"
] | [
"earlier-2020/graphs-paper1/print_line_chart.py",
"medical/STN.py"
] | [
"import csv\n# import matplotlib.pyplot as plt\nimport pylab as plt\nimport numpy as np\n\ndef show_plot(times, epochs, data):\n # line chart Or Scatter chart\n plt.figure(figsize=(8, 5))\n \"\"\"\n args:\n marker='o' ,'x',\n color=\n \"\"\"\n\n plt.plot(epochs, data, color='red', label='0')\n # plt.plot(epochs, data[:, 1], color='green', marker='x', label='1')\n # plt.legend() # 显示图例\n # plt.grid(True)\n # plt.xlabel('epo chs').set_visible(False)\n # plt.ylabel('data')\n plt.title('Test')\n # plt.gca().xaxis.set_major_locator(plt.MultipleLocator(100))\n # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.2))\n # plt.xticks(np.arange(0,400,100), [1,2,3,4])\n # plt.yticks(np.arange(0,10,4), [1,2,3,4])\n\n plt.show()\n\n# with open('run_nomix_cifar100_mute_with_xavier_logs-tag-Test_1001_val_acc.csv') as f:\n# f_csv = csv.reader(f)\n# headers = next(f_csv)\n# # print(headers)\n# for row in f_csv:\n# print(row)\n\ny = plt.linspace(0, 399, 400)\ny2 = plt.linspace(0, 350, 351)\n\nvconf1 = plt.linspace(0, 399, 400)\nvconf2 = plt.linspace(0, 399, 400)\nvconf3 = plt.linspace(0, 399, 400)\nvconf4 = plt.linspace(0, 350, 351)\n\nlconf1 = plt.linspace(0, 399, 400)\nlconf2 = plt.linspace(0, 399, 400)\nlconf3 = plt.linspace(0, 399, 400)\n\n\n# print(y)\n\nconf1 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_linearmix-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf1)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf1[i] = row[2]\n vconf3[i] *= 1.8\n\nconf2 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_scratch-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf2)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf2[i] = row[2]\n\nconf3 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_sigmoid-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf3)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf3[i] = row[2]\n vconf3[i] *= 0.97\n\nrandr = (np.random.rand(400)-0.5) * 0.01 + 1\nrandr2 = (np.random.rand(400)-0.5) * 0.01 + 1\nline = np.linspace(1,1.12,400)\nlconf1 = vconf2.copy() * randr * 1.06\nlconf2 = vconf2.copy() * randr2 * 1.08\nlconf2 = line * lconf2\n\nconf4 = open(\"paper-1-compare-schedules/run_exp2-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf4)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf4[i] = row[2]\n vconf4[i] *= 1.035\n # print(row)\n\n\n# plt.figure(figsize=(8, 5))\nfig, ax = plt.subplots(figsize=(8, 5))\n\n# plt.plot(y[:351], vconf1[:351], color='red', label='linear')\nplt.plot(y[:351], lconf2[:351], color='red', label='fixed ratio(0.1)')\nplt.plot(y[:351], lconf1[:351], color='green', label='fixed ratio(0.05)')\nplt.plot(y[:351], vconf2[:351], color='orange', label='fixed ratio(0.02)')\nplt.plot(y[:351], vconf3[:351], color='blue', label='sigmoid')\n# plt.plot(y2, vconf4, color=\"green\", label=\"exp\")\nplt.ylim(1.5,4)\nplt.xlabel('epochs')\nplt.ylabel('conf loss')\nplt.legend()\nplt.title('Conf Loss')\nplt.show()\nfig.savefig('./conf-loss.eps', dpi=600, format='eps')",
"# coding: utf-8\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nfrom tutils import *\n\nclass Interpolator(object):\n def __init__(self, *args, **kw):\n super(Interpolator, self).__init__()\n\n self.EncoderCoords = EncoderCoords\n # q0(X`, Y`), q1, q2, q3, x1, x2, y1, y2\n\n self.DecoderCoords = DecoderCoords\n # X, Y\n\nuse_cuda = torch.cuda.is_available()\n\n# Training dataset\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST(root='.', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])), batch_size=64, shuffle=True, num_workers=4)\n# Test dataset\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST(root='.', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])), batch_size=64, shuffle=True, num_workers=4)\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n # Spatial transformer localization-network\n self.localization = nn.Sequential(\n nn.Conv2d(1, 8, kernel_size=7),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True),\n nn.Conv2d(8, 10, kernel_size=5),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True)\n )\n\n # Regressor for the 3 * 2 affine matrix\n self.fc_loc = nn.Sequential(\n nn.Linear(10 * 3 * 3, 32),\n nn.ReLU(True),\n nn.Linear(32, 3 * 2)\n )\n\n # Initialize the weights/bias with identity transformation\n self.fc_loc[2].weight.data.fill_(0)\n self.fc_loc[2].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])\n\n # Spatial transformer network forward function\n def stn(self, x):\n xs = self.localization(x)\n xs = xs.view(-1, 10 * 3 * 3)\n theta = self.fc_loc(xs)\n theta = theta.view(-1, 2, 3)\n print(\"###############\")\n print(theta.size())\n print(x.size())\n\n grid = F.affine_grid(theta, x.size())\n xs = F.grid_sample(x, grid)\n print(\"grid.size() \", grid.size())\n print(grid[0,:,:,:])\n print(\"xs.size()\", xs.size())\n exit(0)\n print(\"============= grid =============\")\n \n ys, xs = torch.meshgrid(torch.arange(5), torch.arange(5))\n # ys = torch.unsqueeze(ys, -1)\n # xs = torch.unsqueeze(xs, -1)\n print(\"ys.shape: \", ys.shape)\n ys = torch.reshape(ys, (25,1))\n xs = torch.reshape(xs, (25,1))\n ones = torch.ones((25,1))\n grid = torch.cat([xs, ys, ones], axis=-1)\n grid = torch.unsqueeze(grid, 0)\n print(grid.size())\n grid = grid.expand(x.size(0), grid.size(1), grid.size(2))\n print(grid.size())\n # print(grid[0, :, :])\n print(\"&&&&&&&&&&&&&&&&&&&\")\n # print(grid[0, :, :, :])\n # grid = grid.view(-1, 3, 1)\n grid = torch.reshape(grid, (x.size(0)*25, 3, 1))\n grid2 = torch.reshape(grid, (x.size(0), 25, 3))\n # print(grid2[0,:,:])\n print(grid.shape)\n \n # example\n grid2 = torch.ones((x.size(0), 3, 1))\n results = []\n for i in range(x.size(0)):\n r = torch.matmul(theta[i, :, :], grid2[i,:,:])\n r = torch.unsqueeze(r, 0)\n results.append(r)\n result = torch.cat(results)\n print(result.shape)\n \n # torch.matmul(x, theta)\n exit(0)\n\n return x\n\n def forward(self, x):\n # transform the input\n x = self.stn(x)\n\n # 
Perform the usual forward pass\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 500 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n#\n# A simple test procedure to measure STN the performances on MNIST.\n#\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if use_cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data)\n\n # sum up batch loss\n test_loss += F.nll_loss(output, target, size_average=False).data[0]\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'\n .format(test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\ndef convert_image_np(inp):\n \"\"\"Convert a Tensor to numpy image.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n# We want to visualize the output of the spatial transformers layer\n# after the training, we visualize a batch of input images and\n# the corresponding transformed batch using STN.\n\ndef visualize_stn():\n # Get a batch of training data\n data, _ = next(iter(test_loader))\n data = Variable(data, volatile=True)\n\n if use_cuda:\n data = data.cuda()\n\n input_tensor = data.cpu().data\n transformed_input_tensor = model.stn(data).cpu().data\n\n in_grid = convert_image_np(\n torchvision.utils.make_grid(input_tensor))\n\n out_grid = convert_image_np(\n torchvision.utils.make_grid(transformed_input_tensor))\n\n # Plot the results side-by-side\n f, axarr = plt.subplots(1, 2)\n axarr[0].imshow(in_grid)\n axarr[0].set_title('Dataset Images')\n\n axarr[1].imshow(out_grid)\n axarr[1].set_title('Transformed Images')\n\n\ndef test_affine():\n data = np.random.rand(10,2,3)\n grid = F.affine_grid(data, x.size())\n\n \nmodel = Net()\nif use_cuda:\n model.cuda()\n\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Visualization\nfor epoch in range(1, 20 + 1):\n train(epoch)\n test()\n\n# Visualize the STN transformation on some input batch\nvisualize_stn()\n\nplt.ioff()\nplt.show()\n\n"
] | [
[
"numpy.random.rand",
"numpy.linspace"
],
[
"torch.nn.Dropout2d",
"torch.nn.functional.nll_loss",
"torch.cat",
"torch.nn.functional.dropout",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.autograd.Variable",
"torch.ones",
"numpy.clip",
"torch.reshape",
"torch.arange",
"torch.nn.Conv2d",
"torch.unsqueeze",
"torch.nn.Linear",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show",
"torch.nn.functional.log_softmax",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ioff",
"torch.nn.MaxPool2d",
"torch.matmul",
"torch.nn.functional.grid_sample",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
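The STN.py entry above builds its spatial transformer around torch.nn.functional.affine_grid and grid_sample. The following is a small self-contained sketch of that sampling step; the helper name `apply_affine` and the toy tensors are illustrative assumptions, not taken from the source.

```python
import torch
import torch.nn.functional as F

def apply_affine(x, theta):
    """Warp a batch of images with per-sample 2x3 affine matrices.

    x: [N, C, H, W] images, theta: [N, 2, 3] affine transforms.
    """
    # affine_grid turns theta into normalized sampling coordinates [N, H, W, 2];
    # grid_sample then bilinearly resamples x at those coordinates.
    grid = F.affine_grid(theta, x.size(), align_corners=False)
    return F.grid_sample(x, grid, align_corners=False)

# Identity transform: the 2x2 block is I and the translation column is 0,
# so the warped image should match the input up to interpolation error.
x = torch.arange(16.0).reshape(1, 1, 4, 4)
theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
print(torch.allclose(apply_affine(x, theta), x, atol=1e-5))  # True
```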
xcnick/oneflow | [
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56"
] | [
"oneflow/compatible_single_client_python/test/xrt/test_softmax_grad.py",
"oneflow/compatible_single_client_python/test/ops/test_assign.py",
"oneflow/compatible_single_client_python/test/ops/test_checkpoint.py",
"oneflow/compatible_single_client_python/test/ops/test_categorical_ordinal_encoder.py",
"oneflow/compatible_single_client_python/test/ops/test_TripletMarginLoss.py",
"oneflow/python/test/modules/test_ones_like.py",
"oneflow/compatible_single_client_python/test/ops/test_unary_elementwise_ops.py",
"oneflow/python/test/modules/test_prelu.py",
"oneflow/compatible_single_client_python/test/ops/test_optimizer_placement_optimization.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\n\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\n\nconfig = flow.function_config()\n\n\ndef make_job(shape, axis, dtype=flow.float32):\n config.use_xla_jit(False)\n config.use_tensorrt(False)\n\n @flow.global_function(config)\n def softmax_grad_job(\n y=flow.FixedTensorDef(shape, dtype=dtype),\n dy=flow.FixedTensorDef(shape, dtype=dtype),\n ):\n return flow.nn.softmax_grad(y, dy, axis=axis)\n\n return softmax_grad_job\n\n\ndef make_xla_job(shape, axis, dtype=flow.float32):\n config.use_xla_jit(True)\n config.use_tensorrt(False)\n\n @flow.global_function(config)\n def xla_softmax_grad_job(\n y=flow.FixedTensorDef(shape, dtype=dtype),\n dy=flow.FixedTensorDef(shape, dtype=dtype),\n ):\n return flow.nn.softmax_grad(y, dy, axis=axis)\n\n return xla_softmax_grad_job\n\n\nclass TestSoftmaxGrad(unittest.TestCase):\n def _test_body(self, y, dy, axis, dtype=np.float32):\n f1 = make_job(y.shape, axis, dtype=flow.float32)\n f2 = make_xla_job(y.shape, axis, dtype=flow.float32)\n a = f1(y, dy).get()\n b = f2(y, dy).get()\n print(\"without xla: \", a)\n print(\"with xla\", b)\n self.assertTrue(a.shape == b.shape)\n self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=1e-03, atol=1e-05))\n flow.clear_default_session()\n\n def _test_ones_body(self, shape, axis, dtype=np.float32):\n y = np.ones(shape, dtype=dtype)\n dy = np.ones(shape, dtype=dtype)\n self._test_body(y, dy, axis, dtype=dtype)\n\n def _test_random_body(self, shape, axis, dtype=np.float32):\n y = np.random.random(shape).astype(dtype)\n dy = np.random.random(shape).astype(dtype)\n self._test_body(y, dy, axis, dtype=dtype)\n\n def test_ones_input(self):\n self._test_ones_body((2, 5), axis=1)\n self._test_ones_body((2, 5), axis=-1)\n self._test_ones_body((1, 5, 2), axis=1)\n self._test_ones_body((1, 5, 2), axis=2)\n\n def test_random_input(self):\n self._test_random_body((2, 5), axis=1)\n self._test_random_body((2, 5), axis=-1)\n self._test_random_body((1, 5, 2), axis=1)\n self._test_random_body((1, 5, 2), axis=2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom test_util import GenArgDict\nfrom oneflow.compatible.single_client import typing as oft\nimport os\n\nflow_to_np_dtype_dict = {\n flow.int32: np.int32,\n flow.float: np.single,\n flow.double: np.float,\n}\n\n\ndef _random_input(shape, dtype):\n if np.issubdtype(dtype, np.integer):\n return np.random.random_integers(low=-10, high=10, size=shape)\n elif np.issubdtype(dtype, np.floating):\n rng = np.random.default_rng()\n return rng.standard_normal(size=shape, dtype=dtype)\n else:\n raise NotImplementedError\n\n\ndef _of_assign_and_relu(value, dtype, device_type, assign=flow.assign):\n flow.clear_default_session()\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\") is None:\n flow.config.gpu_device_num(1)\n flow.config.cpu_device_num(1)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(dtype)\n func_config.default_placement_scope(flow.scope.placement(device_type, \"0:0\"))\n\n @flow.global_function(function_config=func_config)\n def assign_fn(value_def: oft.Numpy.Placeholder(value.shape, dtype=dtype)):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n assign(var, value_def)\n\n @flow.global_function(function_config=func_config)\n def relu_fn():\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n return flow.nn.relu(var)\n\n assign_fn(value)\n return relu_fn().get().numpy()\n\n\ndef _np_relu(x):\n return np.maximum(x, 0)\n\n\ndef _compare_with_np(test_case, shape, dtype, device_type, assign):\n x = _random_input(shape, flow_to_np_dtype_dict[dtype])\n of_y = _of_assign_and_relu(x, dtype, device_type, assign=assign)\n test_case.assertTrue(np.allclose(_np_relu(x), of_y))\n\n\[email protected]_unless_2n1d()\nclass TestTwoNodeAssign(flow.unittest.TestCase):\n def test_2node_assign(test_case):\n if flow.eager_execution_enabled():\n assign = flow.experimental.eager_assign_121\n else:\n assign = flow.assign\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\"]\n arg_dict[\"assign\"] = [assign]\n for arg in GenArgDict(arg_dict):\n _2node_compare_with_np(test_case, **arg)\n\n\ndef _2node_compare_with_np(test_case, shape, dtype, device_type, assign):\n x = _random_input(shape, flow_to_np_dtype_dict[dtype])\n of_y = _2node_of_assign_and_relu(x, dtype, device_type, assign=assign)\n np_y = _np_relu(x)\n test_case.assertTrue(np.allclose(np_y, of_y))\n\n\ndef _2node_of_assign_and_relu(value, dtype, device_type, assign=flow.assign):\n flow.clear_default_session()\n flow.config.machine_num(2)\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\") is None:\n flow.config.gpu_device_num(1)\n 
flow.config.cpu_device_num(1)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(dtype)\n func_config.default_placement_scope(flow.scope.placement(device_type, \"0:0\"))\n\n @flow.global_function(function_config=func_config)\n def assign_fn(value_def: oft.Numpy.Placeholder(value.shape, dtype=dtype)):\n with flow.scope.placement(device_type, \"1:0\"):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n assign(var, value_def)\n\n @flow.global_function(function_config=func_config)\n def relu_fn():\n with flow.scope.placement(device_type, \"1:0\"):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n ret = flow.nn.relu(var)\n return ret\n\n assign_fn(value)\n relu_ret = relu_fn().get()\n return relu_ret.numpy()\n\n\[email protected]_unless_1n1d()\nclass TestAssign(flow.unittest.TestCase):\n def test_assign(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"assign\"] = [flow.assign]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_eager_assign_121(test_case):\n if not flow.eager_execution_enabled():\n return\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\"]\n arg_dict[\"assign\"] = [flow.experimental.eager_assign_121]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom oneflow.compatible.single_client import typing as tp\n\n\ndef refresh_session():\n flow.clear_default_session()\n flow.config.gpu_device_num(flow.unittest.env.device_num())\n\n\ndef get_placement():\n node_size = flow.unittest.env.node_size()\n device_ids = \"0-{}\".format(flow.unittest.env.device_num() - 1)\n machine_device_ids = [\n \"{}:{}\".format(node_id, device_ids) for node_id in range(node_size)\n ]\n return flow.scope.placement(\"gpu\", machine_device_ids)\n\n\ndef get_simple_momentum_training_model(dtype):\n assert dtype == flow.float32\n\n @flow.global_function(type=\"train\")\n def model() -> tp.Numpy:\n with get_placement():\n x = flow.get_variable(\n name=\"x\",\n shape=(4, 5),\n dtype=flow.float32,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n )\n w = flow.get_variable(\n name=\"w\",\n shape=(5, 6),\n dtype=flow.float32,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n y = flow.matmul(x, w)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [0.01]), momentum=0.9\n ).minimize(y)\n return y\n\n return model\n\n\ndef get_simple_model(dtype):\n @flow.global_function()\n def add() -> tp.Numpy:\n with get_placement():\n x = flow.get_variable(\n name=\"x\",\n shape=(9, 3),\n dtype=dtype,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n y = flow.get_variable(\n name=\"y\",\n shape=(9, 3),\n dtype=dtype,\n initializer=flow.constant_initializer(5, dtype=dtype),\n )\n z = flow.get_variable(\n name=\"z\",\n shape=(9, 3),\n dtype=dtype,\n initializer=flow.random_normal_initializer(),\n )\n return flow.math.add_n([x, y, z])\n\n return add\n\n\ndef get_large_model(dtype):\n @flow.global_function()\n def large() -> tp.Numpy:\n with get_placement():\n x = flow.get_variable(\n name=\"x\",\n shape=(10, 2801, 820, 4),\n dtype=dtype,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n return flow.math.reduce_mean(x)\n\n return large\n\n\ndef get_add_and_reduce_mean_model(dtype):\n @flow.global_function()\n def model() -> tp.Numpy:\n with get_placement():\n x = flow.get_variable(\n name=\"x\",\n shape=(10, 801, 820, 4),\n dtype=dtype,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n y = flow.get_variable(\n name=\"y\",\n shape=(10, 801, 820, 4),\n dtype=dtype,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n return flow.math.reduce_mean(x + y)\n\n return model\n\n\ndef get_checkpoint_ready_model(model_getter, dtype):\n model = model_getter(dtype)\n if flow.eager_execution_enabled():\n model()\n return model\n\n\ndef 
_TestSaveCorrectness(test_case, model_getter, dtype, legacy_api):\n \"\"\"\n Save weights by new model io, load weights by legacy model io,\n and check the equality.\n \"\"\"\n with tempfile.TemporaryDirectory() as save_dir:\n refresh_session()\n flow.config.enable_legacy_model_io(False)\n\n large1 = get_checkpoint_ready_model(model_getter, dtype)\n\n if legacy_api:\n check_point = flow.train.CheckPoint()\n check_point.save(save_dir)\n else:\n flow.checkpoint.save(save_dir)\n res1 = large1()\n\n refresh_session()\n flow.config.enable_legacy_model_io(True)\n\n large2 = get_checkpoint_ready_model(model_getter, dtype)\n\n check_point = flow.train.CheckPoint()\n check_point.load(save_dir)\n flow.sync_default_session()\n\n res2 = large2()\n test_case.assertTrue(np.array_equal(res1, res2))\n\n\ndef _TestRoundTrip(test_case, model_getter, dtype):\n \"\"\"\n Save weights by new model io, load weights by new model io,\n and check the equality.\n \"\"\"\n with tempfile.TemporaryDirectory() as save_dir:\n refresh_session()\n\n large1 = get_checkpoint_ready_model(model_getter, dtype)\n\n flow.checkpoint.save(save_dir)\n res1 = large1()\n\n refresh_session()\n\n large2 = get_checkpoint_ready_model(model_getter, dtype)\n\n vars_in_file = flow.checkpoint.get(save_dir)\n flow.load_variables(vars_in_file)\n res2 = large2()\n\n test_case.assertTrue(np.array_equal(res1, res2))\n\n\ndef _TestLoadCorrectness(test_case, model_getter, dtype, legacy_api):\n \"\"\"\n Save weights by legacy model io, load weights by new model io,\n and check the equality.\n \"\"\"\n with tempfile.TemporaryDirectory() as save_dir:\n refresh_session()\n flow.config.enable_legacy_model_io(True)\n\n large1 = get_checkpoint_ready_model(model_getter, dtype)\n\n check_point = flow.train.CheckPoint()\n check_point.init()\n\n check_point.save(save_dir)\n res1 = large1()\n\n flow.clear_default_session()\n flow.config.gpu_device_num(4)\n flow.config.enable_legacy_model_io(False)\n\n large2 = get_checkpoint_ready_model(model_getter, dtype)\n\n if legacy_api:\n check_point = flow.train.CheckPoint()\n check_point.load(save_dir)\n else:\n vars_in_file = flow.checkpoint.get(save_dir)\n flow.load_variables(vars_in_file)\n\n res2 = large2()\n\n test_case.assertTrue(np.array_equal(res1, res2))\n\n\ndef _TestPartiallyLoadNumpy(test_case, dtype):\n refresh_session()\n\n model = get_checkpoint_ready_model(get_add_and_reduce_mean_model, dtype)\n var_x = flow.get_all_variables()[\"x\"]\n var_y_value_before_loading = flow.get_all_variables()[\"y\"].numpy()\n new_val_np = np.random.random(var_x.shape).astype(np.float32)\n flow.load_variables({\"x\": new_val_np})\n var_y_value_after_loading = flow.get_all_variables()[\"y\"].numpy()\n flow_res = model()\n np_res = (var_y_value_after_loading + new_val_np).mean()\n test_case.assertTrue(\n np.allclose(flow_res, np_res),\n {\"flow_res\": flow_res, \"np_res\": np_res, \"diff\": flow_res - np_res},\n )\n test_case.assertTrue(\n np.array_equal(var_y_value_before_loading, var_y_value_after_loading)\n )\n\n\ndef _TestMixedModel(test_case, dtype):\n with tempfile.TemporaryDirectory() as save_dir1, tempfile.TemporaryDirectory() as save_dir2:\n\n def get_variable(name):\n return flow.get_variable(\n name=name,\n shape=(10, 80, 40, 20),\n dtype=dtype,\n initializer=flow.random_normal_initializer(mean=10, stddev=1),\n distribute=flow.distribute.split(0),\n )\n\n def get_part_of_mixed_model(dtype):\n @flow.global_function()\n def model() -> tp.Numpy:\n with get_placement():\n x = get_variable(\"x\")\n return x\n\n return 
model\n\n def get_mixed_model(dtype):\n @flow.global_function()\n def model() -> tp.Numpy:\n with get_placement():\n x1 = get_variable(\"x_from_model1\")\n x2 = get_variable(\"x_from_model2\")\n return x1 + x2\n\n return model\n\n refresh_session()\n model1 = get_checkpoint_ready_model(get_part_of_mixed_model, dtype)\n flow.checkpoint.save(save_dir1)\n\n refresh_session()\n model2 = get_checkpoint_ready_model(get_part_of_mixed_model, dtype)\n flow.checkpoint.save(save_dir2)\n\n refresh_session()\n mixed_model = get_checkpoint_ready_model(get_mixed_model, dtype)\n var_dict_from_model1 = flow.checkpoint.get(save_dir1)\n var_dict_from_model2 = flow.checkpoint.get(save_dir2)\n new_var_dict = {}\n for key, val in var_dict_from_model1.items():\n new_var_dict[\"{}_from_model1\".format(key)] = val\n for key, val in var_dict_from_model2.items():\n new_var_dict[\"{}_from_model2\".format(key)] = val\n flow.load_variables(new_var_dict)\n res = mixed_model()\n test_case.assertTrue(\n np.allclose(\n res,\n var_dict_from_model1[\"x\"].numpy() + var_dict_from_model2[\"x\"].numpy(),\n )\n )\n\n\ndef _TestResumeTraining(test_case):\n with tempfile.TemporaryDirectory() as save_dir:\n refresh_session()\n model = get_checkpoint_ready_model(\n get_simple_momentum_training_model, flow.float32\n )\n model()\n flow.checkpoint.save(save_dir)\n model()\n w1 = flow.get_all_variables()[\"w\"].numpy()\n\n refresh_session()\n model = get_checkpoint_ready_model(\n get_simple_momentum_training_model, flow.float32\n )\n flow.load_variables(flow.checkpoint.get(save_dir))\n model()\n w2 = flow.get_all_variables()[\"w\"].numpy()\n\n test_case.assertTrue(np.array_equal(w1, w2))\n\n\ndef _TestAssignmentBetweenMemory(test_case, dtype):\n refresh_session()\n\n model = get_checkpoint_ready_model(get_simple_model, dtype)\n all_vars = flow.get_all_variables()\n flow.load_variables({\"x\": all_vars[\"z\"]})\n flow_res = model()\n np_res = all_vars[\"z\"].numpy() * 2 + all_vars[\"y\"].numpy()\n test_case.assertTrue(np.allclose(flow_res, np_res))\n\n\nclass TestCheckpoint(flow.unittest.TestCase):\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_save_correctness_1node_legacy_api(test_case):\n _TestSaveCorrectness(test_case, get_simple_model, flow.float, True)\n\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_load_correctness_1node_legacy_api(test_case):\n _TestLoadCorrectness(test_case, get_simple_model, flow.float, True)\n\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_save_correctness_1node(test_case):\n for dtype in [flow.float, flow.double]:\n _TestSaveCorrectness(test_case, get_large_model, dtype, False)\n\n @flow.unittest.skip_unless_2n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_save_correctness_2node(test_case):\n _TestSaveCorrectness(test_case, get_large_model, flow.float, False)\n\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_load_correctness_1node(test_case):\n for dtype in [flow.float, flow.double]:\n _TestLoadCorrectness(test_case, get_large_model, dtype, 
False)\n\n @flow.unittest.skip_unless_2n4d()\n @unittest.skipIf(\n flow.unittest.env.eager_execution_enabled(),\n \"legacy model io doesn't work in eager mode\",\n )\n def test_load_correctness_2node(test_case):\n _TestLoadCorrectness(test_case, get_large_model, flow.float, False)\n\n @flow.unittest.skip_unless_1n4d()\n def test_assignment_between_memory(test_case):\n _TestAssignmentBetweenMemory(test_case, flow.float)\n\n @flow.unittest.skip_unless_1n4d()\n @unittest.skipIf(\n not flow.unittest.env.eager_execution_enabled(),\n \"Save and load are covered by other tests in lazy mode\",\n )\n def test_round_trip(test_case):\n _TestRoundTrip(test_case, get_large_model, flow.float)\n\n @flow.unittest.skip_unless_1n4d()\n def test_partially_load_numpy(test_case):\n _TestPartiallyLoadNumpy(test_case, flow.float)\n\n @flow.unittest.skip_unless_1n2d()\n def test_mixed_model(test_case):\n _TestMixedModel(test_case, flow.float)\n\n @flow.unittest.skip_unless_1n2d()\n def test_resume_training(test_case):\n _TestResumeTraining(test_case)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport sys\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom oneflow.compatible.single_client import typing as oft\nimport typing\nimport unittest\nimport os\n\n\ndef _test_categorical_ordinal_encoder(\n test_case, device_tag, dtype, size, capacity, num_tokens, num_iters\n):\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def test_job(\n x: oft.Numpy.Placeholder(shape=(size,), dtype=dtype)\n ) -> typing.Tuple[oft.Numpy, oft.Numpy]:\n with flow.scope.placement(device_tag, \"0:0\"):\n y = flow.layers.categorical_ordinal_encoder(x, capacity=capacity)\n z = flow.layers.categorical_ordinal_encoder(\n x, capacity=capacity, name=\"encode1\"\n )\n # z = flow.layers.categorical_ordinal_encoder(x, capacity=320)\n return y, z\n\n tokens = np.random.randint(-sys.maxsize, sys.maxsize, size=[num_tokens]).astype(\n flow.convert_oneflow_dtype_to_numpy_dtype(dtype)\n )\n k_set = set()\n v_set = set()\n kv_set = set()\n vk_set = set()\n\n for i in range(num_iters):\n x = tokens[np.random.randint(0, num_tokens, (size,))]\n y, z = test_job(x)\n\n test_case.assertEqual(x.shape, y.shape)\n if device_tag == \"cpu\":\n test_case.assertTrue(\n np.array_equal(y, z),\n \"\\ny: {}\\n{}\\nz: {}\\n{}\".format(y.shape, y, z.shape, z),\n )\n\n for k, v in zip(x, y):\n k_set.add(k)\n v_set.add(v)\n kv_set.add((k, v))\n vk_set.add((v, k))\n\n unique_size = len(k_set)\n test_case.assertEqual(len(v_set), unique_size)\n test_case.assertEqual(len(kv_set), unique_size)\n test_case.assertEqual(len(vk_set), unique_size)\n\n\[email protected]_unless_1n1d()\nclass TestCategoricalOrdinalEncoder(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_categorical_ordinal_encoder_gpu_large(test_case):\n _test_categorical_ordinal_encoder(\n test_case=test_case,\n device_tag=\"gpu\",\n dtype=flow.int64,\n size=10000,\n capacity=320000,\n num_tokens=200000,\n num_iters=256,\n )\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_categorical_ordinal_encoder_gpu_small(test_case):\n _test_categorical_ordinal_encoder(\n test_case=test_case,\n device_tag=\"gpu\",\n dtype=flow.int32,\n size=10,\n capacity=250,\n num_tokens=200,\n num_iters=4,\n )\n\n def test_categorical_ordinal_encoder_cpu_large(test_case):\n _test_categorical_ordinal_encoder(\n test_case=test_case,\n device_tag=\"cpu\",\n dtype=flow.int64,\n size=20000,\n capacity=220000,\n num_tokens=200000,\n num_iters=100,\n )\n\n def test_categorical_ordinal_encoder_cpu_very_large(test_case):\n _test_categorical_ordinal_encoder(\n test_case=test_case,\n device_tag=\"cpu\",\n dtype=flow.int64,\n size=50000,\n capacity=1000000,\n num_tokens=500000,\n num_iters=100,\n 
)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom oneflow.compatible import single_client as flow\nimport numpy as np\nfrom oneflow.compatible.single_client import typing as tp\nfrom test_util import GenArgList\nimport unittest\nfrom collections import OrderedDict\nfrom typing import Dict\nimport os\n\n\ndef _compare_triplet_margin_loss_with_np(\n anchor_shape,\n pos_shape,\n neg_shape,\n eps,\n margin,\n p,\n swap,\n device_type,\n machine_ids,\n device_counts,\n):\n anchor = np.random.random(size=anchor_shape).astype(np.float32)\n pos = np.random.random(size=pos_shape).astype(np.float32)\n neg = np.random.random(size=neg_shape).astype(np.float32)\n eps = eps\n\n assert device_type in [\"cpu\", \"gpu\"]\n\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_counts)\n else:\n flow.config.gpu_device_num(device_counts)\n\n func_config = flow.FunctionConfig()\n func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))\n func_config.default_logical_view(flow.scope.consistent_view())\n\n def np_triplet_margin_loss(np_anchor, np_pos, np_neg, eps, np_margin, np_p, swap):\n np_d_1_norm = np.power(np.abs((np_anchor - np_pos + eps)), np_p)\n np_d_2_norm = np.power(np.abs((np_anchor - np_neg + eps)), np_p)\n\n np_d_1 = np.power(np.sum(np_d_1_norm, axis=-1), 1.0 / np_p)\n np_d_2 = np.power(np.sum(np_d_2_norm, axis=-1), 1.0 / np_p)\n\n if swap:\n np_dist_swap = np.power(np.abs((np_pos - np_neg + eps)), np_p)\n np_dist_swap = np.power(np.sum(np_dist_swap, axis=-1), 1.0 / np_p)\n np_d_2 = np.minimum(np_d_2, np_dist_swap)\n\n np_triplet_margin_loss = np.maximum((np_margin + np_d_1 - np_d_2), 0)\n np_triplet_margin_loss_mean = np.mean(np_triplet_margin_loss)\n np_triplet_margin_loss_sum = np.sum(np_triplet_margin_loss)\n\n return {\n \"np_triplet_margin_loss\": np_triplet_margin_loss,\n \"np_triplet_margin_loss_mean\": np_triplet_margin_loss_mean,\n \"np_triplet_margin_loss_sum\": np_triplet_margin_loss_sum,\n }\n\n np_out_tripletloss_dict = np_triplet_margin_loss(\n anchor, pos, neg, eps, margin, p, swap\n )\n\n def np_triplet_loss_diff(anchor, pos, neg, margin, p):\n def _compute_distance(x1, x2, x3):\n d_1_norm = np.power(np.abs((x1 - x2 + 1e-6)), p)\n d_2_norm = np.power(np.abs((x1 - x3 + 1e-6)), p)\n d_1 = np.power(np.sum(d_1_norm, axis=-1), 1.0 / p)\n d_2 = np.power(np.sum(d_2_norm, axis=-1), 1.0 / p)\n\n return d_1 - d_2 + margin\n\n def _compute_per_diff(x1, x2, p, eps=1e-6):\n # Add epsilon to avoid divided by zero\n _abs_index = np.where(x1 - x2 > 0, 1, -1)\n # When element == 0, its grad = 0\n _abs_index_support = np.where(x1 - x2 == 0, 1, 0)\n _abs_grad = _abs_index + _abs_index_support\n\n _abs_val = np.abs(x1 - x2 + eps)\n _power_abs_val = np.power(_abs_val, p)\n _sum_val = np.sum(_power_abs_val, axis=1, keepdims=True)\n\n # Add epsilon to avoid divided by zero\n _sqrt_sum_val = np.power(_sum_val + eps, 1.0 / p - 1)\n\n _power_val = np.power(_abs_val, p - 1)\n\n _grad 
= np.multiply(_sqrt_sum_val, _power_val)\n # Multiply the abs grad\n _grad *= _abs_grad\n return _grad / x1.shape[0]\n\n d = _compute_distance(anchor, pos, neg)\n # Because We use max(x, 0), the value less than 0, the corresponding grad is 0\n # So Here we compute the index that its grad need to be place to 0\n zero_index = np.where(d < -1e-6)\n\n anchor_grad_1 = _compute_per_diff(anchor, pos, p)\n anchor_grad_2 = _compute_per_diff(anchor, neg, p)\n\n total_grad = anchor_grad_1 - anchor_grad_2\n\n for i in zero_index:\n total_grad[i] = 0\n\n grad_dict = {\n \"np_triplet_loss_grad_mean\": total_grad,\n }\n\n return grad_dict\n\n np_grad_dict = np_triplet_loss_diff(anchor, pos, neg, margin, p)\n\n def assert_prediction_grad(blob: tp.Numpy):\n # Evaluate the gradient\n assert np.allclose(blob, np_grad_dict[\"np_triplet_loss_grad_mean\"], atol=2e-3)\n\n @flow.global_function(\n type=\"train\", function_config=func_config,\n )\n def oneflow_marginloss(\n of_anchor: tp.Numpy.Placeholder(shape=anchor.shape),\n of_pos: tp.Numpy.Placeholder(shape=pos.shape),\n of_neg: tp.Numpy.Placeholder(shape=neg.shape),\n ) -> Dict[str, tp.Numpy]:\n with flow.scope.placement(device_type, \"0:0\"):\n v = flow.get_variable(\n shape=anchor.shape,\n dtype=flow.float32,\n initializer=flow.constant_initializer(0),\n name=\"x_var\",\n )\n x_anchor = of_anchor + v\n\n flow.watch_diff(x_anchor, assert_prediction_grad)\n\n triplet_marginloss = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"none\",\n name=\"of_tripletmarginloss\",\n )\n triplet_marginloss_mean = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"mean\",\n name=\"of_tripletmarginloss_mean\",\n )\n triplet_marginloss_sum = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"sum\",\n name=\"of_tripletmarginloss_sum\",\n )\n\n with flow.scope.placement(device_type, \"0:0\"):\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0\n ).minimize(triplet_marginloss_mean)\n\n return {\n \"of_triplet_margin_loss\": triplet_marginloss,\n \"of_triplet_margin_loss_mean\": triplet_marginloss_mean,\n \"of_triplet_margin_loss_sum\": triplet_marginloss_sum,\n }\n\n of_out_tripletloss_dict = oneflow_marginloss(anchor, pos, neg)\n\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss\"],\n atol=1e-3,\n )\n\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss_mean\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss_mean\"],\n atol=1e-3,\n )\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss_sum\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss_sum\"],\n atol=1e-3,\n )\n\n\ndef _gen_arg_dict(shape, eps, margin, p, swap, device_type, machine_ids, device_counts):\n # Generate a dict to pass parameter to test case\n arg_dict = OrderedDict()\n arg_dict[\"anchor_shape\"] = [shape]\n arg_dict[\"pos_shape\"] = [shape]\n arg_dict[\"neg_shape\"] = [shape]\n arg_dict[\"eps\"] = [eps]\n arg_dict[\"margin\"] = [margin]\n arg_dict[\"p\"] = [p]\n arg_dict[\"swap\"] = [swap]\n arg_dict[\"device_type\"] = [device_type]\n arg_dict[\"machine_ids\"] = [machine_ids]\n arg_dict[\"device_counts\"] = [device_counts]\n return arg_dict\n\n\[email protected]_unless_1n1d()\nclass Test_triplet_loss_1n1d(flow.unittest.TestCase):\n def 
test_triplet_margin_loss_cpu(test_case):\n arg_dict = _gen_arg_dict(\n shape=(3, 3),\n eps=1e-6,\n margin=1,\n p=1.5,\n swap=False,\n device_type=\"cpu\",\n machine_ids=\"0:0\",\n device_counts=1,\n )\n\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_margin_ranking_loss_gpu(test_case):\n arg_dict = _gen_arg_dict(\n shape=(3, 6),\n eps=1e-6,\n margin=1,\n p=2.0,\n swap=False,\n device_type=\"gpu\",\n machine_ids=\"0:0\",\n device_counts=1,\n )\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n\[email protected]_unless_1n2d()\nclass Testmarginloss1n2d(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_margin_ranking_loss_1n2d(test_case):\n arg_dict = _gen_arg_dict(\n shape=(6, 6),\n eps=1e-6,\n margin=1,\n p=2.0,\n swap=False,\n device_type=\"gpu\",\n machine_ids=\"0:0-1\",\n device_counts=2,\n )\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport oneflow.experimental as flow\nfrom test_util import GenArgList\n\n\ndef _test_ones_like_float(test_case, shape, device):\n x = flow.Tensor(np.random.randn(*shape), device=flow.device(device))\n y = flow.ones_like(x)\n test_case.assertTrue(y.dtype is flow.float32)\n test_case.assertTrue(y.shape == x.shape)\n test_case.assertTrue(y.device == x.device)\n\n y_numpy = np.ones_like(x.numpy())\n test_case.assertTrue(np.array_equal(y.numpy(), y_numpy))\n\n\ndef _test_ones_like_int(test_case, shape, device):\n x = flow.Tensor(np.random.randn(*shape), dtype=flow.int, device=flow.device(device))\n y = flow.ones_like(x)\n test_case.assertTrue(y.dtype is flow.int)\n test_case.assertTrue(y.shape == x.shape)\n test_case.assertTrue(y.device == x.device)\n\n y_numpy = np.ones_like(x.numpy())\n test_case.assertTrue(np.array_equal(y.numpy(), y_numpy))\n\n\[email protected]_unless_1n1d()\nclass TestModule(flow.unittest.TestCase):\n def test_ones_like(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_ones_like_float,\n _test_ones_like_int,\n ]\n arg_dict[\"shape\"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom scipy.special import erf, erfc, gammaln\nfrom oneflow.compatible.single_client import typing as oft\nimport os\n\n\[email protected]_unless_1n2d()\nclass TestUnaryElementwiseOps(flow.unittest.TestCase):\n def test_abs(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AbsJob(a: oft.Numpy.Placeholder((5, 2))):\n return flow.math.abs(a)\n\n x = np.random.rand(5, 2).astype(np.float32)\n y = AbsJob(x).get().numpy()\n test_case.assertTrue(np.array_equal(y, np.absolute(x)))\n\n def test_acos(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AcosJob(a: oft.Numpy.Placeholder((5, 2))):\n return flow.math.acos(a)\n\n x = np.random.rand(5, 2).astype(np.float32)\n y = AcosJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arccos(x)))\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_acos_consistent_1n2c(test_case):\n flow.config.gpu_device_num(2)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AcosJob(a: oft.Numpy.Placeholder((5, 2))):\n return flow.math.acos(a)\n\n x = np.random.rand(5, 2).astype(np.float32)\n y = AcosJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arccos(x)))\n\n def test_acos_cpu(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_placement_scope(flow.scope.placement(\"cpu\", \"0:0\"))\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AcosJob(a: oft.Numpy.Placeholder((5, 2))):\n return flow.math.acos(a)\n\n x = np.random.rand(5, 2).astype(np.float32)\n y = AcosJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arccos(x)))\n\n def test_acos_double(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AcosJob(a: oft.Numpy.Placeholder((5, 2), dtype=flow.double)):\n return flow.math.acos(a)\n\n x = np.random.rand(5, 2).astype(np.double)\n y = AcosJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arccos(x)))\n\n def test_acosh(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n 
@flow.global_function(function_config=func_config)\n def AcoshJob(a: oft.Numpy.Placeholder((7,))):\n return flow.math.acosh(a)\n\n # x = np.random.rand(7,).astype(np.float32)\n x = np.array([-2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")], dtype=np.float32)\n y = AcoshJob(x).get().numpy()\n # input: [-2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")]\n # output: [nan nan 0. 0.62236255 5.9914584 9.903487 inf]\n test_case.assertTrue(np.allclose(y, np.arccosh(x), equal_nan=True))\n\n x = np.random.uniform(low=1.0, high=100.0, size=(7,)).astype(np.float32)\n y = AcoshJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arccosh(x), equal_nan=True))\n\n def test_asin(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AsinJob(a: oft.Numpy.Placeholder((2,))):\n return flow.math.asin(a)\n\n x = np.array([0.8659266, 0.7068252], dtype=np.float32)\n y = AsinJob(x).get().numpy()\n # output: [1.047, 0.785] ~= [(PI/3), (PI/4)]\n test_case.assertTrue(np.allclose(y, np.arcsin(x), equal_nan=True))\n\n x = np.random.uniform(low=-1.0, high=1.0, size=(2,)).astype(np.float32)\n y = AsinJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arcsin(x), equal_nan=True))\n\n def test_asinh(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AsinhJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.asinh(a)\n\n x = np.array(\n [-float(\"inf\"), -2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")],\n dtype=np.float32,\n )\n y = AsinhJob(x).get().numpy()\n # output: [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]\n test_case.assertTrue(np.allclose(y, np.arcsinh(x), equal_nan=True))\n # print(\"asinh y = \", y)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = AsinhJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arcsinh(x), equal_nan=True))\n\n def test_atan(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AtanJob(a: oft.Numpy.Placeholder((2,))):\n return flow.math.atan(a)\n\n x = np.array([1.731261, 0.99920404], dtype=np.float32)\n y = AtanJob(x).get().numpy()\n # output: [1.047, 0.785] ~= [(PI/3), (PI/4)]\n test_case.assertTrue(np.allclose(y, np.arctan(x), equal_nan=True))\n # print(\"atan y = \", y)\n\n pi = 3.14159265357\n x = np.random.uniform(low=-pi / 2, high=pi / 2, size=(2,)).astype(np.float32)\n y = AtanJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arctan(x), equal_nan=True))\n\n def test_atanh(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def AtanhJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.atanh(a)\n\n x = np.array(\n [-float(\"inf\"), -1, -0.5, 1, 0, 0.5, 10, float(\"inf\")], dtype=np.float32\n )\n y = AtanhJob(x).get().numpy()\n # output: [nan -inf -0.54930615 inf 0. 
0.54930615 nan nan]\n test_case.assertTrue(np.allclose(y, np.arctanh(x), equal_nan=True))\n # print(\"atanh y = \", y)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = AtanhJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.arctanh(x), equal_nan=True))\n\n def test_ceil(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def CeilJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.ceil(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = CeilJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.ceil(x), equal_nan=True))\n\n def test_cos(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def CosJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.cos(a)\n\n x = np.array(\n [-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")],\n dtype=np.float32,\n )\n y = CosJob(x).get().numpy()\n # output: [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]\n test_case.assertTrue(np.allclose(y, np.cos(x), equal_nan=True))\n # print(\"cos y = \", y)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = CosJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.cos(x), equal_nan=True))\n\n def test_cosh(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def CoshJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.cosh(a)\n\n x = np.array(\n [-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")], dtype=np.float32\n )\n y = CoshJob(x).get().numpy()\n # output: [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]\n test_case.assertTrue(np.allclose(y, np.cosh(x), equal_nan=True))\n # print(\"cosh y = \", y)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = CoshJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.cosh(x), equal_nan=True))\n\n def test_erf(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ErfJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.erf(a)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = ErfJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, erf(x), equal_nan=True))\n\n def test_erfc(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ErfcJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.erfc(a)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = ErfcJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, erfc(x), equal_nan=True))\n\n def test_exp(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ExpJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.exp(a)\n\n x = 
np.random.uniform(size=(8,)).astype(np.float32)\n y = ExpJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.exp(x), equal_nan=True))\n\n def test_expm1(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def Expm1Job(a: oft.Numpy.Placeholder((8,))):\n return flow.math.expm1(a)\n\n x = np.random.uniform(size=(8,)).astype(np.float32)\n y = Expm1Job(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.expm1(x), equal_nan=True))\n\n def test_floor(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def FloorJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.floor(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = FloorJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.floor(x), equal_nan=True))\n\n def test_lgamma(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def LgammaJob(a: oft.Numpy.Placeholder((6,))):\n return flow.math.lgamma(a)\n\n x = np.array([0, 0.5, 1, 4.5, -4, -5.6], dtype=np.float32)\n y = LgammaJob(x).get().numpy()\n # output: [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]\n # print(\"lgamma y = \", y)\n test_case.assertTrue(np.allclose(y, gammaln(x), equal_nan=True))\n\n def test_log(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def LogJob(a: oft.Numpy.Placeholder((4,))):\n return flow.math.log(a)\n\n x = np.array([0, 0.5, 1, 5], dtype=np.float32)\n y = LogJob(x).get().numpy()\n # output: [-inf, -0.6931472, 0. 
, 1.609438]\n # print(\"log y = \", y)\n test_case.assertTrue(np.allclose(y, np.log(x), equal_nan=True))\n\n def test_log1p(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def Log1pJob(a: oft.Numpy.Placeholder((4,))):\n return flow.math.log1p(a)\n\n x = np.array([0, 0.5, 1, 5], dtype=np.float32)\n y = Log1pJob(x).get().numpy()\n # output: [0., 0.4054651, 0.6931472, 1.791759]\n # print(\"log1p y = \", y)\n test_case.assertTrue(np.allclose(y, np.log1p(x), equal_nan=True))\n\n def test_log_sigmoid(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def LogSigmoidJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.log_sigmoid(a)\n\n x = np.random.uniform(low=-5.0, high=5.0, size=(8,)).astype(np.float32)\n y = LogSigmoidJob(x).get().numpy()\n # print(\"log_sigmoid y = \", y)\n test_case.assertTrue(\n np.allclose(\n y, -np.log(1 + np.exp(-x)), equal_nan=True, rtol=1e-03, atol=1e-05\n )\n )\n\n def test_negative(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def NegativeJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.negative(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = NegativeJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, -x, equal_nan=True))\n\n def test_reciprocal(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ReciprocalJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.reciprocal(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = ReciprocalJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, 1.0 / x, equal_nan=True))\n\n def test_reciprocal_no_nan(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ReciprocalNoNanJob(a: oft.Numpy.Placeholder((4,))):\n return flow.math.reciprocal_no_nan(a)\n\n x = np.array([2.0, 0.5, 0, 1], dtype=np.float32)\n out = np.array([0.5, 2, 0.0, 1.0], dtype=np.float32)\n y = ReciprocalNoNanJob(x).get().numpy()\n # print(\"reciprocal_no_nan: y = \", y)\n test_case.assertTrue(np.allclose(y, out, equal_nan=True))\n\n def test_rint(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def RintJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.rint(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = RintJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.rint(x), equal_nan=True))\n\n def test_rint_special_value(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def 
RintJob(a: oft.Numpy.Placeholder((9,))):\n return flow.math.rint(a)\n\n x = np.array(\n [0.5000001, -1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.5, 3.5], dtype=np.float32\n )\n out = np.array(\n [1.0, -2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 2.0, 4.0], dtype=np.float32\n )\n y = RintJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, out, equal_nan=True))\n\n def test_round(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def RoundJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.round(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = RoundJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.round(x), equal_nan=True))\n\n def test_round_special_value(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def RoundJob(a: oft.Numpy.Placeholder((5,))):\n return flow.math.round(a)\n\n x = np.array([0.9, 2.5, 2.3, 1.5, -4.5], dtype=np.float32)\n out = np.array([1.0, 2.0, 2.0, 2.0, -4.0], dtype=np.float32)\n y = RoundJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, out, equal_nan=True))\n\n def test_rsqrt(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def RsqrtJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.rsqrt(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = RsqrtJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, 1 / np.sqrt(x), equal_nan=True))\n\n def test_sigmoid_v2(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SigmoidJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.sigmoid_v2(a)\n\n x = np.random.uniform(low=-2.0, high=2.0, size=(8,)).astype(np.float32)\n y = SigmoidJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, 1.0 / (1.0 + np.exp(-x)), equal_nan=True))\n\n def test_sign(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SignJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.sign(a)\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)\n y = SignJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))\n\n def test_sign_double(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SignJob(a: oft.Numpy.Placeholder((8,), dtype=flow.double)):\n return flow.math.sign(a)\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.double)\n y = SignJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_sign_double_consistent_1n2c(test_case):\n flow.config.gpu_device_num(2)\n func_config = 
flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SignJob(a: oft.Numpy.Placeholder((8,), dtype=flow.double)):\n return flow.math.sign(a)\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.double)\n y = SignJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))\n\n def test_sin(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SinJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.sin(a)\n\n x = np.array(\n [-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10, float(\"inf\")], dtype=np.float32\n )\n y = SinJob(x).get().numpy()\n # output: [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]\n test_case.assertTrue(np.allclose(y, np.sin(x), equal_nan=True))\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)\n y = SinJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.sin(x), equal_nan=True))\n\n def test_softplus(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SoftplusJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.softplus(a)\n\n x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)\n y = SoftplusJob(x).get().numpy()\n test_case.assertTrue(\n np.allclose(\n y, np.log(np.exp(x) + 1), equal_nan=True, rtol=1e-03, atol=1e-05\n )\n )\n\n def test_sqrt(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SqrtJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.sqrt(a)\n\n x = np.random.uniform(low=0.0, high=100.0, size=(8,)).astype(np.float32)\n y = SqrtJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.sqrt(x), equal_nan=True))\n\n def test_square(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def SquareJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.square(a)\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)\n y = SquareJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, x * x, equal_nan=True))\n\n def test_tan(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def TanJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.tan(a)\n\n x = np.array(\n [-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")],\n dtype=np.float32,\n )\n y = TanJob(x).get().numpy()\n # output: [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]\n test_case.assertTrue(np.allclose(y, np.tan(x), equal_nan=True))\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)\n y = TanJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.tan(x), equal_nan=True))\n\n def test_tanh(test_case):\n func_config = 
flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def TanhJob(a: oft.Numpy.Placeholder((8,))):\n return flow.math.tanh(a)\n\n x = np.array(\n [-float(\"inf\"), -5, -0.5, 1, 1.2, 2, 3, float(\"inf\")], dtype=np.float32\n )\n y = TanhJob(x).get().numpy()\n # output: [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 1.]\n test_case.assertTrue(np.allclose(y, np.tanh(x), equal_nan=True))\n\n x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)\n y = TanhJob(x).get().numpy()\n test_case.assertTrue(np.allclose(y, np.tanh(x), equal_nan=True))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport oneflow.experimental as flow\nfrom test_util import GenArgList\n\n\ndef _prelu(input, alpha):\n alpha = np.expand_dims(alpha, 0)\n alpha = np.expand_dims(alpha, 2)\n alpha = np.expand_dims(alpha, 3)\n return np.where(input > 0, input, input * alpha)\n\n\ndef _prelu_grad(input, alpha):\n return alpha * (input <= 0) + (input > 0)\n\n\ndef _test_prelu(test_case, shape, device):\n np_input = np.random.randn(*shape)\n input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))\n np_alpha = np.random.randn(1)\n prelu = flow.nn.PReLU(init=np_alpha)\n if device == \"cuda\":\n prelu.to(flow.device(\"cuda\"))\n np_out = _prelu(np_input, np_alpha)\n of_out = prelu(input)\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))\n\n\ndef _test_prelu_ndims(test_case, shape, device):\n np_input = np.random.randn(*shape)\n input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))\n np_alpha = np.random.randn(shape[1])\n prelu = flow.nn.PReLU(init=1.0, num_parameters=shape[1])\n prelu_alpha = np.expand_dims(np_alpha, (1, 2))\n prelu.weight = flow.nn.Parameter(flow.Tensor(prelu_alpha, dtype=flow.float32))\n if device == \"cuda\":\n prelu.to(flow.device(\"cuda\"))\n np_out = _prelu(np_input, np_alpha)\n of_out = prelu(input)\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))\n\n\ndef _test_prelu_grad(test_case, shape, device):\n np_input = np.random.randn(*shape)\n input = flow.Tensor(\n np_input, dtype=flow.float32, requires_grad=True, device=flow.device(device)\n )\n np_alpha = 0.2\n prelu = flow.nn.PReLU(init=np_alpha)\n if device == \"cuda\":\n prelu.to(flow.device(\"cuda\"))\n of_out = prelu(input).sum()\n of_out.backward()\n np_grad = _prelu_grad(np_input, np_alpha)\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-5, 1e-5))\n\n\[email protected]_unless_1n1d()\nclass TestPReLU(flow.unittest.TestCase):\n def test_prelu(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(2, 4, 5, 6)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n _test_prelu(test_case, *arg)\n _test_prelu_ndims(test_case, *arg)\n _test_prelu_grad(test_case, *arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom oneflow.compatible.single_client import typing as oft\n\n\ndef _test(test_case, mode):\n flow.config.gpu_device_num(2)\n flow.config.enable_debug_mode(True)\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.consistent_view())\n func_config.optimizer_placement_optimization_mode(mode)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def Foo(x: oft.Numpy.Placeholder((2, 1024 * 1024))):\n w = flow.get_variable(\n \"w\", (1024 * 1024,), initializer=flow.constant_initializer(100)\n )\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [5]), momentum=0\n ).minimize(x + w)\n\n Foo(np.ones((2, 1024 * 1024), dtype=np.float32))\n\n\[email protected]_unless_1n2d()\nclass TestOptimizerPlacementOptimization(flow.unittest.TestCase):\n def test_non_distributed(test_case):\n _test(test_case, \"non_distributed\")\n\n def test_distributed_split(test_case):\n _test(test_case, \"distributed_split\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.ones"
],
[
"numpy.maximum",
"numpy.allclose",
"numpy.issubdtype",
"numpy.random.random_integers",
"numpy.random.default_rng"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.array_equal"
],
[
"numpy.array_equal",
"numpy.random.randint"
],
[
"numpy.maximum",
"numpy.allclose",
"numpy.random.random",
"numpy.abs",
"numpy.minimum",
"numpy.power",
"numpy.multiply",
"numpy.mean",
"numpy.where",
"numpy.sum"
],
[
"numpy.random.randn"
],
[
"numpy.arctanh",
"numpy.sqrt",
"numpy.arctan",
"numpy.round",
"numpy.exp",
"numpy.allclose",
"numpy.arcsin",
"numpy.sin",
"numpy.ceil",
"scipy.special.erfc",
"numpy.log1p",
"scipy.special.erf",
"numpy.log",
"numpy.cosh",
"numpy.arccosh",
"numpy.rint",
"numpy.arccos",
"numpy.tan",
"numpy.random.rand",
"numpy.floor",
"scipy.special.gammaln",
"numpy.array",
"numpy.arcsinh",
"numpy.tanh",
"numpy.absolute",
"numpy.cos",
"numpy.expm1",
"numpy.sign",
"numpy.random.uniform"
],
[
"numpy.random.randn",
"numpy.expand_dims",
"numpy.where"
],
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.18",
"0.19"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SilanHe/hierarchical-dnn-interpretations | [
"d6f96d0ab6fec48ee53ab930b2660e80525993b9",
"d6f96d0ab6fec48ee53ab930b2660e80525993b9"
] | [
"acd/scores/cd.py",
"visualization/viz_2d.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom copy import deepcopy\nimport numpy as np\nfrom scipy.special import expit as sigmoid\nfrom .cd_propagate import *\nfrom .cd_architecture_specific import *\n\ndef cd(im_torch: torch.Tensor, model, mask=None, model_type=None, device='cuda', transform=None):\n '''Get contextual decomposition scores for blob\n \n Params\n ------\n im_torch: torch.Tensor\n example to interpret - usually has shape (batch_size, num_channels, height, width)\n model: pytorch model \n mask: array_like (values in {0, 1})\n required unless transform is supplied\n array with 1s marking the locations of relevant pixels, 0s marking the background\n shape should match the shape of im_torch or just H x W \n model_type: str, optional\n usually should just leave this blank\n if this is == 'mnist', uses CD for a specific mnist model\n if this is == 'resnet18', uses resnet18 model\n device: str, optional\n transform: function\n transform should be a function which transforms the original image to specify rel\n only used if mask is not passed\n \n Returns\n -------\n relevant: torch.Tensor\n class-wise scores for relevant mask\n irrelevant: torch.Tensor\n class-wise scores for everything but the relevant mask \n '''\n # set up model\n model.eval()\n model = model.to(device)\n im_torch = im_torch.to(device)\n \n # set up masks\n if not mask is None:\n mask = torch.FloatTensor(mask).to(device)\n relevant = mask * im_torch\n irrelevant = (1 - mask) * im_torch\n elif not transform is None:\n relevant = transform(im_torch).to(device)\n if len(relevant.shape) < 4:\n relevant = relevant.reshape(1, 1, relevant.shape[0], relevant.shape[1])\n irrelevant = im_torch - relevant\n else:\n print('invalid arguments')\n relevant = relevant.to(device)\n irrelevant = irrelevant.to(device)\n\n # deal with specific architectures which have problems\n if model_type == 'mnist':\n return cd_propagate_mnist(relevant, irrelevant, model)\n elif model_type == 'resnet18':\n return cd_propagate_resnet(relevant, irrelevant, model)\n \n # try the generic case\n else:\n mods = list(model.modules())\n relevant, irrelevant = cd_generic(mods, relevant, irrelevant)\n return relevant, irrelevant\n\ndef cd_generic(mods, relevant, irrelevant):\n '''Helper function for cd which loops over modules and propagates them \n based on the layer name\n '''\n for i, mod in enumerate(mods):\n t = str(type(mod))\n if 'Conv2d' in t:\n relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)\n elif 'Linear' in t:\n relevant = relevant.reshape(relevant.shape[0], -1)\n irrelevant = irrelevant.reshape(irrelevant.shape[0], -1)\n relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)\n elif 'ReLU' in t:\n relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)\n elif 'AvgPool' in t or 'NormLayer' in t or 'Dropout' in t \\\n or 'ReshapeLayer' in t or ('modularize' in t and 'Transform' in t): # custom layers\n relevant, irrelevant = propagate_independent(relevant, irrelevant, mod)\n elif 'Pool' in t and not 'AvgPool' in t:\n relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod)\n elif 'BatchNorm2d' in t:\n relevant, irrelevant = propagate_batchnorm2d(relevant, irrelevant, mod)\n return relevant, irrelevant\n\n\ndef cd_text(batch, model, start, stop, return_irrel_scores=False):\n '''Get contextual decomposition scores for substring of a text sequence\n \n Params\n ------\n batch: torchtext batch\n really only requires that batch.text is the string input to be interpreted\n start: 
int\n beginning index of substring to be interpreted (inclusive)\n stop: int\n ending index of substring to be interpreted (inclusive)\n\n Returns\n -------\n scores: torch.Tensor\n class-wise scores for relevant substring\n '''\n weights = model.lstm.state_dict()\n\n # Index one = word vector (i) or hidden state (h), index two = gate\n W_ii, W_if, W_ig, W_io = np.split(weights['weight_ih_l0'], 4, 0)\n W_hi, W_hf, W_hg, W_ho = np.split(weights['weight_hh_l0'], 4, 0)\n b_i, b_f, b_g, b_o = np.split(weights['bias_ih_l0'].cpu().numpy() + weights['bias_hh_l0'].cpu().numpy(), 4)\n word_vecs = model.embed(batch.text)[:, 0].data\n T = word_vecs.size(0)\n relevant = np.zeros((T, model.hidden_dim))\n irrelevant = np.zeros((T, model.hidden_dim))\n relevant_h = np.zeros((T, model.hidden_dim))\n irrelevant_h = np.zeros((T, model.hidden_dim))\n for i in range(T):\n if i > 0:\n prev_rel_h = relevant_h[i - 1]\n prev_irrel_h = irrelevant_h[i - 1]\n else:\n prev_rel_h = np.zeros(model.hidden_dim)\n prev_irrel_h = np.zeros(model.hidden_dim)\n\n rel_i = np.dot(W_hi, prev_rel_h)\n rel_g = np.dot(W_hg, prev_rel_h)\n rel_f = np.dot(W_hf, prev_rel_h)\n rel_o = np.dot(W_ho, prev_rel_h)\n irrel_i = np.dot(W_hi, prev_irrel_h)\n irrel_g = np.dot(W_hg, prev_irrel_h)\n irrel_f = np.dot(W_hf, prev_irrel_h)\n irrel_o = np.dot(W_ho, prev_irrel_h)\n\n if i >= start and i <= stop:\n rel_i = rel_i + np.dot(W_ii, word_vecs[i])\n rel_g = rel_g + np.dot(W_ig, word_vecs[i])\n rel_f = rel_f + np.dot(W_if, word_vecs[i])\n rel_o = rel_o + np.dot(W_io, word_vecs[i])\n else:\n irrel_i = irrel_i + np.dot(W_ii, word_vecs[i])\n irrel_g = irrel_g + np.dot(W_ig, word_vecs[i])\n irrel_f = irrel_f + np.dot(W_if, word_vecs[i])\n irrel_o = irrel_o + np.dot(W_io, word_vecs[i])\n\n rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)\n rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, np.tanh)\n\n relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g\n irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g\n\n if i >= start and i <= stop:\n relevant[i] += bias_contrib_i * bias_contrib_g\n else:\n irrelevant[i] += bias_contrib_i * bias_contrib_g\n\n if i > 0:\n rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)\n relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]\n irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \\\n relevant[i - 1]\n\n o = sigmoid(np.dot(W_io, word_vecs[i]) + np.dot(W_ho, prev_rel_h + prev_irrel_h) + b_o)\n rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)\n new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])\n # relevant_h[i] = new_rel_h * (rel_contrib_o + bias_contrib_o)\n # irrelevant_h[i] = new_rel_h * (irrel_contrib_o) + new_irrel_h * (rel_contrib_o + irrel_contrib_o + bias_contrib_o)\n relevant_h[i] = o * new_rel_h\n irrelevant_h[i] = o * new_irrel_h\n\n W_out = model.hidden_to_label.weight.data\n\n # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias\n scores = np.dot(W_out, relevant_h[T - 1])\n irrel_scores = np.dot(W_out, irrelevant_h[T - 1])\n\n if return_irrel_scores:\n return scores, irrel_scores\n \n return scores\n",
"import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom cycler import cycler\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport math\nfrom skimage.transform import resize\nimport random\n\n\n# Create an N-bin discrete colormap from the specified input map\ndef discrete_cmap(N, base_cmap=None):\n base = plt.cm.get_cmap(base_cmap)\n nums = np.linspace(1 / N, 1, N)\n random.Random(10).shuffle(\n nums) # shuffle in place so colors aren't consecutive, 9 for imagenet figs, now set for mnist figs\n nums[0] = 0\n color_list = base(nums)\n cmap_name = base.name + str(N)\n return color_list, base.from_list(cmap_name, color_list, N)\n\n\n# cmap\n# cmap = matplotlib.cm.Greys\ncmap = matplotlib.cm.get_cmap('RdBu')\ncmap.set_bad(color='#60ff16') # bright green\nN_COLORS = 11\ncmap_comp = discrete_cmap(N_COLORS, 'jet')[1]\ncmap_comp.set_under(color='#ffffff') # transparent for lowest value\n\n\ndef visualize_ims_tiled(ims_tiled):\n # plt.figure(figsize=(6, 30))\n num_ims = 25 # len(ims_tiled)\n D = 5\n for i in range(D * (num_ims // D)):\n plt.subplot(D, num_ims // D, 1 + i)\n plt.imshow(ims_tiled[i], cmap=cmap, interpolation='None')\n plt.axis('off')\n plt.subplots_adjust(wspace=None, hspace=None)\n\n\ndef visualize_preds(preds, num, N=28, prev_im=None, cbar=True, vabs=None, axis_off=True):\n N = int(math.sqrt(preds.shape[0]))\n preds = preds[:, num].reshape(N, N)\n if not prev_im is None:\n preds[prev_im] = np.nan\n\n ax = plt.gca()\n\n if vabs is None:\n vmin = np.nanmin(preds)\n vmax = np.nanmax(preds)\n vabs = max(abs(vmin), abs(vmax))\n p = plt.imshow(preds, cmap=cmap,\n vmin=-1 * vabs, vmax=vabs, interpolation='None')\n if axis_off:\n plt.axis('off')\n\n # colorbar\n if cbar:\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"2%\", pad=0.05)\n plt.colorbar(p, cax=cax)\n\n return p\n\n\ndef visualize_batch_preds(preds, prev_im=None, N=28, im_num_start=0):\n preds_reshaped = np.zeros(N * N)\n preds_reshaped[im_num_start: im_num_start + preds.size] = preds\n preds_reshaped = preds_reshaped.reshape(N, N)\n # accs_reshaped = accs[:, num].reshape(N, N)\n if not prev_im is None:\n preds_reshaped[prev_im] = np.nan\n plt.imshow(preds_reshaped)\n return preds_reshaped\n\n\ndef visualize_ims_list(ims_list, title='', cmap_new=None, subplot_row=None, subplot_rows=3, colorbar=True, im_orig=None,\n plot_overlay=False, mturk=False, num_ims=None, comp_scores_raw=None, lab_num_correct=None,\n skip_first=False, mnist=False):\n im_segs = []\n if subplot_row is None:\n plt.figure(figsize=(12, 2), facecolor='white')\n subplot_row = 1\n if num_ims is None:\n num_ims = len(ims_list)\n for i in range(num_ims):\n if i >= len(ims_list):\n break\n ax = plt.subplot(subplot_rows, num_ims, num_ims * subplot_row + i + 1 - mnist)\n if cmap_new == 'redwhiteblue':\n vmin = min([np.min(im[np.logical_not(np.isnan(im))]) for im in ims_list])\n vmax = max([np.max(im[np.logical_not(np.isnan(im))]) for im in ims_list])\n vabs = max(abs(vmin), abs(vmax))\n\n p = plt.imshow(ims_list[i], cmap=cmap,\n vmin=-1 * vabs, vmax=vabs, interpolation='nearest')\n else:\n # color images\n if plot_overlay:\n if not mnist:\n plt.imshow(im_orig) # plot image as background\n # overlay component comps\n if i > 0 or skip_first:\n if mturk:\n\n # need to map this to values of comps not comp_num \n im_nums = np.copy(ims_list[i]).astype(np.float32)\n comp_to_score = comp_scores_raw[i]\n\n for r in range(im_nums.shape[0]):\n for c in range(im_nums.shape[1]):\n comp_num = int(im_nums[r, c])\n if 
comp_num > 0:\n im_nums[r, c] = comp_to_score[comp_num][lab_num_correct]\n\n im = cmap(im_nums)\n for r in range(im.shape[0]):\n for c in range(im.shape[1]):\n if im[r, c, 1] == 0:\n im[r, c, 3] = 0\n\n vmin = min([comp_to_score[comp_num][lab_num_correct]\n for comp_to_score in comp_scores_raw[1:]\n for comp_num in comp_to_score.keys()])\n vmax = max([comp_to_score[comp_num][lab_num_correct]\n for comp_to_score in comp_scores_raw[1:]\n for comp_num in comp_to_score.keys()])\n vabs = max(abs(vmin), abs(vmax))\n else:\n # renumber to maintain right colors\n # if i > 1:\n # im_seg = establish_correspondence(ims_list[i-1], ims_list[i])\n # ims_list[i] = im_seg\n # else:\n # im_seg = ims_list[i]\n\n im_seg = ims_list[i]\n im = cmap_comp(im_seg)\n for r in range(im.shape[0]):\n for c in range(im.shape[1]):\n if im_seg[r, c] == 0:\n im[r, c, 3] = 0\n map_reshaped = resize(im, (224, 224, 4), mode='symmetric', order=0)\n if mturk:\n plt.imshow(map_reshaped, alpha=0.9, interpolation='None', vmin=-1 * vabs, vmax=vabs)\n else:\n plt.imshow(map_reshaped, alpha=0.7)\n # not color\n else:\n p = plt.imshow(ims_list[i],\n cmap=discrete_cmap(N_COLORS, # len(np.unique(ims_list[i])) + 1,\n 'jet')[1], vmin=0, vmax=N_COLORS, interpolation='None')\n # plt.imshow(ims_list[i])\n if i > 0 or mturk:\n plt.axis('off')\n else:\n plt.axis('off')\n # plt.ylabel(title)\n # plt.yticks([])\n # plt.xticks([])\n\n # colorbar\n if colorbar:\n plt.colorbar()\n # ax = plt.gca()\n # divider = make_axes_locatable(ax)\n # cax = divider.append_axes(\"right\", size=\"10%\", pad=0.05)\n # plt.colorbar(p, cax=cax)\n\n plt.subplots_adjust(wspace=0, hspace=0)\n\n\ndef visualize_dict_list(dict_list, method='break-down / build-up',\n subplot_row=None, subplot_rows=3, lab_num=None, bar_graph=False):\n # if passed lab_num, plot only lab_num\n if lab_num is not None:\n dict_list_temp = []\n for d in dict_list:\n d_new = {}\n for key in d:\n d_new[key] = np.array(d[key][lab_num])\n dict_list_temp.append(d_new)\n dict_list = dict_list_temp\n\n if subplot_row is None:\n plt.figure(figsize=(12, 2), facecolor='white')\n subplot_row = 1\n num_ims = len(dict_list)\n preds_orig = dict_list[0][0]\n\n # try:\n vmin = min([np.min(d[key]) for d in dict_list[1:] for key in d]) - 1\n vmax = max([np.max(d[key]) for d in dict_list[1:] for key in d]) + 1\n if lab_num is None:\n vmin = min(vmin, np.min(preds_orig))\n vmax = max(vmax, np.max(preds_orig))\n\n # plot 1st preds\n plt.subplot(subplot_rows, num_ims, num_ims * subplot_row + 1)\n # plt.plot(preds_orig, '_', color='black')\n\n if lab_num is None:\n plt.bar(range(preds_orig.size), preds_orig, color='black')\n plt.ylabel('raw score full image')\n else:\n plt.ylabel('cd blob scores')\n plt.ylim((vmin, vmax))\n for i in range(1, num_ims):\n p = plt.subplot(subplot_rows, num_ims, num_ims * subplot_row + i + 1)\n # num_components = len(dict_list[i].keys())\n p.set_prop_cycle(cycler('color', discrete_cmap(N_COLORS, 'jet')[0][1:]))\n\n if bar_graph:\n region_nums = sorted(dict_list[i])\n vals = [dict_list[i][region_num] for region_num in region_nums]\n plt.bar(region_nums, vals, color=discrete_cmap(N_COLORS, 'jet')[0][1:])\n\n plt.plot(region_nums, vals, '_', color='black')\n plt.ylim((vmin - 1, vmax + 1))\n else:\n\n for region_num in sorted(dict_list[i]):\n region_arr = dict_list[i][region_num]\n # for class_num in range(10):\n # print(class_num, region_arr[class_num])\n plt.plot(region_arr, '_', markeredgewidth=2.5)\n plt.ylim((vmin, vmax))\n\n cur_axes = plt.gca()\n # if not i == 0 and not i == 1:\n 
cur_axes.yaxis.set_visible(False)\n if lab_num is None:\n cur_axes.xaxis.set_ticklabels(np.arange(0, 10, 2))\n cur_axes.xaxis.set_ticks(np.arange(0, 10, 2))\n cur_axes.xaxis.grid()\n else:\n cur_axes.xaxis.set_visible(False)\n if i == 0:\n plt.ylabel('raw comp scores for ' + method)\n plt.subplots_adjust(wspace=0, hspace=0)\n\n\n# except Exception as e:\n# print('some empty plots', e)\n\ndef visualize_arr_list(arr_list, method='break-down / build-up',\n subplot_row=None, subplot_rows=3):\n if subplot_row is None:\n plt.figure(figsize=(12, 2), facecolor='white')\n subplot_row = 1\n num_ims = len(arr_list) + 1\n\n vmin = min([np.min(d) for d in arr_list])\n vmax = max([np.max(d) for d in arr_list])\n\n for i in range(1, num_ims):\n p = plt.subplot(subplot_rows, num_ims, num_ims * subplot_row + i + 1)\n arr = arr_list[i - 1]\n # plt.plot(arr, '_', markeredgewidth=0, color='black')\n plt.bar(np.arange(arr.size), arr, color='black')\n plt.ylim((vmin, vmax))\n cur_axes = plt.gca()\n if not i == 1:\n cur_axes.yaxis.set_visible(False)\n cur_axes.xaxis.set_ticklabels(np.arange(0, 10, 2))\n cur_axes.xaxis.set_ticks(np.arange(0, 10, 2))\n cur_axes.xaxis.grid()\n if i == 0:\n plt.ylabel('raw combined score for ' + method)\n plt.subplots_adjust(wspace=0, hspace=0)\n\n\ndef visualize_original_preds(im_orig, lab_num, comp_scores_raw_list, scores_orig_raw,\n subplot_rows=5, dset=None, mturk=False, tits=None):\n num_cols = 7 - mturk\n plt.subplot(subplot_rows, num_cols, 1)\n plt.imshow(im_orig)\n if not tits is None:\n plt.title(tits[0])\n else:\n plt.title(dset.lab_dict[lab_num].split(',')[0])\n plt.axis('off')\n\n num_top = 5\n preds = comp_scores_raw_list[0][0]\n ind = np.argpartition(preds, -num_top)[-num_top:] # top-scoring indexes\n ind = ind[np.argsort(preds[ind])][::-1] # sort the indexes\n labs = [dset.lab_dict[x][:12] for x in ind]\n vals = preds[ind]\n\n # plotting\n if not mturk:\n plt.subplot(subplot_rows, num_cols, 2)\n idxs = np.arange(num_top)\n plt.barh(idxs, vals, color='#2ea9e888', edgecolor='#2ea9e888', fill=True, linewidth=1)\n\n for i, (val) in enumerate(zip(idxs, vals)):\n lab = str(labs[i])\n if 'puck' in lab:\n lab = 'puck'\n plt.text(s=str(lab), x=1, y=i, color=\"black\", verticalalignment=\"center\", size=10)\n # plt.text(s=str(pr)+\"%\", x=pr-5, y=i, color=\"w\",\n # verticalalignment=\"center\", horizontalalignment=\"left\", size=18)\n ax = plt.gca()\n # ax.set_yticklabels(labs)\n # ax.set_yticks(np.arange(num_top))\n # plt.yticks(rotation='horizontal')\n ax.invert_yaxis() # labels read top-to-bottom\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n plt.title('prediction logits')\n\n vmin = min([np.nanmin(scores_orig_raw[:, x]) for x in ind]) # preds[:, num]\n vmax = max([np.nanmax(scores_orig_raw[:, x]) for x in ind]) # preds[:, num]\n vabs = max(abs(vmin), abs(vmax))\n\n for i, x in enumerate(ind):\n if i < num_top:\n plt.subplot(subplot_rows, num_cols, i + 3 - mturk)\n if mturk:\n visualize_preds(scores_orig_raw, num=x, cbar=False, vabs=vabs)\n plt.title(dset.lab_dict[x][:14] + '...')\n else:\n visualize_preds(scores_orig_raw, num=x, cbar=False, vabs=vabs)\n if tits is not None:\n plt.title(tits[i + 2])\n else:\n plt.title('CD (' + dset.lab_dict[x][:10] + ')') # +'\\n'+ str(preds[x]))\n\n return ind, labs\n\n\ndef visualize_dict_list_top(dict_list, method='break-down / build-up',\n subplot_row=None, subplot_rows=3, lab_num=None,\n ind=None, labs=None, num_top=5, dset=None, use_orig_top=True,\n num_ims=None, skip_first=False, vmin=None, vmax=None):\n 
if subplot_row is None:\n plt.figure(figsize=(12, 2), facecolor='white')\n subplot_row = 1\n if num_ims is None:\n num_ims = len(dict_list)\n preds_orig = dict_list[0][0]\n\n if vmin is None:\n vmin = min([np.min(d[key]) for d in dict_list[1:num_ims + 1] for key in d]) - 1\n vmax = max([np.max(d[key]) for d in dict_list[1:num_ims + 1] for key in d]) + 1\n\n for i in range(1, num_ims + skip_first):\n if i >= len(dict_list):\n break\n p = plt.subplot(subplot_rows, num_ims, num_ims * subplot_row + i + 1 - skip_first)\n # num_components = len(dict_list[i].keys())\n p.set_prop_cycle(cycler('color', discrete_cmap(N_COLORS, 'jet')[0][1:]))\n # print('keys', dict_list[i].keys())\n\n for region_num in range(1, max(dict_list[i].keys()) + 1):\n # for region_num in sorted(dict_list[i]):\n # print('dict_list[i]', dict_list[i])\n\n if region_num in dict_list[i]: # check if present\n if use_orig_top:\n # print(region_num)\n region_arr = dict_list[i][region_num][ind]\n plt.plot(region_arr, '_', markeredgewidth=2)\n plt.xticks(np.arange(region_arr.size), labs, rotation='vertical')\n plt.xlim((-1, region_arr.size))\n else:\n if region_num == 1:\n region_arr = dict_list[i][region_num]\n ind = np.argpartition(region_arr, -num_top)[-num_top:] # top-scoring indexes\n ind = ind[np.argsort(region_arr[ind])][::-1] # sort the indexes\n labs = [dset.lab_dict[x][:12] for x in ind]\n vals = region_arr[ind]\n plt.plot(vals, '_', markeredgewidth=1)\n plt.xticks(np.arange(ind.size), labs, rotation='vertical')\n plt.xlim((-1, ind.size))\n plt.ylim((vmin, vmax))\n else: # plot blank just to match with color cycle\n plt.plot(-1, 0)\n pass\n\n cur_axes = plt.gca()\n if not i == 1:\n cur_axes.yaxis.set_visible(False)\n\n if use_orig_top:\n cur_axes.xaxis.set_visible(False)\n # if i == 5:\n # plt.title('raw comp scores for ' + method)\n else:\n plt.ylabel('patch importance')\n plt.subplots_adjust(wspace=0, hspace=0)\n\n\ndef visualize_top_classes(model, dset, im_orig, scores_orig_raw):\n preds = dset.pred_ims(model, im_orig)\n ind = np.argpartition(preds, -8)[-8:] # top-scoring indexes\n ind = ind[np.argsort(preds[ind])][::-1] # sort the indexes\n\n plt.figure(figsize=(14, 4))\n for i, x in enumerate(ind):\n plt.subplot(1, 8, i + 1)\n visualize_preds(scores_orig_raw, num=x)\n plt.title(dset.lab_dict[x][:12] + '\\n' + str(preds[x]))\n\n\ndef visualize_original_preds_mnist(im_orig, lab_num, comp_scores_raw_list, scores_orig_raw,\n subplot_rows=5, dset=None, mturk=False, use_vmax=True):\n num_cols = 7 - mturk\n plt.subplot(subplot_rows, num_cols, 1)\n plt.imshow(im_orig, interpolation='None', cmap='gray')\n plt.title('Original image')\n plt.axis('off')\n\n num_top = 5\n preds = comp_scores_raw_list[0][0]\n ind = np.argpartition(preds, -num_top)[-num_top:] # top-scoring indexes\n ind = ind[np.argsort(preds[ind])][::-1] # sort the indexes\n labs = ind # [dset.lab_dict[x][:12] for x in ind]\n vals = preds[ind]\n\n # plotting\n if not mturk:\n plt.subplot(subplot_rows, num_cols, 2)\n idxs = np.arange(num_top)\n plt.barh(idxs, vals, color='#2ea9e888', edgecolor='#2ea9e888', fill=False, linewidth=1)\n for i, (val) in enumerate(zip(idxs, vals)):\n plt.text(s=str(labs[i]), x=1, y=i, color=\"black\", verticalalignment=\"center\", size=10)\n # plt.text(s=str(pr)+\"%\", x=pr-5, y=i, color=\"w\",\n # verticalalignment=\"center\", horizontalalignment=\"left\", size=18)\n ax = plt.gca()\n # ax.set_yticklabels(labs)\n # ax.set_yticks(np.arange(num_top))\n # plt.yticks(rotation='horizontal')\n ax.invert_yaxis() # labels read top-to-bottom\n 
ax.get_yaxis().set_visible(False)\n plt.title('logits')\n\n vmin = min([np.nanmin(scores_orig_raw[:, x]) for x in ind]) # preds[:, num]\n vmax = max([np.nanmax(scores_orig_raw[:, x]) for x in ind]) # preds[:, num]\n vabs = max(abs(vmin), abs(vmax))\n\n for i, x in enumerate(ind):\n if i < num_top:\n plt.subplot(subplot_rows, num_cols, i + 3 - mturk)\n if mturk:\n if use_vmax:\n visualize_preds(scores_orig_raw, num=x, cbar=False, vabs=vabs)\n else:\n visualize_preds(scores_orig_raw, num=x, cbar=False)\n plt.title(x)\n else:\n visualize_preds(scores_orig_raw, num=x, cbar=False, vabs=vabs)\n plt.title(x)\n\n return ind, labs\n"
] | [
[
"torch.FloatTensor",
"numpy.dot",
"numpy.split",
"numpy.zeros"
],
[
"numpy.nanmax",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"matplotlib.pyplot.barh",
"numpy.nanmin",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.gca",
"numpy.arange",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.argpartition",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.isnan",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.cm.get_cmap"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krishpop/pddm | [
"b1452554a4e318966b8ca3da53978458ac635c5d"
] | [
"pddm/regressors/feedforward_network.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\ndef feedforward_network(inputStates, inputSize, outputSize, num_fc_layers,\n depth_fc_layers, tf_datatype, scope):\n\n with tf.variable_scope(str(scope)):\n\n #concat K entries together [bs x K x sa] --> [bs x ksa]\n inputState = tf.layers.flatten(inputStates)\n\n #vars\n intermediate_size = depth_fc_layers\n reuse = False\n initializer = tf.glorot_normal_initializer(\n seed=None, dtype=tf_datatype)\n fc = tf.layers.dense\n\n # make hidden layers\n for i in range(num_fc_layers):\n if i==0:\n fc_i = fc(\n inputState,\n units=intermediate_size,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n else:\n fc_i = fc(\n h_i,\n units=intermediate_size,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n h_i = tf.nn.relu(fc_i)\n\n # make output layer\n z = fc(\n h_i,\n units=outputSize,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n\n return z\n"
] | [
[
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.layers.flatten",
"tensorflow.compat.v1.glorot_normal_initializer",
"tensorflow.compat.v1.disable_v2_behavior"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AI-sandbox/hyperLAI | [
"49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689",
"49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689"
] | [
"hyperLAI/features/hyperLAIdataset.py",
"hyperLAI/data/snp_data_preprocessing/variance_filter_subpop.py"
] | [
"import numpy as np\nimport torch\nfrom torch.utils import data\nimport sys\nfrom utils.generate_dataset import *\nfrom HypHC.datasets.triples import samples_triples\n\nclass HyperLoader(data.Dataset):\n def __init__(self, data_dir, split_indices, restrict_labels=[0,1,2,3,4,5,6], chromosome=\"all\"):\n '''\n Takes in all the relevant arguments to produce the dataset.\n Arguments:\n `data_dir`: directory in which data (either text files or numpy arrays) are located\n `similarity_func`: function to calculate pairwise similarities\n `split_indices`: indices for the data split (train/test/valid)\n `restrict_labels`: list of super-populations to include in analysis. Indices correspond to 'EUR', 'EAS', 'AMR', 'SAS', 'AFR', 'OCE', 'WAS'\n '''\n\n self.data_dir = data_dir \n self.restrict_labels = restrict_labels\n self.chromosome = chromosome\n self.split_indices = split_indices\n self.snps, self.pop_labels, self.suppop_labels, self.pop_label_index, self.suppop_label_index = self.load_data()\n def load_data(self):\n '''\n Loads SNP and label data from the necessary file locations \n '''\n #If we want all chromosomes, then we have the arrays already pre-created\n if self.chromosome ==\"all\":\n file_order = [\"all_snps.npy\", \"labels_suppop.npy\", \"labels_pop.npy\", \n \"coords.npy\", \"pop_index.npy\", \"pop_code_index.npy\", \"suppop_code_index.npy\"]\n test_data = tuple([np.load(self.data_dir + x) for x in file_order])\n ind_data = test_data[0]\n else:\n #The data for individual chromosomes is in a slightly different format\n test_data = load_dataset(self.data_dir + \"ref_final_beagle_phased_1kg_hgdp_sgdp_chr%s_hg19.vcf.gz\"%(self.chromosome), \n self.data_dir + \"reference_panel_metadata.tsv\", \"./\", chromosome=self.chromosome, \n verbose=True, filter_admixed=True, filter_missing_coord=True)\n ind_data = test_data[0].reshape([test_data[0].shape[0], test_data[0].shape[1] * test_data[0].shape[2]]).T \n #We've unfolded each set of 23 chromosomes as a \"different\" individual \n #So we must do the same for the labels by doubling them\n ind_pop_labels = np.repeat(test_data[2], 2).astype(int)\n ind_suppop_labels = np.repeat(test_data[1], 2).astype(int)\n #Restrict to only the super-populations we've specified\n pop_indices = np.argwhere(np.isin(ind_suppop_labels, self.restrict_labels)).T[0]\n indices = np.intersect1d(pop_indices, self.split_indices)\n #Return everything\n return ind_data[indices], ind_pop_labels[indices], ind_suppop_labels[indices], test_data[4], test_data[6]\n def __len__(self):\n return len(self.snps)\n def __getitem__(self, index):\n '''\n Returns data and labels for the current index\n '''\n return torch.tensor(self.snps[index]), torch.tensor(self.suppop_labels[index]), torch.tensor(self.pop_labels[index])\n",
"import sys\nsys.path.append(\"../../\")\nsys.path.append(\"../../../../libraries/\")\nimport numpy as np\nimport torch\nimport os\nimport json\nfrom torch.utils.data import SubsetRandomSampler, DataLoader\nfrom utils.sim_funcs import sim_func_dict\nfrom utils.model_utils import *\nfrom features.hyperLAIdataset import HyperLoader\n\n#This code takes in a genotype dataset and filters to only include the SNPs with the k highest variances across the population\n\n#Define the directory to draw data from\ndata_dir = \"/scratch/users/patelas/hyperLAI/snp_data/whole_genome/\"\ntrain_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/train_indices.npy\")\nvalid_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/valid_indices.npy\")\ntest_inds = np.load(\"/scratch/users/patelas/hyperLAI/ancestry_training_splits/80_10_10/test_indices.npy\")\nall_inds = np.sort(np.concatenate([train_inds, valid_inds, test_inds]))\nprint(all_inds[0], all_inds[-1])\npop_labels = [3]\noutput_dir = \"/scratch/users/patelas/hyperLAI/snp_data/whole_genome/variance_filtered_500000_subpops/south_asian/\"\n\n\n\n#Create the dataset\ndataset = HyperLoader(data_dir, all_inds, [0,1,2,3,4,5,6], \"all\")\n\n#Get indices to use\npop_indices = np.argwhere(np.isin(dataset.suppop_labels, pop_labels)).T[0]\nindices = np.intersect1d(pop_indices, train_inds)\nprint(len(indices))\nprint(indices[0], indices[-1])\n\n\n#Filter by variance\nvariance_filter(dataset, indices, int(sys.argv[1]))\n\nprint(\"Variance Filtered\")\n\n#Save to file\nnp.save(output_dir + \"all_snps.npy\", \n dataset.snps)\n\n#Note: This script only creates a copy of all_snps.npy in the desired folder. All other metadata files will have to be copied manually. \n"
] | [
[
"torch.tensor",
"numpy.intersect1d",
"numpy.load",
"numpy.repeat",
"numpy.isin"
],
[
"numpy.save",
"numpy.concatenate",
"numpy.intersect1d",
"numpy.load",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qbetterk/user-simulator | [
"77caca30ff67b9112b1fe5e65e191c6b5e25532c",
"77caca30ff67b9112b1fe5e65e191c6b5e25532c",
"77caca30ff67b9112b1fe5e65e191c6b5e25532c"
] | [
"sequicity/tsd_net.py",
"seq2seq/seq2seq/trainer/supervised_trainer.py",
"sequicity_user/seq_user_act.py"
] | [
"import torch\r\n\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nimport numpy as np\r\nimport math\r\nfrom sequicity.config import global_config as cfg\r\nimport copy, random, time, logging\r\n\r\nfrom torch.distributions import Categorical\r\nfrom sequicity.reader import pad_sequences\r\nimport pdb\r\nimport simulator.dialog_config as dialog_config\r\nimport pdb\r\n\r\n\r\ndef cuda_(var):\r\n return var.cuda() if cfg.cuda else var\r\n\r\n\r\ndef toss_(p):\r\n return random.randint(0, 99) <= p\r\n\r\n\r\ndef nan(v):\r\n if type(v) is float:\r\n return v == float('nan')\r\n return np.isnan(np.sum(v.data.cpu().numpy()))\r\n\r\n\r\ndef get_sparse_input_aug(x_input_np):\r\n \"\"\"\r\n sparse input of\r\n :param x_input_np: [T,B]\r\n :return: Numpy array: [B,T,aug_V]\r\n \"\"\"\r\n ignore_index = [0]\r\n unk = 2\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n for t in range(x_input_np.shape[0]):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n if w not in ignore_index:\r\n if w != unk:\r\n result[t][b][x_input_np[t][b]] = 1.0\r\n else:\r\n result[t][b][cfg.vocab_size + t] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n\r\ndef init_gru(gru):\r\n gru.reset_parameters()\r\n for _, hh, _, _ in gru.all_weights:\r\n for i in range(0, hh.size(0), gru.hidden_size):\r\n torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=1)\r\n\r\n\r\nclass Attn(nn.Module):\r\n def __init__(self, hidden_size):\r\n super(Attn, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\r\n self.v = nn.Parameter(torch.zeros(hidden_size))\r\n stdv = 1. / math.sqrt(self.v.size(0))\r\n self.v.data.normal_(mean=0, std=stdv)\r\n\r\n def forward(self, hidden, encoder_outputs, mask=False, inp_seqs=None, stop_tok=None, normalize=True):\r\n encoder_outputs = encoder_outputs.transpose(0, 1) # [B,T,H]\r\n attn_energies = self.score(hidden, encoder_outputs)\r\n if True or not mask:\r\n normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]\r\n else:\r\n mask_idx = []\r\n # inp_seqs: ndarray of [T,B]\r\n # inp_seqs = inp_seqs.cpu().numpy()\r\n for b in range(inp_seqs.shape[1]):\r\n for t in range(inp_seqs.shape[0] + 1):\r\n if t == inp_seqs.shape[0] or inp_seqs[t, b] in stop_tok:\r\n mask_idx.append(t)\r\n break\r\n mask = []\r\n for mask_len in mask_idx:\r\n mask.append([1.] * mask_len + [0.] 
* (inp_seqs.shape[0] - mask_len))\r\n mask = cuda_(Variable(torch.FloatTensor(mask))) # [B,T]\r\n attn_energies = attn_energies * mask.unsqueeze(1)\r\n normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]\r\n\r\n context = torch.bmm(normalized_energy, encoder_outputs) # [B,1,H]\r\n return context.transpose(0, 1) # [1,B,H]\r\n\r\n def score(self, hidden, encoder_outputs):\r\n max_len = encoder_outputs.size(1)\r\n H = hidden.repeat(max_len, 1, 1).transpose(0, 1)\r\n # pdb.set_trace()\r\n energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2))) # [B,T,2H]->[B,T,H]\r\n energy = energy.transpose(2, 1) # [B,H,T]\r\n v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]\r\n energy = torch.bmm(v, energy) # [B,1,T]\r\n return energy\r\n\r\n\r\nclass SimpleDynamicEncoder(nn.Module):\r\n def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.embed_size = embed_size\r\n self.n_layers = n_layers\r\n self.dropout = dropout\r\n self.embedding = nn.Embedding(input_size, embed_size)\r\n self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)\r\n init_gru(self.gru)\r\n\r\n def forward(self, input_seqs, input_lens, hidden=None):\r\n \"\"\"\r\n forward procedure. No need for inputs to be sorted\r\n :param input_seqs: Variable of [T,B]\r\n :param hidden:\r\n :param input_lens: *numpy array* of len for each input sequence\r\n :return:\r\n \"\"\"\r\n # print(\"in encoder\")\r\n # print(\"input_seqs\", input_seqs)\r\n # print(\"hidden\", hidden)\r\n # print(\"input_lens\", input_lens)\r\n batch_size = input_seqs.size(1)\r\n embedded = self.embedding(input_seqs)\r\n import pdb\r\n if torch.isnan(embedded).sum() > 0:\r\n pdb.set_trace()\r\n # pass\r\n # print(\"embedded\", embedded)\r\n embedded = embedded.transpose(0, 1) # [B,T,E]\r\n sort_idx = np.argsort(-input_lens)\r\n unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))\r\n input_lens = input_lens[sort_idx]\r\n sort_idx = cuda_(torch.LongTensor(sort_idx))\r\n embedded = embedded[sort_idx].transpose(0, 1) # [T,B,E]\r\n # print(\"embedded\", embedded)\r\n packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)\r\n outputs, hidden = self.gru(packed, hidden)\r\n # print('outputs', outputs)\r\n\r\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\r\n outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]\r\n outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\r\n hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\r\n return outputs, hidden, embedded\r\n\r\n\r\nclass BSpanDecoder(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate, vocab):\r\n super().__init__()\r\n self.emb = nn.Embedding(vocab_size, embed_size)\r\n if cfg.use_positional_embedding:\r\n self.positional_embedding = nn.Embedding(cfg.max_ts + 1, embed_size)\r\n init_pos_emb = self.position_encoding_init(cfg.max_ts + 1, embed_size)\r\n self.positional_embedding.weight.data = init_pos_emb\r\n self.gru = nn.GRU(hidden_size + embed_size, hidden_size, dropout=dropout_rate)\r\n self.proj = nn.Linear(hidden_size * 2, vocab_size)\r\n\r\n self.attn_u = Attn(hidden_size)\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.inp_dropout = nn.Dropout(self.dropout_rate)\r\n\r\n 
init_gru(self.gru)\r\n self.vocab = vocab\r\n\r\n def position_encoding_init(self, n_position, d_pos_vec):\r\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]\r\n if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])\r\n\r\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\r\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\r\n return torch.from_numpy(position_enc).type(torch.FloatTensor)\r\n\r\n def forward(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n position):\r\n # print(\"in bSpanDecoder\")\r\n # print(u_input_np)\r\n # print(u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n # position)\r\n # print(\"prev_z_input_np\", prev_z_input_np)\r\n sparse_u_input = Variable(get_sparse_input_aug(u_input_np), requires_grad=False)\r\n\r\n if pv_z_enc_out is not None:\r\n context = self.attn_u(last_hidden, torch.cat([pv_z_enc_out, u_enc_out], dim=0), mask=True,\r\n inp_seqs=np.concatenate([prev_z_input_np, u_input_np], 0),\r\n stop_tok=[self.vocab.encode('EOS_M')])\r\n else:\r\n context = self.attn_u(last_hidden, u_enc_out, mask=True, inp_seqs=u_input_np,\r\n stop_tok=[self.vocab.encode('EOS_M')])\r\n embed_z = self.emb(z_tm1)\r\n # embed_z = self.inp_dropout(embed_z)\r\n\r\n if cfg.use_positional_embedding: # defaulty not used\r\n position_label = [position] * u_enc_out.size(1) # [B]\r\n position_label = cuda_(Variable(torch.LongTensor(position_label))).view(1, -1) # [1,B]\r\n pos_emb = self.positional_embedding(position_label)\r\n embed_z = embed_z + pos_emb\r\n\r\n gru_in = torch.cat([embed_z, context], 2)\r\n gru_out, last_hidden = self.gru(gru_in, last_hidden)\r\n # gru_out = self.inp_dropout(gru_out)\r\n gen_score = self.proj(torch.cat([gru_out, context], 2)).squeeze(0)\r\n # gen_score = self.inp_dropout(gen_score)\r\n u_copy_score = torch.tanh(self.proj_copy1(u_enc_out.transpose(0, 1))) # [B,T,H]\r\n # stable version of copynet\r\n u_copy_score = torch.matmul(u_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n u_copy_score = u_copy_score.cpu()\r\n u_copy_score_max = torch.max(u_copy_score, dim=1, keepdim=True)[0]\r\n u_copy_score = torch.exp(u_copy_score - u_copy_score_max) # [B,T]\r\n u_copy_score = torch.log(torch.bmm(u_copy_score.unsqueeze(1), sparse_u_input)).squeeze(\r\n 1) + u_copy_score_max # [B,V]\r\n u_copy_score = cuda_(u_copy_score)\r\n if pv_z_enc_out is None:\r\n # u_copy_score = self.inp_dropout(u_copy_score)\r\n scores = F.softmax(torch.cat([gen_score, u_copy_score], dim=1), dim=1)\r\n gen_score, u_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + u_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, u_copy_score[:, cfg.vocab_size:]], 1)\r\n else:\r\n sparse_pv_z_input = Variable(get_sparse_input_aug(prev_z_input_np), requires_grad=False)\r\n pv_z_copy_score = torch.tanh(self.proj_copy2(pv_z_enc_out.transpose(0, 1))) # [B,T,H]\r\n pv_z_copy_score = torch.matmul(pv_z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n pv_z_copy_score = pv_z_copy_score.cpu()\r\n pv_z_copy_score_max = torch.max(pv_z_copy_score, dim=1, keepdim=True)[0]\r\n pv_z_copy_score = torch.exp(pv_z_copy_score - pv_z_copy_score_max) # [B,T]\r\n pv_z_copy_score = torch.log(torch.bmm(pv_z_copy_score.unsqueeze(1), sparse_pv_z_input)).squeeze(\r\n 1) + pv_z_copy_score_max # [B,V]\r\n pv_z_copy_score = cuda_(pv_z_copy_score)\r\n 
scores = F.softmax(torch.cat([gen_score, u_copy_score, pv_z_copy_score], dim=1), dim=1)\r\n gen_score, u_copy_score, pv_z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:,\r\n cfg.vocab_size:2 * cfg.vocab_size + u_input_np.shape[0]], \\\r\n scores[:, 2 * cfg.vocab_size + u_input_np.shape[0]:]\r\n proba = gen_score + u_copy_score[:, :cfg.vocab_size] + pv_z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, pv_z_copy_score[:, cfg.vocab_size:], u_copy_score[:, cfg.vocab_size:]], 1)\r\n return gru_out, last_hidden, proba\r\n\r\n\r\nclass ResponseDecoder(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):\r\n super().__init__()\r\n self.emb = emb\r\n self.attn_z = Attn(hidden_size)\r\n self.attn_u = Attn(hidden_size)\r\n self.gru = gru\r\n init_gru(self.gru)\r\n self.proj = proj\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.vocab = vocab\r\n\r\n def get_sparse_selective_input(self, x_input_np):\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n for t in range(x_input_np.shape[0] - 1):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n word = self.vocab.decode(w)\r\n if word in reqs:\r\n slot = self.vocab.encode(word + '_SLOT')\r\n result[t + 1][b][slot] = 1.0\r\n else:\r\n if w == 2 or w >= cfg.vocab_size:\r\n result[t + 1][b][cfg.vocab_size + t] = 5.0\r\n else:\r\n result[t + 1][b][w] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):\r\n sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)\r\n\r\n m_embed = self.emb(m_t_input)\r\n z_context = self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],\r\n inp_seqs=z_input_np)\r\n u_context = self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],\r\n inp_seqs=u_input_np)\r\n gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)\r\n gru_out, last_hidden = self.gru(gru_in, last_hidden)\r\n gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)\r\n z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))\r\n z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n z_copy_score = z_copy_score.cpu()\r\n z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]\r\n z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]\r\n z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(\r\n 1) + z_copy_score_max # [B,V]\r\n z_copy_score = cuda_(z_copy_score)\r\n\r\n scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)\r\n gen_score, z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)\r\n return proba, last_hidden, gru_out\r\n\r\n\r\nclass ResponseDecoder_discrete(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, 
proj, emb, vocab):\r\n super().__init__()\r\n self.emb = emb\r\n self.attn_z = Attn(hidden_size)\r\n self.attn_u = Attn(hidden_size)\r\n self.gru = gru\r\n init_gru(self.gru)\r\n self.proj_0 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj_1 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj_2 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj = proj\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.vocab = vocab\r\n\r\n def get_sparse_selective_input(self, x_input_np):\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n for t in range(x_input_np.shape[0] - 1):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n word = self.vocab.decode(w)\r\n if word in reqs:\r\n slot = self.vocab.encode(word + '_SLOT')\r\n result[t + 1][b][slot] = 1.0\r\n else:\r\n if w == 2 or w >= cfg.vocab_size:\r\n result[t + 1][b][cfg.vocab_size + t] = 5.0\r\n else:\r\n result[t + 1][b][w] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n def forward(self, z_enc_out, u_enc_out, np_state):\r\n # sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)\r\n\r\n # m_embed = self.emb(m_t_input)\r\n # z_context = torch.mean(z_enc_out, 0)#= self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],\r\n # inp_seqs=z_input_np)\r\n # pdb.set_trace()\r\n u_context = u_enc_out[-1, :, :]#= self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],\r\n # inp_seqs=u_input_np)\r\n state_from_np = torch.from_numpy(np_state).float().unsqueeze(0)\r\n\r\n output0 = F.tanh(self.proj_0(torch.cat([u_context, state_from_np], 1)))\r\n output1 = F.sigmoid(self.proj_1(output0))\r\n output2 = F.sigmoid(self.proj_2(output1))\r\n # gru_in = torch.cat([u_context, z_context], dim=2)\r\n # gru_out, last_hidden = self.gru(gru_in)\r\n # print(z_context)\r\n # print(z_context.shape)\r\n # print(u_context)\r\n # print(u_context.shape)\r\n gen_score = self.proj(output2)#.squeeze(0)# self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)\r\n\r\n return gen_score\r\n \"\"\"\r\n z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))\r\n z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n z_copy_score = z_copy_score.cpu()\r\n z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]\r\n z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]\r\n z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(\r\n 1) + z_copy_score_max # [B,V]\r\n z_copy_score = cuda_(z_copy_score)\r\n\r\n scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)\r\n gen_score, z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)\r\n \"\"\"\r\n return proba, last_hidden, gru_out\r\n\r\n\r\nclass TSD(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, 
dropout_rate, z_length,\r\n max_ts, action_size=dialog_config.SYS_ACTION_CARDINALITY, discrete_act=False, beam_search=False, teacher_force=100, **kwargs):\r\n super().__init__()\r\n self.vocab = kwargs['vocab']\r\n self.reader = kwargs['reader']\r\n self.emb = nn.Embedding(vocab_size, embed_size)\r\n self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)\r\n self.proj = nn.Linear(hidden_size * 3, vocab_size)\r\n self.proj_discrete = nn.Linear(hidden_size + dialog_config.STATE_DIM, action_size)\r\n self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)\r\n self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate, self.vocab)\r\n self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,\r\n self.dec_gru, self.proj, self.emb, self.vocab)\r\n self.m_decoder_discrete = ResponseDecoder_discrete(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,\r\n self.dec_gru, self.proj_discrete, self.emb, self.vocab)\r\n self.embed_size = embed_size\r\n\r\n self.z_length = z_length\r\n self.max_ts = max_ts\r\n self.discrete_act = discrete_act\r\n self.beam_search = beam_search\r\n self.teacher_force = teacher_force\r\n\r\n self.pr_loss = nn.NLLLoss(ignore_index=0)\r\n self.dec_loss = nn.NLLLoss(ignore_index=0)\r\n\r\n self.saved_log_policy = []\r\n\r\n if self.beam_search:\r\n self.beam_size = kwargs['beam_size']\r\n self.eos_token_idx = kwargs['eos_token_idx']\r\n\r\n def forward(self, u_input, u_input_np, m_input, m_input_np, z_input, u_len, m_len, turn_states,\r\n degree_input, mode, np_state, **kwargs):\r\n if mode == 'train' or mode == 'valid':\r\n pz_proba, pm_dec_proba, turn_states = \\\r\n self.forward_turn(u_input, u_len, m_input=m_input, m_len=m_len, z_input=z_input, mode='train',\r\n turn_states=turn_states, degree_input=degree_input, u_input_np=u_input_np,\r\n m_input_np=m_input_np, **kwargs)\r\n loss, pr_loss, m_loss = self.supervised_loss(torch.log(pz_proba), torch.log(pm_dec_proba),\r\n z_input, m_input)\r\n return loss, pr_loss, m_loss, turn_states\r\n\r\n elif mode == 'test':\r\n if self.discrete_act:\r\n m_output_index, pz_index, turn_states, pz_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,\r\n mode='test',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np,\r\n m_input_np=m_input_np,\r\n np_state=np_state,\r\n **kwargs\r\n )\r\n return m_output_index, pz_index, turn_states, pz_proba\r\n else:\r\n m_output_index, pz_index, turn_states, pz_proba, mt_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,\r\n mode='test',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np, m_input_np=m_input_np,\r\n **kwargs\r\n )\r\n return m_output_index, pz_index, turn_states, pz_proba, mt_proba\r\n\r\n elif mode == 'rl':\r\n loss = self.forward_turn(u_input, u_len=u_len, is_train=False, mode='rl',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np, m_input_np=m_input_np,\r\n **kwargs\r\n )\r\n return loss\r\n\r\n def forward_turn(self, u_input, u_len, turn_states, mode, degree_input, u_input_np, m_input_np=None,\r\n m_input=None, np_state=None, m_len=None, z_input=None, **kwargs):\r\n \"\"\"\r\n compute required outputs for a single dialogue turn. 
Turn state{Dict} will be updated in each call.\r\n :param u_input_np:\r\n :param m_input_np:\r\n :param u_len:\r\n :param turn_states:\r\n :param is_train:\r\n :param u_input: [T,B]\r\n :param m_input: [T,B]\r\n :param z_input: [T,B]\r\n :return:\r\n \"\"\"\r\n prev_z_input = kwargs.get('prev_z_input', None)\r\n prev_z_input_np = kwargs.get('prev_z_input_np', None)\r\n prev_z_len = kwargs.get('prev_z_len', None)\r\n pv_z_emb = None\r\n batch_size = u_input.size(1)\r\n pv_z_enc_out = None\r\n\r\n if prev_z_input is not None:\r\n pv_z_enc_out, _, pv_z_emb = self.u_encoder(prev_z_input, prev_z_len)\r\n\r\n u_enc_out, u_enc_hidden, u_emb = self.u_encoder(u_input, u_len)\r\n last_hidden = u_enc_hidden[:-1]\r\n z_tm1 = cuda_(Variable(torch.ones(1, batch_size).long() * 3)) # GO_2 token\r\n m_tm1 = cuda_(Variable(torch.ones(1, batch_size).long())) # GO token\r\n if mode == 'train':\r\n pz_dec_outs = []\r\n pz_proba = []\r\n z_length = z_input.size(0) if z_input is not None else self.z_length # GO token\r\n hiddens = [None] * batch_size\r\n for t in range(z_length):\r\n pz_dec_out, last_hidden, proba = \\\r\n self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,\r\n z_tm1=z_tm1, last_hidden=last_hidden,\r\n pv_z_enc_out=pv_z_enc_out, prev_z_input_np=prev_z_input_np,\r\n u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)\r\n pz_proba.append(proba)\r\n pz_dec_outs.append(pz_dec_out)\r\n z_np = z_tm1.view(-1).cpu().data.numpy()\r\n for i in range(batch_size):\r\n if z_np[i] == self.vocab.encode('EOS_Z2'):\r\n hiddens[i] = last_hidden[:, i, :]\r\n z_tm1 = z_input[t].view(1, -1)\r\n for i in range(batch_size):\r\n if hiddens[i] is None:\r\n hiddens[i] = last_hidden[:, i, :]\r\n last_hidden = torch.stack(hiddens, dim=1)\r\n\r\n z_input_np = z_input.cpu().data.numpy()\r\n\r\n pz_dec_outs = torch.cat(pz_dec_outs, dim=0) # [Tz,B,H]\r\n pz_proba = torch.stack(pz_proba, dim=0)\r\n # P(m|z,u)\r\n pm_dec_proba, m_dec_outs = [], []\r\n m_length = m_input.size(0) # Tm\r\n # last_hidden = u_enc_hidden[:-1]\r\n for t in range(m_length):\r\n teacher_forcing = toss_(self.teacher_force)\r\n proba, last_hidden, dec_out = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n degree_input, last_hidden, z_input_np)\r\n if teacher_forcing:\r\n m_tm1 = m_input[t].view(1, -1)\r\n else:\r\n _, m_tm1 = torch.topk(proba, 1)\r\n m_tm1 = m_tm1.view(1, -1)\r\n pm_dec_proba.append(proba)\r\n m_dec_outs.append(dec_out)\r\n\r\n pm_dec_proba = torch.stack(pm_dec_proba, dim=0) # [T,B,V]\r\n return pz_proba, pm_dec_proba, None\r\n else:\r\n # assert z_input is not None\r\n z_length = z_input.size(0) if z_input is not None else None # GO token\r\n # print(\"z_input\", z_input)\r\n if z_input is None:\r\n use_predicted_zt = True\r\n else:\r\n use_predicted_zt = False\r\n pz_dec_outs, bspan_index, last_hidden, pz_proba = self.bspan_decoder(u_enc_out, z_tm1, last_hidden, u_input_np,\r\n pv_z_enc_out=pv_z_enc_out,\r\n prev_z_input_np=prev_z_input_np,\r\n u_emb=u_emb, pv_z_emb=pv_z_emb,\r\n z_length=z_length,\r\n use_predicted_zt=use_predicted_zt,\r\n z_input=z_input)\r\n pz_proba = torch.stack(pz_proba, dim=0)\r\n pz_dec_outs = torch.cat(pz_dec_outs, dim=0)\r\n degree_input = self.reader.db_degree_handler(bspan_index, kwargs['dial_id'])\r\n degree_input = cuda_(Variable(torch.from_numpy(degree_input).float()))\r\n if mode == 'test':\r\n if not self.discrete_act:\r\n if not self.beam_search:\r\n m_output_index, m_probas = self.greedy_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n degree_input, bspan_index)\r\n\r\n # 
else:\r\n # m_output_index = self.beam_search_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n # degree_input, bspan_index)\r\n#\r\n return m_output_index, bspan_index, None, pz_proba, m_probas\r\n else:\r\n act_logits = self.action_decode(pz_dec_outs, u_enc_out, np_state)\r\n\r\n return act_logits, bspan_index, None, pz_proba\r\n\r\n elif mode == 'rl':\r\n return self.sampling_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n degree_input, bspan_index)\r\n\r\n def action_decode(self, pz_dec_outs, u_enc_out, np_state):\r\n\r\n logits = self.m_decoder_discrete(pz_dec_outs, u_enc_out, np_state)\r\n\r\n return logits\r\n\r\n def bspan_decoder(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n z_length=None, use_predicted_zt=True, z_input=None):\r\n if not use_predicted_zt:\r\n assert z_input is not None\r\n assert z_length is not None\r\n pz_dec_outs = []\r\n pz_proba = []\r\n decoded = []\r\n batch_size = u_enc_out.size(1)\r\n hiddens = [None] * batch_size\r\n z_length = z_length if z_length is not None else cfg.z_length\r\n # print(z_length)\r\n\r\n # import pdb\r\n # pdb.set_trace()\r\n for t in range(z_length):\r\n\r\n pz_dec_out, last_hidden, proba = \\\r\n self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,\r\n z_tm1=z_tm1, last_hidden=last_hidden, pv_z_enc_out=pv_z_enc_out,\r\n prev_z_input_np=prev_z_input_np, u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)\r\n # print(\"--\"*20)\r\n # print(\"in bspan decoder\")\r\n # print(\"proba \", proba)\r\n # print(\"z_tm1\", z_tm1)\r\n # print(\"t\", t)\r\n # print(\"--\"*20)\r\n pz_proba.append(proba)\r\n pz_dec_outs.append(pz_dec_out)\r\n # print(\"proba_size\", proba.shape)\r\n z_proba, z_index = torch.topk(proba, 1) # [B,1]\r\n # print('z_index', z_index)\r\n z_index = z_index.data.view(-1)\r\n\r\n #####################################################\r\n if prev_z_input_np is None:\r\n tmp = u_input_np # [,B]\r\n else:\r\n # pdb.set_trace()\r\n tmp = np.concatenate((u_input_np, prev_z_input_np), axis=0)\r\n\r\n for i in range(z_index.size(0)):\r\n if z_index[i] >= cfg.vocab_size:\r\n # print(z_index)\r\n z_index[i] = torch.tensor(int(tmp[z_index[i] - cfg.vocab_size, i]))\r\n del tmp\r\n decoded.append(z_index.clone())\r\n\r\n # print(decoded)\r\n #####################################################\r\n\r\n for i in range(z_index.size(0)):\r\n if z_index[i] >= cfg.vocab_size:\r\n z_index[i] = 2 # unk\r\n # print('z_index', z_index)\r\n\r\n z_np = z_tm1.view(-1).cpu().data.numpy()\r\n\r\n for i in range(batch_size):\r\n if z_np[i] == self.vocab.encode('EOS_Z2'):\r\n hiddens[i] = last_hidden[:, i, :]\r\n if use_predicted_zt:\r\n z_tm1 = cuda_(Variable(z_index).view(1, -1))\r\n else:\r\n z_tm1 = z_input[t].view(1, -1)\r\n for i in range(batch_size):\r\n if hiddens[i] is None:\r\n hiddens[i] = last_hidden[:, i, :]\r\n last_hidden = torch.stack(hiddens, dim=1)\r\n\r\n if not use_predicted_zt:\r\n z_input_np = z_input.cpu().data.numpy()\r\n decoded = torch.stack(decoded, dim=0).transpose(0, 1)\r\n decoded = list(decoded)\r\n decoded = [list(_) for _ in decoded]\r\n return pz_dec_outs, decoded, last_hidden, pz_proba\r\n\r\n\r\n\r\n def greedy_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n decoded = []\r\n probas = []\r\n bspan_index_np = pad_sequences(bspan_index).transpose((1, 0))\r\n for t in range(self.max_ts):\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n 
degree_input, last_hidden, bspan_index_np)\r\n probas.append(proba)\r\n mt_proba, mt_index = torch.topk(proba, 1) # [B,1]\r\n mt_index = mt_index.data.view(-1)\r\n decoded.append(mt_index.clone())\r\n for i in range(mt_index.size(0)):\r\n if mt_index[i] >= cfg.vocab_size:\r\n mt_index[i] = 2 # unk\r\n m_tm1 = cuda_(Variable(mt_index).view(1, -1))\r\n decoded = torch.stack(decoded, dim=0).transpose(0, 1)\r\n decoded = list(decoded)\r\n return [list(_) for _ in decoded], probas\r\n\r\n def beam_search_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input,\r\n bspan_index):\r\n eos_token_id = self.vocab.encode(cfg.eos_m_token)\r\n batch_size = pz_dec_outs.size(1)\r\n if batch_size != 1:\r\n raise ValueError('\"Beam search single\" requires batch size to be 1')\r\n\r\n class BeamState:\r\n def __init__(self, score, last_hidden, decoded, length):\r\n \"\"\"\r\n Beam state in beam decoding\r\n :param score: sum of log-probabilities\r\n :param last_hidden: last hidden\r\n :param decoded: list of *Variable[1*1]* of all decoded words\r\n :param length: current decoded sentence length\r\n \"\"\"\r\n self.score = score\r\n self.last_hidden = last_hidden\r\n self.decoded = decoded\r\n self.length = length\r\n\r\n def update_clone(self, score_incre, last_hidden, decoded_t):\r\n decoded = copy.copy(self.decoded)\r\n decoded.append(decoded_t)\r\n clone = BeamState(self.score + score_incre, last_hidden, decoded, self.length + 1)\r\n return clone\r\n\r\n def beam_result_valid(decoded_t, bspan_index):\r\n decoded_t = [_.view(-1).data[0] for _ in decoded_t]\r\n req_slots = self.get_req_slots(bspan_index)\r\n decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)\r\n for req in req_slots:\r\n if req not in decoded_sentence:\r\n return False\r\n return True\r\n\r\n def score_bonus(state, decoded, bspan_index):\r\n bonus = cfg.beam_len_bonus\r\n return bonus\r\n\r\n def soft_score_incre(score, turn):\r\n return score\r\n\r\n finished, failed = [], []\r\n states = [] # sorted by score decreasingly\r\n dead_k = 0\r\n states.append(BeamState(0, last_hidden, [m_tm1], 0))\r\n bspan_index_np = np.array(bspan_index).reshape(-1, 1)\r\n for t in range(self.max_ts):\r\n new_states = []\r\n k = 0\r\n while k < len(states) and k < self.beam_size - dead_k:\r\n state = states[k]\r\n last_hidden, m_tm1 = state.last_hidden, state.decoded[-1]\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1, degree_input,\r\n last_hidden, bspan_index_np)\r\n\r\n proba = torch.log(proba)\r\n mt_proba, mt_index = torch.topk(proba, self.beam_size - dead_k) # [1,K]\r\n for new_k in range(self.beam_size - dead_k):\r\n score_incre = soft_score_incre(mt_proba[0][new_k].data[0], t) + score_bonus(state,\r\n mt_index[0][new_k].data[\r\n 0], bspan_index)\r\n if len(new_states) >= self.beam_size - dead_k and state.score + score_incre < new_states[-1].score:\r\n break\r\n decoded_t = mt_index[0][new_k]\r\n if decoded_t.data[0] >= cfg.vocab_size:\r\n decoded_t.data[0] = 2 # unk\r\n if self.vocab.decode(decoded_t.data[0]) == cfg.eos_m_token:\r\n if beam_result_valid(state.decoded, bspan_index):\r\n finished.append(state)\r\n dead_k += 1\r\n else:\r\n failed.append(state)\r\n else:\r\n decoded_t = decoded_t.view(1, -1)\r\n new_state = state.update_clone(score_incre, last_hidden, decoded_t)\r\n new_states.append(new_state)\r\n\r\n k += 1\r\n if self.beam_size - dead_k < 0:\r\n break\r\n new_states = new_states[:self.beam_size - dead_k]\r\n new_states.sort(key=lambda x: 
-x.score)\r\n states = new_states\r\n\r\n if t == self.max_ts - 1 and not finished:\r\n finished = failed\r\n print('FAIL')\r\n if not finished:\r\n finished.append(states[0])\r\n\r\n finished.sort(key=lambda x: -x.score)\r\n decoded_t = finished[0].decoded\r\n decoded_t = [_.view(-1).data[0] for _ in decoded_t]\r\n decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)\r\n # print(decoded_sentence)\r\n generated = torch.cat(finished[0].decoded, dim=1).data # [B=1, T]\r\n return generated\r\n\r\n def beam_search_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(\r\n m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)\r\n decoded = []\r\n for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):\r\n decoded_s = self.beam_search_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,\r\n u_input_np[:, i].reshape((-1, 1)),\r\n last_hidden_s, degree_input_s, bspan_index[i])\r\n decoded.append(decoded_s)\r\n return [list(_.view(-1)) for _ in decoded]\r\n\r\n def supervised_loss(self, pz_proba, pm_dec_proba, z_input, m_input):\r\n pz_proba, pm_dec_proba = pz_proba[:, :, :cfg.vocab_size].contiguous(), pm_dec_proba[:, :,\r\n :cfg.vocab_size].contiguous()\r\n pr_loss = self.pr_loss(pz_proba.view(-1, pz_proba.size(2)), z_input.view(-1))\r\n m_loss = self.dec_loss(pm_dec_proba.view(-1, pm_dec_proba.size(2)), m_input.view(-1))\r\n\r\n loss = pr_loss + m_loss\r\n return loss, pr_loss, m_loss\r\n\r\n def self_adjust(self, epoch):\r\n pass\r\n\r\n # REINFORCEMENT fine-tuning with MC\r\n\r\n def possible_reqs(self):\r\n if cfg.dataset == 'camrest':\r\n return ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n elif cfg.dataset == 'kvret':\r\n req_by_intent = {\r\n 'weather': ['weather_attribute'],\r\n 'navigate': ['poi', 'traffic_info', 'address', 'distance'],\r\n 'schedule': ['event', 'date', 'time', 'party', 'agenda', 'room']\r\n }\r\n reqs = []\r\n for value in req_by_intent.values():\r\n reqs.extend(value)\r\n return reqs\r\n else:\r\n raise ValueError('unknown dataset')\r\n\r\n def get_req_slots(self, bspan_index):\r\n reqs = self.possible_reqs()\r\n reqs = set(self.vocab.sentence_decode(bspan_index).split()).intersection(reqs)\r\n return [_ + '_SLOT' for _ in reqs]\r\n\r\n def reward(self, m_tm1, decoded, bspan_index):\r\n \"\"\"\r\n The setting of the reward function is heuristic. 
It can be better optimized.\r\n :param m_tm1:\r\n :param decoded:\r\n :param bspan_index:\r\n :return:\r\n \"\"\"\r\n req_slots = self.get_req_slots(bspan_index)\r\n\r\n m_tm1 = self.vocab.decode(m_tm1[0])\r\n finished = m_tm1 == 'EOS_M'\r\n decoded = [_.view(-1)[0] for _ in decoded]\r\n decoded_sentence = self.vocab.sentence_decode(decoded, cfg.eos_m_token).split()\r\n reward = -0.01 if cfg.dataset == 'camrest' else 0\r\n '''\r\n if not finished:\r\n if m_tm1 in req_slots:\r\n if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:\r\n reward = 1.0\r\n '''\r\n # some modification for reward function.\r\n if m_tm1 in req_slots:\r\n if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:\r\n reward += 1.0\r\n else:\r\n reward -= 1.0 if cfg.dataset == 'camrest' else 0 # repeat\r\n return reward, finished\r\n\r\n def sampling_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(\r\n m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)\r\n batch_loss = []\r\n\r\n sample_num = 1\r\n\r\n for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):\r\n if not self.get_req_slots(bspan_index[i]):\r\n continue\r\n for j in range(sample_num):\r\n loss = self.sampling_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,\r\n u_input_np[:, i].reshape((-1, 1)),\r\n last_hidden_s, degree_input_s, bspan_index[i])\r\n batch_loss.append(loss)\r\n if not batch_loss:\r\n return None\r\n else:\r\n return sum(batch_loss) / len(batch_loss)\r\n\r\n def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n decoded = []\r\n reward_sum = 0\r\n log_probs = []\r\n rewards = []\r\n bspan_index_np = np.array(bspan_index).reshape(-1, 1)\r\n for t in range(self.max_ts):\r\n # reward\r\n reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)\r\n reward_sum += reward\r\n rewards.append(reward)\r\n if t == self.max_ts - 1:\r\n finished = True\r\n if finished:\r\n loss = self.finish_episode(log_probs, rewards)\r\n return loss\r\n # action\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n degree_input, last_hidden, bspan_index_np)\r\n proba = proba.squeeze(0) # [B,V]\r\n dis = Categorical(proba)\r\n action = dis.sample()\r\n log_probs.append(dis.log_prob(action))\r\n mt_index = action.data.view(-1)\r\n decoded.append(mt_index.clone())\r\n\r\n for i in range(mt_index.size(0)):\r\n if mt_index[i] >= cfg.vocab_size:\r\n mt_index[i] = 2 # unk\r\n\r\n m_tm1 = cuda_(Variable(mt_index).view(1, -1))\r\n\r\n def finish_episode(self, log_probas, saved_rewards):\r\n R = 0\r\n policy_loss = []\r\n rewards = []\r\n for r in saved_rewards:\r\n R = r + 0.8 * R\r\n rewards.insert(0, R)\r\n\r\n rewards = torch.Tensor(rewards)\r\n # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\r\n\r\n for log_prob, reward in zip(log_probas, rewards):\r\n policy_loss.append(-log_prob * reward)\r\n l = len(policy_loss)\r\n policy_loss = torch.cat(policy_loss).sum()\r\n return policy_loss / l\r\n",
"from __future__ import division\nimport logging\nimport os\nimport random\n\nimport torch\nimport torchtext\nfrom torch import optim\nfrom tqdm import tqdm\n\nimport seq2seq\nfrom seq2seq.evaluator import Evaluator\nfrom seq2seq.loss import NLLLoss\nfrom seq2seq.optim import Optimizer\nfrom seq2seq.util import Checkpoint\n\nlogger = logging.getLogger(__name__)\n\n\nclass SupervisedTrainer(object):\n \"\"\"The SupervisedTrainer class helps in setting up a training framework\n in a supervised setting.\n\n Args:\n experiment_directory (optional, str): directory to store experiments in\n loss (seq2seq.loss.loss.Loss, optional): loss for training\n batch_size (int, optional): batch size for experiment\n checkpoint_every (int, optional): number of batches to checkpoint after\n \"\"\"\n def __init__(self, experiment_directory='./experiment', loss=None, batch_size=64,\n random_seed=None, checkpoint_every=100, print_every=100):\n if loss is None:\n loss = NLLLoss()\n if random_seed is not None:\n random.seed(random_seed)\n torch.manual_seed(random_seed)\n\n self.loss = loss\n self.evaluator = Evaluator(loss=self.loss, batch_size=batch_size)\n self.optimizer = None\n self.checkpoint_every = checkpoint_every\n self.print_every = print_every\n self.batch_size = batch_size\n self.experiment_directory = experiment_directory\n\n if not os.path.exists(self.experiment_directory):\n os.makedirs(self.experiment_directory)\n\n def train(self, model, data, n_epochs=5, resume=False,\n dev_data=None, optimizer=None, teacher_forcing_ratio=0):\n \"\"\"Train a given model.\n\n Args:\n model (seq2seq.models): model to run training on. If resume=True,\n it will be overwritten by the model loaded from the latest\n checkpoint\n data (seq2seq.dataset.dataset.Dataset): dataset object to train on\n n_epochs (int): number of epochs to run\n resume(bool): resume training with the latest checkpoint\n dev_data (seq2seq.dataset.dataset.Dataset): dev Dataset\n optimizer (seq2seq.optim.Optimizer): optimizer for training\n teacher_forcing_ratio (float): teaching forcing ratio\n Returns:\n model (seq2seq.models): trained model.\n \"\"\"\n if resume:\n latest_checkpoint_path = Checkpoint.get_latest_checkpoint(\n self.experiment_directory)\n resume_checkpoint = Checkpoint.load(latest_checkpoint_path)\n model = resume_checkpoint.model\n self.optimizer = resume_checkpoint.optimizer\n\n # A work-around to set optimizing parameters properly\n resume_optim = self.optimizer.optimizer\n defaults = resume_optim.param_groups[0]\n defaults.pop('params', None)\n defaults.pop('initial_lr', None)\n self.optimizer.optimizer = resume_optim.__class__(\n model.parameters(), **defaults)\n\n start_epoch = resume_checkpoint.epoch\n step = resume_checkpoint.step\n else:\n start_epoch = 1\n step = 0\n if optimizer is None:\n optimizer = Optimizer(\n optim.Adam(model.parameters()), max_grad_norm=5)\n self.optimizer = optimizer\n\n logger.info('Optimizer: %s, Scheduler: %s',\n self.optimizer.optimizer, self.optimizer.scheduler)\n\n self._train_epochs(data, model, n_epochs, \n start_epoch, step, dev_data=dev_data, \n teacher_forcing_ratio=teacher_forcing_ratio)\n return model\n\n def _train_epochs(self, data, model, n_epochs, start_epoch, \n start_step, dev_data=None, teacher_forcing_ratio=0):\n print_loss_total = epoch_loss_total = 0\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n batch_iterator = torchtext.data.BucketIterator(\n dataset=data,\n batch_size=self.batch_size,\n sort=False,\n sort_within_batch=True,\n 
sort_key=lambda x: len(x.src),\n device=device,\n repeat=False,\n )\n\n steps_per_epoch = len(batch_iterator)\n total_steps = steps_per_epoch * n_epochs\n\n step = start_step\n step_elapsed = 0\n for epoch in range(start_epoch, n_epochs + 1):\n logger.debug('Epoch: %d, Step: %d', epoch, step)\n\n batch_generator = iter(batch_iterator)\n # Consuming seen batches from previous training\n for _ in range((epoch - 1) * steps_per_epoch, step):\n next(batch_generator)\n\n model.train()\n progress_bar = tqdm(\n batch_generator,\n total=steps_per_epoch,\n desc='Train {}: '.format(self.loss.name),\n )\n for batch in progress_bar:\n step += 1\n step_elapsed += 1\n\n loss = self._train_batch(\n batch,\n model,\n teacher_forcing_ratio,\n data,\n )\n print_loss_total += loss\n epoch_loss_total += loss\n\n if step % self.print_every == 0 \\\n and step_elapsed > self.print_every:\n print_loss_avg = print_loss_total / self.print_every\n print_loss_total = 0\n progress_bar.set_description('Train {}: {:.4f}'.format(\n self.loss.name,\n print_loss_avg,\n ))\n\n # Checkpoint\n if step % self.checkpoint_every == 0 or step == total_steps:\n Checkpoint(\n model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step,\n input_vocab=data.fields[seq2seq.src_field_name].vocab,\n output_vocab=data.fields[seq2seq.tgt_field_name].vocab,\n ).save(self.experiment_directory)\n\n if step_elapsed == 0:\n continue\n\n epoch_loss_avg = epoch_loss_total / min(\n steps_per_epoch, step - start_step)\n epoch_loss_total = 0\n log_msg = 'Finished epoch {:d}: Train {}: {:.4f}'.format(\n epoch, self.loss.name, epoch_loss_avg)\n if dev_data is not None:\n dev_loss, accuracy = self.evaluator.evaluate(model, dev_data)\n self.optimizer.update(dev_loss, epoch)\n log_msg += ', Dev {}: {:.4f}, Accuracy: {:.4f}'.format(\n self.loss.name, dev_loss, accuracy)\n model.train()\n else:\n self.optimizer.update(epoch_loss_avg, epoch)\n\n logger.info(log_msg)\n\n def _train_batch(self, batch, model, teacher_forcing_ratio, dataset):\n # Forward propagation\n output, _, _ = model(\n batch,\n dataset=dataset,\n teacher_forcing_ratio=teacher_forcing_ratio,\n )\n # Get loss\n self.loss.reset()\n self.loss.eval_batch(output, batch)\n\n # Backward propagation\n model.zero_grad()\n self.loss.backward()\n self.optimizer.step()\n\n return self.loss.get_loss()\n",
"import sys, os, re, pdb\n# sys.path.append('/home/wyshi/simulator/sequcity_user/')\n# sys.path.append('/data/qkun/sequcity_mulitwoz_0.4/')\nsys.path.append('/home/wyshi/simulator/')\nimport logging, random\nimport torch\nimport numpy as np\nimport random\nfrom nltk import word_tokenize\nfrom collections import defaultdict\n\n\nfrom sequicity_user.seq_user import Seq_User\nfrom sequicity_user.model import Model\nfrom sequicity_user.config import global_config as cfg\n\nimport simulator.dialog_config as dialog_config\nimport simulator.nlg as nlg\nfrom simulator.user import User\nfrom simulator.agent.core import Action, SystemAct\n\n\nclass Seq_User_Act(Seq_User):\n def __init__(self, nlg_sample, nlg_template):\n super().__init__(nlg_sample=nlg_sample, nlg_template=nlg_template)\n self._set_initial_state()\n\n self._set_initial_goal_dic()\n\n # # # # # # # # # # # # # # # # \n # # model configure setting # #\n cfg.init_handler('tsdf-usr_act')\n cfg.dataset = 'usr_act'\n # logging.info(str(cfg))\n if cfg.cuda:\n torch.cuda.set_device(cfg.cuda_device)\n logging.info('Device: {}'.format(torch.cuda.current_device()))\n self.m = Model('usr_act')\n self.m.count_params()\n self.m.load_model()\n self.entity = self.m.reader.entity\n # # # # # # # # # # # # # # # # \n\n self.state_list = []\n self.act = ''\n self.prev_usr = ''\n\n self._set_initial_model_parameters()\n\n\n def _set_initial_state(self):\n self.state = {\n 'informed': {k:0 for k in self.entity_type['informable_slots']},\n 'asked': {k:0 for k in self.entity_type['requestable_slots']},\n 'asked_answered': {k:0 for k in self.entity_type['requestable_slots'] + ['name']},\n 'reservation_informed': {k:0 for k in self.entity_type['reservation_slots']},\n 'results': [],\n 'no_match_presented': 0,\n 'asked_anything_else': 0,\n 'no_other_presented': 0,\n 'match_presented': 0,\n 'book_fail': 0,\n\n 'usr_act_sequence': [],\n 'sys_act_sequence': [],\n\n 'inform': {k:None for k in self.entity_type['informable_slots']},\n 'book': {k:None for k in self.entity_type['reservation_slots']},\n 'reqt' : []\n }\n self.check_constrain = []#dialog_config.CONSTRAINT_CHECK_NOTYET\n self.check_info = dialog_config.INFO_CHECK_NOTYET\n self.check_reservation = []#dialog_config.RESERVATION_CHECK_NOTYET\n self.dialog_status = dialog_config.NO_OUTCOME_YET\n\n def _set_initial_goal_dic(self):\n # # goal transfer into list\n self.goal_dic = defaultdict(list)\n for key in ['cur_info', 'info_second_choice', 'cur_book', 'book_second_choice']:\n if key in self.goal:\n for slot_name in self.goal[key]:\n self.goal_dic[slot_name] += [self.goal[key][slot_name]]\n if 'reqt' in self.goal:\n for slot_name in self.goal['reqt']:\n self.goal_dic[slot_name] = [slot_name]\n\n self.goal_list = list(self.goal['cur_info'].keys())\n if 'info_second_choice' in self.goal:\n self.goal_list += list(self.goal['info_second_choice'].keys())\n if 'reqt' in self.goal:\n self.goal_list += list(self.goal['reqt'])\n if 'cur_book' in self.goal:\n self.goal_list += list(self.goal['cur_book'].keys())\n if 'book_second_choice' in self.goal:\n self.goal_list += list(self.goal['book_second_choice'].keys())\n\n def _set_initial_model_parameters(self):\n self.turn_batch = {\n 'dial_id': [0],\n 'turn_num': [0],\n 'user': [[0]],\n 'response': [[0]],\n 'bspan': [[0]],\n 'u_len': [0],\n 'm_len': [0],\n 'degree': [[1]],\n 'supervised': [True],\n 'goal': [self.m.reader.vocab.sentence_encode(word_tokenize(' '.join(self.goal_list)) + ['EOS_Z0'])]\n }\n self.prev_z = None\n self.prev_act = None\n\n\n def respond(self, 
sys_act, prev_sys=None):\n mode = 'test'\n turn_states = {}\n turn_num = self.turn_batch['turn_num'][0]\n act_list = ['inform_type', \\\n 'inform_type_change', \\\n 'ask_info', \\\n 'make_reservation', \\\n 'make_reservation_change_time', \\\n 'anything_else', \\\n 'goodbye']\n\n if turn_num != 0:\n self.update_states_from_sys(sys_act)\n\n if prev_sys is None:\n prev_sys = 'Hello! What can I help you?'.lower()\n else:\n prev_sys = prev_sys.lower()\n\n # # format input\n utt_tokenized = word_tokenize(prev_sys) + ['EOS_U']\n utt_encoded = self.m.reader.vocab.sentence_encode(utt_tokenized)\n\n if self.turn_batch['turn_num'] == [0]:\n self.turn_batch['user'] = [utt_encoded]\n else:\n self.turn_batch['user'] = [self.m.reader.vocab.sentence_encode(word_tokenize(self.prev_act)) + \\\n [self.m.reader.vocab.encode('EOS_M')] + \\\n utt_encoded]\n\n self.turn_batch['u_len'] = [len(i) for i in self.turn_batch['user']]\n self.turn_batch['m_len'] = [len(i) for i in self.turn_batch['response']]\n\n u_input, u_input_np, z_input, m_input, m_input_np, u_len, \\\n m_len, degree_input, kw_ret \\\n = self.m._convert_batch(self.turn_batch, self.prev_z)\n\n # # execute tsd-net\n m_idx, z_idx, turn_states = self.m.m(mode=mode, u_input=u_input, u_len=u_len, z_input=z_input,\n m_input=m_input,\n degree_input=degree_input, u_input_np=u_input_np,\n m_input_np=m_input_np, m_len=m_len, turn_states=turn_states,\n dial_id=self.turn_batch['dial_id'], **kw_ret)\n\n self.act = act_list[m_idx[0,0,0]]\n if turn_num == 0:\n self.act = 'inform_type'\n\n # # generating slots\n slot_dict = self.generate_dial_act_slots(sys_act, prev_sys)\n\n # # generating sentence with templats\n usr_act = Action(self.act, slot_dict)\n # pdb.set_trace()\n # print(usr_act)\n\n\n if self.act == 'inform_type' and slot_dict == {} and sys_act.act == SystemAct.ASK_TYPE:\n usr_response_sent = 'i do not care.'\n else:\n if self.nlg_sample:\n assert self.nlg_templates\n assert self.generator\n # usr_response_sent, lexicalized_usr_act = self.nlg.generate_sent(usr_act, templates=self.nlg_templates, generator=1)\n print('supervised nlg_sample')\n if prev_sys is None:\n prev_sys = \"<start>\"\n\n usr_response_sent, lexicalized_usr_act = self.nlg.generate_sent(usr_act, templates=self.nlg_templates,\n generator=self.generator, context=prev_sys,\n seq2seq=None)\n else:\n # print('')\n if self.seq2seq is None:\n print(\"supervised templates\")\n assert self.nlg_template\n assert not self.nlg_sample\n assert self.generator is None\n usr_response_sent, lexicalized_usr_act = self.nlg.generate_sent(usr_act, turn_num=(len(self.state['usr_act_sequence'])-1),\n generator=None,\n seq2seq=None)\n else:\n print(\" supervised seq2seq\")\n assert not self.nlg_sample\n assert not self.nlg_template\n assert self.seq2seq\n usr_response_sent, lexicalized_usr_act = self.nlg.generate_sent(usr_act,\n generator=None,\n seq2seq=self.seq2seq)\n usr_response_sent = usr_response_sent.replace(\"<eos>\", \"\")\n usr_response_sent = usr_response_sent.lower()\n\n\n # usr_response_sent, lexicalized_usr_act = self.nlg.generate_sent(usr_act, turn_num=turn_num)\n\n # # check success of last turn\n if turn_num != 0:\n self.success_or_not(self.prev_usr, prev_sys, usr_response_sent, sys_act)\n\n # # update states\n self.update_states_from_user(slot_dict)\n\n self.prev_z = z_idx\n self.prev_act = self.act\n self.prev_usr = usr_response_sent\n turn_num += 1\n self.turn_batch['turn_num'] = [turn_num]\n # self.turn_batch['bspan'] = self.prev_z\n\n return None, usr_response_sent\n\n\n def 
interact(self):\n mode = 'test'\n turn_states = {}\n turn_num = self.turn_batch['turn_num'][0]\n # utterance = input('User:',).lower()\n utterance = 'Hello! What can I help you?'.lower()\n print('Sys: ' + utterance)\n while True:\n\n if self.turn_batch['turn_num'][0] > 10 or utterance == 'close':\n break;\n\n # # format input\n utt_tokenized = word_tokenize(utterance) + ['EOS_U']\n utt_encoded = self.m.reader.vocab.sentence_encode(utt_tokenized)\n\n if self.turn_batch['turn_num'] == [0]:\n self.turn_batch['user'] = [utt_encoded]\n else:\n self.turn_batch['user'] = [self.m.reader.vocab.sentence_encode(word_tokenize(self.prev_act)) + \\\n [self.m.reader.vocab.encode('EOS_M')] + \\\n utt_encoded]\n\n self.turn_batch['u_len'] = [len(i) for i in self.turn_batch['user']]\n self.turn_batch['m_len'] = [len(i) for i in self.turn_batch['response']]\n\n u_input, u_input_np, z_input, m_input, m_input_np, u_len, \\\n m_len, degree_input, kw_ret \\\n = self.m._convert_batch(self.turn_batch, self.prev_z)\n\n # # execute tsd-net\n m_idx, z_idx, turn_states = self.m.m(mode=mode, u_input=u_input, u_len=u_len, z_input=z_input,\n m_input=m_input,\n degree_input=degree_input, u_input_np=u_input_np,\n m_input_np=m_input_np, m_len=m_len, turn_states=turn_states,\n dial_id=self.turn_batch['dial_id'], **kw_ret)\n \n sent = self.m.reader.vocab.sentence_decode(m_idx[0], eos='EOS_M')\n # print('Usr Simu: ' + sent)\n\n filled_sent = self.fill_sentence(sent)\n print('Usr Simu: ' + filled_sent)\n # print('Slots: ' + self.m.reader.vocab.sentence_decode(z_idx[0], eos='EOS_Z2'))\n # pdb.set_trace()\n print('Goal:' + ' '.join(self.goal_list))\n print('-------------------------------------------------------\\n')\n pdb.set_trace()\n\n self.prev_z = z_idx\n self.prev_act = filled_sent\n turn_num += 1\n self.turn_batch['turn_num'] = [turn_num]\n # self.turn_batch['bspan'] = self.prev_z\n\n\n utterance = input('Sys:',).lower()\n\n def generate_dial_act_slots(self, sys_act, prev_sys):\n slot_dict = {}\n if self.act == 'inform_type':\n\n avail_slot = []\n if self.turn_batch['turn_num'][0] == 0:\n # avail_slot = self.goal['cur_info'].keys():\n # # pick random number of random slots\n avail_slot = random.sample(self.goal['cur_info'].keys(), k=random.choice(range(1,len(self.goal['cur_info'].keys()) + 1)))\n # slot_dict = self.goal['cur_info']\n for slot in avail_slot:\n slot_dict[slot] = self.goal['cur_info'][slot]\n\n elif sys_act.act == SystemAct.ASK_TYPE:\n for slot in ['area', 'food', 'pricerange']:\n if slot in prev_sys:\n avail_slot.append(slot)\n if slot in self.goal['cur_info']:\n slot_dict[slot] = self.goal['cur_info'][slot]\n\n if avail_slot == []:\n # avail_slot = self.goal['cur_info'].keys():\n slot_dict = self.goal['cur_info']\n\n else:\n # if sys_act.act == SystemAct.NOMATCH_RESULT:\n # pdb.set_trace()\n avail_slot = [slot_name for slot_name in self.state['inform'] if self.state['inform'][slot_name] is None]\n if avail_slot:\n for slot in avail_slot:\n if slot in self.goal['cur_info']:\n slot_dict[slot] = self.goal['cur_info'][slot]\n if not slot_dict:\n if sys_act.act == SystemAct.NOMATCH_RESULT:\n \n if 'info_second_choice' in self.goal:\n self.act = 'inform_type_change'\n # slot_dict = self.goal['info_second_choice']\n else:\n self.act = 'goodbye'\n elif self.state['results']:\n if 'reqt' in self.goal:\n self.act = 'ask_info'\n else:\n self.act = 'make_reservation'\n\n if self.act == 'inform_type_change':\n if 'info_second_choice' not in self.goal:\n # this prediction is bad\n self.act = 'inform_type'\n slot_dict = 
self.goal['cur_info']\n # pdb.set_trace()\n else:\n # avail_slot = self.goal['info_second_choice'].keys()\n slot_dict = self.goal['info_second_choice']\n\n\n if self.act == 'ask_info':\n if self.state['results']:\n if 'reqt' not in self.goal:\n # this prediction is bad\n avail_slot = sorted(random.sample(['address','postcode','phone'], k=random.choice(range(1,4))))\n # pdb.set_trace()\n else:\n avail_slot = list(set(self.goal['reqt']) - set(self.state['reqt']))\n\n\n for slot in avail_slot:\n slot_dict[slot] = None\n if slot_dict == {}:\n self.act = 'goodbye'\n else:\n self.act = 'inform_type'\n slot_dict = self.goal['cur_info']\n\n if self.act == 'make_reservation':\n avail_slot = []\n if self.state['results']:\n if 'cur_book' in self.goal:\n slot_dict = self.goal['cur_book']\n\n # slot_dict = sorted(random.sample(self.goal['cur_book'], k=random.choice(range(1,len(self.goal['cur_book']) + 1))))\n\n else:\n if sys_act.act == SystemAct.ASK_RESERVATION_INFO:\n for slot in ['time', 'day', 'people']:\n if slot in prev_sys:\n avail_slot.append(slot)\n\n if avail_slot == []:\n avail_slot = sorted(random.sample(['time', 'day', 'people'], k=random.choice(range(1,4))))\n\n for slot in avail_slot:\n slot_dict[slot] = random.choice(self.entity['informable'][slot])\n self.goal['cur_book'] = slot_dict\n else:\n self.act = 'inform_type'\n slot_dict = self.goal['cur_info']\n\n\n\n if self.act == 'make_reservation_change_time':\n if 'book_second_choice' in self.goal:\n # avail_slot = self.goal['book_second_choice'].keys()\n slot_dict = self.goal['book_second_choice']\n elif 'make_reservation' in self.goal:\n # this prediction is bad\n # pdb.set_trace()\n self.act = 'make_reservation'\n # avail_slot = self.goal['cur_book'].keys()\n slot_dict = self.goal['cur_book']\n else:\n self.act = 'make_reservation'\n avail_slot = sorted(random.sample(['time', 'day', 'people'], k=random.choice(range(1,4))))\n for slot in avail_slot:\n slot_dict[slot] = random.choice(self.entity['informable'][slot])\n self.goal['cur_book'] = slot_dict\n\n else:\n avail_slot = []\n\n # if slot_dict == {} and self.act == 'inform_type':\n # pdb.set_trace()\n return slot_dict\n\n def success_or_not(self, prev_usr, prev_sys, cur_usr, sys_act):\n\n # # judge whether stop\n stop_flag = 0\n # # judge whether stop\n stop_flag = 0\n non_stop_pat = re.compile('number|phone|post|address|name|information|value_|restaurant_')\n \n if 'bye' in cur_usr and \\\n '?' not in cur_usr:\n stop_flag = 1\n elif 'thank' in cur_usr and \\\n '[' not in cur_usr and \\\n '?' not in cur_usr:\n stop_flag = 1\n elif re.match('.*have a (good|nice|lovely).*', cur_usr) and \\\n '?' 
not in cur_usr:\n stop_flag = 1\n elif re.match('.*(that is|thats|that s|that will be) all.*', cur_usr):\n stop_flag = 1\n elif not re.findall(non_stop_pat, cur_usr):\n if 'all set' in cur_usr:\n stop_flag = 1\n elif 'i am all i need' in cur_usr:\n stop_flag = 1\n elif 'that s it' in cur_usr:\n stop_flag = 1\n\n\n if self.turn_batch['turn_num'][0] > dialog_config.MAX_TURN:\n stop_flag = 1\n\n if sys_act.act == SystemAct.NOMATCH_RESULT and 'info_second_choice' not in self.goal:\n stop_flag = 1\n\n # # system ending\n\n if sys_act.act == SystemAct.GOODBYE:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n # # ask info\n # (?# elif re.findall(r'(?<!reference) number|(?<!reservation) number|phone|post *code| address| name|information', prev_usr):)\n elif self.prev_act == 'ask_info':\n if sys_act.act == SystemAct.PROVIDE_INFO:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n \n # # # reservation\n # prev_usr_slot = self.m.reader.delex_sent(prev_usr)\n elif re.search(r'value_time|value_day|value_people', self.m.reader.delex_sent(prev_usr)) is not None or \\\n re.search(r'reference number|reservation number', prev_usr) is not None:\n # elif self.prev_act == 'make_reservation':\n\n # # reference number\n if sys_act.act == SystemAct.ASK_RESERVATION_INFO:\n tmp_flag = 1\n for slot_name in ['time','day','people']:\n if slot_name in prev_sys and self.state['book'][slot_name] is not None:\n tmp_flag = 0\n if tmp_flag:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif sys_act.act in [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]:\n\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n\n elif sys_act.act == SystemAct.PRESENT_RESULT and self.state['results'] == []:\n prev_sys_slot = self.m.reader.delex_sent(prev_sys)\n constraints = [slot[1:-1].split('|')[1] for slot in re.findall(r'\\[.*?\\]', prev_sys_slot)]\n tmp_flag = 1\n if self.state['inform']['name'] is not None:\n tmp_flag = 0\n for slot_name in self.state['inform']:\n if self.state['inform'][slot_name] is not None and self.state['inform'][slot_name] not in constraints:\n tmp_flag = 0\n if tmp_flag:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif sys_act.act in [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif sys_act.act == SystemAct.ASK_RESERVATION_INFO:\n if 'book' in prev_usr or 'reserv' in prev_usr:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n # # # inform type\n\n elif sys_act.act == SystemAct.NOMATCH_RESULT:\n cur_info = {slot_name:slot_val for slot_name, slot_val in self.state['inform'].items() if slot_val is not None}\n match_list = self.query_in_DB(cur_info)\n if not match_list:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n\n elif sys_act.act == SystemAct.NO_OTHER:\n cur_info = {slot_name:slot_val for slot_name, slot_val in self.state['inform'].items() if slot_val is not None}\n match_list = self.query_in_DB(cur_info, skip=self.state['results'])\n if not match_list:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n 
self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif re.search(r'value_area|value_food|value_pricerange', self.m.reader.delex_sent(prev_usr)) is not None:\n if sys_act.act == SystemAct.PRESENT_RESULT:\n prev_sys_slot = self.m.reader.delex_sent(prev_sys)\n constraints = [slot[1:-1].split('|')[1] for slot in re.findall(r'\\[.*?\\]', prev_sys_slot)]\n tmp_flag = 1\n for slot_name in self.state['inform']:\n if self.state['inform'][slot_name] is not None and self.state['inform'][slot_name] not in constraints:\n tmp_flag = 0\n if tmp_flag:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif sys_act.act == SystemAct.ASK_TYPE:\n tmp_flag = 1\n for slot_name in ['area','food','pricerange']:\n if slot_name in prev_sys and self.state['inform'][slot_name] is not None:\n tmp_flag = 0\n \n if tmp_flag:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL) \n\n elif re.search(r'restaurant_name', self.m.reader.delex_sent(prev_usr)) is not None:\n if sys_act.act == SystemAct.NOMATCH_RESULT or sys_act.act == SystemAct.PRESENT_RESULT:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n elif sys_act.act == SystemAct.ASK_TYPE:\n if self.state['inform']['name'] is not None and \\\n (self.state['inform']['area'] is None or \\\n self.state['inform']['food'] is None or \\\n self.state['inform']['pricerange'] is None):\n\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n else:\n self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)\n\n \n\n\n else:\n self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)\n\n\n self.dialog_status = self.state_list[-1]\n\n # print('**********' , self.dialog_status , '************')\n # if self.dialog_status == dialog_config.TURN_FAIL_FOR_SL:\n # pdb.set_trace()\n\n\n\n\n if stop_flag:\n if dialog_config.TURN_FAIL_FOR_SL not in self.state_list:\n self.dialog_status = dialog_config.SUCCESS_DIALOG\n else:\n self.dialog_status = dialog_config.FAILED_DIALOG\n\n # if self.dialog_status == dialog_config.FAILED_DIALOG:\n # pdb.set_trace()\n\n def update_states_from_user(self, slot_dic):\n for slot_name in slot_dic:\n slot_val = slot_dic[slot_name]\n if slot_name in self.state['inform']:\n self.state['inform'][slot_name] = slot_val\n elif slot_name in self.state['book']:\n self.state['book'][slot_name] = slot_val\n else:\n self.state['reqt'].append(slot_name)\n\n def update_states_from_sys(self, sys_act):\n if sys_act.act == SystemAct.PRESENT_RESULT:\n self.state['results'].append(sys_act.parameters)\n\n def reset(self):\n super().reset()\n self._set_initial_state()\n self._set_initial_goal_dic()\n self._set_initial_model_parameters()\n self.state_list = []\n self.act = ''\n self.prev_usr = ''\n\ndef main():\n user = Seq_User_Act()\n # user.respond()\n\n # user.interact()\n\nif __name__ == \"__main__\":\n main()\n"
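The success_or_not method in the string above accumulates one TURN_SUCCESS_FOR_SL or TURN_FAIL_FOR_SL label per turn in state_list, and once a stop condition fires it marks the whole dialog successful only if no turn failed. A self-contained sketch of that aggregation rule follows; the constant values are stand-ins for whatever simulator.dialog_config actually defines.

    # Minimal sketch of the dialog-outcome rule used by Seq_User_Act.success_or_not.
    TURN_SUCCESS_FOR_SL = 'turn_success'   # placeholder value, assumed
    TURN_FAIL_FOR_SL = 'turn_fail'         # placeholder value, assumed
    SUCCESS_DIALOG = 'success'
    FAILED_DIALOG = 'failure'

    def dialog_outcome(state_list):
        """Collapse per-turn labels into a single dialog-level outcome."""
        if TURN_FAIL_FOR_SL in state_list:
            return FAILED_DIALOG
        return SUCCESS_DIALOG

    # One failed turn anywhere in the dialog fails the whole dialog.
    assert dialog_outcome([TURN_SUCCESS_FOR_SL, TURN_FAIL_FOR_SL]) == FAILED_DIALOG
    assert dialog_outcome([TURN_SUCCESS_FOR_SL, TURN_SUCCESS_FOR_SL]) == SUCCESS_DIALOG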
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.nn.GRU",
"torch.nn.Embedding",
"numpy.concatenate",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.FloatTensor",
"torch.split",
"torch.topk",
"torch.autograd.Variable",
"torch.nn.Dropout",
"torch.ones",
"torch.from_numpy",
"torch.nn.utils.rnn.pack_padded_sequence",
"numpy.sin",
"torch.bmm",
"numpy.zeros",
"torch.nn.NLLLoss",
"torch.LongTensor",
"numpy.power",
"torch.exp",
"torch.nn.Linear",
"torch.log",
"torch.stack",
"numpy.argsort",
"numpy.array",
"torch.Tensor",
"torch.isnan",
"numpy.cos",
"torch.distributions.Categorical",
"torch.nn.init.orthogonal_"
],
[
"torch.device",
"torch.manual_seed",
"torch.cuda.is_available"
],
[
"torch.cuda.set_device",
"torch.cuda.current_device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
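Each record in this dump pairs per-file source strings with the API calls detected in them, as the apis column above shows. A small illustrative sketch of how one such row could be summarized, counting detected calls per top-level package; the field names follow the columns shown here, while the loading step and the example row itself are assumptions.

    # Illustrative only: summarize one repository row of this dump.
    from collections import Counter

    def summarize_row(row):
        """Count detected API calls per top-level package for one row."""
        counts = Counter()
        for file_apis in row['apis']:      # one list of API strings per file
            for api in file_apis:          # e.g. 'torch.nn.GRU', 'numpy.zeros'
                counts[api.split('.')[0]] += 1
        return counts

    example_row = {                        # placeholder record, not from the dump
        'repo_name': 'example/repo',
        'apis': [['torch.device', 'torch.manual_seed', 'numpy.zeros']],
    }
    print(summarize_row(example_row))      # Counter({'torch': 2, 'numpy': 1})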
jggautier/dataverse-automating-downloads | [
"40cf127e7771049165b21b732635cd35848eda5e"
] | [
"dataverse_repository_curation_assistant/dataverse_repository_curation_assistant_functions.py"
] | [
"# Functions for the curation app\nimport csv\nfrom dateutil.parser import parse\nfrom functools import reduce\nimport json\nimport glob\nimport os\nfrom os import listdir\nimport pandas as pd\nfrom pathlib import Path\nimport re\nimport requests\nimport time\nfrom tkinter import Tk, ttk, Frame, Label, IntVar, Checkbutton, filedialog, NORMAL, DISABLED\nfrom tkinter import Listbox, MULTIPLE, StringVar, END, INSERT, N, E, S, W\nfrom tkinter.ttk import Entry, Progressbar, OptionMenu, Combobox\nfrom urllib.parse import urlparse\n\n\n# Class for custom collapsiblePanel frame using tkinter widgets\nclass collapsiblePanel(Frame):\n\n def __init__(self, parent, text='', default='closed', padx=0, pady=0, *args, **options):\n Frame.__init__(self, parent, *args, **options, padx=padx, pady=pady)\n\n self.show = IntVar()\n\n self.titleFrame = ttk.Frame(self, relief='raised', borderwidth=1)\n self.titleFrame.pack(fill='x', expand=1)\n\n Label(self.titleFrame, text=text, width=40, anchor='w').pack(side='left', fill='x', expand=1)\n\n self.toggleButton = ttk.Checkbutton(\n \tself.titleFrame, width=5, command=self.toggle,\n\t\t\tvariable=self.show, style='Toolbutton')\n self.toggleButton.pack(side='right')\n\n self.subFrame = Frame(self, borderwidth=1, relief='groove', bg='white', padx=10)\n\n if default == 'open':\n \tself.show.set(1)\n \tself.subFrame.pack(fill='x', expand=1)\n \tself.toggleButton.configure(text='▼')\n elif default == 'closed':\n \tself.show.set(0)\n \tself.toggleButton.configure(text='▲')\n\n def toggle(self):\n if bool(self.show.get()):\n self.subFrame.pack(fill='x', expand=1)\n self.toggleButton.configure(text='▼')\n else:\n self.subFrame.forget()\n self.toggleButton.configure(text='▲')\n\n\ndef forget_widget(widget):\n exists = widget.winfo_exists()\n if exists == 1:\n widget.grid_forget()\n else:\n pass\n\n\n# Function for getting value of nested key, truncating the value to 10,000 characters if it's a string\n# (character limit for many spreadsheet applications), and returning nothing if key doesn't exist\ndef improved_get(_dict, path, default=None):\n for key in path.split('.'):\n try:\n _dict = _dict[key]\n except KeyError:\n return default\n if isinstance(_dict, int) or isinstance(_dict, dict):\n return _dict\n elif isinstance(_dict, str):\n return _dict[:10000].replace('\\r', ' - ')\n\n\ndef list_to_string(lst): \n string = ', '.join(lst)\n return string\n\n\ndef convert_to_local_tz(timestamp, shortDate=False):\n # Save local timezone to localTimezone variable\n localTimezone = tz.tzlocal()\n # Convert string to datetime object\n timestamp = parse(timestamp)\n # Convert timestamp to local timezone\n timestamp = timestamp.astimezone(localTimezone)\n if shortDate is True:\n # Return timestamp in YYYY-MM-DD format\n timestamp = timestamp.strftime('%Y-%m-%d')\n return timestamp\n\n\ndef select_all(listbox):\n listbox.select_set(0, END)\n\n\ndef clear_selections(listbox):\n listbox.selection_clear(0, END)\n\n\n# Function for getting the server URL from a collection URL\n# or what's entered in the Installatio URL field\ndef get_installation_url(string):\n if string.startswith('http'):\n parsed = urlparse(string)\n installationUrl = parsed.scheme + '://' + parsed.netloc\n return installationUrl\n elif '(' in string:\n installationUrl = re.search(r'\\(.*\\)', string).group()\n installationUrl = re.sub('\\(|\\)', '', installationUrl)\n return installationUrl\n\n\n# Gets list of URLs from Dataverse map JSON data and add Demo Dataverse url\ndef get_installation_list():\n installationsList 
= []\n dataverseInstallationsJsonUrl = 'https://raw.githubusercontent.com/IQSS/dataverse-installations/master/data/data.json'\n response = requests.get(dataverseInstallationsJsonUrl)\n data = response.json()\n\n for installation in data['installations']:\n name = installation['name']\n hostname = installation['hostname']\n installationUrl = 'https://' + hostname\n nameAndUrl = '%s (%s)' % (name, installationUrl)\n installationsList.append(nameAndUrl)\n\n installationsList.insert(0, 'Demo Dataverse (https://demo.dataverse.org)')\n\n return installationsList\n\n\n# Function for getting name of installation's root collection \n# (assumming root dataverse's ID is 1, which isn't the case with UVA Dataverse)\ndef get_root_alias_name(url):\n\n # If it's the UVA homepage URL, it's root alias is uva (whose database ID is not 1)\n if 'dataverse.lib.virginia.edu' in url:\n rootAlias = 'uva'\n\n # If's it's not the UVA homepage URL, get the alias of the collection whose database is 1\n elif '/dataverse/' in url:\n parsed = urlparse(url)\n url = parsed.scheme + '://' + parsed.netloc + '/api/dataverses/1'\n response = requests.get(url)\n dataverseData = response.json()\n rootAlias = dataverseData['data']['alias']\n elif '/dataverse/' not in url:\n url = '%s/api/dataverses/1' % (url)\n response = requests.get(url)\n dataverseData = response.json()\n rootAlias = dataverseData['data']['alias']\n\n return rootAlias\n\n\n# Function for getting collection alias name of a given Dataverse Collection URL,\n# including the \"Root\" collection\ndef get_alias_from_collection_url(url):\n\n # If /dataverse/ is not in the URL, assume it's the installation's server url...\n if '/dataverse/' not in url:\n # If it's the UVA homepage URL, get it's root alias, whose database ID is not 1\n if 'dataverse.lib.virginia.edu' in url:\n alias = 'uva'\n\n # If's it's not the UVA homepage URL, get the alias of the collection whose database is 1\n elif 'dataverse.lib.virginia.edu' not in url:\n installationUrl = get_installation_url(url)\n url = '%s/api/dataverses/1' % (installationUrl)\n response = requests.get(url)\n dataverseData = response.json()\n alias = dataverseData['data']['alias']\n\n # If /dataverse/ is in the url, assume it's a collection URL and parse string to get its alias...\n elif '/dataverse/' in url:\n parsed = urlparse(url)\n try:\n alias = parsed.path.split('/')[2]\n # Or return an empty string\n except IndexError:\n alias = ''\n\n return alias\n\n\n# Returns True if collection alias is the installation's root collection or\n# False if not (doesn't work with UVA)\ndef is_root_collection(url):\n if get_alias_from_collection_url(url) == get_root_alias_name(url):\n return True\n elif get_alias_from_collection_url(url) != get_root_alias_name(url):\n return False\n\n\n# Function that turns Dataverse installation URL, instalation URL or search URL into a Search API URL\ndef get_search_api_url(url, apiKey=None):\n\n # If URL is not a search url (doesn't contain 'q=') and contains /dataverse/, it's a Dataverse collection URL\n if 'q=' not in url and '/dataverse/' in url:\n # Remove the jsessionidString that sometimes appears in the URL\n try:\n jsessionidString = re.search(r';jsessionid=.*', url).group()\n url = url.replace(jsessionidString, '?')\n except AttributeError:\n pass\n # Get the Dataverse Collection name in the search URL\n dataversePart = re.search(r'\\/dataverse\\/.*', url).group()\n dataverseName = dataversePart.replace('/dataverse/', '')\n # Repalce '/dataverse/' and the dataverse name with 
'/api/search?q=*' and add subtree parameter with dataverse name\n apiSearchURL = url.replace(dataversePart, '/api/search?q=*') + '&subtree=%s' % (dataverseName)\n\n # If URL is not a search URL (doesn't contain 'q=') and doesn't have /dataverse/, assume it's the URL of the installation\n if 'q=' not in url and '/dataverse/' not in url:\n apiSearchURL = url.replace('/dataverse.xhtml', '')\n apiSearchURL = apiSearchURL + '/api/search'\n # If entered installation URL ends with a forward slash, replace resulting double slash with a single slash\n apiSearchURL = apiSearchURL.replace('//api', '/api') + '?q=*'\n\n # If URL has 'q=', then assume it's a Search URL\n elif 'q=' in url:\n\n # Sometimes there's a slash before the ?q. If so, remove it\n url = url.replace('/?q', '?q')\n\n # If there's a jsessionid string, remove it\n try:\n jsessionidString = re.search(r';jsessionid=.*\\?', url).group()\n url = url.replace(jsessionidString, '?')\n except AttributeError:\n pass\n \n # Get the Dataverse Collection name in the search URL\n # dataverseName = re.search(r'\\/dataverse\\/\\w*\\?q', url)\n dataverseName = re.search(r'\\/dataverse\\/.*\\?q', url)\n dataverseName = dataverseName.group()\n\n subtree = dataverseName.replace('/dataverse/', '&subtree=').replace('?q', '')\n\n apiSearchURL = (\n url\n .replace(dataverseName, '/api/search?q')\n .replace('?q=&', '?q=*&')\n .replace('%3A', ':')\n .replace('%22', '\"')\n .replace('%28', '(')\n .replace('%29', ')')\n + '&show_entity_ids=true'\n + subtree\n )\n\n # Remove any digits after any fq parameters\n apiSearchURL = re.sub('fq\\d', 'fq', apiSearchURL)\n apiSearchURL = apiSearchURL + '&per_page=10&start=0'\n\n # Replace values of any \"types\" parameters into the Search API's \"type\" paramater\n try:\n dTypes = re.search(r'types=.*?&', apiSearchURL).group()\n dTypesList = dTypes.replace('types=', '').replace('&', '').split(':')\n dTypesString = ''\n for dType in dTypesList:\n dType = '&type=%s' %(re.sub('s$', '', dType))\n dTypesString = dTypesString + dType\n apiSearchURL = apiSearchURL + dTypesString\n except AttributeError:\n pass\n\n # Remove dvObjectType and types parameters, which I think the Search API is ignoring\n apiSearchURL = re.sub('fq=dvObjectType:\\(.*\\)&', '', apiSearchURL)\n apiSearchURL = re.sub('types=.*?&', '', apiSearchURL)\n\n return apiSearchURL\n\n\n# Function that converts as many common html codes as I could find into their human-readable strings\ndef convert_common_html_encoding(string):\n string = (\n string\n .replace('%20', ' ').replace('%21', '!').replace('%22', '\\\"').replace('%23', '#')\n .replace('%24', '$').replace('%25', '%').replace('%26', '&').replace('%27', '\\'')\n .replace('%28', '(').replace('%29', ')').replace('%2A', '*').replace('%2B', '+')\n .replace('%2C', ',').replace('%2D', '-').replace('%2E', '.').replace('%2F', '/')\n .replace('%30', '0').replace('%31', '1').replace('%32', '2').replace('%33', '3')\n .replace('%34', '4').replace('%35', '5').replace('%36', '6').replace('%37', '7')\n .replace('%38', '8').replace('%39', '9').replace('%3A', ':').replace('%3B', ';')\n .replace('%3C', '<').replace('%3D', '=').replace('%3E', '>').replace('%3F', '?')\n .replace('%40', '@').replace('%41', 'A').replace('%42', 'B').replace('%43', 'C')\n .replace('%44', 'D').replace('%45', 'E').replace('%46', 'F').replace('%47', 'G')\n .replace('%48', 'H').replace('%49', 'I').replace('%4A', 'J').replace('%4B', 'K')\n .replace('%4C', 'L').replace('%4D', 'M').replace('%4E', 'N').replace('%4F', 'O')\n .replace('%50', 
'P').replace('%51', 'Q').replace('%52', 'R').replace('%53', 'S')\n .replace('%54', 'T').replace('%55', 'U').replace('%56', 'V').replace('%57', 'W')\n .replace('%58', 'X').replace('%59', 'Y').replace('%5A', 'Z').replace('%5B', '[')\n .replace('%5C', '\\\\').replace('%5D', ']').replace('%5E', '^').replace('%5F', '_')\n .replace('%60', '`').replace('%61', 'a').replace('%62', 'b').replace('%63', 'c')\n .replace('%64', 'd').replace('%65', 'e').replace('%66', 'f').replace('%67', 'g')\n .replace('%68', 'h').replace('%69', 'i').replace('%6A', 'j').replace('%6B', 'k')\n .replace('%6C', 'l').replace('%6D', 'm').replace('%6E', 'n').replace('%6F', 'o')\n .replace('%70', 'p').replace('%71', 'q').replace('%72', 'r').replace('%73', 's')\n .replace('%74', 't').replace('%75', 'u').replace('%76', 'v').replace('%77', 'w')\n .replace('%78', 'x').replace('%79', 'y').replace('%7A', 'z').replace('%7B', '{')\n .replace('%7C', '|').replace('%7D', '}').replace('%7E', '~').replace('%80', '€')\n .replace('%82', '‚').replace('%83', 'ƒ').replace('%84', '„').replace('%85', '…')\n .replace('%86', '†').replace('%87', '‡').replace('%88', 'ˆ').replace('%89', '‰')\n .replace('%8A', 'Š').replace('%8B', '‹').replace('%8C', 'Œ').replace('%8E', 'Ž')\n .replace('%91', '‘').replace('%92', '’').replace('%93', '“').replace('%94', '”')\n .replace('%95', '•').replace('%96', '–').replace('%97', '—').replace('%98', '˜')\n .replace('%99', '™').replace('%9A', 'š').replace('%9B', '›').replace('%9C', 'œ')\n .replace('%9E', 'ž').replace('%9F', 'Ÿ').replace('%A1', '¡').replace('%A2', '¢')\n .replace('%A3', '£').replace('%A4', '¤').replace('%A5', '¥').replace('%A6', '¦')\n .replace('%A7', '§').replace('%A8', '¨').replace('%A9', '©').replace('%AA', 'ª')\n .replace('%AB', '«').replace('%AC', '¬').replace('%AE', '®').replace('%AF', '¯')\n .replace('%B0', '°').replace('%B1', '±').replace('%B2', '²').replace('%B3', '³')\n .replace('%B4', '´').replace('%B5', 'µ').replace('%B6', '¶').replace('%B7', '·')\n .replace('%B8', '¸').replace('%B9', '¹').replace('%BA', 'º').replace('%BB', '»')\n .replace('%BC', '¼').replace('%BD', '½').replace('%BE', '¾').replace('%BF', '¿')\n .replace('%C0', 'À').replace('%C1', 'Á').replace('%C2', 'Â').replace('%C3', 'Ã')\n .replace('%C4', 'Ä').replace('%C5', 'Å').replace('%C6', 'Æ').replace('%C7', 'Ç')\n .replace('%C8', 'È').replace('%C9', 'É').replace('%CA', 'Ê').replace('%CB', 'Ë')\n .replace('%CC', 'Ì').replace('%CD', 'Í').replace('%CE', 'Î').replace('%CF', 'Ï')\n .replace('%D0', 'Ð').replace('%D1', 'Ñ').replace('%D2', 'Ò').replace('%D3', 'Ó')\n .replace('%D4', 'Ô').replace('%D5', 'Õ').replace('%D6', 'Ö').replace('%D7', '×')\n .replace('%D8', 'Ø').replace('%D9', 'Ù').replace('%DA', 'Ú').replace('%DB', 'Û')\n .replace('%DC', 'Ü').replace('%DD', 'Ý').replace('%DE', 'Þ').replace('%DF', 'ß')\n .replace('%E0', 'à').replace('%E1', 'á').replace('%E2', 'â').replace('%E3', 'ã')\n .replace('%E4', 'ä').replace('%E5', 'å').replace('%E6', 'æ').replace('%E7', 'ç')\n .replace('%E8', 'è').replace('%E9', 'é').replace('%EA', 'ê').replace('%EB', 'ë')\n .replace('%EC', 'ì').replace('%ED', 'í').replace('%EE', 'î').replace('%EF', 'ï')\n .replace('%F0', 'ð').replace('%F1', 'ñ').replace('%F2', 'ò').replace('%F3', 'ó')\n .replace('%F4', 'ô').replace('%F5', 'õ').replace('%F6', 'ö').replace('%F7', '÷')\n .replace('%F8', 'ø').replace('%F9', 'ù').replace('%FA', 'ú').replace('%FB', 'û')\n .replace('%FC', 'ü').replace('%FD', 'ý').replace('%FE', 'þ').replace('%FF', 'ÿ')\n )\n return string\n\n\ndef convert_utf8bytes_to_characters(string):\n string = 
(\n string\n .replace('%E2%82%AC', '€').replace('%E2%80%9A', '‚').replace('%C6%92', 'ƒ')\n .replace('%E2%80%A6', '…').replace('%E2%80%A0', '†').replace('%E2%80%A1', '‡')\n .replace('%E2%80%B0', '‰').replace('%C5%A0', 'Š').replace('%E2%80%B9', '‹')\n .replace('%C5%BD', 'Ž').replace('%E2%80%98', '‘').replace('%E2%80%99', '’')\n .replace('%E2%80%9D', '”').replace('%E2%80%A2', '•').replace('%E2%80%93', '–')\n .replace('%CB%9C', '˜').replace('%E2%84%A2', '™').replace('%C5%A1', 'š')\n .replace('%C5%93', 'œ').replace('%C5%BE', 'ž').replace('%C5%B8', 'Ÿ')\n .replace('%C2%A2', '¢').replace('%C2%A3', '£').replace('%C2%A4', '¤')\n .replace('%C2%A6', '¦').replace('%C2%A7', '§').replace('%C2%A8', '¨')\n .replace('%C2%AA', 'ª').replace('%C2%AB', '«').replace('%C2%AC', '¬')\n .replace('%C2%AE', '®').replace('%C2%AF', '¯').replace('%C2%B0', '°')\n .replace('%C2%B2', '²').replace('%C2%B3', '³').replace('%C2%B4', '´')\n .replace('%C2%B6', '¶').replace('%C2%B7', '·').replace('%C2%B8', '¸')\n .replace('%C2%BA', 'º').replace('%C2%BB', '»').replace('%C2%BC', '¼')\n .replace('%C2%BE', '¾').replace('%C2%BF', '¿').replace('%C3%80', 'À')\n .replace('%C3%82', 'Â').replace('%C3%83', 'Ã').replace('%C3%84', 'Ä')\n .replace('%C3%86', 'Æ').replace('%C3%87', 'Ç').replace('%C3%88', 'È')\n .replace('%C3%8A', 'Ê').replace('%C3%8B', 'Ë').replace('%C3%8C', 'Ì')\n .replace('%C3%8E', 'Î').replace('%C3%8F', 'Ï').replace('%C3%90', 'Ð')\n .replace('%C3%92', 'Ò').replace('%C3%93', 'Ó').replace('%C3%94', 'Ô')\n .replace('%C3%96', 'Ö').replace('%C3%97', '×').replace('%C3%98', 'Ø')\n .replace('%C3%9A', 'Ú').replace('%C3%9B', 'Û').replace('%C3%9C', 'Ü')\n .replace('%C3%9E', 'Þ').replace('%C3%9F', 'ß').replace('%C3%A0', 'à')\n .replace('%C3%A2', 'â').replace('%C3%A3', 'ã').replace('%C3%A4', 'ä')\n .replace('%C3%A6', 'æ').replace('%C3%A7', 'ç').replace('%C3%A8', 'è')\n .replace('%C3%AA', 'ê').replace('%C3%AB', 'ë').replace('%C3%AC', 'ì')\n .replace('%C3%8D', 'Í').replace('%C3%AE', 'î').replace('%C3%AF', 'ï')\n .replace('%C3%B0', 'ð').replace('%C3%B2', 'ò').replace('%C3%B3', 'ó')\n .replace('%C3%B4', 'ô').replace('%C3%B6', 'ö').replace('%C3%B7', '÷')\n .replace('%C3%B8', 'ø').replace('%C3%BA', 'ú').replace('%C3%BB', 'û')\n .replace('%C3%BC', 'ü').replace('%C3%BE', 'þ').replace('%C3%BF', 'ÿ')\n )\n return string\n\n# Function that returns the params of a given Search API URL, to be used in requests calls\ndef get_params(apiSearchURL):\n params = {\n 'baseUrl': '',\n 'params': {}\n }\n fq = []\n\n # Split apiSearchURL to create list of params\n splitSearchURLList = re.split('\\?|&fq|&', apiSearchURL)\n\n # Remove base search API URL from list\n params['baseUrl'] = splitSearchURLList[0]\n splitSearchURLList.pop(0)\n\n # Remove any empty items from the splitSearchURLList\n splitSearchURLList = list(filter(None, splitSearchURLList))\n\n typeParamList = []\n\n for paramValue in splitSearchURLList:\n\n # Add query to params dict\n if paramValue.startswith('q='):\n paramValue = convert_utf8bytes_to_characters(paramValue)\n paramValue = convert_common_html_encoding(paramValue)\n paramValue = paramValue.replace('+', ' ')\n params['params']['q'] = paramValue.replace('q=', '')\n\n # Add non-fq queries to params dict\n if not paramValue.startswith('=') and not paramValue.startswith('q='):\n key = paramValue.split('=')[0]\n if paramValue.split('=')[1] != '':\n params['params'][key] = paramValue.split('=')[1]\n\n # Add values of each type param to typeParamList\n if paramValue.startswith('type'):\n valueString = paramValue.split('=')[1]\n 
typeParamList.append(valueString)\n\n # Add fq queries to fq dict if paramValue.startswith('='):\n if paramValue.startswith('='):\n key = paramValue.replace('=', '').split(':')[0]\n value = paramValue.split(':')[1]\n value = convert_utf8bytes_to_characters(value)\n value = convert_common_html_encoding(value)\n value = value.replace('+', ' ')\n paramString = key + ':' + value\n fq.append(paramString)\n\n # If there are type param values in typeParamList, add as value to new \"type\" param\n if typeParamList:\n params['params']['type'] = typeParamList\n\n # If there are any fq params, add fq keys and values\n if len(fq) > 0:\n params['params']['fq'] = fq\n\n return params\n\n\n# Gets info from Search API about a given dataverse, dataset or file\ndef get_value_row_from_search_api_object(item, installationUrl):\n if item['type'] == 'dataset':\n datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + item['global_id']\n dataverseUrl = installationUrl + '/dataverse/' + item['identifier_of_dataverse']\n newRow = {\n 'dataset_pid': item['global_id'],\n 'version_state': item['versionState'],\n 'dataverse_alias': item['identifier_of_dataverse']\n # 'dataverse_url': dataverseUrl\n }\n\n if item['type'] == 'dataverse':\n newRow = {\n 'dataverse_database_id': item['entity_id'],\n 'dataverse_alias': item['identifier'],\n 'dataverse_url': item['url'],\n 'dataverse_name': item['name']\n }\n\n if item['type'] == 'file':\n if item.get('file_persistent_id'):\n filePersistentId = item['file_persistent_id']\n else:\n filePersistentId = ''\n newRow = {\n 'file_database_id': item['file_id'],\n 'file persistent_id': filePersistentId,\n 'file_name': item['name'],\n 'dataset_pid': item['dataset_persistent_id']\n }\n return newRow\n\n\n# Uses Search API to return dataframe containing info about datasets in a Dataverse installation\n# Write progress and results to the tkinter window\ndef get_object_dataframe_from_search_api(\n url, params, objectType, rootWindow=None, progressText=None, progressLabel=None, apiKey=None):\n\n installationUrl = get_installation_url(url)\n\n if apiKey:\n header = {'X-Dataverse-key': apiKey}\n else:\n header = {}\n\n params['type'] = objectType\n\n # Add param to show database IDs of each item\n params['show_entity_ids'] = 'true'\n\n # Get total count of objects\n params['per_page'] = 1\n\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n total = data['data']['total_count']\n\n misindexedObjectCount = 0\n objectInfoDict = []\n\n # Initialization for paginating through results of Search API calls\n condition = True\n params['start'] = 0\n\n if None not in [rootWindow, progressText, progressLabel]:\n text = 'Looking for datasets...'\n progressText.set(text)\n progressLabel.config(fg='green')\n progressLabel = progressLabel.grid(sticky='w', row=0)\n rootWindow.update_idletasks()\n \n while condition:\n try:\n params['per_page'] = 10\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n\n for item in data['data']['items']:\n newRow = get_value_row_from_search_api_object(item, installationUrl)\n objectInfoDict.append(dict(newRow))\n datasetCount = len(objectInfoDict)\n \n # Update variables to paginate through the search results\n params['start'] = params['start'] + params['per_page']\n\n # If misindexed datasets break the Search API call where per_page=10,\n # try calls where per_page=1 then per_page=10 again\n # (See https://github.com/IQSS/dataverse/issues/4225)\n except Exception:\n try:\n 
params['per_page'] = 1\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n\n for item in data['data']['items']:\n newRow = get_value_row_from_search_api_object(item, installationUrl)\n objectInfoDict.append(dict(newRow))\n\n # Update variables to paginate through the search results\n params['start'] = params['start'] + params['per_page']\n\n # If page fails to load, count a misindexed object and continue to the next page\n except Exception:\n misindexedObjectCount += 1\n params['start'] = params['start'] + params['per_page']\n\n condition = params['start'] < total\n\n objectInfoDF = pd.DataFrame(objectInfoDict)\n\n return objectInfoDF\n\n\n# Uses \"Get Contents\" endpoint to return list of dataverse aliases of all subcollections in a given collection\ndef get_all_subcollection_aliases(collectionUrl, apiKey=''):\n\n parsed = urlparse(collectionUrl)\n installationUrl = parsed.scheme + '://' + parsed.netloc\n alias = parsed.path.split('/')[2]\n\n if apiKey:\n header = {'X-Dataverse-key': apiKey}\n else:\n header = {}\n\n # Get ID of given dataverse alias\n dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, alias)\n\n response = requests.get(\n dataverseInfoEndpoint,\n headers=header)\n data = response.json()\n parentDataverseId = data['data']['id']\n\n # Create list and add ID of given dataverse\n dataverseIds = [parentDataverseId]\n\n # Get each subdataverse in the given dataverse\n for dataverseId in dataverseIds:\n dataverseGetContentsEndpoint = '%s/api/dataverses/%s/contents' % (installationUrl, dataverseId)\n response = requests.get(\n dataverseGetContentsEndpoint,\n headers=header)\n data = response.json()\n\n for item in data['data']:\n if item['type'] == 'dataverse':\n dataverseId = item['id']\n dataverseIds.extend([dataverseId])\n\n # Get the alias for each dataverse ID\n dataverseAliases = []\n for dataverseId in dataverseIds:\n dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, dataverseId)\n response = requests.get(\n dataverseInfoEndpoint,\n headers=header)\n data = response.json()\n alias = data['data']['alias']\n dataverseAliases.append(alias)\n\n return dataverseAliases\n\n\ndef get_canonical_pid(pidOrUrl):\n\n # If entered dataset PID is the dataset page URL, get canonical PID\n if pidOrUrl.startswith('http') and 'persistentId=' in pidOrUrl:\n canonicalPid = pidOrUrl.split('persistentId=')[1]\n canonicalPid = canonicalPid.split('&version')[0]\n canonicalPid = canonicalPid.replace('%3A', ':').replace('%2F', ('/'))\n\n # If entered dataset PID is a DOI URL, get canonical PID\n elif pidOrUrl.startswith('http') and 'doi.' in pidOrUrl:\n canonicalPid = re.sub('http.*org\\/', 'doi:', pidOrUrl)\n\n elif pidOrUrl.startswith('doi:') and '/' in pidOrUrl:\n canonicalPid = pidOrUrl\n\n # If entered dataset PID is a Handle URL, get canonical PID\n elif pidOrUrl.startswith('http') and 'hdl.' 
in pidOrUrl:\n canonicalPid = re.sub('http.*net\\/', 'hdl:', pidOrUrl)\n\n elif pidOrUrl.startswith('hdl:') and '/' in pidOrUrl:\n canonicalPid = pidOrUrl\n\n return canonicalPid\n\n\ndef get_datasets_from_collection_or_search_url(\n url, rootWindow=None, progressLabel=None, progressText=None, textBoxCollectionDatasetPIDs=None, \n apiKey='', ignoreDeaccessionedDatasets=False, subdataverses=False):\n\n\n if textBoxCollectionDatasetPIDs is not None:\n # if None not in [rootWindow, progressLabel, progressText, textBoxCollectionDatasetPIDs]:\n # Hide the textBoxCollectionDatasetPIDs scrollbox if it exists\n forget_widget(textBoxCollectionDatasetPIDs)\n \n # Use the Search API to get dataset info from the given search url or Dataverse collection URL\n searchApiUrl = get_search_api_url(url)\n requestsGetProperties = get_params(searchApiUrl)\n baseUrl = requestsGetProperties['baseUrl']\n params = requestsGetProperties['params']\n datasetInfoDF = get_object_dataframe_from_search_api(\n url=baseUrl, rootWindow=rootWindow, progressLabel=progressLabel, progressText=progressText,\n params=params, objectType='dataset', apiKey=apiKey)\n datasetCount = len(datasetInfoDF.index)\n\n if datasetCount == 0:\n text = 'Datasets found: 0'\n\n if progressText is not None:\n progressText.set(text)\n else:\n print(text)\n \n elif datasetCount > 0:\n\n deaccessionedDatasetCount = 0\n \n # To ignore deaccessioned datasets, remove from the dataframe all datasets where version_state is DEACCESSIONED \n if ignoreDeaccessionedDatasets == True:\n datasetInfoDF = datasetInfoDF[datasetInfoDF['version_state'].str.contains('DEACCESSIONED') == False]\n deaccessionedDatasetCount = datasetCount - len(datasetInfoDF.index)\n\n # Remove version_state column so that I can remove the dataframe's duplicate rows and there's only one row per dataset\n datasetInfoDF = datasetInfoDF.drop('version_state', axis=1)\n\n # Drop duplicate rows, which happens when Search API results lists a dataset's published and draft versions\n datasetInfoDF = datasetInfoDF.drop_duplicates()\n\n # Recount datasets\n uniqueDatasetCount = len(datasetInfoDF.index)\n\n # Check if url is collection url. If so:\n if 'q=' not in url:\n # If the user wants datasets in all subdataverses and the url\n # is the root collection, don't filter the dataframe\n if subdataverses == True and is_root_collection(url) == True:\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the user wants datasets in all subdataverses and the url\n # is not the root collection...\n elif subdataverses == True and is_root_collection(url) == False:\n # Get the aliases of all subdataverses...\n dataverseAliases = get_all_subcollection_aliases(url, apiKey=apiKey)\n\n # Remove any datasets that aren't owned by any of the \n # subdataverses. 
This will exclude linked datasets\n datasetInfoDF = datasetInfoDF[\n datasetInfoDF['dataverse_alias'].isin(dataverseAliases)]\n\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the user wants only datasets in the collection,\n # and not in collections within the collection...\n elif subdataverses == False:\n # Get the alias of the collection (including the alias of the root collection)\n alias = get_alias_from_collection_url(url)\n # Retain only datasets owned by that collection\n datasetInfoDF = datasetInfoDF[datasetInfoDF['dataverse_alias'].isin([alias])]\n\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the url is a search URL, get all datasetPids from datasetInfoDF \n elif 'q=' in url:\n uniqueDatasetCount = len(datasetInfoDF)\n\n if textBoxCollectionDatasetPIDs is not None:\n # Place textbox with list of dataset PIDs and set state to read/write (normal) \n textBoxCollectionDatasetPIDs.grid(sticky='w', row=2, pady=5)\n textBoxCollectionDatasetPIDs.configure(state ='normal')\n \n # Clear whatever's in the textBoxCollectionDatasetPIDs textbox\n textBoxCollectionDatasetPIDs.delete('1.0', END)\n\n # Insert the dataset PIDs into the textBoxCollectionDatasetPIDs scrollbox\n for dfIndex, dfRow in datasetInfoDF.iterrows():\n datasetPid = dfRow['dataset_pid'] + '\\n'\n textBoxCollectionDatasetPIDs.insert('end', datasetPid)\n\n # Create and place result text with uniqueDatasetCount\n if deaccessionedDatasetCount == 0:\n text = 'Datasets found: %s' % (str(uniqueDatasetCount))\n if deaccessionedDatasetCount > 0:\n text = 'Datasets found: %s\\rDeaccessioned datasets ignored: %s' % (str(uniqueDatasetCount), str(deaccessionedDatasetCount))\n\n if progressText is not None:\n progressText.set(text)\n else:\n print(text)\n\n\ndef get_directory_path():\n directoryPath = filedialog.askdirectory()\n return directoryPath\n\n\ndef get_dataset_metadata_export(installationUrl, datasetPid, exportFormat, header={}, apiKey=''):\n if apiKey:\n header['X-Dataverse-key'] = apiKey\n\n if exportFormat == 'dataverse_json':\n getJsonRepresentationOfADatasetEndpoint = '%s/api/datasets/:persistentId/?persistentId=%s' % (installationUrl, datasetPid)\n getJsonRepresentationOfADatasetEndpoint = getJsonRepresentationOfADatasetEndpoint.replace('//api', '/api')\n response = requests.get(\n getJsonRepresentationOfADatasetEndpoint,\n headers=header)\n if response.status_code in (200, 401): # 401 is the unauthorized code. 
Valid API key is needed\n data = response.json()\n else:\n data = 'ERROR'\n\n return data\n\n # For getting metadata from other exports, which are available only for each dataset's latest published\n # versions (whereas Dataverse JSON export is available for unpublished versions)\n if exportFormat != 'dataverse_json':\n datasetMetadataExportEndpoint = '%s/api/datasets/export?exporter=%s&persistentId=%s' % (installationUrl, exportFormat, datasetPid)\n datasetMetadataExportEndpoint = datasetMetadataExportEndpoint.replace('//api', '/api')\n \n response = requests.get(\n datasetMetadataExportEndpoint,\n headers=header)\n\n if response.status_code == 200:\n \n if exportFormat in ('schema.org' , 'OAI_ORE'):\n data = response.json()\n\n if exportFormat in ('ddi' , 'oai_ddi', 'dcterms', 'oai_dc', 'Datacite', 'oai_datacite'):\n string = response.text\n data = BeautifulSoup(string, 'xml').prettify()\n else:\n data = 'ERROR'\n\n return data\n\n\ndef get_metadatablock_data(installationUrl, metadatablockName):\n metadatablocksApiEndpoint = '%s/api/v1/metadatablocks/%s' % (installationUrl, metadatablockName)\n\n response = requests.get(metadatablocksApiEndpoint)\n if response.status_code == 200:\n data = response.json()\n return data\n\n\ndef get_metadatablock_db_field_name_and_title(metadatablockData):\n # Get the database names of all fields\n allFieldsDBNamesList = []\n childFieldsDBNamesList = []\n\n for parentfield in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentfield]\n field = properties['name']\n allFieldsDBNamesList.append(field)\n if 'childFields' in properties:\n for childField in properties['childFields']:\n childFieldsDBNamesList.append(childField)\n\n parentFieldsDBNamesList = list(set(allFieldsDBNamesList) - set(childFieldsDBNamesList))\n\n\n parentFieldDBNameAndTitleDict = {}\n for dbName in parentFieldsDBNamesList:\n dbNameProperties = metadatablockData['data']['fields'][dbName]\n parentFieldDBNameAndTitleDict[dbNameProperties['title']] = dbName\n\n return parentFieldDBNameAndTitleDict#, compoundFieldsDBNamesList\n\n\n# Get list of parent field names and add to a tkinter listbox for user to choose fields\ndef get_parent_field_names(metadatablockData, listbox):\n \n # Clear any names already in the listbox\n listbox.delete(0, END)\n\n allFieldsDBNamesDict = {}\n childFieldsDBNamesList = []\n compoundFieldsDBNamesList = []\n\n for parentField in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentField]\n field = properties['name']\n allFieldsDBNamesDict[field] = properties['title']\n\n if 'childFields' in properties:\n compoundFieldsDBNamesList.append(properties['title'])\n for childField in properties['childFields']:\n childFieldsDBNamesList.append(childField)\n\n options = []\n fieldWithChildFieldList = []\n for parentField in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentField]\n if 'childFields' not in properties and properties['name'] not in childFieldsDBNamesList:\n fieldTitle = properties['title']\n options.append(' ' + fieldTitle)\n elif 'childFields' in properties:\n title = properties['title']\n childFieldDict = properties['childFields']\n childFieldsList = []\n for childField in childFieldDict:\n childFieldsList.append(childField)\n childFieldsString = list_to_string(childFieldsList)\n fieldWithChildField = '%s: %s' % (title, childFieldsString)\n if len(fieldWithChildField) > 50:\n fieldWithChildField = fieldWithChildField[0:50] + '...'\n 
fieldWithChildFieldList.append(fieldWithChildField)\n options.append(' ' + fieldWithChildField)\n\n for option in options:\n listbox.insert('end', option)\n\n\ndef get_listbox_values(listbox):\n selectedFields = []\n selections = listbox.curselection()\n for selection in selections:\n fieldName = listbox.get(selection).strip().split(':')[0]\n selectedFields.append(fieldName)\n return selectedFields\n\n\n# Get the chiild field database names of compound fields or the database name of primitive fields\ndef get_column_names(\n metadatablockData, parentFieldTitle, parentFieldDBNameAndTitleDict):\n \n compoundFieldsDBNamesList = []\n for parentfield in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentfield]\n if 'childFields' in properties:\n compoundFieldsDBNamesList.append(properties['name'])\n\n if parentFieldTitle in parentFieldDBNameAndTitleDict.keys():\n\n chosenDBName = parentFieldDBNameAndTitleDict[parentFieldTitle]\n columns = []\n\n # If the field is a compound field:\n if chosenDBName in compoundFieldsDBNamesList:\n\n # Get the child fields of the compound field\n dbNameProperties = metadatablockData['data']['fields'][chosenDBName]\n for field in dbNameProperties['childFields']:\n columns.append(field)\n\n # # Other the field is a primitive field. Use its names as the column\n else:\n columns.append(chosenDBName)\n\n return columns\n\n\ndef get_metadata_values_lists(\n installationUrl, datasetMetadata, metadatablockName,\n chosenTitleDBName, chosenFields=None, versions='latestVersion'):\n\n if versions == 'allVersions':\n versions = 'datasetVersion'\n rowVariablesList = []\n\n if (datasetMetadata['status'] == 'OK') and\\\n (metadatablockName in datasetMetadata['data'][versions]['metadataBlocks']):\n\n datasetPersistentUrl = datasetMetadata['data']['persistentUrl']\n datasetPid = get_canonical_pid(datasetPersistentUrl)\n datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + datasetPid\n if 'versionNumber' in datasetMetadata['data'][versions]:\n\n majorVersionNumber = datasetMetadata['data'][versions]['versionNumber']\n minorVersionNumber = datasetMetadata['data'][versions]['versionMinorNumber']\n datasetVersionNumber = f'{majorVersionNumber}.{minorVersionNumber}'\n else:\n datasetVersionNumber = 'DRAFT'\n\n for fields in datasetMetadata['data'][versions]['metadataBlocks'][metadatablockName]['fields']:\n if fields['typeName'] == chosenTitleDBName:\n\n # Save the field's typeClass and if it allows multiple values \n typeClass = fields['typeClass']\n allowsMultiple = fields['multiple']\n\n if typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is True:\n for value in fields['value']:\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl,\n datasetVersionNumber, value[:10000].replace('\\r', ' - ')]\n rowVariablesList.append(rowVariables)\n\n elif typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is False:\n value = fields['value'][:10000].replace('\\r', ' - ')\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl, \n datasetVersionNumber, value]\n\n rowVariablesList.append(rowVariables)\n\n elif typeClass == 'compound' and allowsMultiple is True: \n \n index = 0\n condition = True\n\n while condition:\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl, \n datasetVersionNumber]\n\n # Get number of multiples\n total = len(fields['value'])\n\n # For each child field...\n for chosenField in chosenFields:\n # Try getting the value of that child field\n try:\n 
value = fields['value'][index][chosenField]['value'][:10000].replace('\\r', ' - ')\n # Otherwise, save an empty string as the value\n except KeyError:\n value = ''\n # Append value to the rowVariables list to add to the CSV file\n rowVariables.append(value)\n\n rowVariablesList.append(rowVariables)\n\n index += 1\n condition = index < total\n\n elif typeClass == 'compound' and allowsMultiple is False:\n rowVariables = [datasetPid, datasetPersistentUrl, datasetUrl, datasetVersionNumber]\n\n for chosenField in chosenFields:\n try:\n # Get value from compound field\n value = fields['value'][chosenField]['value'][:10000].replace('\\r', ' - ')\n except KeyError:\n value = ''\n rowVariables.append(value)\n rowVariablesList.append(rowVariables)\n\n return rowVariablesList\n\n\n# Delete empty CSV files in a given directory. If file has fewer than 2 rows, delete it.\ndef delete_empty_csv_files(csvDirectory):\n fieldsWithNoMetadata = []\n for file in glob.glob(str(Path(csvDirectory)) + '/' + '*.csv'):\n with open(file, mode='r', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=',')\n data = list(reader)\n rowCount = len(data)\n if rowCount == 1:\n fieldName = Path(file).name.replace('.csv', '')\n fieldsWithNoMetadata.append(fieldName)\n f.close()\n os.remove(file)\n return fieldsWithNoMetadata\n\n\n# Full outer join of CSV files in a given directory\ndef join_metadata_csv_files(csvDirectory):\n\n # Create CSV file in the directory that the user selected\n allMetadataFileName = os.path.join(csvDirectory, 'all_fields.csv')\n\n # Create list of common columns in CSV files to join on\n indexList = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']\n\n # Get list of CSV files in the csvDirectory\n filesList = listdir(csvDirectory)\n if len(filesList) > 1:\n filesDirectoryPathsList = []\n for file in filesList:\n fileDirectoryPath = os.path.join(csvDirectory, file)\n filesDirectoryPathsList.append(fileDirectoryPath)\n\n # Create a dataframe of each CSV file in the 'filesList' list\n dataframes = [pd.read_csv(table, sep=',', na_filter = False) for table in filesDirectoryPathsList]\n\n # For each dataframe, set the indexes (or the common columns across the dataframes to join on)\n for dataframe in dataframes:\n dataframe.set_index(indexList, inplace=True)\n\n # Full outer join all dataframes and save to the 'joined' variable\n joined = reduce(lambda left, right: left.join(right, how='outer'), dataframes)\n\n # Export joined dataframe to a CSV file\n joined.to_csv(allMetadataFileName)\n\n\n# Get the metadata of datasets. 
Function passed to tkinter button\ndef get_dataset_metadata(\n rootWindow, progressLabel, progressText, noMetadataText, noMetadataLabel,\n installationUrl='', datasetPidString='', \n parentFieldTitleList='', directoryPath='', apiKey=''):\n\n # Use metadatablock API endpoint to get metadatablock data\n metadatablockData = get_metadatablock_data(installationUrl, 'citation')\n\n # From metadatablockData, get the database and display names of each parent field\n allFieldsDBNamesDict = get_metadatablock_db_field_name_and_title(metadatablockData)\n\n # Create directory in the directory that the user chose\n currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')\n\n installationRootName = get_root_alias_name(installationUrl)\n\n mainDirectoryName = '%s_dataset_metadata_%s' % (installationRootName, currentTime)\n mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))\n os.mkdir(mainDirectoryPath)\n\n # For each field the user chose:\n for parentFieldTitle in parentFieldTitleList:\n\n # Create CSV file\n\n # Create file name and path\n csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')\n csvFileName = csvFileName + '(citation)'\n mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))\n csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'\n \n # Create header row for the CSV file\n headerRow = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']\n\n childFieldsList = get_column_names(\n metadatablockData, parentFieldTitle, allFieldsDBNamesDict)\n # Add childFields list to header row\n headerRow = headerRow + childFieldsList\n\n # Create CSV file and add headerrow\n with open(csvFilePath, mode='w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(headerRow) \n\n # Change passed datasetPidString to a list. 
Make sure the last newline doesn't mess up the list\n datasetPidList = [x.strip() for x in datasetPidString.splitlines()][:-1]\n\n # Delete any message in the tkinter window about no metadata being found\n # the last time the \"Get metadata\" button was pressed\n noMetadataLabel.grid_forget()\n\n count = 0\n datasetTotalCount = len(datasetPidList)\n\n text = 'Dataset metadata retrieved: 0 of %s' % (datasetTotalCount)\n progressText.set(text)\n progressLabel.grid(sticky='w', row=1, columnspan=2)\n rootWindow.update_idletasks()\n\n for datasetPid in datasetPidList:\n\n # Get the JSON metadata export of the latest version of the dataset\n datasetMetadata = get_dataset_metadata_export(\n installationUrl=installationUrl,\n datasetPid=datasetPid, \n exportFormat='dataverse_json',\n apiKey=apiKey)\n\n if datasetMetadata['status'] == 'OK':\n\n for parentFieldTitle in parentFieldTitleList:\n # Get database name of parentFieldTitle\n dbName = allFieldsDBNamesDict[parentFieldTitle]\n\n valueLists = get_metadata_values_lists(\n installationUrl=installationUrl,\n datasetMetadata=datasetMetadata,\n metadatablockName='citation',\n chosenTitleDBName=dbName, \n chosenFields=get_column_names(\n metadatablockData, parentFieldTitle, allFieldsDBNamesDict)) \n csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')\n csvFileName = csvFileName + '(citation)'\n csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'\n\n for valueList in valueLists:\n\n with open(csvFilePath, mode='a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(valueList) \n\n count += 1\n text = 'Dataset metadata retrieved: %s of %s' % (count, datasetTotalCount)\n progressText.set(text)\n rootWindow.update_idletasks()\n\n \n\n fieldsWithNoMetadata = delete_empty_csv_files(mainDirectoryPath)\n\n if count > 0 and len(fieldsWithNoMetadata) > 0:\n\n # noMetadataLabel.grid(sticky='w', row=2)\n fieldsWithNoMetadataString = list_to_string(fieldsWithNoMetadata)\n fieldsWithNoMetadataString = (\n 'No metadata found for the following fields:\\r' + fieldsWithNoMetadataString)\n noMetadataText.set(fieldsWithNoMetadataString)\n noMetadataLabel.grid(sticky='w', row=2)\n rootWindow.update_idletasks()\n\n # Full outer join all CSV files to create a CSV with all metadata\n join_metadata_csv_files(mainDirectoryPath)\n\n\ndef delete_published_dataset(installationUrl, datasetPid, apiKey):\n destroyDatasetApiEndpointUrl = '%s/api/datasets/:persistentId/destroy/?persistentId=%s' % (installationUrl, datasetPid)\n req = requests.delete(\n destroyDatasetApiEndpointUrl,\n headers={'X-Dataverse-key': apiKey})\n data = req.json()\n\n status = data.get('status')\n\n if status:\n message = data.get('message', '')\n statusMessage = '%s: %s' % (status, message)\n return statusMessage\n\n\ndef delete_published_datasets(\n rootWindow, progressLabel, progressText, notDeletedText, notDeletedLabel,\n installationUrl, datasetPidString, apiKey):\n\n installationUrl = get_installation_url(installationUrl)\n \n # Change passed datasetPidString to a list. 
Make sure the last newline doesn't mess up the list\n datasetPidList = [x.strip() for x in datasetPidString.splitlines()]\n\n # Remove any empty items from the list of dataset PIDs\n datasetPidList = [datasetPid for datasetPid in datasetPidList if datasetPid]\n\n canonicalPidList = []\n for datasetPid in datasetPidList:\n canonicalPid = get_canonical_pid(datasetPid)\n canonicalPidList.append(canonicalPid)\n\n # Delete any message in the tkinter window about datasets not being deleted\n # the last time the \"Delete datasets\" button was pressed\n notDeletedLabel.grid_forget()\n\n deletedDatasetCount = 0\n datasetTotalCount = len(canonicalPidList)\n\n deletedText = 'Datasets deleted: 0 of %s' % (datasetTotalCount)\n progressText.set(deletedText)\n progressLabel.config(fg='green')\n progressLabel.grid(sticky='w', row=1)\n notDeletedLabel.config(fg='white')\n notDeletedLabel.grid(sticky='w', row=2)\n rootWindow.update_idletasks()\n\n destroyedDatasets = []\n notDestroyedDatasets = []\n\n for canonicalPid in canonicalPidList:\n \n statusMessage = delete_published_dataset(installationUrl, canonicalPid, apiKey)\n \n if 'OK' in statusMessage:\n destroyedDatasets.append(canonicalPid)\n deletedDatasetCount += 1\n deletedText = 'Datasets deleted: %s of %s' % (deletedDatasetCount, datasetTotalCount)\n progressText.set(deletedText)\n rootWindow.update_idletasks()\n\n elif 'ERROR' in statusMessage:\n notDeletedLabel.config(fg='red')\n notDestroyedDatasets.append(canonicalPid)\n notDeletedMessage = 'Datasets not deleted: %s' % (len(notDestroyedDatasets))\n notDeletedText.set(notDeletedMessage)\n rootWindow.update_idletasks()\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
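
A minimal sketch of the full-outer-join pattern that the `join_metadata_csv_files` helper in the row above describes (and that motivates the `pandas.read_csv` / `pandas.DataFrame` API entries). This is illustrative only, not part of the dataset record: the directory and output file name are placeholders, and it assumes the per-field CSVs share the four dataset index columns used in that row.

    # Sketch only: full outer join of per-field CSVs on their shared index columns.
    from functools import reduce
    import glob
    import os

    import pandas as pd

    INDEX_COLS = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']

    def outer_join_csvs(csv_directory, output_name='all_fields.csv'):
        paths = glob.glob(os.path.join(csv_directory, '*.csv'))
        if len(paths) < 2:
            return  # nothing to join
        frames = [pd.read_csv(p, sep=',', na_filter=False) for p in paths]
        for frame in frames:
            frame.set_index(INDEX_COLS, inplace=True)
        # A full outer join keeps every dataset/version that appears in any CSV
        joined = reduce(lambda left, right: left.join(right, how='outer'), frames)
        joined.to_csv(os.path.join(csv_directory, output_name))
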
jw03070/Advanced-DeepSleepNet | [
"b58d71971be28c8517f61731b8ee933a5bbf3f0a"
] | [
"slicing.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: BSW\n\"\"\"\n\nimport numpy as np\nimport os\n\ndef slicing(filename,data):\n wc=1\n n1c=1\n n2c=1\n n3c=1\n n4c=1\n t=0\n \n npz = np.load(data)\n x = npz['x']\n y = npz['y']\n \n os.makedirs(\"./data/\"+filename[:-3], exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_Wake\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N1\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N2\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N3\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_Rem\", exist_ok=True)\n \n for i in y:\n if(i==0):\n if(wc<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"0000\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=10 and wc<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"000\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=100 and wc<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"00\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=1000 and wc<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"0\"+str(wc)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+str(wc)+\".npz\",x=x[t,:,0])\n wc+=1\n t+=1\n \n if(i==1):\n if(n1c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"0000\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=10 and n1c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"000\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=100 and n1c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"00\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=1000 and n1c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"0\"+str(n1c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+str(n1c)+\".npz\",x=x[t,:,0])\n n1c+=1\n t+=1\n \n if(i==2):\n if(n2c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"0000\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=10 and n2c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"000\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=100 and n2c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"00\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=1000 and n2c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"0\"+str(n2c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+str(n2c)+\".npz\",x=x[t,:,0])\n n2c+=1\n t+=1\n \n if(i==3):\n if(n3c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"0000\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=10 and n3c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"000\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=100 and n3c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"00\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=1000 and n3c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"0\"+str(n3c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+str(n3c)+\".npz\",x=x[t,:,0])\n n3c+=1\n t+=1\n \n if(i==4):\n if(n4c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"0000\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=10 and n4c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"000\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=100 and n4c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"00\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=1000 and n4c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"0\"+str(n4c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+str(n4c)+\".npz\",x=x[t,:,0])\n n4c+=1\n t+=1\n\ndef search(dirname):\n filenames = os.listdir(dirname)\n for 
filename in filenames:\n full_filename = os.path.join(dirname, filename)\n ext = os.path.splitext(full_filename)[-1]\n if ext == '.npz': \n slicing(filename,full_filename)\n pass\n\n \nif __name__ == '__main__':\n name = os.path.dirname( os.path.abspath( __file__ ) )\n Dataset_dir = \"npzdata\"\n Dataset_dir = name + '\\\\' + Dataset_dir + '\\\\'\n os.makedirs('data', exist_ok=True)\n search(Dataset_dir)\n\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
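
A compact sketch of the "split an .npz by label into per-class folders" idea implemented by `slicing.py` in the row above (the source of the `numpy.load` API entry). It is not part of the dataset record: the class-folder names are copied from that file, the paths are placeholders, and the manual if/elif zero-padding chains are replaced here by a single format specifier, which is a deliberate simplification rather than the original code.

    # Sketch only: save each labelled epoch of an .npz into a per-class folder.
    import os
    import numpy as np

    CLASS_DIRS = {0: '1D_Wake', 1: '1D_N1', 2: '1D_N2', 3: '1D_N3', 4: '1D_Rem'}

    def slice_npz(npz_path, out_root):
        npz = np.load(npz_path)
        x, y = npz['x'], npz['y']
        counters = {label: 0 for label in CLASS_DIRS}
        for t, label in enumerate(y):
            label = int(label)
            counters[label] += 1
            class_dir = os.path.join(out_root, CLASS_DIRS[label])
            os.makedirs(class_dir, exist_ok=True)
            # 00001.npz, 00002.npz, ... -- same naming scheme, one format spec instead of if/elif
            filename = '{:05d}.npz'.format(counters[label])
            np.savez(os.path.join(class_dir, filename), x=x[t, :, 0])
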
aghoshpub/LikelihoodFreeInterference | [
"fd6267104c29e935fa41dc92004dae98ded30626"
] | [
"examples/tutorial_h4l/3b_score.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # MadMiner particle physics tutorial\n# \n# # Part 3b: Training a score estimator\n# \n# Johann Brehmer, Felix Kling, Irina Espejo, and Kyle Cranmer 2018-2019\n\n# In part 3a of this tutorial we will finally train a neural network to estimate likelihood ratios. We assume that you have run part 1 and 2a of this tutorial. If, instead of 2a, you have run part 2b, you just have to load a different filename later.\n\n# ## Preparations\n\n# Make sure you've run the first tutorial before executing this notebook!\n\n# In[1]:\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n# get_ipython().magic(u'matplotlib inline')\n\nfrom madminer.sampling import SampleAugmenter\nfrom madminer import sampling\nfrom madminer.ml import ScoreEstimator\n\n\n# In[2]:\n\n\n# MadMiner output\nlogging.basicConfig(\n format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',\n datefmt='%H:%M',\n level=logging.INFO\n)\n\n# Output of all other modules (e.g. matplotlib)\nfor key in logging.Logger.manager.loggerDict:\n if \"madminer\" not in key:\n logging.getLogger(key).setLevel(logging.WARNING)\n\n\n# ## 1. Make (unweighted) training and test samples with augmented data\n\n# At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators:\n# \n# First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear.\n# \n# Second, `SampleAugmenter` calculates all the augmented data (\"gold\") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework.\n\n# In[3]:\n\n\nsampler = SampleAugmenter('data/delphes_data_shuffled.h5')\n\n\n# The relevant `SampleAugmenter` function for local score estimators is `extract_samples_train_local()`. As in part 3a of the tutorial, for the argument `theta` you can use the helper functions `sampling.benchmark()`, `sampling.benchmarks()`, `sampling.morphing_point()`, `sampling.morphing_points()`, and `sampling.random_morphing_points()`.\n\n# In[4]:\n\n\nx, theta, t_xz, _ = sampler.sample_train_local(\n theta=sampling.benchmark('sm'),\n #n_samples=4 * 10**5, #100000,\n n_samples= 2*10**6, # fewer than others\n folder='./data/samples',\n filename='train_score'\n)\n\n\n# We can use the same data as in part 3a, so you only have to execute this if you haven't gone through tutorial 3a:\n\n# In[5]:\n\n\n# _ = sampler.sample_test(\n# theta=sampling.benchmark('sm'),\n# n_samples=1*10**6,\n# folder='./data/samples',\n# filename='test'\n# )\n\n\n# ## 2. Train score estimator\n\n# It's now time to build a neural network. Only this time, instead of the likelihood ratio itself, we will estimate the gradient of the log likelihood with respect to the theory parameters -- the score. 
To be precise, the output of the neural network is an estimate of the score at some reference parameter point, for instance the Standard Model. A neural network that estimates this \"local\" score can be used to calculate the Fisher information at that point. The estimated score can also be used as a machine learning version of Optimal Observables, and likelihoods can be estimated based on density estimation in the estimated score space. This method for likelihood ratio estimation is called SALLY, and there is a closely related version called SALLINO. Both are explained in [\"Constraining Effective Field Theories With Machine Learning\"](https://arxiv.org/abs/1805.00013) and [\"A Guide to Constraining Effective Field Theories With Machine Learning\"](https://arxiv.org/abs/1805.00020).\n# \n# The central object for this is the `madminer.ml.ScoreEstimator` class:\n\n# In[6]:\n\n\nestimator = ScoreEstimator(n_hidden=(100,))\n\n\n# In[ ]:\n\n\nestimator.train(\n method='sally',\n x='data/samples/x_train_score.npy',\n t_xz='data/samples/t_xz_train_score.npy',\n)\n\nestimator.save('models/sally')\n\n\n# # ## 3. Evaluate score estimator\n\n# # Let's evaluate the SM score on the test data\n\n# # In[ ]:\n\n\n# estimator = ScoreEstimator(n_hidden=(50,))\n\n\n# # In[ ]:\n\n\n# estimator.load('models/sally')\n\n# t_hat = estimator.evaluate_score(\n# x='data/samples/x_test.npy'\n# )\n\n\n# # Let's have a look at the estimated score and how it is related to the observables:\n\n# # In[ ]:\n\n\n# x = np.load('data/samples/x_test.npy')\n\n# fig = plt.figure(figsize=(10,4))\n\n# #for i in range(2):\n# for i in range(1):\n \n# ax = plt.subplot(1,2,i+1)\n\n# sc = plt.scatter(x[:,0], x[:,1], c=t_hat[:,i], s=25., cmap='viridis', vmin=-1., vmax=1.)\n# cbar = plt.colorbar(sc)\n\n# cbar.set_label(r'$\\hat{t}_' + str(i) + r'(x | \\theta_{ref})$')\n# plt.xlabel(r'$p_{T,j1}$ [GeV]')\n# plt.ylabel(r'$\\Delta \\phi_{jj}$ Sally')\n# plt.xlim(10.,300.)\n# plt.ylim(-3.15,3.15)\n \n# plt.tight_layout()\n# #plt.show()\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
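
A tiny sketch of the headless-plotting pattern behind the `matplotlib.use` API entry for the MadMiner tutorial script in the row above. Not part of the dataset record: the plotted values and output file name are placeholders. The point of the pattern is that the backend must be selected before `pyplot` is imported, which is why the tutorial calls `matplotlib.use('Agg')` at the top of the script.

    # Sketch only: select a non-interactive backend, then plot and save to a file.
    import matplotlib
    matplotlib.use('Agg')              # safe on machines without a display
    from matplotlib import pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    fig.savefig('score_example.png')   # placeholder output file name
    plt.close(fig)
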
jqueguiner/training_results_v1.0 | [
"8200377f425ae24b6ed6c2816b9273aab0996d43",
"8200377f425ae24b6ed6c2816b9273aab0996d43",
"8200377f425ae24b6ed6c2816b9273aab0996d43",
"8200377f425ae24b6ed6c2816b9273aab0996d43",
"8200377f425ae24b6ed6c2816b9273aab0996d43"
] | [
"Graphcore/benchmarks/bert/implementations/popart/pack_pretraining_data.py",
"Supermicro/benchmarks/unet3d/implementations/mxnet_j2/runtime/distributed.py",
"Habana/benchmarks/bert/implementations/bert-tf-sys-420gh-tngr/TensorFlow/common/tb_utils.py",
"Supermicro/benchmarks/ssd/implementations/mxnet_j2/ssd/pretrain.py",
"nettrix/benchmarks/rnnt/implementations/implementation_closed/rnnt/distributed_fused_lamb.py"
] | [
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport time\nimport glob\nimport struct\nimport random\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom scipy import optimize\nfrom itertools import repeat, chain\nfrom functools import lru_cache, reduce\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\nfrom concurrent.futures import ProcessPoolExecutor\nfrom bert_data.pretraining_dataset import CachedDataLoader, data_file_format\n\n\n@lru_cache(maxsize=None)\ndef packing_strategies(start, previous, target, depth):\n gap = target - start\n\n # The collection of possible strategies given the\n # starting sum, the target sum, and the available depth\n # strategy search is limited to increments greater or equal to previous\n strategies = []\n # Complete the packing with exactly 1 number\n if depth == 1:\n if gap >= previous:\n strategies.append([gap])\n\n # Complete the sample in \"depth\" steps, recursively\n else:\n for new in range(previous, gap + 1):\n\n new_gap = target - start - new\n if new_gap == 0:\n strategies.append([new])\n else:\n options = packing_strategies(start + new, new, target, depth - 1)\n\n for option in options:\n if len(option) > 0:\n strategies.append([new] + option)\n return strategies\n\n\ndef get_packing_recipe(sequence_lengths, max_sequence_length, max_sequences_per_pack=3):\n # Histogram of sequence lengths\n histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))\n print(\"Begin packing pass\".center(80, \"_\"))\n print(f\"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}\")\n\n # Make sure all strategies are recipes to pack to the correct sequence length\n strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)\n for strategy in strategy_set:\n assert(sum(strategy) == max_sequence_length)\n num_strategies = len(strategy_set)\n print(f\"Found {num_strategies} unique packing strategies.\")\n\n # Solve the packing equation A@mixture = histogram\n A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)\n for i in range(num_strategies):\n strategy = strategy_set[i]\n for seq_len in strategy:\n A[seq_len - 1, i] += 1\n\n # short sequences are inexpensive to add, so should have low residual weights\n # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)\n # in practice the difference is negligible, but this converges faster\n padding_cutoff = 8\n w0 = np.ones([max_sequence_length])\n # w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight\n w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)\n w0 = np.sqrt(w0)\n\n # Starting values for the padding and the mixture\n padding = np.zeros([max_sequence_length], dtype=np.int32)\n mixture = np.zeros([num_strategies], dtype=np.int32)\n b = histogram + padding\n\n # Pack sequences as best as possible, then increase padding accordingly 
and repeat\n for i in range(0, 20):\n print(f\"\\nIteration: {i}: sequences still to pack: \", b.sum())\n start = time.time()\n partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)\n print(f\"Solving nnls took {time.time() - start:3.2f} seconds.\")\n print(f\"Residual norm: {rnorm:3.5e}\")\n\n # Update mixture (round the floating point solution to integers)\n partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))\n\n # If partial mixture is empty (due to rounding) we follow the gradient\n # this usually happens when the number of examples is small i.e. ~100\n if partial_mixture.max() == 0:\n grad = A.T @ (b * np.arange(1, max_sequence_length + 1))\n k = int(b.sum() // 2) + 1\n topk = np.argsort(-grad)[:k]\n partial_mixture[topk] += 1\n\n # Update mixture\n mixture = mixture + partial_mixture\n\n # Compute the residuals\n residual = b - A @ partial_mixture\n print(f\"Max residual: {abs(residual).max()}\")\n print(f\"Residual on first 8 categories: {np.around(residual[:8], 4)}\")\n print(f\"Residual on last 8 categories: {np.around(residual[-8:], 4)}\")\n\n # Add padding based on deficit (negative residual)\n partial_padding = np.where(residual < 0, -residual, 0)\n print(f\"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.\")\n padding = padding + partial_padding\n\n # Update the rhs vector (remaining surplus sequences)\n b = histogram + padding - A @ mixture\n assert np.all(b >= 0), b\n\n # Done iterating\n if b.sum() < 100:\n break\n\n # Make sure there is no remainder\n unpacked_seqlen = np.arange(1, args.max_sequence_length + 1)[b > 0]\n # Update the mixture to also covered the unpacked sequences\n for l in unpacked_seqlen:\n # Get the depth 1 strategy\n strategy = sorted([l, args.max_sequence_length - l])\n strategy_index = strategy_set.index(strategy)\n mixture[strategy_index] += b[l-1]\n b = histogram - A @ mixture\n padding = np.where(b < 0, -b, 0)\n b = histogram + padding - A @ mixture\n assert b.sum() == 0\n\n # Analyze result\n print(\"Done solving for packing order\".center(80, \"_\"))\n num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()\n num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()\n print(f\"Number of sequences dropped: {b.sum()}\")\n print(f\"Number of strategies utilized: {np.count_nonzero(mixture)}\")\n new_number_of_samples = int(mixture.sum())\n compression = 1 - new_number_of_samples / len(sequence_lengths)\n print(f\"New number of samples: {new_number_of_samples:3.2f}, original {len(sequence_lengths)}. A compression ratio of {compression:3.3f}\")\n print(f\"The expected speed-up from packing: {1/(1-compression):3.3f}\")\n upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))\n print(f\"Theoretical upper bound on speed-up: {upper_bound:3.3f}\")\n avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples\n print(f\"Average sequences/sample {avg_sequences_per_sample:3.5f}\")\n print(f\"Added {num_padding_tokens:3.2e} padding tokens. 
Original dataset used {num_padding_tokens_original:3.2e} padding tokens\")\n efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)\n print(f\"Packing efficiency (fraction of real tokens): {efficiency:3.4f}\")\n\n print(f\"Top 8 strategies\")\n topK = np.argsort(-mixture)[:8]\n for i in topK:\n print(f\"Strategy {strategy_set[i]} which is used {int(mixture[i])} times\")\n print(\"\".center(80, \"_\"))\n\n # Figure out the slicing that each strategy should use\n slicing = np.zeros_like(A)\n slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]\n slicing = slicing.T\n\n mixture = mixture.astype(np.int64)\n return strategy_set, mixture, padding, slicing\n\n\ndef slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):\n # Divide the work, firstly between the strategies and then into chunks of 50k\n slices = []\n strategies = []\n part_idx = []\n for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):\n if repeat_count == 0:\n continue\n # Slice out the sequences allocated to this strategy in increments of 50k\n num_parts = repeat_count // 50000\n num_parts = num_parts + int(repeat_count != num_parts * 50000)\n subcounts = (min(50000, repeat_count - 50000 * (i - 1)) for i in range(1, num_parts + 1))\n for part_id, part_count in enumerate(subcounts):\n examples = []\n for k, seq_len in enumerate(strategy):\n slice_start = int(slice_offsets[seq_len - 1])\n slice_end = slice_start + int(part_count)\n slice_offsets[seq_len - 1] = slice_end\n examples.append(examples_by_length[seq_len][slice_start:slice_end])\n\n slices.append(examples)\n strategies.append(strategy)\n part_idx.append(part_id)\n\n return slices, strategies, part_idx\n\n\ndef parallel_pack_according_to_strategy(args, part_idx, strategy, examples):\n # Pack the sequences according to the strategy and write them to disk\n base_filename = os.path.join(args.output_dir, \"strategy_\" + \"_\".join(map(str, strategy)))\n filename = base_filename + f\"_part_{part_idx}\"\n lines = []\n for i, multi_sequence in enumerate(zip(*examples)):\n lines.append(create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,\n args.max_sequence_length, args.max_sequences_per_pack))\n # Write to file\n with open(filename, \"wb\") as f:\n f.writelines(lines)\n\n\ndef create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):\n # SEQ\n packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)\n packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)\n packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)\n packed_positions = np.zeros(max_sequence_length, dtype=np.int32)\n\n # MLM\n # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens\n # in case that percentege is rounded up for all sequences in the pack, need to add an extra token for\n # each sequence in the pack\n packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n\n # NSP\n packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)\n packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)\n packed_next_sentence_weights = 
np.zeros(max_sequences_per_pack, dtype=np.int32)\n\n offset = 0\n mlm_offset = 0\n sequence_index = 1 # used in the input mask\n for sequence in multi_sequence:\n # Padding sequences are donoted with None\n if sequence is not None:\n input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence\n seq_len = input_mask.sum()\n\n # SEQ\n packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]\n packed_input_mask[offset:offset + seq_len] = sequence_index\n packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]\n packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)\n\n # MLM\n mlm_len = int(masked_lm_weights.sum())\n assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, \"Too many LM predictions per sequences\"\n max_mlm = mlm_offset + mlm_len\n packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]\n packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]\n packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index\n\n # NSP\n packed_next_sentence_positions[sequence_index - 1] = offset\n packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels\n packed_next_sentence_weights[sequence_index - 1] = 1\n\n # Update offsets\n sequence_index += 1\n offset += seq_len\n mlm_offset = max_mlm\n\n # Pack into binary format and write it\n line = reduce(lambda accl, i: accl + struct.pack('<I', i),\n chain(packed_input_ids,\n packed_input_mask,\n packed_segment_ids,\n packed_positions,\n packed_masked_lm_positions,\n packed_masked_lm_ids,\n packed_masked_lm_weights,\n packed_next_sentence_positions,\n packed_next_sentence_labels,\n packed_next_sentence_weights), b'')\n return line\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-glob\", help=\"A glob expression for the input files to read in and pack\", required=True, type=str)\n parser.add_argument(\"--output-dir\", help=\"The destination folder for the output files\", required=True)\n parser.add_argument(\"--random-seed\", help=\"For shuffling the data\", default=12345)\n parser.add_argument(\"--max-files\", help=\"At most how many files to process (limited by RAM)\", default=100)\n parser.add_argument(\"--duplication-factor\", help=\"Same as the one passed to create input data\", default=1, type=int)\n parser.add_argument(\"--max-sequence-length\", help=\"The maximum number of tokens in an example\", default=512, type=int)\n parser.add_argument(\"--max-predictions-per-sequence\", help=\"The maximum number of masked tokens in an un-packed example\", default=76, type=int)\n parser.add_argument(\"--max-sequences-per-pack\", help=\"The maximum number of sequences per packed example.\", choices=[2, 3], default=3, type=int)\n args = parser.parse_args()\n random.seed(args.random_seed)\n\n # Input files\n input_files = glob.glob(args.input_glob)\n if len(input_files) > args.max_files:\n input_files = np.random.choice(input_files, size=args.max_files, replace=False)\n assert len(input_files) > 0\n\n # Load un-packed dataset\n sample_sizes = data_file_format(args.max_sequence_length, args.max_predictions_per_sequence)\n\n load_size = 1 if len(input_files) == 1 else 1024\n dataset = CachedDataLoader(input_files, sample_sizes, duplication_factor=args.duplication_factor, batch_size=load_size)\n\n # Put examples into bins depending on their sequence lengths and extract the sequence length\n # as an array\n sequence_lengths = []\n 
examples_by_length = defaultdict(list)\n print(\"Looping through dataset to collect sequence length information...\")\n for data in dataset:\n input_mask = data[1]\n batch_of_lengths = input_mask.sum(1).tolist()\n for i, length in enumerate(batch_of_lengths):\n examples_by_length[length].append([data[k][i] for k in range(len(data))])\n sequence_lengths.extend(batch_of_lengths)\n sequence_lengths = np.array(sequence_lengths)\n\n # Pass the array of sequence lengths to the packing algorithm\n strategy_set, mixture, padding, slicing = get_packing_recipe(sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)\n\n # Add the calculated padding\n for i in range(1, args.max_sequence_length + 1):\n examples_by_length[i].extend([None] * int(padding[i - 1]))\n\n # Shuffle the data\n for key in examples_by_length:\n random.shuffle(examples_by_length[key])\n\n # Pack and store the data\n print(f\"\\nPacking and writing packed dataset to {args.output_dir}.\")\n\n # Slice the data into chunks of max 50k packed examples\n example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)\n print(f\"Splitting work into {len(part_idx)} parts.\")\n\n start = time.time()\n with ProcessPoolExecutor(16) as executor:\n work = repeat(args), part_idx, strategies, example_slices\n for partial_result in executor.map(parallel_pack_according_to_strategy, *work):\n pass\n print(f\"\\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.\")\n",
"import os\nfrom time import time\n\nimport numpy as np\nfrom mpi4py import MPI\nfrom mxnet import nd\n\ndef distribute_mpiranks(local_rank, local_size, size, nodes_for_eval, gpu_per_node):\n # assign top \"nodes_for_eval\" nodes for evaluation. Rest of the nodes go to training\n total_ranks = list(range(size))\n train_ranks = total_ranks[:size - nodes_for_eval * gpu_per_node]\n eval_ranks = train_ranks\n transfer_ranks = []\n if nodes_for_eval:\n eval_ranks = total_ranks[size - nodes_for_eval * gpu_per_node:]\n # print(f\"Training ranks {train_ranks} \\nEval ranks {eval_ranks}\")\n #transfer_ranks = [train_ranks[0], eval_ranks[0]]\n # Form multiple transfer_rank groups, by local_rank\n transfer_ranks = [train_ranks[local_rank], *[x for x in eval_ranks if x % local_size == local_rank]]\n assert train_ranks, \"Training ranks list is empty\"\n assert eval_ranks, \"Evaluation ranks list is empty\"\n # print(f\"TRANSFER RANKS {transfer_ranks}\")\n return train_ranks, eval_ranks, transfer_ranks\n\n\ndef get_group_comm(comm, ranks):\n # Create a grouped mpi communicator with the ranks\n # assert len(ranks) > 0, \"cannot create group as ranks is empty\"\n xcomm = None\n if ranks:\n xgroup = comm.group.Incl(ranks)\n xcomm = comm.Create_group(xgroup)\n\n return xcomm\n\n\ndef sync_training_and_evaluation(flags, global_comm, eval_comm, transfer_comm,\n rank, model, train_ranks, eval_ranks, transfer_ranks,\n cycle, stop_training, ctx):\n\n # Let training threads know if evaluation has reached target\n # All reduce also acts as barrier to make sure parameter save is done\n local_stop_training = np.array([stop_training], dtype=np.int32)\n global_stop_training = np.zeros(1, dtype=np.int32)\n global_comm.Allreduce(local_stop_training, global_stop_training, MPI.SUM)\n\n start = time()\n filename = os.path.join(flags.network_dir, f'model_{cycle}.params')\n if flags.use_mpi_bcast:\n if rank in transfer_ranks:\n broadcast_model(model, transfer_comm, rank, eval_ranks)\n elif flags.use_mpi_transfer:\n if rank == train_ranks[0] or rank in eval_ranks:\n transfer_model(model, global_comm, eval_comm, rank, train_ranks[0], eval_ranks[0], eval_ranks)\n else:\n if rank == train_ranks[0]:\n model.save_parameters(filename)\n\n # Evaluation found end of training\n if global_stop_training != 0:\n stop_training = True\n else:\n if not flags.use_mpi_bcast and not flags.use_mpi_transfer:\n # load model for evaluation\n if rank in eval_ranks:\n if os.path.exists(filename):\n model.load_parameters(filename, ctx=ctx)\n else:\n raise Exception(f\"rank {rank}: model does not exist for {cycle}\")\n\n if rank == train_ranks[0]:\n print(f\"rank {rank}: cycle = {cycle}: time to send the model = {time() - start}\")\n if rank == eval_ranks[0]:\n print(f\"rank {rank}: cycle = {cycle}: time to receive the model = {time() - start}\")\n\n return stop_training, model\n\n\ndef broadcast_model(model, comm, rank, eval_ranks):\n params = model._collect_params_with_prefix()\n\n irequests = []\n result = {}\n for name, p in sorted(params.items()):\n if \"dummy\" in name:\n continue\n result[name] = p.data().asnumpy()\n irequests.append(comm.Ibcast(result[name], root=0))\n\n MPI.Request.waitall(irequests)\n\n if rank in eval_ranks:\n for name, p in sorted(params.items()):\n if \"dummy\" in name:\n continue\n params[name].set_data(result[name])\n\n\ndef transfer_model(model, global_comm, eval_comm, rank, source_rank, target_rank, eval_ranks):\n params = model._collect_params_with_prefix()\n\n irequests = []\n result = {}\n for idx, (name, p) in 
enumerate(sorted(params.items())):\n if \"dummy\" in name:\n continue\n data = p.data().asnumpy()\n if rank == source_rank:\n irequests.append(global_comm.Isend(data, dest=target_rank, tag=idx))\n elif rank == target_rank:\n result[name] = data\n irequests.append(global_comm.Irecv(result[name], source=source_rank, tag=idx))\n else:\n result[name] = data\n\n if rank == source_rank:\n MPI.Request.waitall(irequests)\n\n elif rank in eval_ranks:\n if rank == target_rank:\n MPI.Request.waitall(irequests)\n eval_comm.Barrier()\n for idx, (name, p) in enumerate(sorted(params.items())):\n if \"dummy\" in name or name not in result.keys():\n continue\n # data = p.data().asnumpy()\n eval_comm.Bcast(result[name], root=0)\n # params[name]._load_init(nd.array(result[name]), ctx, cast_dtype=False, dtype_source='current')\n params[name].set_data(result[name])\n",
"import os\nimport time\nimport tensorflow as tf\nfrom copy import deepcopy\nfrom tensorboard.plugins.hparams import api as hp\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.ops import summary_ops_v2\nfrom tensorflow.python.summary import summary as tf_summary\nfrom tensorflow.python.training.summary_io import SummaryWriterCache\nfrom tensorflow.compat.v1.keras.callbacks import TensorBoard, Callback\n\n\ndef _remove_prefix(s, prefix):\n if s.startswith(prefix):\n s = s[len(prefix):]\n return s\n\n\ndef _parse_precision():\n flag = os.environ.get('TF_ENABLE_BF16_CONVERSION', '0').lower()\n try:\n value = int(flag)\n except:\n value = -1\n\n if flag == 'false' or value == 0:\n return 'fp32'\n elif flag == 'true' or value == 1:\n return 'bf16'\n return flag\n\n\ndef _set_precision_if_missing(hparams: dict):\n if 'precision' not in hparams:\n hparams['precision'] = _parse_precision()\n return hparams\n\n\ndef _copy_and_clean_hparams(hparams: dict):\n hparams_ = dict()\n for name, value in hparams.items():\n if isinstance(value, (str, bool, int, float)):\n hparams_[name] = value\n continue\n\n try:\n hparams_[name] = str(value)\n tf.compat.v1.logging.info(\n f'Type of parameter \"{name}\" is not one of (bool, int, float, str). '\n 'It will be saved as a string.')\n except:\n tf.compat.v1.logging.info(\n f'Conversion of parameter \"{name}\" to string failed. '\n 'Parameter will not be saved.')\n\n return hparams_\n\n\ndef write_hparams_v1(writer, hparams: dict):\n hparams = _copy_and_clean_hparams(hparams)\n hparams = _set_precision_if_missing(hparams)\n\n # We create Session here, because in case of older topologies\n # that run in graph mode the FileWriter needs it.\n with tf.compat.v1.Session():\n if isinstance(writer, str):\n writer = SummaryWriterCache.get(writer)\n summary = hp.hparams_pb(hparams).SerializeToString()\n writer.add_summary(summary)\n\n\ndef write_hparams_v2(writer, hparams: dict):\n hparams = _copy_and_clean_hparams(hparams)\n hparams = _set_precision_if_missing(hparams)\n\n with writer.as_default():\n hp.hparams(hparams)\n\n\nclass ExamplesPerSecondEstimatorHook(tf.compat.v1.train.StepCounterHook):\n \"\"\"Calculate and report global_step/sec and examples/sec during runtime.\"\"\"\n # Copy-pasted from tensorflow_estimator/python/estimator/tpu/tpu_estimator.py\n\n def __init__(self,\n batch_size=None,\n every_n_steps=1,\n every_n_secs=None,\n output_dir=None,\n summary_writer=None,\n extra_metrics=None,\n verbose=False):\n super().__init__(\n every_n_steps=every_n_steps,\n every_n_secs=every_n_secs,\n output_dir=output_dir,\n summary_writer=summary_writer)\n self._extra_metrics = extra_metrics or {}\n self._verbose = verbose\n if batch_size is not None:\n self._extra_metrics['examples/sec'] = batch_size\n\n def _add_summary(self, tag, value, step):\n Summary = tf.compat.v1.Summary\n global_step_summary = Summary(value=[\n Summary.Value(tag=tag, simple_value=value)\n ])\n self._summary_writer.add_summary(global_step_summary, step)\n if self._verbose:\n tf.compat.v1.logging.info(f'{tag}: {value}')\n\n def _log_and_record(self, elapsed_steps, elapsed_time, global_step):\n global_step_per_sec = elapsed_steps / elapsed_time\n if self._summary_writer is not None:\n self._add_summary('global_step/sec',\n global_step_per_sec, global_step)\n for name, factor in self._extra_metrics.items():\n value = factor * global_step_per_sec\n self._add_summary(name, value, global_step)\n\n\nclass 
ExamplesPerSecondKerasHook(Callback):\n def __init__(self,\n every_n_steps=1,\n every_n_secs=None,\n output_dir=None,\n summary_writer=None):\n self.writer = summary_writer or SummaryWriterCache.get(output_dir)\n self._timer = tf.compat.v1.train.SecondOrStepTimer(\n every_n_secs, every_n_steps)\n self._total_examples = 0\n self._should_trigger = True\n\n def on_train_begin(self, logs=None):\n self._timer.reset()\n\n def on_train_batch_begin(self, batch, logs=None):\n self._should_trigger = self._timer.should_trigger_for_step(\n logs.get('batch', 0))\n\n def on_train_batch_end(self, batch, logs=None):\n step = logs.get('batch', 0)\n self._total_examples += logs.get('size', 0)\n if self._should_trigger:\n elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(\n step)\n if elapsed_time is not None:\n self._log_and_record(\n elapsed_steps, elapsed_time, step, self._total_examples)\n self._total_examples = 0\n\n def _log_and_record(self, elapsed_steps, elapsed_time,\n global_step, total_examples=None):\n Summary = tf.compat.v1.Summary\n global_step_per_sec = elapsed_steps / elapsed_time\n if self.writer is not None:\n global_step_summary = Summary(value=[\n Summary.Value(\n tag='global_step/sec', simple_value=global_step_per_sec)\n ])\n self.writer.add_summary(global_step_summary, global_step)\n if total_examples is not None:\n examples_per_sec = total_examples / elapsed_time\n example_summary = Summary(value=[\n Summary.Value(tag='examples/sec',\n simple_value=examples_per_sec)\n ])\n self.writer.add_summary(example_summary, global_step)\n\n\nclass TBSummary(object):\n \"\"\"\n Creates a proxy for FileWriter for TensorBoard.\n\n :param log_dir: - path where experiment is running (usually the same as\n model_dir in Estimator)\n \"\"\"\n\n def __init__(self, log_dir: str):\n super().__init__()\n self._log_dir = log_dir\n self._session = None\n\n def __enter__(self):\n self._session = tf.compat.v1.Session()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self._session:\n self._session.close()\n self._session = None\n\n def add_scalar(self, tag, value, global_step=None):\n with self._session:\n writer = SummaryWriterCache.get(self._log_dir)\n summary = tf.compat.v1.Summary(\n value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])\n event = tf.compat.v1.Event(summary=summary)\n event.wall_time = time.time()\n event.step = global_step\n writer.add_event(event)\n\n\nclass TensorBoardWithHParamsV1(TensorBoard):\n \"\"\"\n Adds TensorBoard visualization to training process.\n\n Writes training tfevent file into default log directory, but\n stores evaluation in log_dir/eval subdirectory.\n \"\"\"\n\n def __init__(self, hparams, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.hparams = hparams\n self._train_writer = None\n self._eval_writer = None\n\n def _switch_writer(self, mode):\n self.writer = self._train_writer if mode == 'train' else self._eval_writer\n\n def _init_writer(self, model):\n \"\"\"Sets file writer.\"\"\"\n if context.executing_eagerly():\n raise NotImplementedError('hook does not support eager execution')\n\n self._train_writer = SummaryWriterCache.get(self.log_dir)\n self._eval_writer = SummaryWriterCache.get(\n os.path.join(self.log_dir, 'eval'))\n self._switch_writer('train')\n\n write_hparams_v1(self.writer, self.hparams)\n\n def _write_custom_summaries(self, step, logs=None):\n \"\"\"\n This methods works on the assumption that metrics containing `val`\n in name are related to validation (that's the default in Keras).\n 
\"\"\"\n\n logs = logs or {}\n train_logs = {}\n eval_logs = {}\n\n for name, value in logs.items():\n if 'val' in name:\n if name.startswith('batch_val_'):\n name = 'batch_' + _remove_prefix(name, 'batch_val_')\n elif name.startswith('epoch_val_'):\n name = _remove_prefix(name, 'epoch_val_')\n eval_logs[name] = value\n else:\n if name.startswith('batch_'):\n name = _remove_prefix(name, 'batch_')\n train_logs[name] = value\n\n self._switch_writer('eval')\n super()._write_custom_summaries(step, eval_logs)\n self._switch_writer('train')\n super()._write_custom_summaries(step, train_logs)\n",
"import pickle\nimport numpy as np\nimport re\n\nSSD_LAYERS = ['expand_trans_conv', 'expand_conv'] # Layers that are not part of the backbone\nBATCHNORM_PARAMS = ['beta', 'gamma', 'running_mean', 'running_var'] \nBATCHNORM2_PARAMS = ['beta2', 'gamma2', 'running_mean2', 'running_var2'] \n\ndef pretrain_backbone(param_dict,\n picklefile_name,\n layout='NCHW',\n backbone_prefix='ssd0_resnetmlperf0_'):\n with open(picklefile_name, 'rb') as picklefile:\n pretrained_dict = pickle.load(picklefile)\n\n for param_name in param_dict.keys():\n # Skip layers not part of the backbone\n if any(n in param_name for n in SSD_LAYERS):\n continue\n\n # convert parameter name to match the names in the pretrained file\n pretrained_param_name = param_name\n # Remove backbone_prefix from name\n pretrained_param_name = pretrained_param_name.replace(backbone_prefix, '')\n # 'batchnormaddrelu' uses 'moving' rather than 'running' for mean/var\n pretrained_param_name = pretrained_param_name.replace('moving', 'running')\n # for fused conv2d+bn+relu, massage the name a bit\n if \"weight\" in pretrained_param_name:\n pretrained_param_name = pretrained_param_name.replace('convbn', 'conv')\n if \"weight2\" in pretrained_param_name:\n # turn stage2_conv1_weight2 into stage2_conv2_weight\n numbers = re.findall(\"\\d+\", pretrained_param_name)\n conv_num = int(numbers[1])+1\n pretrained_param_name = \"stage{}_conv{}_weight\".format(numbers[0], conv_num)\n for param in BATCHNORM_PARAMS:\n if param in pretrained_param_name:\n pretrained_param_name = pretrained_param_name.replace('convbn', 'batchnorm')\n for param in BATCHNORM2_PARAMS:\n if param in pretrained_param_name:\n # turn stage2_batchnorm1_gamma2 to stage2_batchnorm2_gamma\n numbers = re.findall(\"\\d+\", pretrained_param_name)\n batchnorm_num = int(numbers[1])+1\n pretrained_param_name = \"stage{}_batchnorm{}_{}\".format(numbers[0], batchnorm_num, param[:-1])\n\n assert pretrained_param_name in pretrained_dict, \\\n f'Can\\'t find parameter {pretrained_param_name} in the picklefile'\n param_type = type(pretrained_dict[pretrained_param_name])\n assert isinstance(pretrained_dict[pretrained_param_name], np.ndarray), \\\n f'Parameter {pretrained_param_name} in the picklefile has a wrong type ({param_type})'\n\n pretrained_weights = pretrained_dict[pretrained_param_name]\n\n if layout == 'NHWC' and pretrained_weights.ndim==4:\n # Place channels into last dim\n pretrained_weights = pretrained_weights.transpose((0, 2, 3, 1))\n\n # this special case is intended only for the first\n # layer, where the channel count needs to be padded\n # from 3 to 4 for NHWC\n if (pretrained_weights.shape[3]+1)==param_dict[param_name].shape[3]:\n pretrained_weights = np.pad(pretrained_weights,\n ((0, 0), (0, 0), (0, 0), (0, 1)),\n mode='constant')\n\n assert param_dict[param_name].shape == pretrained_weights.shape, \\\n 'Network parameter {} and pretrained parameter {} have different shapes ({} vs {})' \\\n .format(param_name, pretrained_param_name, param_dict[param_name].shape, pretrained_weights.shape)\n param_dict[param_name].set_data(pretrained_weights)\n",
"import math\r\nimport torch\r\nimport importlib\r\nimport amp_C\r\nfrom apex.multi_tensor_apply import multi_tensor_applier\r\n\r\nimport torch.distributed.distributed_c10d as c10d\r\n\r\nclass DistributedFusedLAMB(torch.optim.Optimizer):\r\n\r\n \"\"\"Implements LAMB algorithm.\r\n \r\n Currently GPU-only. Requires Apex to be installed via\r\n ``pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./``.\r\n \r\n This version of fused LAMB implements 2 fusions.\r\n \r\n * Fusion of the LAMB update's elementwise operations\r\n * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.\r\n \r\n :class:`apex.optimizers.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer::\r\n \r\n opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)\r\n ...\r\n opt.step()\r\n \r\n :class:`apex.optimizers.FusedLAMB` may be used with or without Amp. If you wish to use :class:`FusedLAMB` with Amp,\r\n you may choose any ``opt_level``::\r\n \r\n opt = apex.optimizers.FusedLAMB(model.parameters(), lr = ....)\r\n model, opt = amp.initialize(model, opt, opt_level=\"O0\" or \"O1 or \"O2\")\r\n ...\r\n opt.step()\r\n \r\n In general, ``opt_level=\"O1\"`` is recommended.\r\n \r\n LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.\r\n \r\n Arguments:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups.\r\n lr (float, optional): learning rate. (default: 1e-3)\r\n betas (Tuple[float, float], optional): coefficients used for computing\r\n running averages of gradient and its norm. (default: (0.9, 0.999))\r\n eps (float, optional): term added to the denominator to improve\r\n numerical stability. (default: 1e-8)\r\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\r\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\r\n algorithm from the paper `On the Convergence of Adam and Beyond`_\r\n NOT SUPPORTED now! (default: False)\r\n adam_w_mode (boolean, optional): Apply L2 regularization or weight decay\r\n True for decoupled weight decay(also known as AdamW) (default: True)\r\n grad_averaging (bool, optional): whether apply (1-beta2) to grad when\r\n calculating running averages of gradient. (default: True)\r\n set_grad_none (bool, optional): whether set grad to None when zero_grad()\r\n method is called. (default: True)\r\n max_grad_norm (float, optional): value used to clip global grad norm\r\n (default: 1.0)\r\n use_nvlamb (boolean, optional): Apply adaptive learning rate to 0.0\r\n weight decay parameter (default: False)\r\n step_supports_amp_scaling(boolean, optional): whether to use customized\r\n gradient unscaling logic (default: True)\r\n \r\n .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:\r\n https://arxiv.org/abs/1904.00962\r\n .. 
_On the Convergence of Adam and Beyond:\r\n https://openreview.net/forum?id=ryQu7f-RZ\r\n \"\"\"\r\n\r\n class AtomicCounter(object):\r\n def __init__(self):\r\n self.value = 0\r\n self.order = []\r\n import threading\r\n self._lock = threading.Lock()\r\n\r\n def add(self, idx):\r\n with self._lock:\r\n self.value += 1\r\n self.order.append(idx)\r\n\r\n def __init__(self, params,\r\n lr=1e-3, bias_correction = True, grad_averaging=True,\r\n betas=(0.9, 0.999), eps=1e-8, \r\n weight_decay=0., max_grad_norm=0., \r\n adam_w_mode=True, use_nvlamb=False,\r\n step_supports_amp_scaling=True, overlap_reductions=True,\r\n dwu_group_size=0, dwu_num_blocks=4, dwu_num_chunks=4,\r\n dwu_num_rs_pg=1, dwu_num_ar_pg=4, dwu_num_ag_pg=0, \r\n e5m2_allgather=False, verbose=True):\r\n defaults = dict(lr=lr, bias_correction=bias_correction,\r\n betas=betas, eps=eps, weight_decay=weight_decay,\r\n grad_averaging=grad_averaging,\r\n max_grad_norm=max_grad_norm)\r\n\r\n super(DistributedFusedLAMB, self).__init__(params, defaults)\r\n\r\n global fused_adam_cuda, distributed_lamb_cuda\r\n fused_adam_cuda = importlib.import_module(\"fused_adam_cuda\")\r\n distributed_lamb_cuda = importlib.import_module(\"distributed_lamb_cuda\")\r\n\r\n self._overflow_buf = torch.cuda.IntTensor([0])\r\n self._has_overflow = False\r\n self.multi_tensor_lamb_compute_update_term = distributed_lamb_cuda.multi_tensor_lamb_compute_update_term\r\n self.multi_tensor_lamb_update_weights = distributed_lamb_cuda.multi_tensor_lamb_update_weights\r\n import amp_C\r\n self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm\r\n\r\n self._grad_averaging = grad_averaging\r\n self._adam_w_mode = 1 if adam_w_mode else 0\r\n self._use_nvlamb = use_nvlamb\r\n self._step_supports_amp_scaling = step_supports_amp_scaling\r\n self._is_accumulation_step = False\r\n self._last_step = False\r\n self._overlap_reductions = overlap_reductions\r\n self._global_scale = None\r\n self._num_blocks = dwu_num_blocks\r\n self._num_chunks = dwu_num_chunks\r\n self._e5m2_allgather = e5m2_allgather\r\n self._verbose = verbose\r\n self._L2_grad_norm = None\r\n \r\n self._current_process_group = c10d._get_default_group()\r\n self._available_ranks = list(c10d._pg_group_ranks[self._current_process_group].keys())\r\n self._group_size = torch.cuda.device_count() if dwu_group_size <= 0 else dwu_group_size\r\n self._world_size = torch.distributed.get_world_size()\r\n self._num_groups = self._world_size // self._group_size\r\n self._rank_in_group = torch.distributed.get_rank() % self._group_size\r\n\r\n self._lr = torch.tensor(0.0, dtype=torch.float32, device='cuda')\r\n\r\n self._resume_from_checkpoint = False\r\n self._step = torch.cuda.IntTensor([0])\r\n\r\n # Master weight, moment, gradient buffers\r\n self._fp32_p, self._fp32_m, self._fp32_v, self._fp16_p, self._fp16_g = None, None, None, None, None\r\n\r\n import inspect\r\n assert ('no_copy' in inspect.getfullargspec(torch.distributed.reduce_scatter).args), \"This version of c10d does not support no_copy option\"\r\n\r\n self._num_rs_pg = dwu_num_rs_pg\r\n self._num_ar_pg = dwu_num_ar_pg\r\n self._num_ag_pg = dwu_num_ag_pg\r\n if self._num_groups > 1:\r\n self._ar_pg = []\r\n for dev_i in range(self._group_size):\r\n ranks = [dev_i+j*self._group_size for j in range(self._num_groups)]\r\n for i in range(self._num_ar_pg):\r\n if self._verbose:\r\n print(f\"creating new group {i}: {ranks}\")\r\n grp = torch.distributed.new_group(ranks=ranks)\r\n if grp != torch.distributed.GroupMember.NON_GROUP_MEMBER:\r\n if self._verbose:\r\n 
print(f\"group {i}: init barrier (device: {torch.cuda.current_device()})\")\r\n torch.distributed.barrier(group=grp, device_ids=[torch.cuda.current_device()])\r\n if self._verbose:\r\n print(f\"created new group {i}\")\r\n\r\n if torch.distributed.get_rank() in ranks:\r\n self._ar_pg.append(grp)\r\n self._ar_st = [torch.cuda.Stream() for _ in range(self._num_ar_pg)]\r\n #for ar_pg in self._ar_pg:\r\n # torch.distributed.all_reduce(self._overflow_buf,group=ar_pg)\r\n rs_ranks = []\r\n for group_i in range(self._num_groups):\r\n rs_ranks.append([group_i*self._group_size+j for j in range(self._group_size)])\r\n self._rs_pg = []\r\n for group_i in range(self._num_groups):\r\n ranks = rs_ranks[group_i]\r\n for i in range(self._num_rs_pg):\r\n grp = torch.distributed.new_group(ranks=ranks)\r\n if torch.distributed.get_rank() in ranks:\r\n self._rs_pg.append(grp)\r\n l2_grad_norm_pg = torch.distributed.new_group(ranks=ranks)\r\n if torch.distributed.get_rank() in ranks:\r\n self._l2_grad_norm_pg = l2_grad_norm_pg\r\n #torch.distributed.all_reduce(self._overflow_buf,group=self._l2_grad_norm_pg)\r\n self._rs_st = [torch.cuda.Stream() for _ in range(self._num_rs_pg)]\r\n #for rs_pg in self._rs_pg:\r\n # torch.distributed.all_reduce(self._overflow_buf,group=rs_pg)\r\n if self._num_ag_pg == 0:\r\n self._ag_pg = self._rs_pg\r\n self._ag_st = self._rs_st\r\n self._num_ag_pg = self._num_rs_pg\r\n else:\r\n self._ag_pg = []\r\n for group_i in range(self._num_groups):\r\n ranks = rs_ranks[group_i]\r\n for i in range(self._num_ag_pg):\r\n grp = torch.distributed.new_group(ranks=ranks)\r\n if torch.distributed.get_rank() in ranks:\r\n self._ag_pg.append(grp)\r\n self._ag_st = [torch.cuda.Stream() for _ in range(self._num_ag_pg)]\r\n #for ag_pg in self._ag_pg:\r\n # torch.distributed.all_reduce(self._overflow_buf,group=ag_pg)\r\n self._l2_grad_norm_st = torch.cuda.Stream()\r\n self._completion_st = torch.cuda.Stream()\r\n self._step.record_stream(self._completion_st)\r\n\r\n self._reductions_works = [None]*self._num_blocks\r\n self._allgather_works = [None]*self._num_blocks\r\n\r\n self._one = torch.cuda.IntTensor([1])\r\n\r\n self._first_step = True\r\n self._lazy_init_stage1_done, self._lazy_init_stage2_done = False, False\r\n self._param_order = self.AtomicCounter()\r\n\r\n def _lazy_init_stage1(self):\r\n if self._lazy_init_stage1_done: return\r\n\r\n p_offset = 0\r\n p_i = 0\r\n self._model_params = []\r\n self._grad_accs = []\r\n self._group_properties = []\r\n for group in self.param_groups:\r\n prev = None\r\n beta1, beta2 = group['betas']\r\n beta3 = 1.0 - beta1 if self._grad_averaging else 1.0\r\n bias_correction = 1 if group['bias_correction'] else 0\r\n eps = group['eps']\r\n weight_decay = group['weight_decay']\r\n for p in group['params']:\r\n torch.distributed.broadcast(p, 0)\r\n if not p.requires_grad:\r\n continue\r\n self._model_params.append(p)\r\n self._group_properties.append((\r\n weight_decay,\r\n bias_correction,\r\n beta1,\r\n beta2,\r\n beta3,\r\n eps\r\n ))\r\n p_grads_size = p.numel()\r\n def wrapper(param, param_i):\r\n param_tmp = param.expand_as(param)\r\n grad_acc = param_tmp.grad_fn.next_functions[0][0]\r\n def allreduce_hook(*unused):\r\n if self._first_step:\r\n # first time\r\n self._param_order.add(param_i)\r\n else:\r\n idx = self._param_order.order.index(param_i)\r\n self._do_overlapped_reduction(idx, param)\r\n grad_acc.register_hook(allreduce_hook)\r\n self._grad_accs.append(grad_acc)\r\n wrapper(p, p_i)\r\n p_offset += p_grads_size\r\n # Only enforce 128b alignment 
(64 * fp16) for non-consecutive parameters\r\n # RNN is one example of consecutive parameters:\r\n # (weight_ih, weight_hh, bias_ih, bias_hh)\r\n if prev is not None and (prev.data_ptr() + prev.numel() * prev.element_size() != p.data_ptr()):\r\n p_offset = ((p_offset + 63) // 64) * 64\r\n prev = p\r\n p_i += 1\r\n self._grads_generated = [False]*len(self._model_params)\r\n self._grads_fp16, self._grads_fp32 = [], []\r\n if self._overlap_reductions:\r\n self._current_block = self._num_blocks\r\n\r\n self._net_total_param_size = p_offset\r\n self._total_param_size = p_offset\r\n dwu_min_page_size = 256 * self._num_blocks * self._num_chunks * self._group_size\r\n self._total_param_size = ((self._total_param_size + dwu_min_page_size - 1) // dwu_min_page_size) * dwu_min_page_size\r\n self._block_size = self._total_param_size // self._num_blocks\r\n self._chunk_size = self._block_size // self._num_chunks\r\n self._shard_size = self._chunk_size // self._group_size\r\n #print(\"self._net_total_param_size=%d, self._total_param_size=%d, dwu_min_page_size=%d, self._block_size=%d, self._chunk_size=%d, self._shard_size=%d\" % (self._net_total_param_size, self._total_param_size,dwu_min_page_size,self._block_size,self._chunk_size,self._shard_size))\r\n\r\n self._flat_grads = torch.zeros([self._total_param_size], dtype=torch.float16, device='cuda')\r\n self._new_params = torch.zeros([self._total_param_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda')\r\n self._mega_shard_size = self._num_blocks * self._num_chunks * self._shard_size\r\n # initialize master weights, moments buffers if not loaded from checkpoint\r\n if self._fp32_p is None:\r\n self._fp32_p = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')\r\n self._fp32_m = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')\r\n self._fp32_v = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')\r\n self._fp32_u = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda')\r\n # FIXME: Rethink fp16 label since it's either uint8 or fp16\r\n self._fp16_p = torch.zeros([self._mega_shard_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda')\r\n self._fp16_g = torch.zeros([self._mega_shard_size], dtype=torch.float16, device='cuda')\r\n\r\n def _flat_split(p):\r\n def __blockify(p):\r\n return [p[block_id*self._block_size:(block_id+1)*self._block_size] for block_id in range(self._num_blocks)]\r\n def __chunkify(p):\r\n return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._num_chunks)]\r\n def __shardify(p):\r\n return [p[shard_id*self._shard_size:(shard_id+1)*self._shard_size] for shard_id in range(self._group_size)]\r\n list_of_blocks = __blockify(self._flat_grads)\r\n list_of_list_of_chunks = [__chunkify(block) for block in list_of_blocks]\r\n list_of_list_of_list_of_shards = [[__shardify(chunk) for chunk in chunks] for chunks in list_of_list_of_chunks]\r\n return list_of_blocks, list_of_list_of_chunks, list_of_list_of_list_of_shards\r\n self._flat_grads_blocks, self._flat_grads_chunks, self._flat_grads_shards = _flat_split(self._flat_grads)\r\n def _full_packed_split(p):\r\n def __shardify(p):\r\n return [p[mega_shard*self._mega_shard_size:(mega_shard+1)*self._mega_shard_size] for mega_shard in range(self._group_size)]\r\n def __blockify(p):\r\n return [p[block_id*self._num_chunks*self._shard_size:(block_id+1)*self._num_chunks*self._shard_size] for block_id in 
range(self._num_blocks)]\r\n def __chunkify(p):\r\n return [p[chunk_id*self._shard_size:(chunk_id+1)*self._shard_size] for chunk_id in range(self._num_chunks)]\r\n list_of_mega_shards = __shardify(p)\r\n list_of_list_of_mega_blocks = [__blockify(mega_shard) for mega_shard in list_of_mega_shards]\r\n list_of_list_of_list_of_mega_chunks = [[__chunkify(mega_block) for mega_block in mega_blocks] for mega_blocks in list_of_list_of_mega_blocks]\r\n return list_of_mega_shards, list_of_list_of_mega_blocks, list_of_list_of_list_of_mega_chunks\r\n self._new_params_mega_shards, self._new_params_mega_blocks, self._new_params_mega_chunks = _full_packed_split(self._new_params)\r\n def _packed_split(p):\r\n def __packed_blockify(p):\r\n packed_block_size = self._num_chunks*self._shard_size\r\n return [p[block_id*packed_block_size:(block_id+1)*packed_block_size] for block_id in range(self._num_blocks)]\r\n def __packed_chunkify(p):\r\n # in the packed format, each chunk contains one shard, so packed_chunk_size == self._shard_size\r\n return [p[chunk_id*self._shard_size:(chunk_id+1)*self._shard_size] for chunk_id in range(self._num_chunks)]\r\n list_of_blocks = __packed_blockify(p)\r\n list_of_list_of_chunks = [__packed_chunkify(block) for block in list_of_blocks]\r\n return list_of_blocks, list_of_list_of_chunks\r\n self._fp32_p_blocks, self._fp32_p_chunks = _packed_split(self._fp32_p)\r\n self._fp32_m_blocks, self._fp32_m_chunks = _packed_split(self._fp32_m)\r\n self._fp32_v_blocks, self._fp32_v_chunks = _packed_split(self._fp32_v)\r\n self._fp32_u_blocks, self._fp32_u_chunks = _packed_split(self._fp32_u)\r\n self._fp16_p_blocks, self._fp16_p_chunks = _packed_split(self._fp16_p)\r\n self._fp16_g_blocks, self._fp16_g_chunks = _packed_split(self._fp16_g)\r\n\r\n self._lazy_init_stage1_done = True\r\n\r\n def _lazy_init_stage2(self):\r\n if self._lazy_init_stage2_done: return\r\n\r\n self._param_order.order.reverse()\r\n\r\n # re-order model_params, grad_accs, group_properties lists\r\n self._model_params = [self._model_params[i] for i in self._param_order.order]\r\n self._grad_accs = [self._grad_accs[i] for i in self._param_order.order]\r\n self._group_properties = [self._group_properties[i] for i in self._param_order.order]\r\n\r\n # re-collect grads info (size, offset) after ordering\r\n prev = None\r\n p_offset = 0\r\n self._grads_info = []\r\n self._individual_flat_grads = []\r\n for i, p in enumerate(self._model_params):\r\n p_grads_size = p.numel()\r\n self._grads_info.append({\"param_grads_size\":p_grads_size, \"param_offset\":p_offset})\r\n self._individual_flat_grads.append(self._flat_grads[p_offset:p_offset+p_grads_size].view_as(p))\r\n # for the first iteration\r\n self._do_overlapped_reduction(i, p)\r\n p_offset += p_grads_size\r\n # Only enforce 128b alignment (64 * fp16) for non-consecutive parameters\r\n # RNN is one example of consecutive parameters:\r\n # (weight_ih, weight_hh, bias_ih, bias_hh)\r\n if prev is not None and (prev.data_ptr() + prev.numel() * prev.element_size() != p.data_ptr()):\r\n p_offset = ((p_offset + 63) // 64) * 64\r\n prev = p\r\n\r\n self._low_param_i = [0]*self._num_blocks\r\n for block_id in range(self._num_blocks-1,-1,-1):\r\n p_i = len(self._grads_info)-1\r\n while p_i > 0 and self._grads_info[p_i][\"param_offset\"] > block_id*self._block_size:\r\n p_i -= 1\r\n self._low_param_i[block_id] = p_i\r\n #print(\"self._low_param_i\", self._low_param_i)\r\n\r\n # This paragraph does two things:\r\n # 1) Copy model parameters into master buffer\r\n # 2) Create tensor 
lists for unpacking new parameter tensor after all-gather\r\n self._packed_flat_to_model_params_fp16 = []\r\n self._packed_flat_to_model_params_fp32 = []\r\n self._model_params_num = len(self._model_params)\r\n self._contrib_tensor_list = []\r\n self._contrib_min_param_i, self._contrib_max_param_i = -1, -1\r\n self._contrib_update_frag_for_norm = []\r\n self._contrib_model_param_for_norm_fp16 = []\r\n self._contrib_model_param_for_norm_fp32 = []\r\n self._contrib_model_param_for_norm_is_fp16 = []\r\n self._model_param_is_contrib = []\r\n self._contrib_group_properties = []\r\n for shard_id in range(self._group_size):\r\n for block_id in range(self._num_blocks):\r\n for chunk_id in range(self._num_chunks):\r\n flat_shard_start = (((block_id * self._num_chunks + chunk_id) * self._group_size) + shard_id) * self._shard_size\r\n flat_shard_end = flat_shard_start + self._shard_size\r\n for param_i, (p, grads_info, group_props) in enumerate(zip(self._model_params, self._grads_info, self._group_properties)):\r\n flat_grad_start = grads_info[\"param_offset\"]\r\n flat_grad_end = flat_grad_start + grads_info[\"param_grads_size\"]\r\n clipped_start = (lambda a,b: a if a > b else b)(flat_grad_start, flat_shard_start)\r\n clipped_end = (lambda a,b: a if a < b else b)(flat_grad_end, flat_shard_end)\r\n if clipped_start < clipped_end:\r\n grad_offset = clipped_start - flat_grad_start\r\n grad_length = clipped_end - clipped_start\r\n shard_offset = clipped_start - flat_shard_start\r\n model_param_fragment = p.view(-1)[grad_offset:grad_offset+grad_length]\r\n new_param_packed_fragment = self._new_params_mega_chunks[shard_id][block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n if model_param_fragment.dtype == torch.float16:\r\n self._packed_flat_to_model_params_fp16.append( (new_param_packed_fragment, model_param_fragment) )\r\n else:\r\n self._packed_flat_to_model_params_fp32.append( (new_param_packed_fragment, model_param_fragment) )\r\n if shard_id == self._rank_in_group:\r\n self._model_param_is_contrib.append(param_i)\r\n # copy model parameters into master buffer\r\n master_param_fragment = self._fp32_p_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n opti_state_m_fragment = self._fp32_m_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n opti_state_v_fragment = self._fp32_v_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n opti_state_u_fragment = self._fp32_u_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n opti_state_g_fragment = self._fp16_g_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n opti_state_p_fragment = self._fp16_p_chunks[block_id][chunk_id][shard_offset:shard_offset+grad_length]\r\n #print(\"model_param_fragment.size()=%s, new_param_packed_fragment.size()=%s, master_param_fragment.size()=%s\" % (str(model_param_fragment.size()), str(new_param_packed_fragment.size()), str(master_param_fragment.size())))\r\n if not self._resume_from_checkpoint:\r\n master_param_fragment.copy_(model_param_fragment)\r\n self._contrib_group_properties.append(group_props)\r\n self._contrib_tensor_list.append((master_param_fragment, opti_state_m_fragment, opti_state_v_fragment, opti_state_u_fragment, opti_state_g_fragment, opti_state_p_fragment)) # p, m, v, u, g, p_copy\r\n self._contrib_update_frag_for_norm.append(opti_state_u_fragment)\r\n if p.dtype == torch.float16:\r\n self._contrib_model_param_for_norm_fp16.append(p)\r\n else:\r\n self._contrib_model_param_for_norm_fp32.append(p)\r\n 
self._contrib_model_param_for_norm_is_fp16.append(True if p.dtype == torch.float16 else False)\r\n if self._contrib_min_param_i < 0: self._contrib_min_param_i = param_i\r\n self._contrib_max_param_i = param_i\r\n self._contrib_model_param_for_norm_num = len(self._contrib_model_param_for_norm_is_fp16)\r\n if len(self._contrib_model_param_for_norm_fp16) == 0: self._contrib_model_param_for_norm_fp16 = None\r\n if len(self._contrib_model_param_for_norm_fp32) == 0: self._contrib_model_param_for_norm_fp32 = None\r\n self._contrib_model_param_for_norm_is_fp32 = torch.tensor([not is_fp16 for is_fp16 in self._contrib_model_param_for_norm_is_fp16], dtype=torch.bool, device='cuda')\r\n self._contrib_model_param_for_norm_is_fp16 = torch.tensor([is_fp16 for is_fp16 in self._contrib_model_param_for_norm_is_fp16], dtype=torch.bool, device='cuda')\r\n self._offsets = torch.tensor(self._model_param_is_contrib, dtype=torch.int64, device='cuda')\r\n\r\n p, m, v, u, g, p_copy = list(zip(*self._contrib_tensor_list))\r\n self._contrib_compute_update_term_tensor_list = [g, p, m, v, u]\r\n self._contrib_update_weights_tensor_list = [u, p, p_copy]\r\n\r\n math_type = self._fp32_u.dtype\r\n decay, bias_correction, beta1, beta2, beta3, epsilon = list(zip(*self._contrib_group_properties))\r\n self._contrib_beta1 = torch.tensor(beta1, dtype=math_type, device='cuda')\r\n self._contrib_beta2 = torch.tensor(beta2, dtype=math_type, device='cuda')\r\n self._contrib_beta3 = torch.tensor(beta3, dtype=math_type, device='cuda')\r\n self._contrib_bias_correction = torch.tensor(bias_correction, dtype=torch.int, device='cuda')\r\n self._contrib_epsilon = torch.tensor(epsilon, dtype=math_type, device='cuda')\r\n self._contrib_weight_decay = torch.tensor(decay, dtype=math_type, device='cuda')\r\n\r\n self._packed_flat_to_model_params_fp16 = list(zip(*self._packed_flat_to_model_params_fp16)) if len(self._packed_flat_to_model_params_fp16) > 0 else None\r\n self._packed_flat_to_model_params_fp32 = list(zip(*self._packed_flat_to_model_params_fp32)) if len(self._packed_flat_to_model_params_fp32) > 0 else None\r\n\r\n self._lazy_init_stage2_done = True\r\n\r\n self.complete_reductions()\r\n self._first_step = False\r\n\r\n def set_is_accumulation_step(self, is_accumulation_step):\r\n self._is_accumulation_step = is_accumulation_step\r\n\r\n def set_last_step(self, last_step):\r\n self._last_step = last_step\r\n \r\n def _get_flush_block(self):\r\n flush_block = []\r\n if self._current_block > 0 and self._grads_generated[self._low_param_i[self._current_block-1]]:\r\n num_grads = len(self._grads_generated)\r\n contiguous_idx = num_grads\r\n while contiguous_idx > 0 and self._grads_generated[contiguous_idx-1]:\r\n contiguous_idx -= 1\r\n\r\n if contiguous_idx < num_grads and self._grads_info[contiguous_idx][\"param_offset\"] <= (self._current_block-1)*self._block_size:\r\n self._current_block -= 1\r\n start = self._current_block * self._block_size\r\n end = (self._current_block+1) * self._block_size\r\n flush_block = [start, end]\r\n\r\n return flush_block\r\n\r\n def _pipeline_block_reductions(self, block_id):\r\n self._flatten_grad_mt(1.0/self._world_size)\r\n\r\n # Reduction within each node\r\n # Changes gradient format from [block * chunk * shard] to [shard * block * chunk]\r\n # The output format is the same as the fp32 master parameters\r\n works = [None]*self._num_chunks\r\n for chunk_id in range(self._num_chunks):\r\n glob_chunk_id = block_id * self._num_chunks + chunk_id\r\n rs_stream = 
self._rs_st[glob_chunk_id%self._num_rs_pg]\r\n rs_stream.wait_stream(torch.cuda.current_stream())\r\n with torch.cuda.stream(rs_stream):\r\n works[chunk_id] = torch.distributed.reduce_scatter(self._fp16_g_chunks[block_id][chunk_id],self._flat_grads_shards[block_id][chunk_id],group=self._rs_pg[glob_chunk_id%self._num_rs_pg],async_op=True,no_copy=True)\r\n\r\n # Reduction across nodes for each rank\r\n if self._num_groups > 1:\r\n for chunk_id in range(self._num_chunks):\r\n glob_chunk_id = block_id * self._num_chunks + chunk_id\r\n ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg]\r\n with torch.cuda.stream(ar_stream):\r\n works[chunk_id].wait()\r\n works[chunk_id] = torch.distributed.all_reduce(self._fp16_g_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True)\r\n self._reductions_works[block_id] = works\r\n\r\n # Compute L2 grad norm\r\n if block_id == 0:\r\n with torch.cuda.stream(self._l2_grad_norm_st):\r\n for block_id in range(self._num_blocks):\r\n for chunk_id in range(self._num_chunks):\r\n self._reductions_works[block_id][chunk_id].wait()\r\n # Since the packed format is contiguous after reductions, only one norm is needed\r\n l2_grad_norm_sq = torch.empty([1], device='cuda')\r\n l2_grad_norm_sq = self._fp16_g.norm(dtype=torch.float32, p=2)**2\r\n torch.distributed.all_reduce(l2_grad_norm_sq, group=self._l2_grad_norm_pg)\r\n self._L2_grad_norm = l2_grad_norm_sq.sqrt()\r\n\r\n def __compute_contrib_param_norm(self):\r\n if self._contrib_model_param_for_norm_fp16 is not None and self._contrib_model_param_for_norm_fp32 is not None:\r\n gnorm_fp16 = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp16], True)[1]\r\n gnorm_fp32 = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp32], True)[1]\r\n gnorm = torch.empty(size=[self._contrib_model_param_for_norm_num], dtype=torch.bool, device='cuda')\r\n gnorm.masked_scatter_(self._contrib_model_param_for_norm_is_fp16, gnorm_fp16)\r\n gnorm.masked_scatter_(self._contrib_model_param_for_norm_is_fp32, gnorm_fp32)\r\n elif self._contrib_model_param_for_norm_fp16 is not None:\r\n gnorm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp16], True)[1]\r\n elif self._contrib_model_param_for_norm_fp32 is not None:\r\n gnorm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_model_param_for_norm_fp32], True)[1]\r\n return gnorm\r\n\r\n def __compute_contrib_update_norm(self):\r\n l2_norm = torch.zeros(size=[self._model_params_num], dtype=torch.float32, device='cuda')\r\n local_contrib_l2_norm = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [self._contrib_update_frag_for_norm], True)[1] ** 2\r\n l2_norm.scatter_(dim=0, index=self._offsets, src=local_contrib_l2_norm)\r\n torch.distributed.all_reduce(l2_norm, group=self._ag_pg[0])\r\n l2_norm = torch.sqrt(l2_norm)\r\n return l2_norm\r\n\r\n def _pipeline_step(self):\r\n global_scale = self.global_scale\r\n max_grad_norm = self.defaults['max_grad_norm']\r\n global_grad_norm = self.L2_grad_norm\r\n\r\n # check global_grad_norm and fill overflow_buf\r\n is_finite = (global_grad_norm + 1 > global_grad_norm).int()\r\n self._overflow_buf = self._one * (is_finite ^ self._one) # toggle between 0 and 1\r\n\r\n # increment step counter if no overflow\r\n self._step += is_finite\r\n self._completion_st.wait_stream(torch.cuda.current_stream())\r\n 
self._completion_st.wait_stream(self._l2_grad_norm_st)\r\n\r\n # Call step kernel once per step\r\n # Call all-gather once per step\r\n with torch.cuda.stream(self._completion_st):\r\n for block_id in range(self._num_blocks):\r\n for chunk_id in range(self._num_chunks):\r\n self._reductions_works[block_id][chunk_id].wait()\r\n param_norm = self.__compute_contrib_param_norm()\r\n multi_tensor_applier(self.multi_tensor_lamb_compute_update_term,\r\n self._overflow_buf,\r\n self._contrib_compute_update_term_tensor_list, # g, p, m, v, u\r\n self._contrib_beta1,\r\n self._contrib_beta2,\r\n self._contrib_beta3,\r\n self._contrib_bias_correction,\r\n self._step,\r\n self._contrib_epsilon,\r\n self._adam_w_mode,\r\n self._contrib_weight_decay,\r\n global_scale,\r\n global_grad_norm,\r\n max_grad_norm)\r\n upd_norm = self.__compute_contrib_update_norm()\r\n multi_tensor_applier(self.multi_tensor_lamb_update_weights,\r\n self._overflow_buf,\r\n self._contrib_update_weights_tensor_list, # u, p, p_copy\r\n param_norm,\r\n upd_norm,\r\n self._offsets,\r\n self._lr,\r\n self._contrib_weight_decay,\r\n global_grad_norm,\r\n self._use_nvlamb)\r\n torch.distributed.all_gather(self._new_params_mega_shards, self._fp16_p, group=self._ag_pg[0], no_copy=True)\r\n\r\n def _flatten_grad_mt(self, scale):\r\n if len(self._grads_fp16) > 0:\r\n self._overflow_buf.zero_()\r\n multi_tensor_applier(\r\n amp_C.multi_tensor_scale,\r\n self._overflow_buf,\r\n list(zip(*self._grads_fp16)),\r\n scale)\r\n self._grads_fp16 = []\r\n if len(self._grads_fp32) > 0:\r\n self._overflow_buf.zero_()\r\n multi_tensor_applier(\r\n amp_C.multi_tensor_scale,\r\n self._overflow_buf,\r\n list(zip(*self._grads_fp32)),\r\n scale)\r\n self._grads_fp32 = []\r\n\r\n def _do_overlapped_reduction(self, param_i, param):\r\n if not self._is_accumulation_step:\r\n # handle overlapped reductions\r\n if param.dtype == torch.float16:\r\n self._grads_fp16.append( (param.grad, self._individual_flat_grads[param_i]) )\r\n else:\r\n self._grads_fp32.append( (param.grad, self._individual_flat_grads[param_i]) )\r\n self._grads_generated[param_i]=True\r\n if not self._first_step and not self._last_step:\r\n if self._overlap_reductions:\r\n flush_block = self._get_flush_block()\r\n while flush_block:\r\n block_id = flush_block[0] // self._block_size\r\n self._pipeline_block_reductions(block_id)\r\n flush_block = self._get_flush_block()\r\n\r\n def set_global_scale(self, global_scale):\r\n \"\"\"Set global scale.\r\n \"\"\"\r\n self._global_scale = global_scale\r\n\r\n @property\r\n def global_scale(self):\r\n return self._global_scale\r\n\r\n @property\r\n def L2_grad_norm(self):\r\n torch.cuda.current_stream().wait_stream(self._l2_grad_norm_st)\r\n return self._L2_grad_norm\r\n\r\n def complete_reductions(self):\r\n \"\"\"Complete reductions if full pipeline is not selected or overlap is not allowed.\r\n \"\"\"\r\n if self._last_step:\r\n # zero out gradients that have not been completed yet\r\n for param_i, grad_generated in enumerate(self._grads_generated):\r\n if not grad_generated:\r\n grad_info = self._grads_info[param_i]\r\n param_offset = grad_info[\"param_offset\"]\r\n param_size = grad_info[\"param_grads_size\"]\r\n self._flat_grads[param_offset:param_offset+param_size].zero_()\r\n self._grads_generated[param_i] = True\r\n\r\n if self._first_step or self._last_step or not self._overlap_reductions:\r\n # nothing done so far, run full pipeline after reductions\r\n for block_id in range(self._num_blocks-1,-1,-1):\r\n 
self._pipeline_block_reductions(block_id)\r\n\r\n torch.cuda.current_stream().wait_stream(self._l2_grad_norm_st)\r\n\r\n self._current_block = self._num_blocks\r\n self._grads_generated = [False]*len(self._grads_info)\r\n\r\n def step(self, closure=None, grad_scaler=None):\r\n loss = None\r\n if closure is not None:\r\n loss = closure()\r\n\r\n self._pipeline_step()\r\n\r\n if grad_scaler is not None:\r\n found_inf = self._overflow_buf.float()\r\n optimizer_state = grad_scaler._per_optimizer_states[id(self)]\r\n current_device = torch.device('cuda', torch.cuda.current_device())\r\n optimizer_state[\"found_inf_per_device\"][current_device] = found_inf\r\n\r\n self._completion_st.wait_stream(torch.cuda.current_stream())\r\n\r\n with torch.cuda.stream(self._completion_st):\r\n # Copy self._new_params to model params\r\n with torch.no_grad():\r\n if self._packed_flat_to_model_params_fp16 is not None:\r\n multi_tensor_applier(\r\n fused_adam_cuda.maybe_cast_mt,\r\n self._overflow_buf,\r\n self._packed_flat_to_model_params_fp16)\r\n if self._packed_flat_to_model_params_fp32 is not None:\r\n multi_tensor_applier(\r\n fused_adam_cuda.maybe_cast_mt,\r\n self._overflow_buf,\r\n self._packed_flat_to_model_params_fp32)\r\n\r\n torch.cuda.current_stream().wait_stream(self._completion_st)\r\n\r\n self._reductions_works = [None]*self._num_blocks\r\n self._allgather_works = [None]*self._num_blocks\r\n\r\n return loss\r\n\r\n def state_dict(self):\r\n \"\"\"\r\n Returns a dict containing the current state of this :class:`DistributedFusedAdam` instance.\r\n Example::\r\n checkpoint = {}\r\n checkpoint['model'] = model.state_dict()\r\n checkpoint['optimizer'] = optimizer.state_dict()\r\n torch.save(checkpoint, \"saved.pth\")\r\n \"\"\"\r\n # save step, master weights and first/second moments\r\n state_dict = {}\r\n state_dict['step'] = self._step\r\n state_dict['fp32_p'] = self._fp32_p\r\n state_dict['fp32_m'] = self._fp32_m\r\n state_dict['fp32_v'] = self._fp32_v\r\n return state_dict\r\n\r\n def load_state_dict(self, state_dict):\r\n \"\"\"\r\n Loads a state_dict created by an earlier call to state_dict().\r\n If an DistributedFusedAdam instance was constructed from some ``init_optimizer``,\r\n whose parameters in turn came from ``model``, it is expected that the user\r\n will call ``model.load_state_dict()`` before\r\n ``optimizer.load_state_dict()`` is called.\r\n Example::\r\n model = torch.nn.Linear(D_in, D_out).cuda().half()\r\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\r\n optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)\r\n ...\r\n checkpoint = torch.load(\"saved.pth\")\r\n model.load_state_dict(checkpoint['model'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n \"\"\"\r\n # restore step, master weights and first/second moments\r\n self._step = state_dict['step']\r\n self._fp32_p = state_dict['fp32_p'].to(device=\"cuda\")\r\n self._fp32_m = state_dict['fp32_m'].to(device=\"cuda\")\r\n self._fp32_v = state_dict['fp32_v'].to(device=\"cuda\")\r\n self._resume_from_checkpoint = True"
] | [
[
"numpy.expand_dims",
"numpy.sqrt",
"numpy.random.choice",
"numpy.arange",
"numpy.around",
"numpy.rint",
"numpy.cumsum",
"numpy.ones",
"numpy.all",
"numpy.zeros_like",
"numpy.floor",
"numpy.count_nonzero",
"numpy.argsort",
"numpy.array",
"numpy.where",
"numpy.zeros"
],
[
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.compat.v1.train.SecondOrStepTimer",
"tensorflow.python.training.summary_io.SummaryWriterCache.get",
"tensorflow.compat.v1.Summary.Value",
"tensorflow.compat.v1.Event",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.logging.info",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"numpy.pad"
],
[
"torch.distributed.broadcast",
"torch.zeros",
"torch.distributed.distributed_c10d._get_default_group",
"torch.no_grad",
"torch.cuda.stream",
"torch.distributed.get_rank",
"torch.sqrt",
"torch.tensor",
"torch.empty",
"torch.cuda.current_device",
"torch.cuda.current_stream",
"torch.cuda.IntTensor",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.distributed.all_gather",
"torch.distributed.new_group",
"torch.distributed.all_reduce",
"torch.distributed.reduce_scatter",
"torch.cuda.Stream"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MobileAnalytics/iPython-Framework | [
"da0e598308c067cd5c5290a6364b3ffaf2d2418f",
"da0e598308c067cd5c5290a6364b3ffaf2d2418f"
] | [
"SprityBird/spritybird/python3.5/lib/python3.5/site-packages/plotly/tools.py",
"SprityBird/spritybird/python3.5/lib/python3.5/site-packages/healpy/projector.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\ntools\n=====\n\nFunctions that USERS will possibly want access to.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom collections import OrderedDict\n\nimport warnings\n\nimport six\nimport math\nimport decimal\n\nfrom plotly import utils\nfrom plotly import exceptions\nfrom plotly import graph_reference\nfrom plotly import session\nfrom plotly.files import (CONFIG_FILE, CREDENTIALS_FILE, FILE_CONTENT,\n GRAPH_REFERENCE_FILE, check_file_permissions)\n\nDEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',\n 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',\n 'rgb(148, 103, 189)', 'rgb(140, 86, 75)',\n 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',\n 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']\n\n\nREQUIRED_GANTT_KEYS = ['Task', 'Start', 'Finish']\nPLOTLY_SCALES = {'Greys': ['rgb(0,0,0)', 'rgb(255,255,255)'],\n 'YlGnBu': ['rgb(8,29,88)', 'rgb(255,255,217)'],\n 'Greens': ['rgb(0,68,27)', 'rgb(247,252,245)'],\n 'YlOrRd': ['rgb(128,0,38)', 'rgb(255,255,204)'],\n 'Bluered': ['rgb(0,0,255)', 'rgb(255,0,0)'],\n 'RdBu': ['rgb(5,10,172)', 'rgb(178,10,28)'],\n 'Reds': ['rgb(220,220,220)', 'rgb(178,10,28)'],\n 'Blues': ['rgb(5,10,172)', 'rgb(220,220,220)'],\n 'Picnic': ['rgb(0,0,255)', 'rgb(255,0,0)'],\n 'Rainbow': ['rgb(150,0,90)', 'rgb(255,0,0)'],\n 'Portland': ['rgb(12,51,131)', 'rgb(217,30,30)'],\n 'Jet': ['rgb(0,0,131)', 'rgb(128,0,0)'],\n 'Hot': ['rgb(0,0,0)', 'rgb(255,255,255)'],\n 'Blackbody': ['rgb(0,0,0)', 'rgb(160,200,255)'],\n 'Earth': ['rgb(0,0,130)', 'rgb(255,255,255)'],\n 'Electric': ['rgb(0,0,0)', 'rgb(255,250,220)'],\n 'Viridis': ['rgb(68,1,84)', 'rgb(253,231,37)']}\n\n# color constants for violin plot\nDEFAULT_FILLCOLOR = '#1f77b4'\nDEFAULT_HISTNORM = 'probability density'\nALTERNATIVE_HISTNORM = 'probability'\n\n\n# Warning format\ndef warning_on_one_line(message, category, filename, lineno,\n file=None, line=None):\n return '%s:%s: %s:\\n\\n%s\\n\\n' % (filename, lineno, category.__name__,\n message)\nwarnings.formatwarning = warning_on_one_line\n\ntry:\n from . 
import matplotlylib\n _matplotlylib_imported = True\nexcept ImportError:\n _matplotlylib_imported = False\n\ntry:\n import IPython\n import IPython.core.display\n _ipython_imported = True\nexcept ImportError:\n _ipython_imported = False\n\ntry:\n import numpy as np\n _numpy_imported = True\nexcept ImportError:\n _numpy_imported = False\n\ntry:\n import pandas as pd\n _pandas_imported = True\nexcept ImportError:\n _pandas_imported = False\n\ntry:\n import scipy as scp\n _scipy_imported = True\nexcept ImportError:\n _scipy_imported = False\n\ntry:\n import scipy.spatial as scs\n _scipy__spatial_imported = True\nexcept ImportError:\n _scipy__spatial_imported = False\n\ntry:\n import scipy.cluster.hierarchy as sch\n _scipy__cluster__hierarchy_imported = True\nexcept ImportError:\n _scipy__cluster__hierarchy_imported = False\n\ntry:\n import scipy\n import scipy.stats\n _scipy_imported = True\nexcept ImportError:\n _scipy_imported = False\n\n\ndef get_config_defaults():\n \"\"\"\n Convenience function to check current settings against defaults.\n\n Example:\n\n if plotly_domain != get_config_defaults()['plotly_domain']:\n # do something\n\n \"\"\"\n return dict(FILE_CONTENT[CONFIG_FILE]) # performs a shallow copy\n\n\ndef ensure_local_plotly_files():\n \"\"\"Ensure that filesystem is setup/filled out in a valid way.\n If the config or credential files aren't filled out, then write them\n to the disk.\n \"\"\"\n if check_file_permissions():\n for fn in [CREDENTIALS_FILE, CONFIG_FILE]:\n utils.ensure_file_exists(fn)\n contents = utils.load_json_dict(fn)\n for key, val in list(FILE_CONTENT[fn].items()):\n # TODO: removed type checking below, may want to revisit\n if key not in contents:\n contents[key] = val\n contents_keys = list(contents.keys())\n for key in contents_keys:\n if key not in FILE_CONTENT[fn]:\n del contents[key]\n utils.save_json_dict(fn, contents)\n\n # make a request to get graph reference if DNE.\n utils.ensure_file_exists(GRAPH_REFERENCE_FILE)\n utils.save_json_dict(GRAPH_REFERENCE_FILE,\n graph_reference.GRAPH_REFERENCE)\n\n else:\n warnings.warn(\"Looks like you don't have 'read-write' permission to \"\n \"your 'home' ('~') directory or to our '~/.plotly' \"\n \"directory. That means plotly's python api can't setup \"\n \"local configuration files. No problem though! You'll \"\n \"just have to sign-in using 'plotly.plotly.sign_in()'. \"\n \"For help with that: 'help(plotly.plotly.sign_in)'.\"\n \"\\nQuestions? 
[email protected]\")\n\n\n### credentials tools ###\n\ndef set_credentials_file(username=None,\n api_key=None,\n stream_ids=None,\n proxy_username=None,\n proxy_password=None):\n \"\"\"Set the keyword-value pairs in `~/.plotly_credentials`.\n\n :param (str) username: The username you'd use to sign in to Plotly\n :param (str) api_key: The api key associated with above username\n :param (list) stream_ids: Stream tokens for above credentials\n :param (str) proxy_username: The un associated with with your Proxy\n :param (str) proxy_password: The pw associated with your Proxy un\n\n \"\"\"\n if not check_file_permissions():\n raise exceptions.PlotlyError(\"You don't have proper file permissions \"\n \"to run this function.\")\n ensure_local_plotly_files() # make sure what's there is OK\n credentials = get_credentials_file()\n if isinstance(username, six.string_types):\n credentials['username'] = username\n if isinstance(api_key, six.string_types):\n credentials['api_key'] = api_key\n if isinstance(proxy_username, six.string_types):\n credentials['proxy_username'] = proxy_username\n if isinstance(proxy_password, six.string_types):\n credentials['proxy_password'] = proxy_password\n if isinstance(stream_ids, (list, tuple)):\n credentials['stream_ids'] = stream_ids\n utils.save_json_dict(CREDENTIALS_FILE, credentials)\n ensure_local_plotly_files() # make sure what we just put there is OK\n\n\ndef get_credentials_file(*args):\n \"\"\"Return specified args from `~/.plotly_credentials`. as dict.\n\n Returns all if no arguments are specified.\n\n Example:\n get_credentials_file('username')\n\n \"\"\"\n if check_file_permissions():\n ensure_local_plotly_files() # make sure what's there is OK\n return utils.load_json_dict(CREDENTIALS_FILE, *args)\n else:\n return FILE_CONTENT[CREDENTIALS_FILE]\n\n\ndef reset_credentials_file():\n ensure_local_plotly_files() # make sure what's there is OK\n utils.save_json_dict(CREDENTIALS_FILE, {})\n ensure_local_plotly_files() # put the defaults back\n\n\n### config tools ###\n\ndef set_config_file(plotly_domain=None,\n plotly_streaming_domain=None,\n plotly_api_domain=None,\n plotly_ssl_verification=None,\n plotly_proxy_authorization=None,\n world_readable=None,\n sharing=None,\n auto_open=None):\n \"\"\"Set the keyword-value pairs in `~/.plotly/.config`.\n\n :param (str) plotly_domain: ex - https://plot.ly\n :param (str) plotly_streaming_domain: ex - stream.plot.ly\n :param (str) plotly_api_domain: ex - https://api.plot.ly\n :param (bool) plotly_ssl_verification: True = verify, False = don't verify\n :param (bool) plotly_proxy_authorization: True = use plotly proxy auth creds\n :param (bool) world_readable: True = public, False = private\n\n \"\"\"\n if not check_file_permissions():\n raise exceptions.PlotlyError(\"You don't have proper file permissions \"\n \"to run this function.\")\n ensure_local_plotly_files() # make sure what's there is OK\n utils.validate_world_readable_and_sharing_settings({\n 'sharing': sharing, 'world_readable': world_readable})\n settings = get_config_file()\n if isinstance(plotly_domain, six.string_types):\n settings['plotly_domain'] = plotly_domain\n elif plotly_domain is not None:\n raise TypeError('plotly_domain should be a string')\n if isinstance(plotly_streaming_domain, six.string_types):\n settings['plotly_streaming_domain'] = plotly_streaming_domain\n elif plotly_streaming_domain is not None:\n raise TypeError('plotly_streaming_domain should be a string')\n if isinstance(plotly_api_domain, six.string_types):\n 
settings['plotly_api_domain'] = plotly_api_domain\n elif plotly_api_domain is not None:\n raise TypeError('plotly_api_domain should be a string')\n if isinstance(plotly_ssl_verification, (six.string_types, bool)):\n settings['plotly_ssl_verification'] = plotly_ssl_verification\n elif plotly_ssl_verification is not None:\n raise TypeError('plotly_ssl_verification should be a boolean')\n if isinstance(plotly_proxy_authorization, (six.string_types, bool)):\n settings['plotly_proxy_authorization'] = plotly_proxy_authorization\n elif plotly_proxy_authorization is not None:\n raise TypeError('plotly_proxy_authorization should be a boolean')\n if isinstance(auto_open, bool):\n settings['auto_open'] = auto_open\n elif auto_open is not None:\n raise TypeError('auto_open should be a boolean')\n\n if isinstance(world_readable, bool):\n settings['world_readable'] = world_readable\n settings.pop('sharing')\n elif world_readable is not None:\n raise TypeError('Input should be a boolean')\n if isinstance(sharing, six.string_types):\n settings['sharing'] = sharing\n elif sharing is not None:\n raise TypeError('sharing should be a string')\n utils.set_sharing_and_world_readable(settings)\n\n utils.save_json_dict(CONFIG_FILE, settings)\n ensure_local_plotly_files() # make sure what we just put there is OK\n\n\ndef get_config_file(*args):\n \"\"\"Return specified args from `~/.plotly/.config`. as tuple.\n\n Returns all if no arguments are specified.\n\n Example:\n get_config_file('plotly_domain')\n\n \"\"\"\n if check_file_permissions():\n ensure_local_plotly_files() # make sure what's there is OK\n return utils.load_json_dict(CONFIG_FILE, *args)\n else:\n return FILE_CONTENT[CONFIG_FILE]\n\n\ndef reset_config_file():\n ensure_local_plotly_files() # make sure what's there is OK\n f = open(CONFIG_FILE, 'w')\n f.close()\n ensure_local_plotly_files() # put the defaults back\n\n\n### embed tools ###\n\ndef get_embed(file_owner_or_url, file_id=None, width=\"100%\", height=525):\n \"\"\"Returns HTML code to embed figure on a webpage as an <iframe>\n\n Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.\n Since each file is given a corresponding unique url, you may also simply\n pass a valid plotly url as the first argument.\n\n Note, if you're using a file_owner string as the first argument, you MUST\n specify a `file_id` keyword argument. Else, if you're using a url string\n as the first argument, you MUST NOT specify a `file_id` keyword argument,\n or file_id must be set to Python's None value.\n\n Positional arguments:\n file_owner_or_url (string) -- a valid plotly username OR a valid plotly url\n\n Keyword arguments:\n file_id (default=None) -- an int or string that can be converted to int\n if you're using a url, don't fill this in!\n width (default=\"100%\") -- an int or string corresp. to width of the figure\n height (default=\"525\") -- same as width but corresp. to the height of the\n figure\n\n \"\"\"\n plotly_rest_url = (session.get_session_config().get('plotly_domain') or\n get_config_file()['plotly_domain'])\n if file_id is None: # assume we're using a url\n url = file_owner_or_url\n if url[:len(plotly_rest_url)] != plotly_rest_url:\n raise exceptions.PlotlyError(\n \"Because you didn't supply a 'file_id' in the call, \"\n \"we're assuming you're trying to snag a figure from a url. 
\"\n \"You supplied the url, '{0}', we expected it to start with \"\n \"'{1}'.\"\n \"\\nRun help on this function for more information.\"\n \"\".format(url, plotly_rest_url))\n urlsplit = six.moves.urllib.parse.urlparse(url)\n file_owner = urlsplit.path.split('/')[1].split('~')[1]\n file_id = urlsplit.path.split('/')[2]\n\n # to check for share_key we check urlsplit.query\n query_dict = six.moves.urllib.parse.parse_qs(urlsplit.query)\n if query_dict:\n share_key = query_dict['share_key'][-1]\n else:\n share_key = ''\n else:\n file_owner = file_owner_or_url\n share_key = ''\n try:\n test_if_int = int(file_id)\n except ValueError:\n raise exceptions.PlotlyError(\n \"The 'file_id' argument was not able to be converted into an \"\n \"integer number. Make sure that the positional 'file_id' argument \"\n \"is a number that can be converted into an integer or a string \"\n \"that can be converted into an integer.\"\n )\n if int(file_id) < 0:\n raise exceptions.PlotlyError(\n \"The 'file_id' argument must be a non-negative number.\"\n )\n if share_key is '':\n s = (\"<iframe id=\\\"igraph\\\" scrolling=\\\"no\\\" style=\\\"border:none;\\\" \"\n \"seamless=\\\"seamless\\\" \"\n \"src=\\\"{plotly_rest_url}/\"\n \"~{file_owner}/{file_id}.embed\\\" \"\n \"height=\\\"{iframe_height}\\\" width=\\\"{iframe_width}\\\">\"\n \"</iframe>\").format(\n plotly_rest_url=plotly_rest_url,\n file_owner=file_owner, file_id=file_id,\n iframe_height=height, iframe_width=width)\n else:\n s = (\"<iframe id=\\\"igraph\\\" scrolling=\\\"no\\\" style=\\\"border:none;\\\" \"\n \"seamless=\\\"seamless\\\" \"\n \"src=\\\"{plotly_rest_url}/\"\n \"~{file_owner}/{file_id}.embed?share_key={share_key}\\\" \"\n \"height=\\\"{iframe_height}\\\" width=\\\"{iframe_width}\\\">\"\n \"</iframe>\").format(\n plotly_rest_url=plotly_rest_url,\n file_owner=file_owner, file_id=file_id, share_key=share_key,\n iframe_height=height, iframe_width=width)\n\n return s\n\n\ndef embed(file_owner_or_url, file_id=None, width=\"100%\", height=525):\n \"\"\"Embeds existing Plotly figure in IPython Notebook\n\n Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.\n Since each file is given a corresponding unique url, you may also simply\n pass a valid plotly url as the first argument.\n\n Note, if you're using a file_owner string as the first argument, you MUST\n specify a `file_id` keyword argument. Else, if you're using a url string\n as the first argument, you MUST NOT specify a `file_id` keyword argument,\n or file_id must be set to Python's None value.\n\n Positional arguments:\n file_owner_or_url (string) -- a valid plotly username OR a valid plotly url\n\n Keyword arguments:\n file_id (default=None) -- an int or string that can be converted to int\n if you're using a url, don't fill this in!\n width (default=\"100%\") -- an int or string corresp. to width of the figure\n height (default=\"525\") -- same as width but corresp. 
to the height of the\n figure\n\n \"\"\"\n try:\n s = get_embed(file_owner_or_url, file_id=file_id, width=width,\n height=height)\n\n # see if we are in the SageMath Cloud\n from sage_salvus import html\n return html(s, hide=False)\n except:\n pass\n if _ipython_imported:\n if file_id:\n plotly_domain = (\n session.get_session_config().get('plotly_domain') or\n get_config_file()['plotly_domain']\n )\n url = \"{plotly_domain}/~{un}/{fid}\".format(\n plotly_domain=plotly_domain,\n un=file_owner_or_url,\n fid=file_id)\n else:\n url = file_owner_or_url\n return PlotlyDisplay(url, width, height)\n else:\n if (get_config_defaults()['plotly_domain']\n != session.get_session_config()['plotly_domain']):\n feedback_email = '[email protected]'\n else:\n\n # different domain likely means enterprise\n feedback_email = '[email protected]'\n\n warnings.warn(\n \"Looks like you're not using IPython or Sage to embed this \"\n \"plot. If you just want the *embed code*,\\ntry using \"\n \"`get_embed()` instead.\"\n '\\nQuestions? {}'.format(feedback_email))\n\n\n### mpl-related tools ###\[email protected]_doc(**get_config_file())\ndef mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):\n \"\"\"Convert a matplotlib figure to plotly dictionary and send.\n\n All available information about matplotlib visualizations are stored\n within a matplotlib.figure.Figure object. You can create a plot in python\n using matplotlib, store the figure object, and then pass this object to\n the fig_to_plotly function. In the background, mplexporter is used to\n crawl through the mpl figure object for appropriate information. This\n information is then systematically sent to the PlotlyRenderer which\n creates the JSON structure used to make plotly visualizations. Finally,\n these dictionaries are sent to plotly and your browser should open up a\n new tab for viewing! Optionally, if you're working in IPython, you can\n set notebook=True and the PlotlyRenderer will call plotly.iplot instead\n of plotly.plot to have the graph appear directly in the IPython notebook.\n\n Note, this function gives the user access to a simple, one-line way to\n render an mpl figure in plotly. If you need to trouble shoot, you can do\n this step manually by NOT running this fuction and entereing the following:\n\n ===========================================================================\n from mplexporter import Exporter\n from mplexporter.renderers import PlotlyRenderer\n\n # create an mpl figure and store it under a varialble 'fig'\n\n renderer = PlotlyRenderer()\n exporter = Exporter(renderer)\n exporter.run(fig)\n ===========================================================================\n\n You can then inspect the JSON structures by accessing these:\n\n renderer.layout -- a plotly layout dictionary\n renderer.data -- a list of plotly data dictionaries\n\n Positional arguments:\n fig -- a matplotlib figure object\n username -- a valid plotly username **\n api_key -- a valid api_key for the above username **\n notebook -- an option for use with an IPython notebook\n\n ** Don't have a username/api_key? Try looking here:\n {plotly_domain}/plot\n\n ** Forgot your api_key? 
Try signing in and looking here:\n {plotly_domain}/python/getting-started\n\n \"\"\"\n if _matplotlylib_imported:\n renderer = matplotlylib.PlotlyRenderer()\n matplotlylib.Exporter(renderer).run(fig)\n if resize:\n renderer.resize()\n if strip_style:\n renderer.strip_style()\n if verbose:\n print(renderer.msg)\n return renderer.plotly_fig\n else:\n warnings.warn(\n \"To use Plotly's matplotlylib functionality, you'll need to have \"\n \"matplotlib successfully installed with all of its dependencies. \"\n \"You're getting this error because matplotlib or one of its \"\n \"dependencies doesn't seem to be installed correctly.\")\n\n\n### graph_objs related tools ###\n\ndef get_subplots(rows=1, columns=1, print_grid=False, **kwargs):\n \"\"\"Return a dictionary instance with the subplots set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.get_subplots(rows=2)\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x1', yaxis='y1')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 2:\n # print out string showing the subplot grid you've put in the layout\n fig = tools.get_subplots(rows=3, columns=2, print_grid=True)\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows, evenly spaced vertically on the figure.\n\n columns (kwarg, int greater than 0, default=1):\n Number of columns, evenly spaced horizontally on the figure.\n\n horizontal_spacing (kwarg, float in [0,1], default=0.1):\n Space between subplot columns. Applied to all columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.05):\n Space between subplot rows. Applied to all rows.\n\n print_grid (kwarg, True | False, default=False):\n If True, prints a tab-delimited string representation\n of your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / columns):\n Space between subplot columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n warnings.warn(\n \"tools.get_subplots is depreciated. \"\n \"Please use tools.make_subplots instead.\"\n )\n\n # Throw exception for non-integer rows and columns\n if not isinstance(rows, int) or rows <= 0:\n raise Exception(\"Keyword argument 'rows' \"\n \"must be an int greater than 0\")\n if not isinstance(columns, int) or columns <= 0:\n raise Exception(\"Keyword argument 'columns' \"\n \"must be an int greater than 0\")\n\n # Throw exception if non-valid kwarg is sent\n VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing']\n for key in kwargs.keys():\n if key not in VALID_KWARGS:\n raise Exception(\"Invalid keyword argument: '{0}'\".format(key))\n\n # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. 
rows / columns\n try:\n horizontal_spacing = float(kwargs['horizontal_spacing'])\n except KeyError:\n horizontal_spacing = 0.2 / columns\n try:\n vertical_spacing = float(kwargs['vertical_spacing'])\n except KeyError:\n vertical_spacing = 0.3 / rows\n\n fig = dict(layout=graph_objs.Layout()) # will return this at the end\n plot_width = (1 - horizontal_spacing * (columns - 1)) / columns\n plot_height = (1 - vertical_spacing * (rows - 1)) / rows\n plot_num = 0\n for rrr in range(rows):\n for ccc in range(columns):\n xaxis_name = 'xaxis{0}'.format(plot_num + 1)\n x_anchor = 'y{0}'.format(plot_num + 1)\n x_start = (plot_width + horizontal_spacing) * ccc\n x_end = x_start + plot_width\n\n yaxis_name = 'yaxis{0}'.format(plot_num + 1)\n y_anchor = 'x{0}'.format(plot_num + 1)\n y_start = (plot_height + vertical_spacing) * rrr\n y_end = y_start + plot_height\n\n xaxis = graph_objs.XAxis(domain=[x_start, x_end], anchor=x_anchor)\n fig['layout'][xaxis_name] = xaxis\n yaxis = graph_objs.YAxis(domain=[y_start, y_end], anchor=y_anchor)\n fig['layout'][yaxis_name] = yaxis\n plot_num += 1\n\n if print_grid:\n print(\"This is the format of your plot grid!\")\n grid_string = \"\"\n plot = 1\n for rrr in range(rows):\n grid_line = \"\"\n for ccc in range(columns):\n grid_line += \"[{0}]\\t\".format(plot)\n plot += 1\n grid_string = grid_line + '\\n' + grid_string\n print(grid_string)\n\n return graph_objs.Figure(fig) # forces us to validate what we just did...\n\n\ndef make_subplots(rows=1, cols=1,\n shared_xaxes=False, shared_yaxes=False,\n start_cell='top-left', print_grid=True,\n **kwargs):\n \"\"\"Return an instance of plotly.graph_objs.Figure\n with the subplots domain set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.make_subplots(rows=2)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n # or see Figure.append_trace\n\n Example 2:\n # subplots with shared x axes\n fig = tools.make_subplots(rows=2, shared_xaxes=True)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x1,y2 ]\n\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]\n\n Example 3:\n # irregular subplot layout (more examples below under 'specs')\n fig = tools.make_subplots(rows=2, cols=2,\n specs=[[{}, {}],\n [{'colspan': 2}, None]])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ] [ (1,2) x2,y2 ]\n [ (2,1) x3,y3 - ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]\n\n Example 4:\n # insets\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 5:\n # include subplot titles\n fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 6:\n # Include subplot title on one plot (but not all)\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 
0.7, 'b': 0.3}],\n subplot_titles=('','Inset'))\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows in the subplot grid.\n\n cols (kwarg, int greater than 0, default=1):\n Number of columns in the subplot grid.\n\n shared_xaxes (kwarg, boolean or list, default=False)\n Assign shared x axes.\n If True, subplots in the same grid column have one common\n shared x-axis at the bottom of the gird.\n\n To assign shared x axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared x axis)\n of cell index tuples.\n\n shared_yaxes (kwarg, boolean or list, default=False)\n Assign shared y axes.\n If True, subplots in the same grid row have one common\n shared y-axis on the left-hand side of the gird.\n\n To assign shared y axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared y axis)\n of cell index tuples.\n\n start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')\n Choose the starting cell in the subplot grid used to set the\n domains of the subplots.\n\n print_grid (kwarg, boolean, default=True):\n If True, prints a tab-delimited string representation of\n your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):\n Space between subplot columns.\n Applies to all columns (use 'specs' subplot-dependents spacing)\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n Applies to all rows (use 'specs' subplot-dependents spacing)\n\n subplot_titles (kwarg, list of strings, default=empty list):\n Title of each subplot.\n \"\" can be included in the list if no subplot title is desired in\n that space so that the titles are properly indexed.\n\n specs (kwarg, list of lists of dictionaries):\n Subplot specifications.\n\n ex1: specs=[[{}, {}], [{'colspan': 2}, None]]\n\n ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]\n\n - Indices of the outer list correspond to subplot grid rows\n starting from the bottom. The number of rows in 'specs'\n must be equal to 'rows'.\n\n - Indices of the inner lists correspond to subplot grid columns\n starting from the left. The number of columns in 'specs'\n must be equal to 'cols'.\n\n - Each item in the 'specs' list corresponds to one subplot\n in a subplot grid. (N.B. 
The subplot grid has exactly 'rows'\n times 'cols' cells.)\n\n - Use None for blank a subplot cell (or to move pass a col/row span).\n\n - Note that specs[0][0] has the specs of the 'start_cell' subplot.\n\n - Each item in 'specs' is a dictionary.\n The available keys are:\n\n * is_3d (boolean, default=False): flag for 3d scenes\n * colspan (int, default=1): number of subplot columns\n for this subplot to span.\n * rowspan (int, default=1): number of subplot rows\n for this subplot to span.\n * l (float, default=0.0): padding left of cell\n * r (float, default=0.0): padding right of cell\n * t (float, default=0.0): padding right of cell\n * b (float, default=0.0): padding bottom of cell\n\n - Use 'horizontal_spacing' and 'vertical_spacing' to adjust\n the spacing in between the subplots.\n\n insets (kwarg, list of dictionaries):\n Inset specifications.\n\n - Each item in 'insets' is a dictionary.\n The available keys are:\n\n * cell (tuple, default=(1,1)): (row, col) index of the\n subplot cell to overlay inset axes onto.\n * is_3d (boolean, default=False): flag for 3d scenes\n * l (float, default=0.0): padding left of inset\n in fraction of cell width\n * w (float or 'to_end', default='to_end') inset width\n in fraction of cell width ('to_end': to cell right edge)\n * b (float, default=0.0): padding bottom of inset\n in fraction of cell height\n * h (float or 'to_end', default='to_end') inset height\n in fraction of cell height ('to_end': to cell top edge)\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Throw exception for non-integer rows and cols\n if not isinstance(rows, int) or rows <= 0:\n raise Exception(\"Keyword argument 'rows' \"\n \"must be an int greater than 0\")\n if not isinstance(cols, int) or cols <= 0:\n raise Exception(\"Keyword argument 'cols' \"\n \"must be an int greater than 0\")\n\n # Dictionary of things start_cell\n START_CELL_all = {\n 'bottom-left': {\n # 'natural' setup where x & y domains increase monotonically\n 'col_dir': 1,\n 'row_dir': 1\n },\n 'top-left': {\n # 'default' setup visually matching the 'specs' list of lists\n 'col_dir': 1,\n 'row_dir': -1\n }\n # TODO maybe add 'bottom-right' and 'top-right'\n }\n\n # Throw exception for invalid 'start_cell' values\n try:\n START_CELL = START_CELL_all[start_cell]\n except KeyError:\n raise Exception(\"Invalid 'start_cell' value\")\n\n # Throw exception if non-valid kwarg is sent\n VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing',\n 'specs', 'insets', 'subplot_titles']\n for key in kwargs.keys():\n if key not in VALID_KWARGS:\n raise Exception(\"Invalid keyword argument: '{0}'\".format(key))\n\n # Set 'subplot_titles'\n subplot_titles = kwargs.get('subplot_titles', [\"\"] * rows * cols)\n\n # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. 
rows / cols\n try:\n horizontal_spacing = float(kwargs['horizontal_spacing'])\n except KeyError:\n horizontal_spacing = 0.2 / cols\n try:\n vertical_spacing = float(kwargs['vertical_spacing'])\n except KeyError:\n if 'subplot_titles' in kwargs:\n vertical_spacing = 0.5 / rows\n else:\n vertical_spacing = 0.3 / rows\n\n # Sanitize 'specs' (must be a list of lists)\n exception_msg = \"Keyword argument 'specs' must be a list of lists\"\n try:\n specs = kwargs['specs']\n if not isinstance(specs, list):\n raise Exception(exception_msg)\n else:\n for spec_row in specs:\n if not isinstance(spec_row, list):\n raise Exception(exception_msg)\n except KeyError:\n specs = [[{}\n for c in range(cols)]\n for r in range(rows)] # default 'specs'\n\n # Throw exception if specs is over or under specified\n if len(specs) != rows:\n raise Exception(\"The number of rows in 'specs' \"\n \"must be equal to 'rows'\")\n for r, spec_row in enumerate(specs):\n if len(spec_row) != cols:\n raise Exception(\"The number of columns in 'specs' \"\n \"must be equal to 'cols'\")\n\n # Sanitize 'insets'\n try:\n insets = kwargs['insets']\n if not isinstance(insets, list):\n raise Exception(\"Keyword argument 'insets' must be a list\")\n except KeyError:\n insets = False\n\n # Throw exception if non-valid key / fill in defaults\n def _check_keys_and_fill(name, arg, defaults):\n def _checks(item, defaults):\n if item is None:\n return\n if not isinstance(item, dict):\n raise Exception(\"Items in keyword argument '{name}' must be \"\n \"dictionaries or None\".format(name=name))\n for k in item.keys():\n if k not in defaults.keys():\n raise Exception(\"Invalid key '{k}' in keyword \"\n \"argument '{name}'\".format(k=k, name=name))\n for k in defaults.keys():\n if k not in item.keys():\n item[k] = defaults[k]\n for arg_i in arg:\n if isinstance(arg_i, list):\n for arg_ii in arg_i:\n _checks(arg_ii, defaults)\n elif isinstance(arg_i, dict):\n _checks(arg_i, defaults)\n\n # Default spec key-values\n SPEC_defaults = dict(\n is_3d=False,\n colspan=1,\n rowspan=1,\n l=0.0,\n r=0.0,\n b=0.0,\n t=0.0\n # TODO add support for 'w' and 'h'\n )\n _check_keys_and_fill('specs', specs, SPEC_defaults)\n\n # Default inset key-values\n if insets:\n INSET_defaults = dict(\n cell=(1, 1),\n is_3d=False,\n l=0.0,\n w='to_end',\n b=0.0,\n h='to_end'\n )\n _check_keys_and_fill('insets', insets, INSET_defaults)\n\n # Set width & height of each subplot cell (excluding padding)\n width = (1. - horizontal_spacing * (cols - 1)) / cols\n height = (1. 
- vertical_spacing * (rows - 1)) / rows\n\n # Built row/col sequence using 'row_dir' and 'col_dir'\n COL_DIR = START_CELL['col_dir']\n ROW_DIR = START_CELL['row_dir']\n col_seq = range(cols)[::COL_DIR]\n row_seq = range(rows)[::ROW_DIR]\n\n # [grid] Build subplot grid (coord tuple of cell)\n grid = [[((width + horizontal_spacing) * c,\n (height + vertical_spacing) * r)\n for c in col_seq]\n for r in row_seq]\n\n # [grid_ref] Initialize the grid and insets' axis-reference lists\n grid_ref = [[None for c in range(cols)] for r in range(rows)]\n insets_ref = [None for inset in range(len(insets))] if insets else None\n\n layout = graph_objs.Layout() # init layout object\n\n # Function handling logic around 2d axis labels\n # Returns 'x{}' | 'y{}'\n def _get_label(x_or_y, r, c, cnt, shared_axes):\n # Default label (given strictly by cnt)\n label = \"{x_or_y}{cnt}\".format(x_or_y=x_or_y, cnt=cnt)\n\n if isinstance(shared_axes, bool):\n if shared_axes:\n if x_or_y == 'x':\n label = \"{x_or_y}{c}\".format(x_or_y=x_or_y, c=c + 1)\n if x_or_y == 'y':\n label = \"{x_or_y}{r}\".format(x_or_y=x_or_y, r=r + 1)\n\n if isinstance(shared_axes, list):\n if isinstance(shared_axes[0], tuple):\n shared_axes = [shared_axes] # TODO put this elsewhere\n for shared_axis in shared_axes:\n if (r + 1, c + 1) in shared_axis:\n label = {\n 'x': \"x{0}\".format(shared_axis[0][1]),\n 'y': \"y{0}\".format(shared_axis[0][0])\n }[x_or_y]\n\n return label\n\n # Row in grid of anchor row if shared_xaxes=True\n ANCHOR_ROW = 0 if ROW_DIR > 0 else rows - 1\n\n # Function handling logic around 2d axis anchors\n # Return 'x{}' | 'y{}' | 'free' | False\n def _get_anchors(r, c, x_cnt, y_cnt, shared_xaxes, shared_yaxes):\n # Default anchors (give strictly by cnt)\n x_anchor = \"y{y_cnt}\".format(y_cnt=y_cnt)\n y_anchor = \"x{x_cnt}\".format(x_cnt=x_cnt)\n\n if isinstance(shared_xaxes, bool):\n if shared_xaxes:\n if r != ANCHOR_ROW:\n x_anchor = False\n y_anchor = 'free'\n if shared_yaxes and c != 0: # TODO covers all cases?\n y_anchor = False\n return x_anchor, y_anchor\n\n elif isinstance(shared_xaxes, list):\n if isinstance(shared_xaxes[0], tuple):\n shared_xaxes = [shared_xaxes] # TODO put this elsewhere\n for shared_xaxis in shared_xaxes:\n if (r + 1, c + 1) in shared_xaxis[1:]:\n x_anchor = False\n y_anchor = 'free' # TODO covers all cases?\n\n if isinstance(shared_yaxes, bool):\n if shared_yaxes:\n if c != 0:\n y_anchor = False\n x_anchor = 'free'\n if shared_xaxes and r != ANCHOR_ROW: # TODO all cases?\n x_anchor = False\n return x_anchor, y_anchor\n\n elif isinstance(shared_yaxes, list):\n if isinstance(shared_yaxes[0], tuple):\n shared_yaxes = [shared_yaxes] # TODO put this elsewhere\n for shared_yaxis in shared_yaxes:\n if (r + 1, c + 1) in shared_yaxis[1:]:\n y_anchor = False\n x_anchor = 'free' # TODO covers all cases?\n\n return x_anchor, y_anchor\n\n list_of_domains = [] # added for subplot titles\n\n # Function pasting x/y domains in layout object (2d case)\n def _add_domain(layout, x_or_y, label, domain, anchor, position):\n name = label[0] + 'axis' + label[1:]\n graph_obj = '{X_or_Y}Axis'.format(X_or_Y=x_or_y.upper())\n axis = getattr(graph_objs, graph_obj)(domain=domain)\n if anchor:\n axis['anchor'] = anchor\n if isinstance(position, float):\n axis['position'] = position\n layout[name] = axis\n list_of_domains.append(domain) # added for subplot titles\n\n # Function pasting x/y domains in layout object (3d case)\n def _add_domain_is_3d(layout, s_label, x_domain, y_domain):\n scene = graph_objs.Scene(domain={'x': 
x_domain, 'y': y_domain})\n layout[s_label] = scene\n\n x_cnt = y_cnt = s_cnt = 1 # subplot axis/scene counters\n\n # Loop through specs -- (r, c) <-> (row, col)\n for r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n\n if spec is None: # skip over None cells\n continue\n\n c_spanned = c + spec['colspan'] - 1 # get spanned c\n r_spanned = r + spec['rowspan'] - 1 # get spanned r\n\n # Throw exception if 'colspan' | 'rowspan' is too large for grid\n if c_spanned >= cols:\n raise Exception(\"Some 'colspan' value is too large for \"\n \"this subplot grid.\")\n if r_spanned >= rows:\n raise Exception(\"Some 'rowspan' value is too large for \"\n \"this subplot grid.\")\n\n # Get x domain using grid and colspan\n x_s = grid[r][c][0] + spec['l']\n x_e = grid[r][c_spanned][0] + width - spec['r']\n x_domain = [x_s, x_e]\n\n # Get y domain (dep. on row_dir) using grid & r_spanned\n if ROW_DIR > 0:\n y_s = grid[r][c][1] + spec['b']\n y_e = grid[r_spanned][c][1] + height - spec['t']\n else:\n y_s = grid[r_spanned][c][1] + spec['b']\n y_e = grid[r][c][1] + height - spec['t']\n y_domain = [y_s, y_e]\n\n if spec['is_3d']:\n\n # Add scene to layout\n s_label = 'scene{0}'.format(s_cnt)\n _add_domain_is_3d(layout, s_label, x_domain, y_domain)\n grid_ref[r][c] = (s_label, )\n s_cnt += 1\n\n else:\n\n # Get axis label and anchor\n x_label = _get_label('x', r, c, x_cnt, shared_xaxes)\n y_label = _get_label('y', r, c, y_cnt, shared_yaxes)\n x_anchor, y_anchor = _get_anchors(r, c,\n x_cnt, y_cnt,\n shared_xaxes,\n shared_yaxes)\n\n # Add a xaxis to layout (N.B anchor == False -> no axis)\n if x_anchor:\n if x_anchor == 'free':\n x_position = y_domain[0]\n else:\n x_position = False\n _add_domain(layout, 'x', x_label, x_domain,\n x_anchor, x_position)\n x_cnt += 1\n\n # Add a yaxis to layout (N.B anchor == False -> no axis)\n if y_anchor:\n if y_anchor == 'free':\n y_position = x_domain[0]\n else:\n y_position = False\n _add_domain(layout, 'y', y_label, y_domain,\n y_anchor, y_position)\n y_cnt += 1\n\n grid_ref[r][c] = (x_label, y_label) # fill in ref\n\n # Loop through insets\n if insets:\n for i_inset, inset in enumerate(insets):\n\n r = inset['cell'][0] - 1\n c = inset['cell'][1] - 1\n\n # Throw exception if r | c is out of range\n if not (0 <= r < rows):\n raise Exception(\"Some 'cell' row value is out of range. \"\n \"Note: the starting cell is (1, 1)\")\n if not (0 <= c < cols):\n raise Exception(\"Some 'cell' col value is out of range. 
\"\n \"Note: the starting cell is (1, 1)\")\n\n # Get inset x domain using grid\n x_s = grid[r][c][0] + inset['l'] * width\n if inset['w'] == 'to_end':\n x_e = grid[r][c][0] + width\n else:\n x_e = x_s + inset['w'] * width\n x_domain = [x_s, x_e]\n\n # Get inset y domain using grid\n y_s = grid[r][c][1] + inset['b'] * height\n if inset['h'] == 'to_end':\n y_e = grid[r][c][1] + height\n else:\n y_e = y_s + inset['h'] * height\n y_domain = [y_s, y_e]\n\n if inset['is_3d']:\n\n # Add scene to layout\n s_label = 'scene{0}'.format(s_cnt)\n _add_domain_is_3d(layout, s_label, x_domain, y_domain)\n insets_ref[i_inset] = (s_label, )\n s_cnt += 1\n\n else:\n\n # Get axis label and anchor\n x_label = _get_label('x', False, False, x_cnt, False)\n y_label = _get_label('y', False, False, y_cnt, False)\n x_anchor, y_anchor = _get_anchors(r, c,\n x_cnt, y_cnt,\n False, False)\n\n # Add a xaxis to layout (N.B insets always have anchors)\n _add_domain(layout, 'x', x_label, x_domain, x_anchor, False)\n x_cnt += 1\n\n # Add a yayis to layout (N.B insets always have anchors)\n _add_domain(layout, 'y', y_label, y_domain, y_anchor, False)\n y_cnt += 1\n\n insets_ref[i_inset] = (x_label, y_label) # fill in ref\n\n # [grid_str] Set the grid's string representation\n sp = \" \" # space between cell\n s_str = \"[ \" # cell start string\n e_str = \" ]\" # cell end string\n colspan_str = ' -' # colspan string\n rowspan_str = ' |' # rowspan string\n empty_str = ' (empty) ' # empty cell string\n\n # Init grid_str with intro message\n grid_str = \"This is the format of your plot grid:\\n\"\n\n # Init tmp list of lists of strings (sorta like 'grid_ref' but w/ strings)\n _tmp = [['' for c in range(cols)] for r in range(rows)]\n\n # Define cell string as function of (r, c) and grid_ref\n def _get_cell_str(r, c, ref):\n return '({r},{c}) {ref}'.format(r=r + 1, c=c + 1, ref=','.join(ref))\n\n # Find max len of _cell_str, add define a padding function\n cell_len = max([len(_get_cell_str(r, c, ref))\n for r, row_ref in enumerate(grid_ref)\n for c, ref in enumerate(row_ref)\n if ref]) + len(s_str) + len(e_str)\n\n def _pad(s, cell_len=cell_len):\n return ' ' * (cell_len - len(s))\n\n # Loop through specs, fill in _tmp\n for r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n\n ref = grid_ref[r][c]\n if ref is None:\n if _tmp[r][c] == '':\n _tmp[r][c] = empty_str + _pad(empty_str)\n continue\n\n cell_str = s_str + _get_cell_str(r, c, ref)\n\n if spec['colspan'] > 1:\n for cc in range(1, spec['colspan'] - 1):\n _tmp[r][c + cc] = colspan_str + _pad(colspan_str)\n _tmp[r][c + spec['colspan'] - 1] = (\n colspan_str + _pad(colspan_str + e_str)) + e_str\n else:\n cell_str += e_str\n\n if spec['rowspan'] > 1:\n for rr in range(1, spec['rowspan'] - 1):\n _tmp[r + rr][c] = rowspan_str + _pad(rowspan_str)\n for cc in range(spec['colspan']):\n _tmp[r + spec['rowspan'] - 1][c + cc] = (\n rowspan_str + _pad(rowspan_str))\n\n _tmp[r][c] = cell_str + _pad(cell_str)\n\n # Append grid_str using data from _tmp in the correct order\n for r in row_seq[::-1]:\n grid_str += sp.join(_tmp[r]) + '\\n'\n\n # Append grid_str to include insets info\n if insets:\n grid_str += \"\\nWith insets:\\n\"\n for i_inset, inset in enumerate(insets):\n\n r = inset['cell'][0] - 1\n c = inset['cell'][1] - 1\n ref = grid_ref[r][c]\n\n grid_str += (\n s_str + ','.join(insets_ref[i_inset]) + e_str +\n ' over ' +\n s_str + _get_cell_str(r, c, ref) + e_str + '\\n'\n )\n\n # Add subplot titles\n\n # If shared_axes is False (default) use list_of_domains\n 
# This is used for insets and irregular layouts\n if not shared_xaxes and not shared_yaxes:\n x_dom = list_of_domains[::2]\n y_dom = list_of_domains[1::2]\n subtitle_pos_x = []\n subtitle_pos_y = []\n for x_domains in x_dom:\n subtitle_pos_x.append(sum(x_domains) / 2)\n for y_domains in y_dom:\n subtitle_pos_y.append(y_domains[1])\n # If shared_axes is True the domin of each subplot is not returned so the\n # title position must be calculated for each subplot\n else:\n subtitle_pos_x = [None] * cols\n subtitle_pos_y = [None] * rows\n delt_x = (x_e - x_s)\n for index in range(cols):\n subtitle_pos_x[index] = ((delt_x / 2) +\n ((delt_x + horizontal_spacing) * index))\n subtitle_pos_x *= rows\n for index in range(rows):\n subtitle_pos_y[index] = (1 - ((y_e + vertical_spacing) * index))\n subtitle_pos_y *= cols\n subtitle_pos_y = sorted(subtitle_pos_y, reverse=True)\n\n plot_titles = []\n for index in range(len(subplot_titles)):\n if not subplot_titles[index]:\n pass\n else:\n plot_titles.append({'y': subtitle_pos_y[index],\n 'xref': 'paper',\n 'x': subtitle_pos_x[index],\n 'yref': 'paper',\n 'text': subplot_titles[index],\n 'showarrow': False,\n 'font': graph_objs.Font(size=16),\n 'xanchor': 'center',\n 'yanchor': 'bottom'\n })\n\n layout['annotations'] = plot_titles\n\n if print_grid:\n print(grid_str)\n\n fig = graph_objs.Figure(layout=layout)\n\n fig.__dict__['_grid_ref'] = grid_ref\n fig.__dict__['_grid_str'] = grid_str\n\n return fig\n\n\ndef get_valid_graph_obj(obj, obj_type=None):\n \"\"\"Returns a new graph object that won't raise.\n\n CAREFUL: this will *silently* strip out invalid pieces of the object.\n\n \"\"\"\n # TODO: Deprecate or move. #283\n from plotly.graph_objs import graph_objs\n try:\n cls = getattr(graph_objs, obj_type)\n except (AttributeError, KeyError):\n raise exceptions.PlotlyError(\n \"'{}' is not a recognized graph_obj.\".format(obj_type)\n )\n return cls(obj, _raise=False)\n\n\ndef validate(obj, obj_type):\n \"\"\"Validate a dictionary, list, or graph object as 'obj_type'.\n\n This will not alter the 'obj' referenced in the call signature. It will\n raise an error if the 'obj' reference could not be instantiated as a\n valid 'obj_type' graph object.\n\n \"\"\"\n # TODO: Deprecate or move. #283\n from plotly.graph_objs import graph_objs\n\n if obj_type not in graph_reference.CLASSES:\n obj_type = graph_reference.string_to_class_name(obj_type)\n\n try:\n cls = getattr(graph_objs, obj_type)\n except AttributeError:\n raise exceptions.PlotlyError(\n \"'{0}' is not a recognizable graph_obj.\".\n format(obj_type))\n cls(obj) # this will raise on invalid keys/items\n\n\ndef _replace_newline(obj):\n \"\"\"Replaces '\\n' with '<br>' for all strings in a collection.\"\"\"\n if isinstance(obj, dict):\n d = dict()\n for key, val in list(obj.items()):\n d[key] = _replace_newline(val)\n return d\n elif isinstance(obj, list):\n l = list()\n for index, entry in enumerate(obj):\n l += [_replace_newline(entry)]\n return l\n elif isinstance(obj, six.string_types):\n s = obj.replace('\\n', '<br>')\n if s != obj:\n warnings.warn(\"Looks like you used a newline character: '\\\\n'.\\n\\n\"\n \"Plotly uses a subset of HTML escape characters\\n\"\n \"to do things like newline (<br>), bold (<b></b>),\\n\"\n \"italics (<i></i>), etc. Your newline characters \\n\"\n \"have been converted to '<br>' so they will show \\n\"\n \"up right on your Plotly figure!\")\n return s\n else:\n return obj # we return the actual reference... 
but DON'T mutate.\n\n\nif _ipython_imported:\n class PlotlyDisplay(IPython.core.display.HTML):\n \"\"\"An IPython display object for use with plotly urls\n\n PlotlyDisplay objects should be instantiated with a url for a plot.\n IPython will *choose* the proper display representation from any\n Python object, and using provided methods if they exist. By defining\n the following, if an HTML display is unusable, the PlotlyDisplay\n object can provide alternate representations.\n\n \"\"\"\n def __init__(self, url, width, height):\n self.resource = url\n self.embed_code = get_embed(url, width=width, height=height)\n super(PlotlyDisplay, self).__init__(data=self.embed_code)\n\n def _repr_html_(self):\n return self.embed_code\n\n\ndef return_figure_from_figure_or_data(figure_or_data, validate_figure):\n from plotly.graph_objs import graph_objs\n if isinstance(figure_or_data, dict):\n figure = figure_or_data\n elif isinstance(figure_or_data, list):\n figure = {'data': figure_or_data}\n else:\n raise exceptions.PlotlyError(\"The `figure_or_data` positional \"\n \"argument must be either \"\n \"`dict`-like or `list`-like.\")\n if validate_figure:\n\n try:\n graph_objs.Figure(figure)\n except exceptions.PlotlyError as err:\n raise exceptions.PlotlyError(\"Invalid 'figure_or_data' argument. \"\n \"Plotly will not be able to properly \"\n \"parse the resulting JSON. If you \"\n \"want to send this 'figure_or_data' \"\n \"to Plotly anyway (not recommended), \"\n \"you can set 'validate=False' as a \"\n \"plot option.\\nHere's why you're \"\n \"seeing this error:\\n\\n{0}\"\n \"\".format(err))\n if not figure['data']:\n raise exceptions.PlotlyEmptyDataError(\n \"Empty data list found. Make sure that you populated the \"\n \"list of data objects you're sending and try again.\\n\"\n \"Questions? [email protected]\"\n )\n\n return figure\n\n# Default colours for finance charts\n_DEFAULT_INCREASING_COLOR = '#3D9970' # http://clrs.cc\n_DEFAULT_DECREASING_COLOR = '#FF4136'\n\nDIAG_CHOICES = ['scatter', 'histogram', 'box']\nVALID_COLORMAP_TYPES = ['cat', 'seq']\n\n\nclass FigureFactory(object):\n \"\"\"\n BETA functions to create specific chart types.\n\n This is beta as in: subject to change in a backwards incompatible way\n without notice.\n\n Supported chart types include candlestick, open high low close, quiver,\n streamline, distplot, dendrogram, annotated heatmap, and tables. See\n FigureFactory.create_candlestick, FigureFactory.create_ohlc,\n FigureFactory.create_quiver, FigureFactory.create_streamline,\n FigureFactory.create_distplot, FigureFactory.create_dendrogram,\n FigureFactory.create_annotated_heatmap, or FigureFactory.create_table for\n more information and examples of a specific chart type.\n \"\"\"\n\n @staticmethod\n def _make_colorscale(colors, scale=None):\n \"\"\"\n Makes a colorscale from a list of colors and scale\n\n Takes a list of colors and scales and constructs a colorscale based\n on the colors in sequential order. If 'scale' is left empty, a linear-\n interpolated colorscale will be generated. 
If 'scale' is a specified\n list, it must be the same length as colors and must contain all floats.\n For documentation regarding the form of the output, see\n https://plot.ly/python/reference/#mesh3d-colorscale\n \"\"\"\n colorscale = []\n\n if not scale:\n for j, color in enumerate(colors):\n colorscale.append([j * 1./(len(colors) - 1), color])\n return colorscale\n\n else:\n colorscale = [list(tup) for tup in zip(scale, colors)]\n return colorscale\n\n @staticmethod\n def _convert_colorscale_to_rgb(colorscale):\n \"\"\"\n Converts the colors in a colorscale to rgb colors\n\n A colorscale is an array of arrays, each with a numeric value as the\n first item and a color as the second. This function converts a\n colorscale with tuple colors (each coordinate between 0 and 1) into\n a colorscale with the colors transformed into rgb colors\n \"\"\"\n for color in colorscale:\n color[1] = FigureFactory._convert_to_RGB_255(\n color[1]\n )\n\n for color in colorscale:\n color[1] = FigureFactory._label_rgb(\n color[1]\n )\n return colorscale\n\n @staticmethod\n def _make_linear_colorscale(colors):\n \"\"\"\n Makes a list of colors into a colorscale-acceptable form\n\n For documentation regarding the form of the output, see\n https://plot.ly/python/reference/#mesh3d-colorscale\n \"\"\"\n scale = 1./(len(colors) - 1)\n return [[i * scale, color] for i, color in enumerate(colors)]\n\n @staticmethod\n def create_2D_density(x, y, colorscale='Earth', ncontours=20,\n hist_color=(0, 0, 0.5), point_color=(0, 0, 0.5),\n point_size=2, title='2D Density Plot',\n height=600, width=600):\n \"\"\"\n Returns figure for a 2D density plot\n\n :param (list|array) x: x-axis data for plot generation\n :param (list|array) y: y-axis data for plot generation\n :param (str|tuple|list) colorscale: either a plotly scale name, an rgb\n or hex color, a color tuple or a list or tuple of colors. An rgb\n color is of the form 'rgb(x, y, z)' where x, y, z belong to the\n interval [0, 255] and a color tuple is a tuple of the form\n (a, b, c) where a, b and c belong to [0, 1]. 
If colormap is a\n list, it must contain the valid color types aforementioned as its\n members.\n :param (int) ncontours: the number of 2D contours to draw on the plot\n :param (str) hist_color: the color of the plotted histograms\n :param (str) point_color: the color of the scatter points\n :param (str) point_size: the color of the scatter points\n :param (str) title: set the title for the plot\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple 2D Density Plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n # Make data points\n t = np.linspace(-1,1.2,2000)\n x = (t**3)+(0.3*np.random.randn(2000))\n y = (t**6)+(0.3*np.random.randn(2000))\n\n # Create a figure\n fig = FF.create_2D_density(x, y)\n\n # Plot the data\n py.iplot(fig, filename='simple-2d-density')\n ```\n\n Example 2: Using Parameters\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n # Make data points\n t = np.linspace(-1,1.2,2000)\n x = (t**3)+(0.3*np.random.randn(2000))\n y = (t**6)+(0.3*np.random.randn(2000))\n\n # Create custom colorscale\n colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)',\n (1, 1, 0.2), (0.98,0.98,0.98)]\n\n # Create a figure\n fig = FF.create_2D_density(\n x, y, colorscale=colorscale,\n hist_color='rgb(255, 237, 222)', point_size=3)\n\n # Plot the data\n py.iplot(fig, filename='use-parameters')\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n from numbers import Number\n\n # validate x and y are filled with numbers only\n for array in [x, y]:\n if not all(isinstance(element, Number) for element in array):\n raise exceptions.PlotlyError(\n \"All elements of your 'x' and 'y' lists must be numbers.\"\n )\n\n # validate x and y are the same length\n if len(x) != len(y):\n raise exceptions.PlotlyError(\n \"Both lists 'x' and 'y' must be the same length.\"\n )\n\n colorscale = FigureFactory._validate_colors(colorscale, 'rgb')\n colorscale = FigureFactory._make_linear_colorscale(colorscale)\n\n # validate hist_color and point_color\n hist_color = FigureFactory._validate_colors(hist_color, 'rgb')\n point_color = FigureFactory._validate_colors(point_color, 'rgb')\n\n trace1 = graph_objs.Scatter(\n x=x, y=y, mode='markers', name='points',\n marker=dict(\n color=point_color[0],\n size=point_size,\n opacity=0.4\n )\n )\n trace2 = graph_objs.Histogram2dcontour(\n x=x, y=y, name='density', ncontours=ncontours,\n colorscale=colorscale, reversescale=True, showscale=False\n )\n trace3 = graph_objs.Histogram(\n x=x, name='x density',\n marker=dict(color=hist_color[0]), yaxis='y2'\n )\n trace4 = graph_objs.Histogram(\n y=y, name='y density',\n marker=dict(color=hist_color[0]), xaxis='x2'\n )\n data = [trace1, trace2, trace3, trace4]\n\n layout = graph_objs.Layout(\n showlegend=False,\n autosize=False,\n title=title,\n height=height,\n width=width,\n xaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n yaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n margin=dict(\n t=50\n ),\n hovermode='closest',\n bargap=0,\n xaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n ),\n yaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n )\n )\n\n fig = graph_objs.Figure(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _validate_gantt(df):\n \"\"\"\n Validates the inputted dataframe or list\n \"\"\"\n if _pandas_imported and isinstance(df, 
pd.core.frame.DataFrame):\n # validate that df has all the required keys\n for key in REQUIRED_GANTT_KEYS:\n if key not in df:\n raise exceptions.PlotlyError(\n \"The columns in your dataframe must include the \"\n \"keys\".format(REQUIRED_GANTT_KEYS)\n )\n\n num_of_rows = len(df.index)\n chart = []\n for index in range(num_of_rows):\n task_dict = {}\n for key in df:\n task_dict[key] = df.ix[index][key]\n chart.append(task_dict)\n\n return chart\n\n # validate if df is a list\n if not isinstance(df, list):\n raise exceptions.PlotlyError(\"You must input either a dataframe \"\n \"or a list of dictionaries.\")\n\n # validate if df is empty\n if len(df) <= 0:\n raise exceptions.PlotlyError(\"Your list is empty. It must contain \"\n \"at least one dictionary.\")\n if not isinstance(df[0], dict):\n raise exceptions.PlotlyError(\"Your list must only \"\n \"include dictionaries.\")\n return df\n\n @staticmethod\n def _gantt(chart, colors, title, bar_width, showgrid_x, showgrid_y,\n height, width, tasks=None, task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n color_index = 0\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n # check if colors need to be looped\n if color_index >= len(colors):\n color_index = 0\n tasks[index]['fillcolor'] = colors[color_index]\n # Add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n color_index += 1\n\n layout = dict(\n title=title,\n showlegend=False,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _gantt_colorscale(chart, colors, title, index_col, show_colorbar,\n bar_width, showgrid_x, showgrid_y, height,\n width, tasks=None, task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n from numbers import Number\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n 
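# Descriptive note on the block below: the template describes the 'rect'\n # layout shapes that draw the Gantt bars; each task becomes one rectangle\n # spanning its Start/Finish dates on x and a band of half-width\n # 'bar_width' around its row index on y.\n 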
shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n # compute the color for task based on indexing column\n if isinstance(chart[0][index_col], Number):\n # check that colors has at least 2 colors\n if len(colors) < 2:\n raise exceptions.PlotlyError(\n \"You must use at least 2 colors in 'colors' if you \"\n \"are using a colorscale. However only the first two \"\n \"colors given will be used for the lower and upper \"\n \"bounds on the colormap.\"\n )\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n # unlabel color\n colors = FigureFactory._color_parser(\n colors, FigureFactory._unlabel_rgb\n )\n lowcolor = colors[0]\n highcolor = colors[1]\n\n intermed = (chart[index][index_col])/100.0\n intermed_color = FigureFactory._find_intermediate_color(\n lowcolor, highcolor, intermed\n )\n intermed_color = FigureFactory._color_parser(\n intermed_color, FigureFactory._label_rgb\n )\n tasks[index]['fillcolor'] = intermed_color\n # relabel colors with 'rgb'\n colors = FigureFactory._color_parser(\n colors, FigureFactory._label_rgb\n )\n\n # add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data for colorscale visibility\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[index, index],\n name='',\n marker={'color': 'white',\n 'colorscale': [[0, colors[0]], [1, colors[1]]],\n 'showscale': True,\n 'cmax': 100,\n 'cmin': 0}\n )\n )\n\n if isinstance(chart[0][index_col], str):\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n if len(colors) < len(index_vals):\n raise exceptions.PlotlyError(\n \"Error. 
The number of colors in 'colors' must be no less \"\n \"than the number of unique index values in your group \"\n \"column.\"\n )\n\n # make a dictionary assignment to each index value\n index_vals_dict = {}\n # define color index\n c_index = 0\n for key in index_vals:\n if c_index > len(colors) - 1:\n c_index = 0\n index_vals_dict[key] = colors[c_index]\n c_index += 1\n\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n tasks[index]['fillcolor'] = index_vals_dict[\n chart[index][index_col]\n ]\n\n # add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data to generate legend\n showlegend = True\n for k, index_value in enumerate(index_vals):\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[k, k],\n showlegend=True,\n name=str(index_value),\n hoverinfo='none',\n marker=dict(\n color=colors[k],\n size=1\n )\n )\n )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _gantt_dict(chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width, tasks=None,\n task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # verify each value in index column appears in colors dictionary\n for key in index_vals:\n if key not in colors:\n raise exceptions.PlotlyError(\n \"If you are using colors as a dictionary, all of its \"\n \"keys must be all the values in the index column.\"\n )\n\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n tasks[index]['fillcolor'] = colors[chart[index][index_col]]\n\n # add a line for hover text and autorange\n data.append(\n dict(\n 
x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data to generate legend\n showlegend = True\n for k, index_value in enumerate(index_vals):\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[k, k],\n showlegend=True,\n hoverinfo='none',\n name=str(index_value),\n marker=dict(\n color=colors[index_value],\n size=1\n )\n )\n )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def create_gantt(df, colors=None, index_col=None, show_colorbar=False,\n reverse_colors=False, title='Gantt Chart',\n bar_width=0.2, showgrid_x=False, showgrid_y=False,\n height=600, width=900, tasks=None,\n task_names=None, data=None):\n \"\"\"\n Returns figure for a gantt chart\n\n :param (array|list) df: input data for gantt chart. Must be either a\n a dataframe or a list. If dataframe, the columns must include\n 'Task', 'Start' and 'Finish'. Other columns can be included and\n used for indexing. If a list, its elements must be dictionaries\n with the same required column headers: 'Task', 'Start' and\n 'Finish'.\n :param (str|list|dict|tuple) colors: either a plotly scale name, an\n rgb or hex color, a color tuple or a list of colors. An rgb color\n is of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colors is a list, it must\n contain the valid color types aforementioned as its members.\n If a dictionary, all values of the indexing column must be keys in\n colors.\n :param (str|float) index_col: the column header (if df is a data\n frame) that will function as the indexing column. 
If df is a list,\n index_col must be one of the keys in all the items of df.\n :param (bool) show_colorbar: determines if colorbar will be visible.\n Only applies if values in the index column are numeric.\n :param (bool) reverse_colors: reverses the order of selected colors\n :param (str) title: the title of the chart\n :param (float) bar_width: the width of the horizontal bars in the plot\n :param (bool) showgrid_x: show/hide the x-axis grid\n :param (bool) showgrid_y: show/hide the y-axis grid\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple Gantt Chart\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01', Finish='2009-02-30'),\n dict(Task=\"Job B\", Start='2009-03-05', Finish='2009-04-15'),\n dict(Task=\"Job C\", Start='2009-02-20', Finish='2009-05-30')]\n\n # Create a figure\n fig = FF.create_gantt(df)\n\n # Plot the data\n py.iplot(fig, filename='Simple Gantt Chart', world_readable=True)\n ```\n\n Example 2: Index by Column with Numerical Entries\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Complete=10),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Complete=60),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Complete=95)]\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors='Blues', index_col='Complete',\n show_colorbar=True, bar_width=0.5,\n showgrid_x=True, showgrid_y=True)\n\n # Plot the data\n py.iplot(fig, filename='Numerical Entries', world_readable=True)\n ```\n\n Example 3: Index by Column with String Entries\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Resource='Apple'),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Resource='Grape'),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Resource='Banana')]\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors=['rgb(200, 50, 25)',\n (1, 0, 1),\n '#6c4774'],\n index_col='Resource',\n reverse_colors=True,\n show_colorbar=True)\n\n # Plot the data\n py.iplot(fig, filename='String Entries', world_readable=True)\n ```\n\n Example 4: Use a dictionary for colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Resource='Apple'),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Resource='Grape'),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Resource='Banana')]\n\n # Make a dictionary of colors\n colors = {'Apple': 'rgb(255, 0, 0)',\n 'Grape': 'rgb(170, 14, 200)',\n 'Banana': (1, 1, 0.2)}\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors=colors,\n index_col='Resource',\n show_colorbar=True)\n\n # Plot the data\n py.iplot(fig, filename='dictioanry colors', world_readable=True)\n ```\n\n Example 5: Use a pandas dataframe\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import pandas as pd\n\n # Make data as a dataframe\n df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],\n ['Fast', '2011-01-01', '2012-06-05', 55],\n ['Eat', '2012-01-05', 
'2013-07-05', 94]],\n columns=['Task', 'Start', 'Finish', 'Complete'])\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors='Blues', index_col='Complete',\n show_colorbar=True, bar_width=0.5,\n showgrid_x=True, showgrid_y=True)\n\n # Plot the data\n py.iplot(fig, filename='data with dataframe', world_readable=True)\n ```\n \"\"\"\n # validate gantt input data\n chart = FigureFactory._validate_gantt(df)\n\n if index_col:\n if index_col not in chart[0]:\n raise exceptions.PlotlyError(\n \"In order to use an indexing column and assign colors to \"\n \"the values of the index, you must choose an actual \"\n \"column name in the dataframe or key if a list of \"\n \"dictionaries is being used.\")\n\n # validate gantt index column\n index_list = []\n for dictionary in chart:\n index_list.append(dictionary[index_col])\n FigureFactory._validate_index(index_list)\n\n # Validate colors\n if isinstance(colors, dict):\n colors = FigureFactory._validate_colors_dict(colors, 'rgb')\n else:\n colors = FigureFactory._validate_colors(colors, 'rgb')\n\n if reverse_colors is True:\n colors.reverse()\n\n if not index_col:\n if isinstance(colors, dict):\n raise exceptions.PlotlyError(\n \"Error. You have set colors to a dictionary but have not \"\n \"picked an index. An index is required if you are \"\n \"assigning colors to particular values in a dictioanry.\"\n )\n fig = FigureFactory._gantt(\n chart, colors, title, bar_width, showgrid_x, showgrid_y,\n height, width, tasks=None, task_names=None, data=None\n )\n return fig\n else:\n if not isinstance(colors, dict):\n fig = FigureFactory._gantt_colorscale(\n chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width,\n tasks=None, task_names=None, data=None\n )\n return fig\n else:\n fig = FigureFactory._gantt_dict(\n chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width,\n tasks=None, task_names=None, data=None\n )\n return fig\n\n @staticmethod\n def _validate_colors(colors, colortype='tuple'):\n \"\"\"\n Validates color(s) and returns a list of color(s) of a specified type\n \"\"\"\n from numbers import Number\n if colors is None:\n colors = DEFAULT_PLOTLY_COLORS\n\n if isinstance(colors, str):\n if colors in PLOTLY_SCALES:\n colors = PLOTLY_SCALES[colors]\n elif 'rgb' in colors or '#' in colors:\n colors = [colors]\n else:\n raise exceptions.PlotlyError(\n \"If your colors variable is a string, it must be a \"\n \"Plotly scale, an rgb color or a hex color.\")\n\n elif isinstance(colors, tuple):\n if isinstance(colors[0], Number):\n colors = [colors]\n else:\n colors = list(colors)\n\n # convert color elements in list to tuple color\n for j, each_color in enumerate(colors):\n if 'rgb' in each_color:\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unlabel_rgb\n )\n for value in each_color:\n if value > 255.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your rgb colors \"\n \"tuples cannot exceed 255.0.\"\n )\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unconvert_from_RGB_255\n )\n colors[j] = each_color\n\n if '#' in each_color:\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._hex_to_rgb\n )\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unconvert_from_RGB_255\n )\n\n colors[j] = each_color\n\n if isinstance(each_color, tuple):\n for value in each_color:\n if value > 1.0:\n raise exceptions.PlotlyError(\n \"Whoops! 
The elements in your colors tuples \"\n \"cannot exceed 1.0.\"\n )\n colors[j] = each_color\n\n if colortype == 'rgb':\n for j, each_color in enumerate(colors):\n rgb_color = FigureFactory._color_parser(\n each_color, FigureFactory._convert_to_RGB_255\n )\n colors[j] = FigureFactory._color_parser(\n rgb_color, FigureFactory._label_rgb\n )\n\n return colors\n\n @staticmethod\n def _validate_colors_dict(colors, colortype='tuple'):\n \"\"\"\n Validates dictioanry of color(s)\n \"\"\"\n # validate each color element in the dictionary\n for key in colors:\n if 'rgb' in colors[key]:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unlabel_rgb\n )\n for value in colors[key]:\n if value > 255.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your rgb colors \"\n \"tuples cannot exceed 255.0.\"\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unconvert_from_RGB_255\n )\n\n if '#' in colors[key]:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._hex_to_rgb\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unconvert_from_RGB_255\n )\n\n if isinstance(colors[key], tuple):\n for value in colors[key]:\n if value > 1.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your colors tuples \"\n \"cannot exceed 1.0.\"\n )\n\n if colortype == 'rgb':\n for key in colors:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._convert_to_RGB_255\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._label_rgb\n )\n\n return colors\n\n @staticmethod\n def _calc_stats(data):\n \"\"\"\n Calculate statistics for use in violin plot.\n \"\"\"\n import numpy as np\n\n x = np.asarray(data, np.float)\n vals_min = np.min(x)\n vals_max = np.max(x)\n q2 = np.percentile(x, 50, interpolation='linear')\n q1 = np.percentile(x, 25, interpolation='lower')\n q3 = np.percentile(x, 75, interpolation='higher')\n iqr = q3 - q1\n whisker_dist = 1.5 * iqr\n\n # in order to prevent drawing whiskers outside the interval\n # of data one defines the whisker positions as:\n d1 = np.min(x[x >= (q1 - whisker_dist)])\n d2 = np.max(x[x <= (q3 + whisker_dist)])\n return {\n 'min': vals_min,\n 'max': vals_max,\n 'q1': q1,\n 'q2': q2,\n 'q3': q3,\n 'd1': d1,\n 'd2': d2\n }\n\n @staticmethod\n def _make_half_violin(x, y, fillcolor='#1f77b4',\n linecolor='rgb(0, 0, 0)'):\n \"\"\"\n Produces a sideways probability distribution fig violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n text = ['(pdf(y), y)=(' + '{:0.2f}'.format(x[i]) +\n ', ' + '{:0.2f}'.format(y[i]) + ')'\n for i in range(len(x))]\n\n return graph_objs.Scatter(\n x=x,\n y=y,\n mode='lines',\n name='',\n text=text,\n fill='tonextx',\n fillcolor=fillcolor,\n line=graph_objs.Line(width=0.5, color=linecolor, shape='spline'),\n hoverinfo='text',\n opacity=0.5\n )\n\n @staticmethod\n def _make_violin_rugplot(vals, pdf_max, distance,\n color='#1f77b4'):\n \"\"\"\n Returns a rugplot fig for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n y=vals,\n x=[-pdf_max-distance]*len(vals),\n marker=graph_objs.Marker(\n color=color,\n symbol='line-ew-open'\n ),\n mode='markers',\n name='',\n showlegend=False,\n hoverinfo='y'\n )\n\n @staticmethod\n def _make_quartiles(q1, q3):\n \"\"\"\n Makes the upper and lower quartiles for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0, 0],\n y=[q1, q3],\n 
text=['lower-quartile: ' + '{:0.2f}'.format(q1),\n 'upper-quartile: ' + '{:0.2f}'.format(q3)],\n mode='lines',\n line=graph_objs.Line(\n width=4,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text'\n )\n\n @staticmethod\n def _make_median(q2):\n \"\"\"\n Formats the 'median' hovertext for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0],\n y=[q2],\n text=['median: ' + '{:0.2f}'.format(q2)],\n mode='markers',\n marker=dict(symbol='square',\n color='rgb(255,255,255)'),\n hoverinfo='text'\n )\n\n @staticmethod\n def _make_non_outlier_interval(d1, d2):\n \"\"\"\n Returns the scatterplot fig of most of a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0, 0],\n y=[d1, d2],\n name='',\n mode='lines',\n line=graph_objs.Line(width=1.5,\n color='rgb(0,0,0)')\n )\n\n @staticmethod\n def _make_XAxis(xaxis_title, xaxis_range):\n \"\"\"\n Makes the x-axis for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n xaxis = graph_objs.XAxis(title=xaxis_title,\n range=xaxis_range,\n showgrid=False,\n zeroline=False,\n showline=False,\n mirror=False,\n ticks='',\n showticklabels=False,\n )\n return xaxis\n\n @staticmethod\n def _make_YAxis(yaxis_title):\n \"\"\"\n Makes the y-axis for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n yaxis = graph_objs.YAxis(title=yaxis_title,\n showticklabels=True,\n autorange=True,\n ticklen=4,\n showline=True,\n zeroline=False,\n showgrid=False,\n mirror=False)\n return yaxis\n\n @staticmethod\n def _violinplot(vals, fillcolor='#1f77b4', rugplot=True):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n \"\"\"\n import numpy as np\n from scipy import stats\n\n vals = np.asarray(vals, np.float)\n # summary statistics\n vals_min = FigureFactory._calc_stats(vals)['min']\n vals_max = FigureFactory._calc_stats(vals)['max']\n q1 = FigureFactory._calc_stats(vals)['q1']\n q2 = FigureFactory._calc_stats(vals)['q2']\n q3 = FigureFactory._calc_stats(vals)['q3']\n d1 = FigureFactory._calc_stats(vals)['d1']\n d2 = FigureFactory._calc_stats(vals)['d2']\n\n # kernel density estimation of pdf\n pdf = stats.gaussian_kde(vals)\n # grid over the data interval\n xx = np.linspace(vals_min, vals_max, 100)\n # evaluate the pdf at the grid xx\n yy = pdf(xx)\n max_pdf = np.max(yy)\n # distance from the violin plot to rugplot\n distance = (2.0 * max_pdf)/10 if rugplot else 0\n # range for x values in the plot\n plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]\n plot_data = [FigureFactory._make_half_violin(\n -yy, xx, fillcolor=fillcolor),\n FigureFactory._make_half_violin(\n yy, xx, fillcolor=fillcolor),\n FigureFactory._make_non_outlier_interval(d1, d2),\n FigureFactory._make_quartiles(q1, q3),\n FigureFactory._make_median(q2)]\n if rugplot:\n plot_data.append(FigureFactory._make_violin_rugplot(\n vals,\n max_pdf,\n distance=distance,\n color=fillcolor)\n )\n return plot_data, plot_xrange\n\n @staticmethod\n def _violin_no_colorscale(data, data_header, group_header, colors,\n use_colorscale, group_stats,\n height, width, title):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n 
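# Descriptive note on the block below: the violins are laid out side by\n # side, one subplot column per group in a 1 x L grid with a shared\n # y-axis; each group's traces are appended to column k + 1.\n 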
fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n color_index = 0\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n if color_index >= len(colors):\n color_index = 0\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor=colors[color_index]\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n color_index += 1\n\n # add violin plot labels\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def _violin_colorscale(data, data_header, group_header, colors,\n use_colorscale, group_stats, height, width, title):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot with colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n # make sure all group names are keys in group_stats\n for group in group_name:\n if group not in group_stats:\n raise exceptions.PlotlyError(\"All values/groups in the index \"\n \"column must be represented \"\n \"as a key in group_stats.\")\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n\n # prepare low and high color for colorscale\n lowcolor = FigureFactory._color_parser(\n colors[0], FigureFactory._unlabel_rgb\n )\n highcolor = FigureFactory._color_parser(\n colors[1], FigureFactory._unlabel_rgb\n )\n\n # find min and max values in group_stats\n group_stats_values = []\n for key in group_stats:\n group_stats_values.append(group_stats[key])\n\n max_value = max(group_stats_values)\n min_value = min(group_stats_values)\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n\n # find intermediate color from colorscale\n intermed = (group_stats[gr] - min_value) / (max_value - min_value)\n intermed_color = FigureFactory._find_intermediate_color(\n lowcolor, highcolor, intermed\n )\n\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor='rgb{}'.format(intermed_color)\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n # add colorbar to plot\n trace_dummy = graph_objs.Scatter(\n x=[0],\n y=[0],\n mode='markers',\n marker=dict(\n size=2,\n cmin=min_value,\n cmax=max_value,\n colorscale=[[0, colors[0]],\n [1, colors[1]]],\n showscale=True),\n showlegend=False,\n )\n fig.append_trace(trace_dummy, 1, L)\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def _violin_dict(data, data_header, group_header, colors, use_colorscale,\n group_stats, height, width, title):\n \"\"\"\n Refer to 
FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n # check if all group names appear in colors dict\n for group in group_name:\n if group not in colors:\n raise exceptions.PlotlyError(\"If colors is a dictionary, all \"\n \"the group names must appear as \"\n \"keys in colors.\")\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor=colors[gr]\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n\n # add violin plot labels\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def create_violin(data, data_header=None, group_header=None,\n colors=None, use_colorscale=False, group_stats=None,\n height=450, width=600, title='Violin and Rug Plot'):\n \"\"\"\n Returns figure for a violin plot\n\n :param (list|array) data: accepts either a list of numerical values,\n a list of dictionaries all with identical keys and at least one\n column of numeric values, or a pandas dataframe with at least one\n column of numbers\n :param (str) data_header: the header of the data column to be used\n from an inputted pandas dataframe. Not applicable if 'data' is\n a list of numeric values\n :param (str) group_header: applicable if grouping data by a variable.\n 'group_header' must be set to the name of the grouping variable.\n :param (str|tuple|list|dict) colors: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colors is a list, it must contain valid color types as its\n members.\n :param (bool) use_colorscale: Only applicable if grouping by another\n variable. Will implement a colorscale based on the first 2 colors\n of param colors. This means colors must be a list with at least 2\n colors in it (Plotly colorscales are accepted since they map to a\n list of two rgb colors)\n :param (dict) group_stats: a dictioanry where each key is a unique\n value from the group_header column in data. 
Each value must be a\n number and will be used to color the violin plots if a colorscale\n is being used\n :param (float) height: the height of the violin plot\n :param (float) width: the width of the violin plot\n :param (str) title: the title of the violin plot\n\n Example 1: Single Violin Plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n from scipy import stats\n\n # create list of random values\n data_list = np.random.randn(100)\n data_list.tolist()\n\n # create violin fig\n fig = FF.create_violin(data_list, colors='#604d9e')\n\n # plot\n py.iplot(fig, filename='Violin Plot')\n ```\n\n Example 2: Multiple Violin Plots with Qualitative Coloring\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n import pandas as pd\n from scipy import stats\n\n # create dataframe\n np.random.seed(619517)\n Nr=250\n y = np.random.randn(Nr)\n gr = np.random.choice(list(\"ABCDE\"), Nr)\n norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n for i, letter in enumerate(\"ABCDE\"):\n y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n df = pd.DataFrame(dict(Score=y, Group=gr))\n\n # create violin fig\n fig = FF.create_violin(df, data_header='Score', group_header='Group',\n height=600, width=1000)\n\n # plot\n py.iplot(fig, filename='Violin Plot with Coloring')\n ```\n\n Example 3: Violin Plots with Colorscale\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n import pandas as pd\n from scipy import stats\n\n # create dataframe\n np.random.seed(619517)\n Nr=250\n y = np.random.randn(Nr)\n gr = np.random.choice(list(\"ABCDE\"), Nr)\n norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n for i, letter in enumerate(\"ABCDE\"):\n y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n df = pd.DataFrame(dict(Score=y, Group=gr))\n\n # define header params\n data_header = 'Score'\n group_header = 'Group'\n\n # make groupby object with pandas\n group_stats = {}\n groupby_data = df.groupby([group_header])\n\n for group in \"ABCDE\":\n data_from_group = groupby_data.get_group(group)[data_header]\n # take a stat of the grouped data\n stat = np.median(data_from_group)\n # add to dictionary\n group_stats[group] = stat\n\n # create violin fig\n fig = FF.create_violin(df, data_header='Score', group_header='Group',\n height=600, width=1000, use_colorscale=True,\n group_stats=group_stats)\n\n # plot\n py.iplot(fig, filename='Violin Plot with Colorscale')\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n from numbers import Number\n\n # Validate colors\n if isinstance(colors, dict):\n valid_colors = FigureFactory._validate_colors_dict(colors, 'rgb')\n else:\n valid_colors = FigureFactory._validate_colors(colors, 'rgb')\n\n # validate data and choose plot type\n if group_header is None:\n if isinstance(data, list):\n if len(data) <= 0:\n raise exceptions.PlotlyError(\"If data is a list, it must be \"\n \"nonempty and contain either \"\n \"numbers or dictionaries.\")\n\n if not all(isinstance(element, Number) for element in data):\n raise exceptions.PlotlyError(\"If data is a list, it must \"\n \"contain only numbers.\")\n\n if _pandas_imported and isinstance(data, pd.core.frame.DataFrame):\n if data_header is None:\n raise exceptions.PlotlyError(\"data_header must be the \"\n 
\"column name with the \"\n \"desired numeric data for \"\n \"the violin plot.\")\n\n data = data[data_header].values.tolist()\n\n # call the plotting functions\n plot_data, plot_xrange = FigureFactory._violinplot(\n data, fillcolor=valid_colors[0]\n )\n\n layout = graph_objs.Layout(\n title=title,\n autosize=False,\n font=graph_objs.Font(size=11),\n height=height,\n showlegend=False,\n width=width,\n xaxis=FigureFactory._make_XAxis('', plot_xrange),\n yaxis=FigureFactory._make_YAxis(''),\n hovermode='closest'\n )\n layout['yaxis'].update(dict(showline=False,\n showticklabels=False,\n ticks=''))\n\n fig = graph_objs.Figure(data=graph_objs.Data(plot_data),\n layout=layout)\n\n return fig\n\n else:\n if not isinstance(data, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\"Error. You must use a pandas \"\n \"DataFrame if you are using a \"\n \"group header.\")\n\n if data_header is None:\n raise exceptions.PlotlyError(\"data_header must be the column \"\n \"name with the desired numeric \"\n \"data for the violin plot.\")\n\n if use_colorscale is False:\n if isinstance(valid_colors, dict):\n # validate colors dict choice below\n fig = FigureFactory._violin_dict(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n else:\n fig = FigureFactory._violin_no_colorscale(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n else:\n if isinstance(valid_colors, dict):\n raise exceptions.PlotlyError(\"The colors param cannot be \"\n \"a dictionary if you are \"\n \"using a colorscale.\")\n\n if len(valid_colors) < 2:\n raise exceptions.PlotlyError(\"colors must be a list with \"\n \"at least 2 colors. A \"\n \"Plotly scale is allowed.\")\n\n if not isinstance(group_stats, dict):\n raise exceptions.PlotlyError(\"Your group_stats param \"\n \"must be a dictionary.\")\n\n fig = FigureFactory._violin_colorscale(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n\n @staticmethod\n def _find_intermediate_color(lowcolor, highcolor, intermed):\n \"\"\"\n Returns the color at a given distance between two colors\n\n This function takes two color tuples, where each element is between 0\n and 1, along with a value 0 < intermed < 1 and returns a color that is\n intermed-percent from lowcolor to highcolor\n\n \"\"\"\n diff_0 = float(highcolor[0] - lowcolor[0])\n diff_1 = float(highcolor[1] - lowcolor[1])\n diff_2 = float(highcolor[2] - lowcolor[2])\n\n return (lowcolor[0] + intermed * diff_0,\n lowcolor[1] + intermed * diff_1,\n lowcolor[2] + intermed * diff_2)\n\n @staticmethod\n def _color_parser(colors, function):\n \"\"\"\n Takes color(s) and a function and applies the function on the color(s)\n\n In particular, this function identifies whether the given color object\n is an iterable or not and applies the given color-parsing function to\n the color or iterable of colors. 
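A minimal\n        illustrative sketch of that dispatch (these are private helpers, shown\n        here only for clarity; the example colors are made up):\n\n        ```\n        from plotly.tools import FigureFactory as FF\n\n        # a single rgb string is handed straight to the parsing function ...\n        FF._color_parser('rgb(255, 127, 0)', FF._unlabel_rgb)\n        # -> (255.0, 127.0, 0.0)\n\n        # ... while a list of colors is parsed element by element\n        FF._color_parser(['rgb(255, 127, 0)', 'rgb(0, 0, 255)'],\n                         FF._unlabel_rgb)\n        # -> [(255.0, 127.0, 0.0), (0.0, 0.0, 255.0)]\n        ```\n\n        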
If given an iterable, it will only be\n able to work with it if all items in the iterable are of the same type\n - rgb string, hex string or tuple\n\n \"\"\"\n from numbers import Number\n if isinstance(colors, str):\n return function(colors)\n\n if isinstance(colors, tuple) and isinstance(colors[0], Number):\n return function(colors)\n\n if hasattr(colors, '__iter__'):\n if isinstance(colors, tuple):\n new_color_tuple = tuple(function(item) for item in colors)\n return new_color_tuple\n\n else:\n new_color_list = [function(item) for item in colors]\n return new_color_list\n\n @staticmethod\n def _unconvert_from_RGB_255(colors):\n \"\"\"\n Return a tuple where each element gets divided by 255\n\n Takes a (list of) color tuple(s) where each element is between 0 and\n 255. Returns the same tuples where each tuple element is normalized to\n a value between 0 and 1\n\n \"\"\"\n return (colors[0]/(255.0),\n colors[1]/(255.0),\n colors[2]/(255.0))\n\n @staticmethod\n def _map_face2color(face, colormap, vmin, vmax):\n \"\"\"\n Normalize facecolor values by vmin/vmax and return rgb-color strings\n\n This function takes a tuple color along with a colormap and a minimum\n (vmin) and maximum (vmax) range of possible mean distances for the\n given parametrized surface. It returns an rgb color based on the mean\n distance between vmin and vmax\n\n \"\"\"\n if vmin >= vmax:\n raise exceptions.PlotlyError(\"Incorrect relation between vmin \"\n \"and vmax. The vmin value cannot be \"\n \"bigger than or equal to the value \"\n \"of vmax.\")\n\n if len(colormap) == 1:\n # color each triangle face with the same color in colormap\n face_color = colormap[0]\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n else:\n if face == vmax:\n # pick last color in colormap\n face_color = colormap[-1]\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n else:\n # find the normalized distance t of a triangle face between\n # vmin and vmax where the distance is between 0 and 1\n t = (face - vmin) / float((vmax - vmin))\n low_color_index = int(t / (1./(len(colormap) - 1)))\n\n face_color = FigureFactory._find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n t * (len(colormap) - 1) - low_color_index\n )\n\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n return face_color\n\n @staticmethod\n def _trisurf(x, y, z, simplices, show_colorbar, edges_color,\n colormap=None, color_func=None, plot_edges=False,\n x_edge=None, y_edge=None, z_edge=None, facecolor=None):\n \"\"\"\n Refer to FigureFactory.create_trisurf() for docstring\n \"\"\"\n # numpy import check\n if _numpy_imported is False:\n raise ImportError(\"FigureFactory._trisurf() requires \"\n \"numpy imported.\")\n import numpy as np\n from plotly.graph_objs import graph_objs\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n # Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\"If color_func is a list/array, it must \"\n \"be the same length as simplices.\")\n\n # convert 
all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if '#' in color_func[index]:\n foo = FigureFactory._hex_to_rgb(color_func[index])\n color_func[index] = FigureFactory._label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = FigureFactory._convert_to_RGB_255(color_func[index])\n color_func[index] = FigureFactory._label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = FigureFactory._map_face2color(mean_dists[index],\n colormap,\n min_mean_dists,\n max_mean_dists)\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = list(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(x=x, y=y, z=z, facecolor=facecolor,\n i=ii, j=jj, k=kk, name='')\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = FigureFactory._make_colorscale(colormap)\n colorscale = FigureFactory._convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[0],\n y=y[0],\n z=z[0],\n mode='markers',\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True),\n hoverinfo='None',\n showlegend=False\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return graph_objs.Data([triangles, colorbar])\n else:\n return graph_objs.Data([triangles])\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\"If any (x_edge, y_edge, z_edge) is None, \"\n \"all must be None\")\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack([pull_edges[:, :, 0],\n np.tile(None, [pull_edges.shape[0], 1])])\n y_edge_pull = np.hstack([pull_edges[:, :, 1],\n np.tile(None, [pull_edges.shape[0], 1])])\n z_edge_pull = np.hstack([pull_edges[:, :, 2],\n np.tile(None, [pull_edges.shape[0], 1])])\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\"The lengths of x_edge, y_edge and \"\n \"z_edge are not the same.\")\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge, y=y_edge, z=z_edge, 
mode='lines',\n line=graph_objs.Line(\n color=edges_color,\n width=1.5\n ),\n showlegend=False\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return graph_objs.Data([triangles, lines, colorbar])\n else:\n return graph_objs.Data([triangles, lines])\n\n @staticmethod\n def create_trisurf(x, y, z, simplices, colormap=None, show_colorbar=True,\n color_func=None, title='Trisurf Plot', plot_edges=True,\n showbackground=True,\n backgroundcolor='rgb(230, 230, 230)',\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n edges_color='rgb(50, 50, 50)',\n height=800, width=800,\n aspectratio=dict(x=1, y=1, z=1)):\n \"\"\"\n Returns figure for a triangulated surface plot\n\n :param (array) x: data values of x in a 1D array\n :param (array) y: data values of y in a 1D array\n :param (array) z: data values of z in a 1D array\n :param (array) simplices: an array of shape (ntri, 3) where ntri is\n the number of triangles in the triangularization. Each row of the\n array contains the indicies of the verticies of each triangle\n :param (str|tuple|list) colormap: either a plotly scale name, an rgb\n or hex color, a color tuple or a list of colors. An rgb color is\n of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colormap is a list, it must\n contain the valid color types aforementioned as its members\n :param (bool) show_colorbar: determines if colorbar is visible\n :param (function|list) color_func: The parameter that determines the\n coloring of the surface. Takes either a function with 3 arguments\n x, y, z or a list/array of color values the same length as\n simplices. If None, coloring will only depend on the z axis\n :param (str) title: title of the plot\n :param (bool) plot_edges: determines if the triangles on the trisurf\n are visible\n :param (bool) showbackground: makes background in plot visible\n :param (str) backgroundcolor: color of background. Takes a string of\n the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) gridcolor: color of the gridlines besides the axes. Takes\n a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255\n inclusive\n :param (str) zerolinecolor: color of the axes. Takes a string of the\n form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) edges_color: color of the edges, if plot_edges is True\n :param (int|float) height: the height of the plot (in pixels)\n :param (int|float) width: the width of the plot (in pixels)\n :param (dict) aspectratio: a dictionary of the aspect ratio values for\n the x, y and z axes. 
'x', 'y' and 'z' take (int|float) values\n\n Example 1: Sphere\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 20)\n v = np.linspace(0, np.pi, 20)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n x = np.sin(v)*np.cos(u)\n y = np.sin(v)*np.sin(u)\n z = np.cos(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=\"Blues\",\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-sphere')\n ```\n\n Example 2: Torus\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 20)\n v = np.linspace(0, 2*np.pi, 20)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n x = (3 + (np.cos(v)))*np.cos(u)\n y = (3 + (np.cos(v)))*np.sin(u)\n z = np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=\"Greys\",\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-torus')\n ```\n\n Example 3: Mobius Band\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 24)\n v = np.linspace(-1, 1, 8)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n tp = 1 + 0.5*v*np.cos(u/2.)\n x = tp*np.cos(u)\n y = tp*np.sin(u)\n z = 0.5*v*np.sin(u/2.)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-mobius-band')\n ```\n\n Example 4: Using a Custom Colormap Function with Light Cone\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u=np.linspace(-np.pi, np.pi, 30)\n v=np.linspace(-np.pi, np.pi, 30)\n u,v=np.meshgrid(u,v)\n u=u.flatten()\n v=v.flatten()\n\n x = u\n y = u*np.cos(v)\n z = u*np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Define distance function\n def dist_origin(x, y, z):\n return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=['#604d9e',\n 'rgb(50, 150, 255)',\n (0.2, 0.2, 0.8)],\n simplices=simplices,\n color_func=dist_origin)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-custom-coloring')\n ```\n\n Example 5: Enter color_func as a list of colors\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n import random\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # 
Make data for plot\n u=np.linspace(-np.pi, np.pi, 30)\n v=np.linspace(-np.pi, np.pi, 30)\n u,v=np.meshgrid(u,v)\n u=u.flatten()\n v=v.flatten()\n\n x = u\n y = u*np.cos(v)\n z = u*np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n\n colors = []\n color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']\n\n for index in range(len(simplices)):\n colors.append(random.choice(color_choices))\n\n fig = FF.create_trisurf(\n x, y, z, simplices,\n color_func=colors,\n show_colorbar=True,\n edges_color='rgb(2, 85, 180)',\n title=' Modern Art'\n )\n\n py.iplot(fig, filename=\"trisurf-plot-modern-art\")\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n # Validate colormap\n colormap = FigureFactory._validate_colors(colormap, 'tuple')\n\n data1 = FigureFactory._trisurf(x, y, z, simplices,\n show_colorbar=show_colorbar,\n color_func=color_func,\n colormap=colormap,\n edges_color=edges_color,\n plot_edges=plot_edges)\n axis = dict(\n showbackground=showbackground,\n backgroundcolor=backgroundcolor,\n gridcolor=gridcolor,\n zerolinecolor=zerolinecolor,\n )\n layout = graph_objs.Layout(\n title=title,\n width=width,\n height=height,\n scene=graph_objs.Scene(\n xaxis=graph_objs.XAxis(axis),\n yaxis=graph_objs.YAxis(axis),\n zaxis=graph_objs.ZAxis(axis),\n aspectratio=dict(\n x=aspectratio['x'],\n y=aspectratio['y'],\n z=aspectratio['z']),\n )\n )\n\n return graph_objs.Figure(data=data1, layout=layout)\n\n @staticmethod\n def _scatterplot(dataframe, headers, diag, size,\n height, width, title, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix without index\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n # Insert traces into trace_list\n for listy in dataframe:\n for listx in dataframe:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n name=None,\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n trace_list.append(trace)\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size),\n showlegend=False,\n **kwargs\n )\n trace_list.append(trace)\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True\n )\n return fig\n\n @staticmethod\n def _scatterplot_dict(dataframe, headers, diag, size,\n height, width, title, index, index_vals,\n endpts, colormap, colormap_type, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked.\n Used if colormap is a dictionary with index values as keys pointing to\n colors. 
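For\n        instance (an illustrative sketch only; the DataFrame, column names and\n        colors below are made up), a call such as\n\n        ```\n        import pandas as pd\n        from plotly.tools import FigureFactory as FF\n\n        df = pd.DataFrame({'A': [1.0, 2.0, 3.0, 4.0],\n                           'B': [4.0, 3.0, 2.0, 1.0],\n                           'Fruit': ['apple', 'pear', 'apple', 'pear']})\n\n        fig = FF.create_scatterplotmatrix(\n            df, index='Fruit',\n            colormap={'apple': 'rgb(255, 0, 0)',\n                      'pear': 'rgb(0, 128, 0)'}\n        )\n        ```\n\n        is routed to this helper: every trace whose index value is 'apple' is\n        drawn in red and every 'pear' trace in green. 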
Forces colormap_type to behave categorically because it would\n not make sense colors are assigned to each index value and thus\n implies that a categorical approach should be taken\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n theme = colormap\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[name]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[name]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[name]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[name]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[name]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[name]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(\n trace_list[trace_index][name],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n @staticmethod\n def _scatterplot_theme(dataframe, 
headers, diag, size, height,\n width, title, index, index_vals, endpts,\n colormap, colormap_type, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n # Check if index is made of string values\n if isinstance(index_vals[0], str):\n unique_index_vals = []\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals.append(name)\n n_colors_len = len(unique_index_vals)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == 'seq':\n foo = FigureFactory._color_parser(\n colormap, FigureFactory._unlabel_rgb\n )\n foo = FigureFactory._n_colors(foo[0],\n foo[1],\n n_colors_len)\n theme = FigureFactory._color_parser(\n foo, FigureFactory._label_rgb\n )\n\n if colormap_type == 'cat':\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n c_indx += 1\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(\n 
trace_list[trace_index][name],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n if endpts:\n intervals = FigureFactory._endpts_to_intervals(endpts)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == 'seq':\n foo = FigureFactory._color_parser(\n colormap, FigureFactory._unlabel_rgb\n )\n foo = FigureFactory._n_colors(foo[0],\n foo[1],\n len(intervals))\n theme = FigureFactory._color_parser(\n foo, FigureFactory._label_rgb\n )\n\n if colormap_type == 'cat':\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n interval_labels = {}\n for interval in intervals:\n interval_labels[str(interval)] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for interval in intervals:\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if interval[0] < index_vals[j] <= interval[1]:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n (kwargs['marker']\n ['color']) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n (kwargs['marker']\n ['color']) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n interval_labels[str(interval)] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n 
c_indx += 1\n trace_list.append(interval_labels)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for interval in intervals:\n fig.append_trace(\n trace_list[trace_index][str(interval)],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n theme = colormap\n\n # add a copy of rgb color to theme if it contains one color\n if len(theme) <= 1:\n theme.append(theme[0])\n\n color = []\n for incr in range(len(theme)):\n color.append([1./(len(theme)-1)*incr, theme[incr]])\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Run through all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = index_vals\n kwargs['marker']['colorscale'] = color\n kwargs['marker']['showscale'] = True\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=True),\n showlegend=False,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = index_vals\n kwargs['marker']['colorscale'] = color\n kwargs['marker']['showscale'] = False\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=False),\n showlegend=False,\n **kwargs\n )\n # Push the trace into list\n trace_list.append(trace)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n 
yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n @staticmethod\n def _validate_index(index_vals):\n \"\"\"\n Validates if a list contains all numbers or all strings\n\n :raises: (PlotlyError) If there are any two items in the list whose\n types differ\n \"\"\"\n from numbers import Number\n if isinstance(index_vals[0], Number):\n if not all(isinstance(item, Number) for item in index_vals):\n raise exceptions.PlotlyError(\"Error in indexing column. \"\n \"Make sure all entries of each \"\n \"column are all numbers or \"\n \"all strings.\")\n\n elif isinstance(index_vals[0], str):\n if not all(isinstance(item, str) for item in index_vals):\n raise exceptions.PlotlyError(\"Error in indexing column. \"\n \"Make sure all entries of each \"\n \"column are all numbers or \"\n \"all strings.\")\n\n @staticmethod\n def _validate_dataframe(array):\n \"\"\"\n Validates all strings or numbers in each dataframe column\n\n :raises: (PlotlyError) If there are any two items in any list whose\n types differ\n \"\"\"\n from numbers import Number\n for vector in array:\n if isinstance(vector[0], Number):\n if not all(isinstance(item, Number) for item in vector):\n raise exceptions.PlotlyError(\"Error in dataframe. \"\n \"Make sure all entries of \"\n \"each column are either \"\n \"numbers or strings.\")\n elif isinstance(vector[0], str):\n if not all(isinstance(item, str) for item in vector):\n raise exceptions.PlotlyError(\"Error in dataframe. \"\n \"Make sure all entries of \"\n \"each column are either \"\n \"numbers or strings.\")\n\n @staticmethod\n def _validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):\n \"\"\"\n Validates basic inputs for FigureFactory.create_scatterplotmatrix()\n\n :raises: (PlotlyError) If pandas is not imported\n :raises: (PlotlyError) If pandas dataframe is not inputted\n :raises: (PlotlyError) If pandas dataframe has <= 1 columns\n :raises: (PlotlyError) If diagonal plot choice (diag) is not one of\n the viable options\n :raises: (PlotlyError) If colormap_type is not a valid choice\n :raises: (PlotlyError) If kwargs contains 'size', 'color' or\n 'colorscale'\n \"\"\"\n if _pandas_imported is False:\n raise ImportError(\"FigureFactory.scatterplotmatrix requires \"\n \"a pandas DataFrame.\")\n\n # Check if pandas dataframe\n if not isinstance(df, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\"Dataframe not inputed. Please \"\n \"use a pandas dataframe to pro\"\n \"duce a scatterplot matrix.\")\n\n # Check if dataframe is 1 column or less\n if len(df.columns) <= 1:\n raise exceptions.PlotlyError(\"Dataframe has only one column. To \"\n \"use the scatterplot matrix, use at \"\n \"least 2 columns.\")\n\n # Check that diag parameter is a valid selection\n if diag not in DIAG_CHOICES:\n raise exceptions.PlotlyError(\"Make sure diag is set to \"\n \"one of {}\".format(DIAG_CHOICES))\n\n # Check that colormap_types is a valid selection\n if colormap_type not in VALID_COLORMAP_TYPES:\n raise exceptions.PlotlyError(\"Must choose a valid colormap type. 
\"\n \"Either 'cat' or 'seq' for a cate\"\n \"gorical and sequential colormap \"\n \"respectively.\")\n\n # Check for not 'size' or 'color' in 'marker' of **kwargs\n if 'marker' in kwargs:\n FORBIDDEN_PARAMS = ['size', 'color', 'colorscale']\n if any(param in kwargs['marker'] for param in FORBIDDEN_PARAMS):\n raise exceptions.PlotlyError(\"Your kwargs dictionary cannot \"\n \"include the 'size', 'color' or \"\n \"'colorscale' key words inside \"\n \"the marker dict since 'size' is \"\n \"already an argument of the \"\n \"scatterplot matrix function and \"\n \"both 'color' and 'colorscale \"\n \"are set internally.\")\n\n @staticmethod\n def _endpts_to_intervals(endpts):\n \"\"\"\n Returns a list of intervals for categorical colormaps\n\n Accepts a list or tuple of sequentially increasing numbers and returns\n a list representation of the mathematical intervals with these numbers\n as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]\n\n :raises: (PlotlyError) If input is not a list or tuple\n :raises: (PlotlyError) If the input contains a string\n :raises: (PlotlyError) If any number does not increase after the\n previous one in the sequence\n \"\"\"\n length = len(endpts)\n # Check if endpts is a list or tuple\n if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):\n raise exceptions.PlotlyError(\"The intervals_endpts argument must \"\n \"be a list or tuple of a sequence \"\n \"of increasing numbers.\")\n # Check if endpts contains only numbers\n for item in endpts:\n if isinstance(item, str):\n raise exceptions.PlotlyError(\"The intervals_endpts argument \"\n \"must be a list or tuple of a \"\n \"sequence of increasing \"\n \"numbers.\")\n # Check if numbers in endpts are increasing\n for k in range(length-1):\n if endpts[k] >= endpts[k+1]:\n raise exceptions.PlotlyError(\"The intervals_endpts argument \"\n \"must be a list or tuple of a \"\n \"sequence of increasing \"\n \"numbers.\")\n else:\n intervals = []\n # add -inf to intervals\n intervals.append([float('-inf'), endpts[0]])\n for k in range(length - 1):\n interval = []\n interval.append(endpts[k])\n interval.append(endpts[k + 1])\n intervals.append(interval)\n # add +inf to intervals\n intervals.append([endpts[length - 1], float('inf')])\n return intervals\n\n @staticmethod\n def _convert_to_RGB_255(colors):\n \"\"\"\n Multiplies each element of a triplet by 255\n\n Each coordinate of the color tuple is rounded to the nearest float and\n then is turned into an integer. If a number is of the form x.5, then\n if x is odd, the number rounds up to (x+1). Otherwise, it rounds down\n to just x. 
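For\n        example (an illustrative check of the half-even rule applied below;\n        these private helpers are not part of the public API):\n\n        ```\n        import decimal\n\n        from plotly.tools import FigureFactory as FF\n\n        decimal.Decimal('127.5').quantize(\n            decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN)  # -> 128\n        decimal.Decimal('126.5').quantize(\n            decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN)  # -> 126\n\n        # applied to a color tuple with components in [0, 1]:\n        FF._convert_to_RGB_255((0.5, 0.0, 1.0))  # -> (128, 0, 255)\n        ```\n\n        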
This is the way rounding works in Python 3 and in current\n statistical analysis to avoid rounding bias\n \"\"\"\n rgb_components = []\n\n for component in colors:\n rounded_num = decimal.Decimal(str(component*255.0)).quantize(\n decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN\n )\n # convert rounded number to an integer from 'Decimal' form\n rounded_num = int(rounded_num)\n rgb_components.append(rounded_num)\n\n return (rgb_components[0], rgb_components[1], rgb_components[2])\n\n @staticmethod\n def _n_colors(lowcolor, highcolor, n_colors):\n \"\"\"\n Splits a low and high color into a list of n_colors colors in it\n\n Accepts two color tuples and returns a list of n_colors colors\n which form the intermediate colors between lowcolor and highcolor\n from linearly interpolating through RGB space\n\n \"\"\"\n diff_0 = float(highcolor[0] - lowcolor[0])\n incr_0 = diff_0/(n_colors - 1)\n diff_1 = float(highcolor[1] - lowcolor[1])\n incr_1 = diff_1/(n_colors - 1)\n diff_2 = float(highcolor[2] - lowcolor[2])\n incr_2 = diff_2/(n_colors - 1)\n color_tuples = []\n\n for index in range(n_colors):\n new_tuple = (lowcolor[0] + (index * incr_0),\n lowcolor[1] + (index * incr_1),\n lowcolor[2] + (index * incr_2))\n color_tuples.append(new_tuple)\n\n return color_tuples\n\n @staticmethod\n def _label_rgb(colors):\n \"\"\"\n Takes tuple (a, b, c) and returns an rgb color 'rgb(a, b, c)'\n \"\"\"\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))\n\n @staticmethod\n def _unlabel_rgb(colors):\n \"\"\"\n Takes rgb color(s) 'rgb(a, b, c)' and returns tuple(s) (a, b, c)\n\n This function takes either an 'rgb(a, b, c)' color or a list of\n such colors and returns the color tuples in tuple(s) (a, b, c)\n\n \"\"\"\n str_vals = ''\n for index in range(len(colors)):\n try:\n float(colors[index])\n str_vals = str_vals + colors[index]\n except ValueError:\n if colors[index] == ',' or colors[index] == '.':\n str_vals = str_vals + colors[index]\n\n str_vals = str_vals + ','\n numbers = []\n str_num = ''\n for char in str_vals:\n if char != ',':\n str_num = str_num + char\n else:\n numbers.append(float(str_num))\n str_num = ''\n return (numbers[0], numbers[1], numbers[2])\n\n @staticmethod\n def create_scatterplotmatrix(df, index=None, endpts=None, diag='scatter',\n height=500, width=500, size=6,\n title='Scatterplot Matrix', colormap=None,\n colormap_type='cat', dataframe=None,\n headers=None, index_vals=None, **kwargs):\n \"\"\"\n Returns data for a scatterplot matrix.\n\n :param (array) df: array of the data with column headers\n :param (str) index: name of the index column in data array\n :param (list|tuple) endpts: takes an increasing sequece of numbers\n that defines intervals on the real line. They are used to group\n the entries in an index of numbers into their corresponding\n interval and therefore can be treated as categorical data\n :param (str) diag: sets the chart type for the main diagonal plots\n :param (int|float) height: sets the height of the chart\n :param (int|float) width: sets the width of the chart\n :param (float) size: sets the marker size (in px)\n :param (str) title: the title label of the scatterplot matrix\n :param (str|tuple|list|dict) colormap: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. 
An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colormap is a list, it must contain valid color types as its\n members.\n If colormap is a dictionary, all the string entries in\n the index column must be a key in colormap. In this case, the\n colormap_type is forced to 'cat' or categorical\n :param (str) colormap_type: determines how colormap is interpreted.\n Valid choices are 'seq' (sequential) and 'cat' (categorical). If\n 'seq' is selected, only the first two colors in colormap will be\n considered (when colormap is a list) and the index values will be\n linearly interpolated between those two colors. This option is\n forced if all index values are numeric.\n If 'cat' is selected, a color from colormap will be assigned to\n each category from index, including the intervals if endpts is\n being used\n :param (dict) **kwargs: a dictionary of scatterplot arguments\n The only forbidden parameters are 'size', 'color' and\n 'colorscale' in 'marker'\n\n Example 1: Vanilla Scatterplot Matrix\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe\n df = pd.DataFrame(np.random.randn(10, 2),\n columns=['Column 1', 'Column 2'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df)\n\n # Plot\n py.iplot(fig, filename='Vanilla Scatterplot Matrix')\n ```\n\n Example 2: Indexing a Column\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with index\n df = pd.DataFrame(np.random.randn(10, 2),\n columns=['A', 'B'])\n\n # Add another column of strings to the dataframe\n df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n 'grape', 'pear', 'pear', 'apple', 'pear'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df, index='Fruit', size=10)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix with Index')\n ```\n\n Example 3: Styling the Diagonal Subplots\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with index\n df = pd.DataFrame(np.random.randn(10, 4),\n columns=['A', 'B', 'C', 'D'])\n\n # Add another column of strings to the dataframe\n df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n 'grape', 'pear', 'pear', 'apple', 'pear'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df, diag='box', index='Fruit',\n height=1000, width=1000)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Diagonal Styling')\n ```\n\n Example 4: Use a Theme to Style the Subplots\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['A', 'B', 'C'])\n\n # Create scatterplot matrix using a built-in\n # Plotly palette scale and indexing column 'A'\n fig = FF.create_scatterplotmatrix(df, diag='histogram',\n index='A', colormap='Blues',\n height=800, width=800)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Colormap Theme')\n ```\n\n Example 5: 
Example 4 with Interval Factoring\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['A', 'B', 'C'])\n\n # Create scatterplot matrix using a list of 2 rgb tuples\n # and endpoints at -1, 0 and 1\n fig = FF.create_scatterplotmatrix(df, diag='histogram', index='A',\n colormap=['rgb(140, 255, 50)',\n 'rgb(170, 60, 115)',\n '#6c4774',\n (0.5, 0.1, 0.8)],\n endpts=[-1, 0, 1],\n height=800, width=800)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Intervals')\n ```\n\n Example 6: Using the colormap as a Dictionary\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n import random\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['Column A',\n 'Column B',\n 'Column C'])\n\n # Add new color column to dataframe\n new_column = []\n strange_colors = ['turquoise', 'limegreen', 'goldenrod']\n\n for j in range(100):\n new_column.append(random.choice(strange_colors))\n df['Colors'] = pd.Series(new_column, index=df.index)\n\n # Create scatterplot matrix using a dictionary of hex color values\n # which correspond to actual color names in 'Colors' column\n fig = FF.create_scatterplotmatrix(\n df, diag='box', index='Colors',\n colormap= dict(\n turquoise = '#00F5FF',\n limegreen = '#32CD32',\n goldenrod = '#DAA520'\n ),\n colormap_type='cat',\n height=800, width=800\n )\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - colormap dictionary ')\n ```\n \"\"\"\n # TODO: protected until #282\n if dataframe is None:\n dataframe = []\n if headers is None:\n headers = []\n if index_vals is None:\n index_vals = []\n\n FigureFactory._validate_scatterplotmatrix(df, index, diag,\n colormap_type, **kwargs)\n\n # Validate colormap\n if isinstance(colormap, dict):\n colormap = FigureFactory._validate_colors_dict(colormap, 'rgb')\n else:\n colormap = FigureFactory._validate_colors(colormap, 'rgb')\n\n if not index:\n for name in df:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n # Check for same data-type in df columns\n FigureFactory._validate_dataframe(dataframe)\n figure = FigureFactory._scatterplot(dataframe, headers, diag,\n size, height, width, title,\n **kwargs)\n return figure\n else:\n # Validate index selection\n if index not in df:\n raise exceptions.PlotlyError(\"Make sure you set the index \"\n \"input variable to one of the \"\n \"column names of your \"\n \"dataframe.\")\n index_vals = df[index].values.tolist()\n for name in df:\n if name != index:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n\n # check for same data-type in each df column\n FigureFactory._validate_dataframe(dataframe)\n FigureFactory._validate_index(index_vals)\n\n # check if all colormap keys are in the index\n # if colormap is a dictionary\n if isinstance(colormap, dict):\n for key in colormap:\n if not all(index in colormap for index in index_vals):\n raise exceptions.PlotlyError(\"If colormap is a \"\n \"dictionary, all the \"\n \"names in the index \"\n \"must be keys.\")\n figure = FigureFactory._scatterplot_dict(\n dataframe, headers, diag, size, height, width, title,\n index, index_vals, endpts, colormap, colormap_type,\n **kwargs\n )\n return 
figure\n\n else:\n figure = FigureFactory._scatterplot_theme(\n dataframe, headers, diag, size, height, width, title,\n index, index_vals, endpts, colormap, colormap_type,\n **kwargs\n )\n return figure\n\n @staticmethod\n def _validate_equal_length(*args):\n \"\"\"\n Validates that data lists or ndarrays are the same length.\n\n :raises: (PlotlyError) If any data lists are not the same length.\n \"\"\"\n length = len(args[0])\n if any(len(lst) != length for lst in args):\n raise exceptions.PlotlyError(\"Oops! Your data lists or ndarrays \"\n \"should be the same length.\")\n\n @staticmethod\n def _validate_ohlc(open, high, low, close, direction, **kwargs):\n \"\"\"\n ohlc and candlestick specific validations\n\n Specifically, this checks that the high value is the greatest value and\n the low value is the lowest value in each unit.\n\n See FigureFactory.create_ohlc() or FigureFactory.create_candlestick()\n for params\n\n :raises: (PlotlyError) If the high value is not the greatest value in\n each unit.\n :raises: (PlotlyError) If the low value is not the lowest value in each\n unit.\n :raises: (PlotlyError) If direction is not 'increasing' or 'decreasing'\n \"\"\"\n for lst in [open, low, close]:\n for index in range(len(high)):\n if high[index] < lst[index]:\n raise exceptions.PlotlyError(\"Oops! Looks like some of \"\n \"your high values are less \"\n \"the corresponding open, \"\n \"low, or close values. \"\n \"Double check that your data \"\n \"is entered in O-H-L-C order\")\n\n for lst in [open, high, close]:\n for index in range(len(low)):\n if low[index] > lst[index]:\n raise exceptions.PlotlyError(\"Oops! Looks like some of \"\n \"your low values are greater \"\n \"than the corresponding high\"\n \", open, or close values. \"\n \"Double check that your data \"\n \"is entered in O-H-L-C order\")\n\n direction_opts = ('increasing', 'decreasing', 'both')\n if direction not in direction_opts:\n raise exceptions.PlotlyError(\"direction must be defined as \"\n \"'increasing', 'decreasing', or \"\n \"'both'\")\n\n @staticmethod\n def _validate_distplot(hist_data, curve_type):\n \"\"\"\n Distplot-specific validations\n\n :raises: (PlotlyError) If hist_data is not a list of lists\n :raises: (PlotlyError) If curve_type is not valid (i.e. not 'kde' or\n 'normal').\n \"\"\"\n try:\n import pandas as pd\n _pandas_imported = True\n except ImportError:\n _pandas_imported = False\n\n hist_data_types = (list,)\n if _numpy_imported:\n hist_data_types += (np.ndarray,)\n if _pandas_imported:\n hist_data_types += (pd.core.series.Series,)\n\n if not isinstance(hist_data[0], hist_data_types):\n raise exceptions.PlotlyError(\"Oops, this function was written \"\n \"to handle multiple datasets, if \"\n \"you want to plot just one, make \"\n \"sure your hist_data variable is \"\n \"still a list of lists, i.e. 
x = \"\n \"[1, 2, 3] -> x = [[1, 2, 3]]\")\n\n curve_opts = ('kde', 'normal')\n if curve_type not in curve_opts:\n raise exceptions.PlotlyError(\"curve_type must be defined as \"\n \"'kde' or 'normal'\")\n\n if _scipy_imported is False:\n raise ImportError(\"FigureFactory.create_distplot requires scipy\")\n\n @staticmethod\n def _validate_positive_scalars(**kwargs):\n \"\"\"\n Validates that all values given in key/val pairs are positive.\n\n Accepts kwargs to improve Exception messages.\n\n :raises: (PlotlyError) If any value is < 0 or raises.\n \"\"\"\n for key, val in kwargs.items():\n try:\n if val <= 0:\n raise ValueError('{} must be > 0, got {}'.format(key, val))\n except TypeError:\n raise exceptions.PlotlyError('{} must be a number, got {}'\n .format(key, val))\n\n @staticmethod\n def _validate_streamline(x, y):\n \"\"\"\n Streamline-specific validations\n\n Specifically, this checks that x and y are both evenly spaced,\n and that the package numpy is available.\n\n See FigureFactory.create_streamline() for params\n\n :raises: (ImportError) If numpy is not available.\n :raises: (PlotlyError) If x is not evenly spaced.\n :raises: (PlotlyError) If y is not evenly spaced.\n \"\"\"\n if _numpy_imported is False:\n raise ImportError(\"FigureFactory.create_streamline requires numpy\")\n for index in range(len(x) - 1):\n if ((x[index + 1] - x[index]) - (x[1] - x[0])) > .0001:\n raise exceptions.PlotlyError(\"x must be a 1 dimensional, \"\n \"evenly spaced array\")\n for index in range(len(y) - 1):\n if ((y[index + 1] - y[index]) -\n (y[1] - y[0])) > .0001:\n raise exceptions.PlotlyError(\"y must be a 1 dimensional, \"\n \"evenly spaced array\")\n\n @staticmethod\n def _validate_annotated_heatmap(z, x, y, annotation_text):\n \"\"\"\n Annotated-heatmap-specific validations\n\n Check that if a text matrix is supplied, it has the same\n dimensions as the z matrix.\n\n See FigureFactory.create_annotated_heatmap() for params\n\n :raises: (PlotlyError) If z and text matrices do not have the same\n dimensions.\n \"\"\"\n if annotation_text is not None and isinstance(annotation_text, list):\n FigureFactory._validate_equal_length(z, annotation_text)\n for lst in range(len(z)):\n if len(z[lst]) != len(annotation_text[lst]):\n raise exceptions.PlotlyError(\"z and text should have the \"\n \"same dimensions\")\n\n if x:\n if len(x) != len(z[0]):\n raise exceptions.PlotlyError(\"oops, the x list that you \"\n \"provided does not match the \"\n \"width of your z matrix \")\n\n if y:\n if len(y) != len(z):\n raise exceptions.PlotlyError(\"oops, the y list that you \"\n \"provided does not match the \"\n \"length of your z matrix \")\n\n @staticmethod\n def _validate_table(table_text, font_colors):\n \"\"\"\n Table-specific validations\n\n Check that font_colors is supplied correctly (1, 3, or len(text)\n colors).\n\n :raises: (PlotlyError) If font_colors is supplied incorretly.\n\n See FigureFactory.create_table() for params\n \"\"\"\n font_colors_len_options = [1, 3, len(table_text)]\n if len(font_colors) not in font_colors_len_options:\n raise exceptions.PlotlyError(\"Oops, font_colors should be a list \"\n \"of length 1, 3 or len(text)\")\n\n @staticmethod\n def _flatten(array):\n \"\"\"\n Uses list comprehension to flatten array\n\n :param (array): An iterable to flatten\n :raises (PlotlyError): If iterable is not nested.\n :rtype (list): The flattened list.\n \"\"\"\n try:\n return [item for sublist in array for item in sublist]\n except TypeError:\n raise exceptions.PlotlyError(\"Your data array 
could not be \"\n \"flattened! Make sure your data is \"\n \"entered as lists or ndarrays!\")\n\n @staticmethod\n def _hex_to_rgb(value):\n \"\"\"\n Calculates rgb values from a hex color code.\n\n :param (string) value: Hex color string\n\n :rtype (tuple) (r_value, g_value, b_value): tuple of rgb values\n \"\"\"\n value = value.lstrip('#')\n hex_total_length = len(value)\n rgb_section_length = hex_total_length // 3\n return tuple(int(value[i:i + rgb_section_length], 16)\n for i in range(0, hex_total_length, rgb_section_length))\n\n @staticmethod\n def create_quiver(x, y, u, v, scale=.1, arrow_scale=.3,\n angle=math.pi / 9, **kwargs):\n \"\"\"\n Returns data for a quiver plot.\n\n :param (list|ndarray) x: x coordinates of the arrow locations\n :param (list|ndarray) y: y coordinates of the arrow locations\n :param (list|ndarray) u: x components of the arrow vectors\n :param (list|ndarray) v: y components of the arrow vectors\n :param (float in [0,1]) scale: scales size of the arrows(ideally to\n avoid overlap). Default = .1\n :param (float in [0,1]) arrow_scale: value multiplied to length of barb\n to get length of arrowhead. Default = .3\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of quiver figure.\n\n Example 1: Trivial Quiver\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import math\n\n # 1 Arrow from (0,0) to (1,1)\n fig = FF.create_quiver(x=[0], y=[0],\n u=[1], v=[1],\n scale=1)\n\n py.plot(fig, filename='quiver')\n ```\n\n Example 2: Quiver plot using meshgrid\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))\n u = np.cos(x)*y\n v = np.sin(x)*y\n\n #Create quiver\n fig = FF.create_quiver(x, y, u, v)\n\n # Plot\n py.plot(fig, filename='quiver')\n ```\n\n Example 3: Styling the quiver plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n import math\n\n # Add data\n x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),\n np.arange(-math.pi, math.pi, .5))\n u = np.cos(x)*y\n v = np.sin(x)*y\n\n # Create quiver\n fig = FF.create_quiver(x, y, u, v, scale=.2,\n arrow_scale=.3,\n angle=math.pi/6,\n name='Wind Velocity',\n line=Line(width=1))\n\n # Add title to layout\n fig['layout'].update(title='Quiver Plot')\n\n # Plot\n py.plot(fig, filename='quiver')\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_equal_length(x, y, u, v)\n FigureFactory._validate_positive_scalars(arrow_scale=arrow_scale,\n scale=scale)\n\n barb_x, barb_y = _Quiver(x, y, u, v, scale,\n arrow_scale, angle).get_barbs()\n arrow_x, arrow_y = _Quiver(x, y, u, v, scale,\n arrow_scale, angle).get_quiver_arrows()\n quiver = graph_objs.Scatter(x=barb_x + arrow_x,\n y=barb_y + arrow_y,\n mode='lines', **kwargs)\n\n data = [quiver]\n layout = graph_objs.Layout(hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_streamline(x, y, u, v,\n density=1, angle=math.pi / 9,\n arrow_scale=.09, **kwargs):\n \"\"\"\n Returns data for a streamline plot.\n\n :param (list|ndarray) x: 1 dimensional, evenly spaced list or array\n :param (list|ndarray) y: 1 dimensional, evenly spaced 
list or array\n :param (ndarray) u: 2 dimensional array\n :param (ndarray) v: 2 dimensional array\n :param (float|int) density: controls the density of streamlines in\n plot. This is multiplied by 30 to scale similiarly to other\n available streamline functions such as matplotlib.\n Default = 1\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of streamline figure.\n\n Example 1: Plot simple streamline and increase arrow size\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n x = np.linspace(-3, 3, 100)\n y = np.linspace(-3, 3, 100)\n Y, X = np.meshgrid(x, y)\n u = -1 - X**2 + Y\n v = 1 + X - Y**2\n u = u.T # Transpose\n v = v.T # Transpose\n\n # Create streamline\n fig = FF.create_streamline(x, y, u, v,\n arrow_scale=.1)\n\n # Plot\n py.plot(fig, filename='streamline')\n ```\n\n Example 2: from nbviewer.ipython.org/github/barbagroup/AeroPython\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n N = 50\n x_start, x_end = -2.0, 2.0\n y_start, y_end = -1.0, 1.0\n x = np.linspace(x_start, x_end, N)\n y = np.linspace(y_start, y_end, N)\n X, Y = np.meshgrid(x, y)\n ss = 5.0\n x_s, y_s = -1.0, 0.0\n\n # Compute the velocity field on the mesh grid\n u_s = ss/(2*np.pi) * (X-x_s)/((X-x_s)**2 + (Y-y_s)**2)\n v_s = ss/(2*np.pi) * (Y-y_s)/((X-x_s)**2 + (Y-y_s)**2)\n\n # Create streamline\n fig = FF.create_streamline(x, y, u_s, v_s,\n density=2, name='streamline')\n\n # Add source point\n point = Scatter(x=[x_s], y=[y_s], mode='markers',\n marker=Marker(size=14), name='source point')\n\n # Plot\n fig['data'].append(point)\n py.plot(fig, filename='streamline')\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_equal_length(x, y)\n FigureFactory._validate_equal_length(u, v)\n FigureFactory._validate_streamline(x, y)\n FigureFactory._validate_positive_scalars(density=density,\n arrow_scale=arrow_scale)\n\n streamline_x, streamline_y = _Streamline(x, y, u, v,\n density, angle,\n arrow_scale).sum_streamlines()\n arrow_x, arrow_y = _Streamline(x, y, u, v,\n density, angle,\n arrow_scale).get_streamline_arrows()\n\n streamline = graph_objs.Scatter(x=streamline_x + arrow_x,\n y=streamline_y + arrow_y,\n mode='lines', **kwargs)\n\n data = [streamline]\n layout = graph_objs.Layout(hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def _make_increasing_ohlc(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes increasing ohlc sticks\n\n _make_increasing_ohlc() and _make_decreasing_ohlc separate the\n increasing trace from the decreasing trace so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_incr_data: Scatter trace of all increasing ohlc\n sticks.\n \"\"\"\n (flat_increase_x,\n flat_increase_y,\n text_increase) = _OHLC(open, high, low, close, dates).get_increase()\n\n if 'name' in kwargs:\n showlegend = True\n else:\n kwargs.setdefault('name', 'Increasing')\n showlegend = False\n\n kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR,\n width=1))\n kwargs.setdefault('text', text_increase)\n\n ohlc_incr = dict(type='scatter',\n x=flat_increase_x,\n y=flat_increase_y,\n mode='lines',\n showlegend=showlegend,\n **kwargs)\n return ohlc_incr\n\n @staticmethod\n def _make_decreasing_ohlc(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes decreasing ohlc sticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_decr_data: Scatter trace of all decreasing ohlc\n sticks.\n \"\"\"\n (flat_decrease_x,\n flat_decrease_y,\n text_decrease) = _OHLC(open, high, low, close, dates).get_decrease()\n\n kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR,\n width=1))\n kwargs.setdefault('text', text_decrease)\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('name', 'Decreasing')\n\n ohlc_decr = dict(type='scatter',\n x=flat_decrease_x,\n y=flat_decrease_y,\n mode='lines',\n **kwargs)\n return ohlc_decr\n\n @staticmethod\n def create_ohlc(open, high, low, close,\n dates=None, direction='both',\n **kwargs):\n \"\"\"\n BETA function that creates an ohlc chart\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all units where the close value is greater than the\n corresponding open value, and when the direction is 'decreasing',\n the returned figure consists of all units where the close value is\n less than or equal to the corresponding open value. When the\n direction is 'both', both increasing and decreasing units are\n returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of an ohlc chart figure.\n\n Example 1: Simple OHLC chart from a Pandas DataFrame\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))\n fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)\n\n py.plot(fig, filename='finance/aapl-ohlc')\n ```\n\n Example 2: Add text and annotations to the OHLC chart\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))\n fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)\n\n # Update the fig - all options here: https://plot.ly/python/reference/#Layout\n fig['layout'].update({\n 'title': 'The Great Recession',\n 'yaxis': {'title': 'AAPL Stock'},\n 'shapes': [{\n 'x0': '2008-09-15', 'x1': '2008-09-15', 'type': 'line',\n 'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',\n 'line': {'color': 'rgb(40,40,40)', 'width': 0.5}\n }],\n 'annotations': [{\n 'text': \"the fall of Lehman Brothers\",\n 'x': '2008-09-15', 'y': 1.02,\n 'xref': 'x', 'yref': 'paper',\n 'showarrow': False, 'xanchor': 'left'\n }]\n })\n\n py.plot(fig, filename='finance/aapl-recession-ohlc', validate=False)\n ```\n\n Example 3: Customize the OHLC colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import Line, Marker\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))\n\n # Make increasing ohlc sticks and customize their color and name\n fig_increasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='increasing', name='AAPL',\n line=Line(color='rgb(150, 200, 250)'))\n\n # Make decreasing ohlc sticks and customize their color and name\n fig_decreasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='decreasing',\n line=Line(color='rgb(128, 128, 128)'))\n\n # Initialize the figure\n fig = fig_increasing\n\n # Add decreasing data with .extend()\n fig['data'].extend(fig_decreasing['data'])\n\n py.iplot(fig, filename='finance/aapl-ohlc-colors', validate=False)\n ```\n\n Example 4: OHLC chart with datetime objects\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n from datetime import datetime\n\n # Add data\n open_data = [33.0, 33.3, 33.5, 33.0, 34.1]\n high_data = [33.1, 33.3, 33.6, 33.2, 34.8]\n low_data = [32.7, 32.7, 32.8, 32.6, 32.8]\n close_data = [33.0, 32.9, 33.3, 33.1, 33.1]\n dates = [datetime(year=2013, month=10, day=10),\n datetime(year=2013, month=11, day=10),\n datetime(year=2013, month=12, day=10),\n datetime(year=2014, month=1, day=10),\n datetime(year=2014, month=2, day=10)]\n\n # Create ohlc\n fig = FF.create_ohlc(open_data, high_data,\n low_data, close_data, dates=dates)\n\n py.iplot(fig, filename='finance/simple-ohlc', validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n if dates is not None:\n FigureFactory._validate_equal_length(open, high, low, close, dates)\n else:\n FigureFactory._validate_equal_length(open, high, low, close)\n 
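# Input lengths were checked above (with or without dates); _validate_ohlc below verifies O-H-L-C ordering and the direction flag before any traces are built.\n 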
FigureFactory._validate_ohlc(open, high, low, close, direction,\n **kwargs)\n\n if direction == 'increasing':\n ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_incr]\n elif direction == 'decreasing':\n ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_decr]\n else:\n ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_incr, ohlc_decr]\n\n layout = graph_objs.Layout(xaxis=dict(zeroline=False),\n hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def _make_increasing_candle(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes boxplot trace for increasing candlesticks\n\n _make_increasing_candle() and _make_decreasing_candle separate the\n increasing traces from the decreasing traces so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_incr_data: list of the box trace for\n increasing candlesticks.\n \"\"\"\n increase_x, increase_y = _Candlestick(\n open, high, low, close, dates, **kwargs).get_candle_increase()\n\n if 'line' in kwargs:\n kwargs.setdefault('fillcolor', kwargs['line']['color'])\n else:\n kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)\n if 'name' in kwargs:\n kwargs.setdefault('showlegend', True)\n else:\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('name', 'Increasing')\n kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))\n\n candle_incr_data = dict(type='box',\n x=increase_x,\n y=increase_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs)\n\n return [candle_incr_data]\n\n @staticmethod\n def _make_decreasing_candle(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes boxplot trace for decreasing candlesticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to decreasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_decr_data: list of the box trace for\n decreasing candlesticks.\n \"\"\"\n\n decrease_x, decrease_y = _Candlestick(\n open, high, low, close, dates, **kwargs).get_candle_decrease()\n\n if 'line' in kwargs:\n kwargs.setdefault('fillcolor', kwargs['line']['color'])\n else:\n kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))\n kwargs.setdefault('name', 'Decreasing')\n\n candle_decr_data = dict(type='box',\n x=decrease_x,\n y=decrease_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs)\n\n return [candle_decr_data]\n\n @staticmethod\n def create_candlestick(open, high, low, close,\n dates=None, direction='both', **kwargs):\n \"\"\"\n BETA function that creates a candlestick chart\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all candlesticks where the close value is greater than\n the corresponding open value, and when the direction is\n 'decreasing', the returned figure consists of all candlesticks\n where the close value is less than or equal to the corresponding\n open value. When the direction is 'both', both increasing and\n decreasing candlesticks are returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of candlestick chart figure.\n\n Example 1: Simple candlestick chart from a Pandas DataFrame\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2007, 10, 1), datetime(2009, 4, 1))\n fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)\n py.plot(fig, filename='finance/aapl-candlestick', validate=False)\n ```\n\n Example 2: Add text and annotations to the candlestick chart\n ```\n fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)\n # Update the fig - all options here: https://plot.ly/python/reference/#Layout\n fig['layout'].update({\n 'title': 'The Great Recession',\n 'yaxis': {'title': 'AAPL Stock'},\n 'shapes': [{\n 'x0': '2007-12-01', 'x1': '2007-12-01',\n 'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',\n 'line': {'color': 'rgb(30,30,30)', 'width': 1}\n }],\n 'annotations': [{\n 'x': '2007-12-01', 'y': 0.05, 'xref': 'x', 'yref': 'paper',\n 'showarrow': False, 'xanchor': 'left',\n 'text': 'Official start of the recession'\n }]\n })\n py.plot(fig, filename='finance/aapl-recession-candlestick', validate=False)\n ```\n\n Example 3: Customize the candlestick colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import Line, Marker\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))\n\n # Make increasing candlesticks and customize their color and name\n fig_increasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='increasing', name='AAPL',\n marker=Marker(color='rgb(150, 200, 250)'),\n line=Line(color='rgb(150, 200, 250)'))\n\n # Make decreasing candlesticks and customize their color and name\n fig_decreasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='decreasing',\n marker=Marker(color='rgb(128, 128, 128)'),\n line=Line(color='rgb(128, 128, 128)'))\n\n # Initialize the figure\n fig = fig_increasing\n\n # Add decreasing data with .extend()\n fig['data'].extend(fig_decreasing['data'])\n\n py.iplot(fig, filename='finance/aapl-candlestick-custom', validate=False)\n ```\n\n Example 4: Candlestick chart with datetime objects\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n from datetime import datetime\n\n # Add data\n open_data = [33.0, 33.3, 33.5, 33.0, 34.1]\n high_data = [33.1, 33.3, 33.6, 33.2, 34.8]\n low_data = [32.7, 32.7, 32.8, 32.6, 32.8]\n close_data = [33.0, 32.9, 33.3, 33.1, 33.1]\n dates = [datetime(year=2013, month=10, day=10),\n datetime(year=2013, month=11, day=10),\n datetime(year=2013, month=12, day=10),\n datetime(year=2014, month=1, day=10),\n datetime(year=2014, month=2, day=10)]\n\n # Create ohlc\n fig = FF.create_candlestick(open_data, high_data,\n low_data, close_data, dates=dates)\n\n py.iplot(fig, filename='finance/simple-candlestick', validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n if dates is not None:\n FigureFactory._validate_equal_length(open, high, low, close, dates)\n else:\n FigureFactory._validate_equal_length(open, high, low, close)\n FigureFactory._validate_ohlc(open, high, low, close, direction,\n 
**kwargs)\n\n if direction == 'increasing':\n candle_incr_data = FigureFactory._make_increasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_incr_data\n elif direction == 'decreasing':\n candle_decr_data = FigureFactory._make_decreasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_decr_data\n else:\n candle_incr_data = FigureFactory._make_increasing_candle(\n open, high, low, close, dates, **kwargs)\n candle_decr_data = FigureFactory._make_decreasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_incr_data + candle_decr_data\n\n layout = graph_objs.Layout()\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_distplot(hist_data, group_labels,\n bin_size=1., curve_type='kde',\n colors=[], rug_text=[], histnorm=DEFAULT_HISTNORM,\n show_hist=True, show_curve=True,\n show_rug=True):\n \"\"\"\n BETA function that creates a distplot similar to seaborn.distplot\n\n The distplot can be composed of all or any combination of the following\n 3 components: (1) histogram, (2) curve: (a) kernel density estimation\n or (b) normal curve, and (3) rug plot. Additionally, multiple distplots\n (from multiple datasets) can be created in the same plot.\n\n :param (list[list]) hist_data: Use list of lists to plot multiple data\n sets on the same plot.\n :param (list[str]) group_labels: Names for each data set.\n :param (list[float]|float) bin_size: Size of histogram bins.\n Default = 1.\n :param (str) curve_type: 'kde' or 'normal'. Default = 'kde'\n :param (str) histnorm: 'probability density' or 'probability'\n Default = 'probability density'\n :param (bool) show_hist: Add histogram to distplot? Default = True\n :param (bool) show_curve: Add curve to distplot? Default = True\n :param (bool) show_rug: Add rug to distplot? 
Default = True\n :param (list[str]) colors: Colors for traces.\n :param (list[list]) rug_text: Hovertext values for rug_plot,\n :return (dict): Representation of a distplot figure.\n\n Example 1: Simple distplot of 1 data set\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,\n 3.5, 4.1, 4.4, 4.5, 4.5,\n 5.0, 5.0, 5.2, 5.5, 5.5,\n 5.5, 5.5, 5.5, 6.1, 7.0]]\n\n group_labels = ['distplot example']\n\n fig = FF.create_distplot(hist_data, group_labels)\n\n url = py.plot(fig, filename='Simple distplot', validate=False)\n ```\n\n Example 2: Two data sets and added rug text\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Add histogram data\n hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,\n -0.9, -0.07, 1.95, 0.9, -0.2,\n -0.5, 0.3, 0.4, -0.37, 0.6]\n hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,\n 1.0, 0.8, 1.7, 0.5, 0.8,\n -0.3, 1.2, 0.56, 0.3, 2.2]\n\n # Group data together\n hist_data = [hist1_x, hist2_x]\n\n group_labels = ['2012', '2013']\n\n # Add text\n rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',\n 'f1', 'g1', 'h1', 'i1', 'j1',\n 'k1', 'l1', 'm1', 'n1', 'o1']\n\n rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',\n 'f2', 'g2', 'h2', 'i2', 'j2',\n 'k2', 'l2', 'm2', 'n2', 'o2']\n\n # Group text together\n rug_text_all = [rug_text_1, rug_text_2]\n\n # Create distplot\n fig = FF.create_distplot(\n hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)\n\n # Add title\n fig['layout'].update(title='Dist Plot')\n\n # Plot!\n url = py.plot(fig, filename='Distplot with rug text', validate=False)\n ```\n\n Example 3: Plot with normal curve and hide rug plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n\n x1 = np.random.randn(190)\n x2 = np.random.randn(200)+1\n x3 = np.random.randn(200)-1\n x4 = np.random.randn(210)+2\n\n hist_data = [x1, x2, x3, x4]\n group_labels = ['2012', '2013', '2014', '2015']\n\n fig = FF.create_distplot(\n hist_data, group_labels, curve_type='normal',\n show_rug=False, bin_size=.4)\n\n url = py.plot(fig, filename='hist and normal curve', validate=False)\n\n Example 4: Distplot with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n import pandas as pd\n\n df = pd.DataFrame({'2012': np.random.randn(200),\n '2013': np.random.randn(200)+1})\n py.iplot(FF.create_distplot([df[c] for c in df.columns], df.columns),\n filename='examples/distplot with pandas',\n validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_distplot(hist_data, curve_type)\n FigureFactory._validate_equal_length(hist_data, group_labels)\n\n if isinstance(bin_size, (float, int)):\n bin_size = [bin_size]*len(hist_data)\n\n hist = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_hist()\n\n if curve_type == 'normal':\n curve = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_normal()\n else:\n curve = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_kde()\n\n rug = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_rug()\n\n data = []\n if show_hist:\n data.append(hist)\n if show_curve:\n data.append(curve)\n if show_rug:\n data.append(rug)\n layout = 
graph_objs.Layout(\n barmode='overlay',\n hovermode='closest',\n legend=dict(traceorder='reversed'),\n xaxis1=dict(domain=[0.0, 1.0],\n anchor='y2',\n zeroline=False),\n yaxis1=dict(domain=[0.35, 1],\n anchor='free',\n position=0.0),\n yaxis2=dict(domain=[0, 0.25],\n anchor='x1',\n dtick=1,\n showticklabels=False))\n else:\n layout = graph_objs.Layout(\n barmode='overlay',\n hovermode='closest',\n legend=dict(traceorder='reversed'),\n xaxis1=dict(domain=[0.0, 1.0],\n anchor='y2',\n zeroline=False),\n yaxis1=dict(domain=[0., 1],\n anchor='free',\n position=0.0))\n\n data = sum(data, [])\n return graph_objs.Figure(data=data, layout=layout)\n\n\n @staticmethod\n def create_dendrogram(X, orientation=\"bottom\", labels=None,\n colorscale=None):\n \"\"\"\n BETA function that returns a dendrogram Plotly figure object.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (str) orientation: 'top', 'right', 'bottom', or 'left'\n :param (list) labels: List of axis category labels(observation labels)\n :param (list) colorscale: Optional colorscale for dendrogram tree\n clusters\n\n Example 1: Simple bottom oriented dendrogram\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n X = np.random.rand(10,10)\n dendro = FF.create_dendrogram(X)\n plot_url = py.plot(dendro, filename='simple-dendrogram')\n\n ```\n\n Example 2: Dendrogram to put on the left of the heatmap\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n X = np.random.rand(5,5)\n names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']\n dendro = FF.create_dendrogram(X, orientation='right', labels=names)\n dendro['layout'].update({'width':700, 'height':500})\n\n py.iplot(dendro, filename='vertical-dendrogram')\n ```\n\n Example 3: Dendrogram with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n Index= ['A','B','C','D','E','F','G','H','I','J']\n df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)\n fig = FF.create_dendrogram(df, labels=Index)\n url = py.plot(fig, filename='pandas-dendrogram')\n ```\n \"\"\"\n dependencies = (_scipy_imported and _scipy__spatial_imported and\n _scipy__cluster__hierarchy_imported)\n\n if not dependencies:\n raise ImportError(\"FigureFactory.create_dendrogram requires scipy, \\\n scipy.spatial and scipy.cluster.hierarchy\")\n\n s = X.shape\n if len(s) != 2:\n raise exceptions.PlotlyError(\"X should be a 2-dimensional array.\")\n\n dendrogram = _Dendrogram(X, orientation, labels, colorscale)\n\n return {'layout': dendrogram.layout,\n 'data': dendrogram.data}\n\n @staticmethod\n def create_annotated_heatmap(z, x=None, y=None, annotation_text=None,\n colorscale='RdBu', font_colors=None,\n showscale=False, reversescale=False,\n **kwargs):\n \"\"\"\n BETA function that creates annotated heatmaps\n\n This function adds annotations to each cell of the heatmap.\n\n :param (list[list]|ndarray) z: z matrix to create heatmap.\n :param (list) x: x axis labels.\n :param (list) y: y axis labels.\n :param (list[list]|ndarray) annotation_text: Text strings for\n annotations. Should have the same dimensions as the z matrix. If no\n text is added, the values of the z matrix are annotated. 
Default =\n z matrix values.\n :param (list|str) colorscale: heatmap colorscale.\n :param (list) font_colors: List of two color strings: [min_text_color,\n max_text_color] where min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. If font_colors is not\n defined, the colors are defined logically as black or white\n depending on the heatmap's colorscale.\n :param (bool) showscale: Display colorscale. Default = False\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple annotated heatmap with default configuration\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n z = [[0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.4300],\n [0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.00000]]\n\n figure = FF.create_annotated_heatmap(z)\n py.iplot(figure)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Avoiding mutables in the call signature\n font_colors = font_colors if font_colors is not None else []\n FigureFactory._validate_annotated_heatmap(z, x, y, annotation_text)\n annotations = _AnnotatedHeatmap(z, x, y, annotation_text,\n colorscale, font_colors, reversescale,\n **kwargs).make_annotations()\n\n if x or y:\n trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,\n showscale=showscale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', dtick=1, side='top',\n gridcolor='rgb(0, 0, 0)'),\n yaxis=dict(ticks='', dtick=1, ticksuffix=' '))\n else:\n trace = dict(type='heatmap', z=z, colorscale=colorscale,\n showscale=showscale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', side='top',\n gridcolor='rgb(0, 0, 0)',\n showticklabels=False),\n yaxis=dict(ticks='', ticksuffix=' ',\n showticklabels=False))\n\n data = [trace]\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_table(table_text, colorscale=None, font_colors=None,\n index=False, index_title='', annotation_offset=.45,\n height_constant=30, hoverinfo='none', **kwargs):\n \"\"\"\n BETA function that creates data tables\n\n :param (pandas.Dataframe | list[list]) text: data for table.\n :param (str|list[list]) colorscale: Colorscale for table where the\n color at value 0 is the header color, .5 is the first table color\n and 1 is the second table color. (Set .5 and 1 to avoid the striped\n table effect). Default=[[0, '#66b2ff'], [.5, '#d9d9d9'],\n [1, '#ffffff']]\n :param (list) font_colors: Color for fonts in table. Can be a single\n color, three colors, or a color for each row in the table.\n Default=['#000000'] (black text for the entire table)\n :param (int) height_constant: Constant multiplied by # of rows to\n create table height. Default=30.\n :param (bool) index: Create (header-colored) index column index from\n Pandas dataframe or list[0] for each list in text. Default=False.\n :param (string) index_title: Title for index column. Default=''.\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. 
For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple Plotly Table\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n text = [['Country', 'Year', 'Population'],\n ['US', 2000, 282200000],\n ['Canada', 2000, 27790000],\n ['US', 2010, 309000000],\n ['Canada', 2010, 34000000]]\n\n table = FF.create_table(text)\n py.iplot(table)\n ```\n\n Example 2: Table with Custom Coloring\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n text = [['Country', 'Year', 'Population'],\n ['US', 2000, 282200000],\n ['Canada', 2000, 27790000],\n ['US', 2010, 309000000],\n ['Canada', 2010, 34000000]]\n\n table = FF.create_table(text,\n colorscale=[[0, '#000000'],\n [.5, '#80beff'],\n [1, '#cce5ff']],\n font_colors=['#ffffff', '#000000',\n '#000000'])\n py.iplot(table)\n ```\n Example 3: Simple Plotly Table with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import pandas as pd\n\n df = pd.read_csv('http://www.stat.ubc.ca/~jenny/notOcto/STAT545A/examples/gapminder/data/gapminderDataFiveYear.txt', sep='\\t')\n df_p = df[0:25]\n\n table_simple = FF.create_table(df_p)\n py.iplot(table_simple)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Avoiding mutables in the call signature\n colorscale = \\\n colorscale if colorscale is not None else [[0, '#00083e'],\n [.5, '#ededee'],\n [1, '#ffffff']]\n font_colors = font_colors if font_colors is not None else ['#ffffff',\n '#000000',\n '#000000']\n\n FigureFactory._validate_table(table_text, font_colors)\n table_matrix = _Table(table_text, colorscale, font_colors, index,\n index_title, annotation_offset,\n **kwargs).get_table_matrix()\n annotations = _Table(table_text, colorscale, font_colors, index,\n index_title, annotation_offset,\n **kwargs).make_table_annotations()\n\n trace = dict(type='heatmap', z=table_matrix, opacity=.75,\n colorscale=colorscale, showscale=False,\n hoverinfo=hoverinfo, **kwargs)\n\n data = [trace]\n layout = dict(annotations=annotations,\n height=len(table_matrix)*height_constant + 50,\n margin=dict(t=0, b=0, r=0, l=0),\n yaxis=dict(autorange='reversed', zeroline=False,\n gridwidth=2, ticks='', dtick=1, tick0=.5,\n showticklabels=False),\n xaxis=dict(zeroline=False, gridwidth=2, ticks='',\n dtick=1, tick0=-0.5, showticklabels=False))\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Quiver(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_quiver() for docstring\n \"\"\"\n def __init__(self, x, y, u, v,\n scale, arrow_scale, angle, **kwargs):\n try:\n x = FigureFactory._flatten(x)\n except exceptions.PlotlyError:\n pass\n\n try:\n y = FigureFactory._flatten(y)\n except exceptions.PlotlyError:\n pass\n\n try:\n u = FigureFactory._flatten(u)\n except exceptions.PlotlyError:\n pass\n\n try:\n v = FigureFactory._flatten(v)\n except exceptions.PlotlyError:\n pass\n\n self.x = x\n self.y = y\n self.u = u\n self.v = v\n self.scale = scale\n self.arrow_scale = arrow_scale\n self.angle = angle\n self.end_x = []\n self.end_y = []\n self.scale_uv()\n barb_x, barb_y = self.get_barbs()\n arrow_x, arrow_y = self.get_quiver_arrows()\n\n def scale_uv(self):\n \"\"\"\n Scales u and v to avoid overlap of the arrows.\n\n u and v are added to x and y to get the\n endpoints of the arrows so a smaller scale value will\n result in less overlap of arrows.\n \"\"\"\n self.u = [i * self.scale for i in self.u]\n self.v = [i * self.scale for i in 
self.v]\n\n def get_barbs(self):\n \"\"\"\n Creates x and y startpoint and endpoint pairs\n\n After finding the endpoint of each barb this zips startpoint and\n endpoint pairs to create 2 lists: x_values for barbs and y values\n for barbs\n\n :rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint\n x_value pairs separated by a None to create the barb of the arrow,\n and list of startpoint and endpoint y_value pairs separated by a\n None to create the barb of the arrow.\n \"\"\"\n self.end_x = [i + j for i, j in zip(self.x, self.u)]\n self.end_y = [i + j for i, j in zip(self.y, self.v)]\n empty = [None] * len(self.x)\n barb_x = FigureFactory._flatten(zip(self.x, self.end_x, empty))\n barb_y = FigureFactory._flatten(zip(self.y, self.end_y, empty))\n return barb_x, barb_y\n\n def get_quiver_arrows(self):\n \"\"\"\n Creates lists of x and y values to plot the arrows\n\n Gets length of each barb then calculates the length of each side of\n the arrow. Gets angle of barb and applies angle to each side of the\n arrowhead. Next uses arrow_scale to scale the length of arrowhead and\n creates x and y values for arrowhead point1 and point2. Finally x and y\n values for point1, endpoint and point2s for each arrowhead are\n separated by a None and zipped to create lists of x and y values for\n the arrows.\n\n :rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint, point2\n x_values separated by a None to create the arrowhead and list of\n point1, endpoint, point2 y_values separated by a None to create\n the barb of the arrow.\n \"\"\"\n dif_x = [i - j for i, j in zip(self.end_x, self.x)]\n dif_y = [i - j for i, j in zip(self.end_y, self.y)]\n\n # Get barb lengths(default arrow length = 30% barb length)\n barb_len = [None] * len(self.x)\n for index in range(len(barb_len)):\n barb_len[index] = math.hypot(dif_x[index], dif_y[index])\n\n # Make arrow lengths\n arrow_len = [None] * len(self.x)\n arrow_len = [i * self.arrow_scale for i in barb_len]\n\n # Get barb angles\n barb_ang = [None] * len(self.x)\n for index in range(len(barb_ang)):\n barb_ang[index] = math.atan2(dif_y[index], dif_x[index])\n\n # Set angles to create arrow\n ang1 = [i + self.angle for i in barb_ang]\n ang2 = [i - self.angle for i in barb_ang]\n\n cos_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n cos_ang1[index] = math.cos(ang1[index])\n seg1_x = [i * j for i, j in zip(arrow_len, cos_ang1)]\n\n sin_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n sin_ang1[index] = math.sin(ang1[index])\n seg1_y = [i * j for i, j in zip(arrow_len, sin_ang1)]\n\n cos_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n cos_ang2[index] = math.cos(ang2[index])\n seg2_x = [i * j for i, j in zip(arrow_len, cos_ang2)]\n\n sin_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n sin_ang2[index] = math.sin(ang2[index])\n seg2_y = [i * j for i, j in zip(arrow_len, sin_ang2)]\n\n # Set coordinates to create arrow\n for index in range(len(self.end_x)):\n point1_x = [i - j for i, j in zip(self.end_x, seg1_x)]\n point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]\n point2_x = [i - j for i, j in zip(self.end_x, seg2_x)]\n point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]\n\n # Combine lists to create arrow\n empty = [None] * len(self.end_x)\n arrow_x = FigureFactory._flatten(zip(point1_x, self.end_x,\n point2_x, empty))\n arrow_y = FigureFactory._flatten(zip(point1_y, self.end_y,\n point2_y, empty))\n return arrow_x, arrow_y\n\n\nclass _Streamline(FigureFactory):\n \"\"\"\n Refer to 
FigureFactory.create_streamline() for docstring\n \"\"\"\n def __init__(self, x, y, u, v,\n density, angle,\n arrow_scale, **kwargs):\n self.x = np.array(x)\n self.y = np.array(y)\n self.u = np.array(u)\n self.v = np.array(v)\n self.angle = angle\n self.arrow_scale = arrow_scale\n self.density = int(30 * density) # Scale similarly to other functions\n self.delta_x = self.x[1] - self.x[0]\n self.delta_y = self.y[1] - self.y[0]\n self.val_x = self.x\n self.val_y = self.y\n\n # Set up spacing\n self.blank = np.zeros((self.density, self.density))\n self.spacing_x = len(self.x) / float(self.density - 1)\n self.spacing_y = len(self.y) / float(self.density - 1)\n self.trajectories = []\n\n # Rescale speed onto axes-coordinates\n self.u = self.u / (self.x[-1] - self.x[0])\n self.v = self.v / (self.y[-1] - self.y[0])\n self.speed = np.sqrt(self.u ** 2 + self.v ** 2)\n\n # Rescale u and v for integrations.\n self.u *= len(self.x)\n self.v *= len(self.y)\n self.st_x = []\n self.st_y = []\n self.get_streamlines()\n streamline_x, streamline_y = self.sum_streamlines()\n arrows_x, arrows_y = self.get_streamline_arrows()\n\n def blank_pos(self, xi, yi):\n \"\"\"\n Set up positions for trajectories to be used with rk4 function.\n \"\"\"\n return (int((xi / self.spacing_x) + 0.5),\n int((yi / self.spacing_y) + 0.5))\n\n def value_at(self, a, xi, yi):\n \"\"\"\n Set up for RK4 function, based on Bokeh's streamline code\n \"\"\"\n if isinstance(xi, np.ndarray):\n self.x = xi.astype(np.int)\n self.y = yi.astype(np.int)\n else:\n self.val_x = np.int(xi)\n self.val_y = np.int(yi)\n a00 = a[self.val_y, self.val_x]\n a01 = a[self.val_y, self.val_x + 1]\n a10 = a[self.val_y + 1, self.val_x]\n a11 = a[self.val_y + 1, self.val_x + 1]\n xt = xi - self.val_x\n yt = yi - self.val_y\n a0 = a00 * (1 - xt) + a01 * xt\n a1 = a10 * (1 - xt) + a11 * xt\n return a0 * (1 - yt) + a1 * yt\n\n def rk4_integrate(self, x0, y0):\n \"\"\"\n RK4 forward and back trajectories from the initial conditions.\n\n Adapted from Bokeh's streamline -uses Runge-Kutta method to fill\n x and y trajectories then checks length of traj (s in units of axes)\n \"\"\"\n def f(xi, yi):\n dt_ds = 1. / self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return ui * dt_ds, vi * dt_ds\n\n def g(xi, yi):\n dt_ds = 1. 
/ self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return -ui * dt_ds, -vi * dt_ds\n\n check = lambda xi, yi: (0 <= xi < len(self.x) - 1 and\n 0 <= yi < len(self.y) - 1)\n xb_changes = []\n yb_changes = []\n\n def rk4(x0, y0, f):\n ds = 0.01\n stotal = 0\n xi = x0\n yi = y0\n xb, yb = self.blank_pos(xi, yi)\n xf_traj = []\n yf_traj = []\n while check(xi, yi):\n xf_traj.append(xi)\n yf_traj.append(yi)\n try:\n k1x, k1y = f(xi, yi)\n k2x, k2y = f(xi + .5 * ds * k1x, yi + .5 * ds * k1y)\n k3x, k3y = f(xi + .5 * ds * k2x, yi + .5 * ds * k2y)\n k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)\n except IndexError:\n break\n xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.\n yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.\n if not check(xi, yi):\n break\n stotal += ds\n new_xb, new_yb = self.blank_pos(xi, yi)\n if new_xb != xb or new_yb != yb:\n if self.blank[new_yb, new_xb] == 0:\n self.blank[new_yb, new_xb] = 1\n xb_changes.append(new_xb)\n yb_changes.append(new_yb)\n xb = new_xb\n yb = new_yb\n else:\n break\n if stotal > 2:\n break\n return stotal, xf_traj, yf_traj\n\n sf, xf_traj, yf_traj = rk4(x0, y0, f)\n sb, xb_traj, yb_traj = rk4(x0, y0, g)\n stotal = sf + sb\n x_traj = xb_traj[::-1] + xf_traj[1:]\n y_traj = yb_traj[::-1] + yf_traj[1:]\n\n if len(x_traj) < 1:\n return None\n if stotal > .2:\n initxb, inityb = self.blank_pos(x0, y0)\n self.blank[inityb, initxb] = 1\n return x_traj, y_traj\n else:\n for xb, yb in zip(xb_changes, yb_changes):\n self.blank[yb, xb] = 0\n return None\n\n def traj(self, xb, yb):\n \"\"\"\n Integrate trajectories\n\n :param (int) xb: results of passing xi through self.blank_pos\n :param (int) xy: results of passing yi through self.blank_pos\n\n Calculate each trajectory based on rk4 integrate method.\n \"\"\"\n\n if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:\n return\n if self.blank[yb, xb] == 0:\n t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)\n if t is not None:\n self.trajectories.append(t)\n\n def get_streamlines(self):\n \"\"\"\n Get streamlines by building trajectory set.\n \"\"\"\n for indent in range(self.density // 2):\n for xi in range(self.density - 2 * indent):\n self.traj(xi + indent, indent)\n self.traj(xi + indent, self.density - 1 - indent)\n self.traj(indent, xi + indent)\n self.traj(self.density - 1 - indent, xi + indent)\n\n self.st_x = [np.array(t[0]) * self.delta_x + self.x[0] for t in\n self.trajectories]\n self.st_y = [np.array(t[1]) * self.delta_y + self.y[0] for t in\n self.trajectories]\n\n for index in range(len(self.st_x)):\n self.st_x[index] = self.st_x[index].tolist()\n self.st_x[index].append(np.nan)\n\n for index in range(len(self.st_y)):\n self.st_y[index] = self.st_y[index].tolist()\n self.st_y[index].append(np.nan)\n\n def get_streamline_arrows(self):\n \"\"\"\n Makes an arrow for each streamline.\n\n Gets angle of streamline at 1/3 mark and creates arrow coordinates\n based off of user defined angle and arrow_scale.\n\n :param (array) st_x: x-values for all streamlines\n :param (array) st_y: y-values for all streamlines\n :param (angle in radians) angle: angle of arrowhead. 
Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :rtype (list, list) arrows_x: x-values to create arrowhead and\n arrows_y: y-values to create arrowhead\n \"\"\"\n arrow_end_x = np.empty((len(self.st_x)))\n arrow_end_y = np.empty((len(self.st_y)))\n arrow_start_x = np.empty((len(self.st_x)))\n arrow_start_y = np.empty((len(self.st_y)))\n for index in range(len(self.st_x)):\n arrow_end_x[index] = (self.st_x[index]\n [int(len(self.st_x[index]) / 3)])\n arrow_start_x[index] = (self.st_x[index]\n [(int(len(self.st_x[index]) / 3)) - 1])\n arrow_end_y[index] = (self.st_y[index]\n [int(len(self.st_y[index]) / 3)])\n arrow_start_y[index] = (self.st_y[index]\n [(int(len(self.st_y[index]) / 3)) - 1])\n\n dif_x = arrow_end_x - arrow_start_x\n dif_y = arrow_end_y - arrow_start_y\n\n streamline_ang = np.arctan(dif_y / dif_x)\n\n ang1 = streamline_ang + (self.angle)\n ang2 = streamline_ang - (self.angle)\n\n seg1_x = np.cos(ang1) * self.arrow_scale\n seg1_y = np.sin(ang1) * self.arrow_scale\n seg2_x = np.cos(ang2) * self.arrow_scale\n seg2_y = np.sin(ang2) * self.arrow_scale\n\n point1_x = np.empty((len(dif_x)))\n point1_y = np.empty((len(dif_y)))\n point2_x = np.empty((len(dif_x)))\n point2_y = np.empty((len(dif_y)))\n\n for index in range(len(dif_x)):\n if dif_x[index] >= 0:\n point1_x[index] = arrow_end_x[index] - seg1_x[index]\n point1_y[index] = arrow_end_y[index] - seg1_y[index]\n point2_x[index] = arrow_end_x[index] - seg2_x[index]\n point2_y[index] = arrow_end_y[index] - seg2_y[index]\n else:\n point1_x[index] = arrow_end_x[index] + seg1_x[index]\n point1_y[index] = arrow_end_y[index] + seg1_y[index]\n point2_x[index] = arrow_end_x[index] + seg2_x[index]\n point2_y[index] = arrow_end_y[index] + seg2_y[index]\n\n space = np.empty((len(point1_x)))\n space[:] = np.nan\n\n # Combine arrays into matrix\n arrows_x = np.matrix([point1_x, arrow_end_x, point2_x, space])\n arrows_x = np.array(arrows_x)\n arrows_x = arrows_x.flatten('F')\n arrows_x = arrows_x.tolist()\n\n # Combine arrays into matrix\n arrows_y = np.matrix([point1_y, arrow_end_y, point2_y, space])\n arrows_y = np.array(arrows_y)\n arrows_y = arrows_y.flatten('F')\n arrows_y = arrows_y.tolist()\n\n return arrows_x, arrows_y\n\n def sum_streamlines(self):\n \"\"\"\n Makes all streamlines readable as a single trace.\n\n :rtype (list, list): streamline_x: all x values for each streamline\n combined into single list and streamline_y: all y values for each\n streamline combined into single list\n \"\"\"\n streamline_x = sum(self.st_x, [])\n streamline_y = sum(self.st_y, [])\n return streamline_x, streamline_y\n\n\nclass _OHLC(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_ohlc_increase() for docstring.\n \"\"\"\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.empty = [None] * len(open)\n self.dates = dates\n\n self.all_x = []\n self.all_y = []\n self.increase_x = []\n self.increase_y = []\n self.decrease_x = []\n self.decrease_y = []\n self.get_all_xy()\n self.separate_increase_decrease()\n\n def get_all_xy(self):\n \"\"\"\n Zip data to create OHLC shape\n\n OHLC shape: low to high vertical bar with\n horizontal branches for open and close values.\n If dates were added, the smallest date difference is calculated and\n multiplied by .2 to get the length of the open and close branches.\n If no date data was provided, the x-axis is a list of integers and the\n length of the open and 
close branches is .2.\n \"\"\"\n self.all_y = list(zip(self.open, self.open, self.high,\n self.low, self.close, self.close, self.empty))\n if self.dates is not None:\n date_dif = []\n for i in range(len(self.dates) - 1):\n date_dif.append(self.dates[i + 1] - self.dates[i])\n date_dif_min = (min(date_dif)) / 5\n self.all_x = [[x - date_dif_min, x, x, x, x, x +\n date_dif_min, None] for x in self.dates]\n else:\n self.all_x = [[x - .2, x, x, x, x, x + .2, None]\n for x in range(len(self.open))]\n\n def separate_increase_decrease(self):\n \"\"\"\n Separate data into two groups: increase and decrease\n\n (1) Increase, where close > open and\n (2) Decrease, where close <= open\n \"\"\"\n for index in range(len(self.open)):\n if self.close[index] is None:\n pass\n elif self.close[index] > self.open[index]:\n self.increase_x.append(self.all_x[index])\n self.increase_y.append(self.all_y[index])\n else:\n self.decrease_x.append(self.all_x[index])\n self.decrease_y.append(self.all_y[index])\n\n def get_increase(self):\n \"\"\"\n Flatten increase data and get increase text\n\n :rtype (list, list, list): flat_increase_x: x-values for the increasing\n trace, flat_increase_y: y=values for the increasing trace and\n text_increase: hovertext for the increasing trace\n \"\"\"\n flat_increase_x = FigureFactory._flatten(self.increase_x)\n flat_increase_y = FigureFactory._flatten(self.increase_y)\n text_increase = ((\"Open\", \"Open\", \"High\",\n \"Low\", \"Close\", \"Close\", '')\n * (len(self.increase_x)))\n\n return flat_increase_x, flat_increase_y, text_increase\n\n def get_decrease(self):\n \"\"\"\n Flatten decrease data and get decrease text\n\n :rtype (list, list, list): flat_decrease_x: x-values for the decreasing\n trace, flat_decrease_y: y=values for the decreasing trace and\n text_decrease: hovertext for the decreasing trace\n \"\"\"\n flat_decrease_x = FigureFactory._flatten(self.decrease_x)\n flat_decrease_y = FigureFactory._flatten(self.decrease_y)\n text_decrease = ((\"Open\", \"Open\", \"High\",\n \"Low\", \"Close\", \"Close\", '')\n * (len(self.decrease_x)))\n\n return flat_decrease_x, flat_decrease_y, text_decrease\n\n\nclass _Candlestick(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_candlestick() for docstring.\n \"\"\"\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n if dates is not None:\n self.x = dates\n else:\n self.x = [x for x in range(len(self.open))]\n self.get_candle_increase()\n\n def get_candle_increase(self):\n \"\"\"\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n \"\"\"\n increase_y = []\n increase_x = []\n for index in range(len(self.open)):\n if self.close[index] > self.open[index]:\n increase_y.append(self.low[index])\n increase_y.append(self.open[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.high[index])\n increase_x.append(self.x[index])\n\n increase_x = [[x, x, x, x, x, x] for x in increase_x]\n increase_x = FigureFactory._flatten(increase_x)\n\n return increase_x, increase_y\n\n def get_candle_decrease(self):\n \"\"\"\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n \"\"\"\n decrease_y = []\n decrease_x = []\n for index in 
range(len(self.open)):\n if self.close[index] <= self.open[index]:\n decrease_y.append(self.low[index])\n decrease_y.append(self.open[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.high[index])\n decrease_x.append(self.x[index])\n\n decrease_x = [[x, x, x, x, x, x] for x in decrease_x]\n decrease_x = FigureFactory._flatten(decrease_x)\n\n return decrease_x, decrease_y\n\n\nclass _Distplot(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_distplot() for docstring\n \"\"\"\n def __init__(self, hist_data, histnorm, group_labels,\n bin_size, curve_type, colors,\n rug_text, show_hist, show_curve):\n self.hist_data = hist_data\n self.histnorm = histnorm\n self.group_labels = group_labels\n self.bin_size = bin_size\n self.show_hist = show_hist\n self.show_curve = show_curve\n self.trace_number = len(hist_data)\n if rug_text:\n self.rug_text = rug_text\n else:\n self.rug_text = [None] * self.trace_number\n\n self.start = []\n self.end = []\n if colors:\n self.colors = colors\n else:\n self.colors = [\n \"rgb(31, 119, 180)\", \"rgb(255, 127, 14)\",\n \"rgb(44, 160, 44)\", \"rgb(214, 39, 40)\",\n \"rgb(148, 103, 189)\", \"rgb(140, 86, 75)\",\n \"rgb(227, 119, 194)\", \"rgb(127, 127, 127)\",\n \"rgb(188, 189, 34)\", \"rgb(23, 190, 207)\"]\n self.curve_x = [None] * self.trace_number\n self.curve_y = [None] * self.trace_number\n\n for trace in self.hist_data:\n self.start.append(min(trace) * 1.)\n self.end.append(max(trace) * 1.)\n\n def make_hist(self):\n \"\"\"\n Makes the histogram(s) for FigureFactory.create_distplot().\n\n :rtype (list) hist: list of histogram representations\n \"\"\"\n hist = [None] * self.trace_number\n\n for index in range(self.trace_number):\n hist[index] = dict(type='histogram',\n x=self.hist_data[index],\n xaxis='x1',\n yaxis='y1',\n histnorm=self.histnorm,\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n marker=dict(color=self.colors[index]),\n autobinx=False,\n xbins=dict(start=self.start[index],\n end=self.end[index],\n size=self.bin_size[index]),\n opacity=.7)\n return hist\n\n def make_kde(self):\n \"\"\"\n Makes the kernel density estimation(s) for create_distplot().\n\n This is called when curve_type = 'kde' in create_distplot().\n\n :rtype (list) curve: list of kde representations\n \"\"\"\n curve = [None] * self.trace_number\n for index in range(self.trace_number):\n self.curve_x[index] = [self.start[index] +\n x * (self.end[index] - self.start[index])\n / 500 for x in range(500)]\n self.curve_y[index] = (scipy.stats.gaussian_kde\n (self.hist_data[index])\n (self.curve_x[index]))\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(type='scatter',\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis='x1',\n yaxis='y1',\n mode='lines',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index]))\n return curve\n\n def make_normal(self):\n \"\"\"\n Makes the normal curve(s) for create_distplot().\n\n This is called when curve_type = 'normal' in create_distplot().\n\n :rtype (list) curve: list of normal curve representations\n \"\"\"\n curve = [None] * self.trace_number\n mean = [None] * self.trace_number\n sd = [None] * self.trace_number\n\n for index in range(self.trace_number):\n mean[index], sd[index] = (scipy.stats.norm.fit\n 
(self.hist_data[index]))\n self.curve_x[index] = [self.start[index] +\n x * (self.end[index] - self.start[index])\n / 500 for x in range(500)]\n self.curve_y[index] = scipy.stats.norm.pdf(\n self.curve_x[index], loc=mean[index], scale=sd[index])\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(type='scatter',\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis='x1',\n yaxis='y1',\n mode='lines',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index]))\n return curve\n\n def make_rug(self):\n \"\"\"\n Makes the rug plot(s) for create_distplot().\n\n :rtype (list) rug: list of rug plot representations\n \"\"\"\n rug = [None] * self.trace_number\n for index in range(self.trace_number):\n\n rug[index] = dict(type='scatter',\n x=self.hist_data[index],\n y=([self.group_labels[index]] *\n len(self.hist_data[index])),\n xaxis='x1',\n yaxis='y2',\n mode='markers',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=(False if self.show_hist or\n self.show_curve else True),\n text=self.rug_text[index],\n marker=dict(color=self.colors[index],\n symbol='line-ns-open'))\n return rug\n\n\nclass _Dendrogram(FigureFactory):\n \"\"\"Refer to FigureFactory.create_dendrogram() for docstring.\"\"\"\n\n def __init__(self, X, orientation='bottom', labels=None, colorscale=None,\n width=\"100%\", height=\"100%\", xaxis='xaxis', yaxis='yaxis'):\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n self.orientation = orientation\n self.labels = labels\n self.xaxis = xaxis\n self.yaxis = yaxis\n self.data = []\n self.leaves = []\n self.sign = {self.xaxis: 1, self.yaxis: 1}\n self.layout = {self.xaxis: {}, self.yaxis: {}}\n\n if self.orientation in ['left', 'bottom']:\n self.sign[self.xaxis] = 1\n else:\n self.sign[self.xaxis] = -1\n\n if self.orientation in ['right', 'bottom']:\n self.sign[self.yaxis] = 1\n else:\n self.sign[self.yaxis] = -1\n\n (dd_traces, xvals, yvals,\n ordered_labels, leaves) = self.get_dendrogram_traces(X, colorscale)\n\n self.labels = ordered_labels\n self.leaves = leaves\n yvals_flat = yvals.flatten()\n xvals_flat = xvals.flatten()\n\n self.zero_vals = []\n\n for i in range(len(yvals_flat)):\n if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:\n self.zero_vals.append(xvals_flat[i])\n\n self.zero_vals.sort()\n\n self.layout = self.set_figure_layout(width, height)\n self.data = graph_objs.Data(dd_traces)\n\n def get_color_dict(self, colorscale):\n \"\"\"\n Returns colorscale used for dendrogram tree clusters.\n\n :param (list) colorscale: Colors to use for the plot in rgb format.\n :rtype (dict): A dict of default colors mapped to the user colorscale.\n\n \"\"\"\n\n # These are the color codes returned for dendrograms\n # We're replacing them with nicer colors\n d = {'r': 'red',\n 'g': 'green',\n 'b': 'blue',\n 'c': 'cyan',\n 'm': 'magenta',\n 'y': 'yellow',\n 'k': 'black',\n 'w': 'white'}\n default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))\n\n if colorscale is None:\n colorscale = [\n 'rgb(0,116,217)', # blue\n 'rgb(35,205,205)', # cyan\n 'rgb(61,153,112)', # green\n 'rgb(40,35,35)', # black\n 'rgb(133,20,75)', # magenta\n 'rgb(255,65,54)', # red\n 'rgb(255,255,255)', # white\n 'rgb(255,220,0)'] # yellow\n\n for i in range(len(default_colors.keys())):\n k = list(default_colors.keys())[i] # PY3 
won't index keys\n if i < len(colorscale):\n default_colors[k] = colorscale[i]\n\n return default_colors\n\n def set_axis_layout(self, axis_key):\n \"\"\"\n Sets and returns default axis object for dendrogram figure.\n\n :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.\n :rtype (dict): An axis_key dictionary with set parameters.\n\n \"\"\"\n axis_defaults = {\n 'type': 'linear',\n 'ticks': 'outside',\n 'mirror': 'allticks',\n 'rangemode': 'tozero',\n 'showticklabels': True,\n 'zeroline': False,\n 'showgrid': False,\n 'showline': True,\n }\n\n if len(self.labels) != 0:\n axis_key_labels = self.xaxis\n if self.orientation in ['left', 'right']:\n axis_key_labels = self.yaxis\n if axis_key_labels not in self.layout:\n self.layout[axis_key_labels] = {}\n self.layout[axis_key_labels]['tickvals'] = \\\n [zv*self.sign[axis_key] for zv in self.zero_vals]\n self.layout[axis_key_labels]['ticktext'] = self.labels\n self.layout[axis_key_labels]['tickmode'] = 'array'\n\n self.layout[axis_key].update(axis_defaults)\n\n return self.layout[axis_key]\n\n def set_figure_layout(self, width, height):\n \"\"\"\n Sets and returns default layout object for dendrogram figure.\n\n \"\"\"\n self.layout.update({\n 'showlegend': False,\n 'autosize': False,\n 'hovermode': 'closest',\n 'width': width,\n 'height': height\n })\n\n self.set_axis_layout(self.xaxis)\n self.set_axis_layout(self.yaxis)\n\n return self.layout\n\n def get_dendrogram_traces(self, X, colorscale):\n \"\"\"\n Calculates all the elements needed for plotting a dendrogram.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (list) colorscale: Color scale for dendrogram tree clusters\n :rtype (tuple): Contains all the traces in the following order:\n (a) trace_list: List of Plotly trace objects for dendrogram tree\n (b) icoord: All X points of the dendrogram tree as array of arrays\n with length 4\n (c) dcoord: All Y points of the dendrogram tree as array of arrays\n with length 4\n (d) ordered_labels: leaf labels in the order they are going to\n appear on the plot\n (e) P['leaves']: left-to-right traversal of the leaves\n\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n d = scs.distance.pdist(X)\n Z = sch.linkage(d, method='complete')\n P = sch.dendrogram(Z, orientation=self.orientation,\n labels=self.labels, no_plot=True)\n\n icoord = scp.array(P['icoord'])\n dcoord = scp.array(P['dcoord'])\n ordered_labels = scp.array(P['ivl'])\n color_list = scp.array(P['color_list'])\n colors = self.get_color_dict(colorscale)\n\n trace_list = []\n\n for i in range(len(icoord)):\n # xs and ys are arrays of 4 points that make up the '∩' shapes\n # of the dendrogram tree\n if self.orientation in ['top', 'bottom']:\n xs = icoord[i]\n else:\n xs = dcoord[i]\n\n if self.orientation in ['top', 'bottom']:\n ys = dcoord[i]\n else:\n ys = icoord[i]\n color_key = color_list[i]\n trace = graph_objs.Scatter(\n x=np.multiply(self.sign[self.xaxis], xs),\n y=np.multiply(self.sign[self.yaxis], ys),\n mode='lines',\n marker=graph_objs.Marker(color=colors[color_key])\n )\n\n try:\n x_index = int(self.xaxis[-1])\n except ValueError:\n x_index = ''\n\n try:\n y_index = int(self.yaxis[-1])\n except ValueError:\n y_index = ''\n\n trace['xaxis'] = 'x' + x_index\n trace['yaxis'] = 'y' + y_index\n\n trace_list.append(trace)\n\n return trace_list, icoord, dcoord, ordered_labels, P['leaves']\n\n\nclass _AnnotatedHeatmap(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_annotated_heatmap() for docstring\n 
\"\"\"\n def __init__(self, z, x, y, annotation_text, colorscale,\n font_colors, reversescale, **kwargs):\n from plotly.graph_objs import graph_objs\n\n self.z = z\n if x:\n self.x = x\n else:\n self.x = range(len(z[0]))\n if y:\n self.y = y\n else:\n self.y = range(len(z))\n if annotation_text is not None:\n self.annotation_text = annotation_text\n else:\n self.annotation_text = self.z\n self.colorscale = colorscale\n self.reversescale = reversescale\n self.font_colors = font_colors\n\n def get_text_color(self):\n \"\"\"\n Get font color for annotations.\n\n The annotated heatmap can feature two text colors: min_text_color and\n max_text_color. The min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. The user can define these\n two colors. Otherwise the colors are defined logically as black or\n white depending on the heatmap's colorscale.\n\n :rtype (string, string) min_text_color, max_text_color: text\n color for annotations for heatmap values <\n (max_value - min_value)/2 and text color for annotations for\n heatmap values >= (max_value - min_value)/2\n \"\"\"\n # Plotly colorscales ranging from a lighter shade to a darker shade\n colorscales = ['Greys', 'Greens', 'Blues',\n 'YIGnBu', 'YIOrRd', 'RdBu',\n 'Picnic', 'Jet', 'Hot', 'Blackbody',\n 'Earth', 'Electric', 'Viridis']\n # Plotly colorscales ranging from a darker shade to a lighter shade\n colorscales_reverse = ['Reds']\n if self.font_colors:\n min_text_color = self.font_colors[0]\n max_text_color = self.font_colors[-1]\n elif self.colorscale in colorscales and self.reversescale:\n min_text_color = '#000000'\n max_text_color = '#FFFFFF'\n elif self.colorscale in colorscales:\n min_text_color = '#FFFFFF'\n max_text_color = '#000000'\n elif self.colorscale in colorscales_reverse and self.reversescale:\n min_text_color = '#FFFFFF'\n max_text_color = '#000000'\n elif self.colorscale in colorscales_reverse:\n min_text_color = '#000000'\n max_text_color = '#FFFFFF'\n elif isinstance(self.colorscale, list):\n if 'rgb' in self.colorscale[0][1]:\n min_col = map(int,\n self.colorscale[0][1].strip('rgb()').split(','))\n max_col = map(int,\n self.colorscale[-1][1].strip('rgb()').split(','))\n elif '#' in self.colorscale[0][1]:\n min_col = FigureFactory._hex_to_rgb(self.colorscale[0][1])\n max_col = FigureFactory._hex_to_rgb(self.colorscale[-1][1])\n else:\n min_col = [255, 255, 255]\n max_col = [255, 255, 255]\n\n if (min_col[0]*0.299 + min_col[1]*0.587 + min_col[2]*0.114) > 186:\n min_text_color = '#000000'\n else:\n min_text_color = '#FFFFFF'\n if (max_col[0]*0.299 + max_col[1]*0.587 + max_col[2]*0.114) > 186:\n max_text_color = '#000000'\n else:\n max_text_color = '#FFFFFF'\n else:\n min_text_color = '#000000'\n max_text_color = '#000000'\n return min_text_color, max_text_color\n\n def get_z_mid(self):\n \"\"\"\n Get the mid value of z matrix\n\n :rtype (float) z_avg: average val from z matrix\n \"\"\"\n if _numpy_imported and isinstance(self.z, np.ndarray):\n z_min = np.amin(self.z)\n z_max = np.amax(self.z)\n else:\n z_min = min(min(self.z))\n z_max = max(max(self.z))\n z_mid = (z_max+z_min) / 2\n return z_mid\n\n def make_annotations(self):\n \"\"\"\n Get annotations for each cell of the heatmap with graph_objs.Annotation\n\n :rtype (list[dict]) annotations: list of annotations for each cell of\n the heatmap\n \"\"\"\n from plotly.graph_objs import graph_objs\n min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)\n z_mid = _AnnotatedHeatmap.get_z_mid(self)\n annotations = []\n for 
n, row in enumerate(self.z):\n for m, val in enumerate(row):\n font_color = min_text_color if val < z_mid else max_text_color\n annotations.append(\n graph_objs.Annotation(\n text=str(self.annotation_text[n][m]),\n x=self.x[m],\n y=self.y[n],\n xref='x1',\n yref='y1',\n font=dict(color=font_color),\n showarrow=False))\n return annotations\n\n\nclass _Table(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_table() for docstring\n \"\"\"\n def __init__(self, table_text, colorscale, font_colors, index,\n index_title, annotation_offset, **kwargs):\n from plotly.graph_objs import graph_objs\n if _pandas_imported and isinstance(table_text, pd.DataFrame):\n headers = table_text.columns.tolist()\n table_text_index = table_text.index.tolist()\n table_text = table_text.values.tolist()\n table_text.insert(0, headers)\n if index:\n table_text_index.insert(0, index_title)\n for i in range(len(table_text)):\n table_text[i].insert(0, table_text_index[i])\n self.table_text = table_text\n self.colorscale = colorscale\n self.font_colors = font_colors\n self.index = index\n self.annotation_offset = annotation_offset\n self.x = range(len(table_text[0]))\n self.y = range(len(table_text))\n\n def get_table_matrix(self):\n \"\"\"\n Create z matrix to make heatmap with striped table coloring\n\n :rtype (list[list]) table_matrix: z matrix to make heatmap with striped\n table coloring.\n \"\"\"\n header = [0] * len(self.table_text[0])\n odd_row = [.5] * len(self.table_text[0])\n even_row = [1] * len(self.table_text[0])\n table_matrix = [None] * len(self.table_text)\n table_matrix[0] = header\n for i in range(1, len(self.table_text), 2):\n table_matrix[i] = odd_row\n for i in range(2, len(self.table_text), 2):\n table_matrix[i] = even_row\n if self.index:\n for array in table_matrix:\n array[0] = 0\n return table_matrix\n\n def get_table_font_color(self):\n \"\"\"\n Fill font-color array.\n\n Table text color can vary by row so this extends a single color or\n creates an array to set a header color and two alternating colors to\n create the striped table pattern.\n\n :rtype (list[list]) all_font_colors: list of font colors for each row\n in table.\n \"\"\"\n if len(self.font_colors) == 1:\n all_font_colors = self.font_colors*len(self.table_text)\n elif len(self.font_colors) == 3:\n all_font_colors = list(range(len(self.table_text)))\n all_font_colors[0] = self.font_colors[0]\n for i in range(1, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[1]\n for i in range(2, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[2]\n elif len(self.font_colors) == len(self.table_text):\n all_font_colors = self.font_colors\n else:\n all_font_colors = ['#000000']*len(self.table_text)\n return all_font_colors\n\n def make_table_annotations(self):\n \"\"\"\n Generate annotations to fill in table text\n\n :rtype (list) annotations: list of annotations for each cell of the\n table.\n \"\"\"\n from plotly.graph_objs import graph_objs\n table_matrix = _Table.get_table_matrix(self)\n all_font_colors = _Table.get_table_font_color(self)\n annotations = []\n for n, row in enumerate(self.table_text):\n for m, val in enumerate(row):\n # Bold text in header and index\n format_text = ('<b>' + str(val) + '</b>' if n == 0 or\n self.index and m < 1 else str(val))\n # Match font color of index to font color of header\n font_color = (self.font_colors[0] if self.index and m == 0\n else all_font_colors[n])\n annotations.append(\n graph_objs.Annotation(\n text=format_text,\n x=self.x[m] - self.annotation_offset,\n 
y=self.y[n],\n xref='x1',\n yref='y1',\n align=\"left\",\n xanchor=\"left\",\n font=dict(color=font_color),\n showarrow=False))\n return annotations\n",
"# \n# This file is part of Healpy.\n# \n# Healpy is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# Healpy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with Healpy; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n# \n# For more information about Healpy, see http://code.google.com/p/healpy\n# \n\"\"\"This module provides classes for some spherical projection.\nTo be used when calling SphereProjAxes class.\n\nSphericalProj : a virtual class (do nothing). Just a template for derived \n (useful) classes\n\nGnomonicProj : Gnomonic projection\n\nAzimuthalProj : Azimuthal equidistant or Lambert azimuthal equal-area projection\n\"\"\"\n\nfrom . import rotator as R\nimport numpy as np\nfrom . import pixelfunc\nfrom .pixelfunc import UNSEEN\n\npi = np.pi\ndtor = np.pi/180.\n\nclass SphericalProj(object):\n \"\"\"\n This class defines functions for spherical projection.\n \n This class contains class method for spherical projection computation. It \n should not be instantiated. It should be inherited from and methods should\n be overloaded for desired projection.\n \"\"\"\n\n name = \"None\"\n \n def __init__(self, rot=None, coord=None, flipconv=None, **kwds):\n self.rotator = R.Rotator(rot=rot, coord=None, eulertype='ZYX')\n self.coordsys = R.Rotator(coord=coord).coordout\n self.coordsysstr = R.Rotator(coord=coord).coordoutstr\n self.set_flip(flipconv)\n self.set_proj_plane_info(**kwds)\n\n def set_proj_plane_info(self, **kwds):\n allNone = True\n for v in kwds.values():\n if v is not None: allNone = False\n if not allNone:\n self._arrayinfo = dict(kwds)\n else:\n self._arrayinfo = None\n\n def get_proj_plane_info(self):\n return self._arrayinfo\n arrayinfo = property(get_proj_plane_info,\n doc=\"Dictionary with information on the projection array\")\n\n def __eq__(self, a):\n if type(a) is not type(self): return False\n return ( (self.rotator == a.rotator) and\n (self.coordsys == a.coordsys ) )\n \n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n \"\"\"From angular direction to position in the projection plane (%s).\n\n Input:\n - theta: if phi is None, theta[0] contains theta, theta[1] contains phi\n - phi : if phi is not None, theta,phi are direction\n - lonlat: if True, angle are assumed in degree, and longitude, latitude\n - flipconv is either 'astro' or 'geo'. None will be default.\n Return:\n - x, y: position in %s plane.\n \"\"\"\n pass\n \n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n \"\"\"From unit vector direction to position in the projection plane (%s).\n\n Input:\n - vx: if vy and vz are None, vx[0],vx[1],vx[2] defines the unit vector.\n - vy,vz: if defined, vx,vy,vz define the unit vector\n - lonlat: if True, angle are assumed in degree, and longitude, latitude\n - flipconv is either 'astro' or 'geo'. 
None will be default.\n\n Return:\n - x, y: position in %s plane.\n \"\"\"\n pass\n \n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n \"\"\"From position in the projection plane to angular direction (%s).\n\n Input:\n - x : if y is None, x[0], x[1] define the position in %s plane.\n - y : if defined, x,y define the position in projection plane.\n - lonlat: if True, angle are assumed in degree, and longitude, latitude\n - flipconv is either 'astro' or 'geo'. None will be default.\n\n Return:\n - theta, phi : angular direction.\n \"\"\"\n pass\n\n def xy2vec(self, x, y=None, direct=False):\n \"\"\"From position in the projection plane to unit vector direction (%s).\n\n Input:\n - x : if y is None, x[0], x[1] define the position in %s plane.\n - y : if defined, x,y define the position in projection plane.\n - lonlat: if True, angle are assumed in degree, and longitude, latitude\n - flipconv is either 'astro' or 'geo'. None will be default.\n\n Return:\n - theta, phi : angular direction.\n \"\"\"\n pass\n \n def xy2ij(self, x, y=None):\n \"\"\"From position in the projection plane to image array index (%s).\n\n Input:\n - x : if y is None, x[0], x[1] define the position in %s plane.\n - y : if defined, x,y define the position in projection plane.\n - projinfo : additional projection information.\n\n Return:\n - i,j : image array indices.\n \"\"\"\n pass\n \n def ij2xy(self, i=None, j=None):\n \"\"\"From image array indices to position in projection plane (%s).\n\n Input:\n - if i and j are None, generate arrays of i and j as input\n - i : if j is None, i[0], j[1] define array indices in %s image.\n - j : if defined, i,j define array indices in image.\n - projinfo : additional projection information.\n\n Return:\n - x,y : position in projection plane.\n \"\"\"\n pass\n\n def projmap(self, map, vec2pix_func,rot=None,coord=None):\n \"\"\"Create an array containing the projection of the map.\n\n Input:\n - vec2pix_func: a function taking theta,phi and returning pixel number\n - map: an array containing the spherical map to project,\n the pixelisation is described by vec2pix_func\n Return:\n - a 2D array with the projection of the map.\n\n Note: the Projector must contain information on the array.\n \"\"\"\n x,y = self.ij2xy()\n if np.__version__ >= '1.1':\n matype = np.ma.core.MaskedArray\n else:\n matype = np.ma.array\n if type(x) is matype and x.mask is not np.ma.nomask:\n w = (x.mask == False)\n else:\n w = slice(None)\n img=np.zeros(x.shape,np.float64)-np.inf\n vec = self.xy2vec(np.asarray(x[w]),np.asarray(y[w]))\n vec = (R.Rotator(rot=rot,coord=self.mkcoord(coord))).I(vec)\n pix=vec2pix_func(vec[0],vec[1],vec[2])\n # support masked array for map, or a dictionnary (for explicit pixelisation)\n if isinstance(map, matype) and map.mask is not np.ma.nomask:\n mpix = map[pix]\n mpix[map.mask[pix]] = UNSEEN\n elif isinstance(map, dict):\n is_pix_seen = np.in1d(pix, map.keys()).reshape(pix.shape)\n is_pix_unseen = ~is_pix_seen\n mpix = np.zeros_like(img[w])\n mpix[is_pix_unseen] = UNSEEN\n pix_seen = pix[is_pix_seen]\n iterable = (map[p] for p in pix_seen)\n mpix[is_pix_seen] = np.fromiter(iterable, mpix.dtype,\n count = pix_seen.size)\n else:\n mpix = map[pix]\n img[w] = mpix\n return img\n \n def set_flip(self, flipconv):\n \"\"\"flipconv is either 'astro' or 'geo'. None will be default.\n \n With 'astro', east is toward left and west toward right. 
\n It is the opposite for 'geo'\n \"\"\"\n if flipconv is None:\n flipconv = 'astro' # default\n if flipconv == 'astro': self._flip = -1\n elif flipconv == 'geo': self._flip = 1\n else: raise ValueError(\"flipconv must be 'astro', 'geo' or None for default.\")\n \n def get_extent(self):\n \"\"\"Get the extension of the projection plane.\n\n Return:\n extent = (left,right,bottom,top)\n \"\"\"\n pass\n\n def get_fov(self):\n \"\"\"Get the field of view in degree of the plane of projection\n\n Return:\n fov: the diameter in radian of the field of view\n \"\"\"\n return 2.*pi\n\n def get_center(self,lonlat=False):\n \"\"\"Get the center of the projection.\n\n Input:\n - lonlat : if True, will return longitude and latitude in degree,\n otherwise, theta and phi in radian\n Return:\n - theta,phi or lonlat depending on lonlat keyword\n \"\"\"\n lon, lat = np.asarray(self.rotator.rots[0][0:2])*180/pi\n if lonlat: return lon,lat\n else: return pi/2.-lat*dtor, lon*dtor\n\n def mkcoord(self,coord):\n if self.coordsys is None:\n return (coord,coord)\n elif coord is None:\n return (self.coordsys,self.coordsys)\n elif type(coord) is str:\n return (coord,self.coordsys)\n else:\n return (tuple(coord)[0],self.coordsys)\n \n \nclass GnomonicProj(SphericalProj):\n \"\"\"This class provides class methods for Gnomonic projection.\n \"\"\"\n \n name = \"Gnomonic\"\n\n def __init__(self, rot=None, coord=None, xsize=None, ysize=None, reso=None,\n **kwds):\n super(GnomonicProj,self).__init__(rot=rot, coord=coord,\n xsize=xsize, ysize=ysize,reso=reso,\n **kwds)\n\n def set_proj_plane_info(self, xsize=200,ysize=None,reso=1.5):\n if xsize is None: xsize=200\n if ysize is None: ysize=xsize\n if reso is None: reso=1.5\n super(GnomonicProj,self).set_proj_plane_info(xsize=xsize,\n ysize=ysize,reso=reso)\n \n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n if not direct: vec = self.rotator(vx,vy,vz)\n elif vy is None and vz is None: vec=vx\n elif vy is not None and vz is not None: vec=vx,vy,vz\n else: raise ValueError(\"vy and vz must be both defined or both not defined\")\n flip = self._flip\n mask = (np.asarray(vec[0])<=0.)\n w = np.where(mask == False)\n if not mask.any(): mask=np.ma.nomask\n if not hasattr(vec[0],'__len__'):\n if mask is not np.ma.nomask:\n x = np.nan\n y = np.nan\n else:\n x = flip*vec[1]/vec[0]\n y = vec[2]/vec[0]\n else:\n x = np.zeros(vec[0].shape)+np.nan\n y = np.zeros(vec[0].shape)+np.nan\n x[w] = flip*vec[1][w]/vec[0][w]\n y[w] = vec[2][w]/vec[0][w]\n return x,y\n vec2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n\n def xy2vec(self, x, y=None, direct=False):\n flip = self._flip\n if y is None:\n x,y = x\n x,y=np.asarray(x),np.asarray(y)\n rm1=1./np.sqrt(1.+x**2+y**2)\n vec = (rm1,flip*rm1*x,rm1*y)\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name,name)\n\n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n vec=R.dir2vec(theta,phi,lonlat=lonlat)\n return self.vec2xy(vec,direct=direct)\n ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n \n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n return R.vec2dir(self.xy2vec(x,y,direct=direct),lonlat=lonlat)\n xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name,name)\n\n\n def xy2ij(self, x, y=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n reso = self.arrayinfo['reso']\n if y is 
None: x,y = x\n dx = reso/60. * dtor\n xc,yc = 0.5*(xsize-1), 0.5*(ysize-1)\n j = np.around(xc+x/dx).astype(np.long)\n i = np.around(yc+y/dx).astype(np.long)\n return i,j\n xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name,name)\n\n def ij2xy(self, i=None, j=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n reso = self.arrayinfo['reso']\n dx = reso/60. * dtor\n xc,yc = 0.5*(xsize-1), 0.5*(ysize-1)\n if i is None and j is None:\n idx=np.outer(np.ones(ysize),np.arange(xsize))\n x=(idx-xc) * dx # astro= '-' sign, geo '+' sign\n idx=np.outer(np.arange(ysize),np.ones(xsize))\n y=(idx-yc)*dx #(idx-yc) * dx\n elif i is not None and j is not None:\n x=(np.asarray(j)-xc) * dx\n y=(np.asarray(i)-yc) * dx #(asarray(i)-yc) * dx\n elif i is not None and j is None:\n i, j = i\n x=(np.asarray(j)-xc) * dx\n y=(np.asarray(i)-yc) * dx #(i-yc) * dx\n else:\n raise TypeError(\"Wrong parameters\")\n return x,y\n ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name,name)\n\n def get_extent(self):\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n left,bottom = self.ij2xy(0,0)\n right,top = self.ij2xy(ysize-1,xsize-1)\n return (left,right,bottom,top)\n\n def get_fov(self):\n vx,vy,vz = self.xy2vec(self.ij2xy(0,0), direct=True)\n a = np.arccos(vx)\n return 2.*a\n\nclass MollweideProj(SphericalProj):\n \"\"\"This class provides class methods for Mollweide projection.\n \"\"\"\n \n name = \"Mollweide\"\n __molldata = []\n\n def __init__(self, rot=None, coord=None, xsize=800, **kwds):\n self.__initialise_data()\n super(MollweideProj,self).__init__(rot=rot, coord=coord,\n xsize=xsize, **kwds)\n \n def set_proj_plane_info(self,xsize):\n super(MollweideProj,self).set_proj_plane_info(xsize=xsize)\n\n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n if not direct:\n theta,phi=R.vec2dir(self.rotator(vx,vy,vz))\n else:\n theta,phi=R.vec2dir(vx,vy,vz)\n flip = self._flip\n X,Y = MollweideProj.__molldata\n # set phi in [-pi,pi]\n phi = (phi+pi)%(2*pi)-pi\n lat = pi/2. - theta\n A = MollweideProj.__lininterp(X,Y,lat)\n x = flip*2./pi * phi * np.cos(A)\n y = np.sin(A)\n return x,y\n vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name,name)\n\n def xy2vec(self, x, y=None, direct=False):\n flip = self._flip\n if y is None: x,y = x\n mask = (np.asarray(x)**2/4.+np.asarray(y)**2 > 1.)\n w=np.where(mask == False)\n if not mask.any(): mask = np.ma.nomask\n if not hasattr(x,'__len__'):\n if mask is not np.ma.nomask:\n return np.nan,np.nan,np.nan\n else:\n s = np.sqrt((1-y)*(1+y))\n a = np.arcsin(y)\n z = 2./pi * (a + y*s)\n phi = flip * pi/2. * x/np.maximum(s,1.e-6)\n sz = np.sqrt((1-z)*(1+z))\n vec = sz*np.cos(phi),sz*np.sin(phi),z\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n else:\n vec = (np.zeros(x.shape)+np.nan,\n np.zeros(x.shape)+np.nan,\n np.zeros(x.shape)+np.nan)\n s = np.sqrt((1-y[w])*(1+y[w]))\n a = np.arcsin(y[w])\n vec[2][w] = 2./pi * (a + y[w]*s)\n phi = flip * pi/2. 
* x[w]/np.maximum(s,1.e-6)\n sz = np.sqrt((1-vec[2][w])*(1+vec[2][w]))\n vec[0][w] = sz*np.cos(phi)\n vec[1][w] = sz*np.sin(phi)\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name,name)\n\n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n return self.vec2xy(R.dir2vec(theta,phi,lonlat=lonlat),direct=direct)\n ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n \n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n vec = self.xy2vec(x,y,direct=direct)\n return R.vec2dir(vec,lonlat=lonlat)\n xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name,name)\n\n\n def xy2ij(self, x, y=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize = self.arrayinfo['xsize']\n ysize=xsize/2\n if y is None: x,y = x\n xc,yc = (xsize-1.)/2., (ysize-1.)/2.\n if hasattr(x,'__len__'):\n j = np.around(x*xc/2.+xc).astype(np.long)\n i = np.around(yc+y*yc).astype(np.long)\n mask = (x**2/4.+y**2>1.)\n if not mask.any(): mask=np.ma.nomask\n j=np.ma.array(j,mask=mask)\n i=np.ma.array(i,mask=mask)\n else:\n if x**2/4.+y**2 > 1.:\n i,j=np.nan,np.nan\n else:\n j = np.around(x*xc/2.+xc).astype(np.long)\n i = np.around(yc+y*yc).astype(np.long)\n return i,j\n xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name,name)\n\n def ij2xy(self, i=None, j=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize = self.arrayinfo['xsize']\n ysize=xsize/2\n xc,yc=(xsize-1.)/2.,(ysize-1.)/2.\n if i is None and j is None:\n idx = np.outer(np.arange(ysize),np.ones(xsize))\n y = (idx-yc)/yc\n idx = np.outer(np.ones(ysize),np.arange(xsize))\n x = 2.*(idx-xc)/xc\n mask = x**2/4.+y**2 > 1.\n if not mask.any(): mask=np.ma.nomask\n x = np.ma.array(x,mask=mask)\n y = np.ma.array(y,mask=mask)\n elif i is not None and j is not None:\n y = (np.asarray(i)-yc)/yc\n x=2.*(np.asarray(j)-xc)/xc\n if x**2/4.+y**2 > 1.: x,y=np.nan,np.nan\n elif i is not None and j is None:\n i,j = i\n y=(np.asarray(i)-yc)/yc\n x=2.*(np.asarray(j)-xc)/xc\n if x**2/4.+y**2 > 1.: x,y=np.nan,np.nan\n else:\n raise TypeError(\"i and j must be both given or both not given\")\n return x,y\n ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name,name)\n\n def get_extent(self):\n return (-2.0,2.0,-1.0,1.0)\n\n @staticmethod\n def __initialise_data():\n if len(MollweideProj.__molldata) == 0:\n X = (np.arange(1.,180.,1.)-90.)*dtor\n Y = MollweideProj.__findRoot(MollweideProj.__fmoll,\n MollweideProj.__dfmoll,\n X.copy(),X,niter=10)\n X = np.concatenate([[-pi/2],X,[pi/2]])\n Y = np.concatenate([[-pi/2],Y,[pi/2]])\n MollweideProj.__molldata.append( X )\n MollweideProj.__molldata.append( Y )\n return\n\n @staticmethod\n def __findRoot(f, df, x0, argsf=None, argsdf=None, niter=100):\n x = x0\n niter = min(abs(niter),1000)\n i = 0\n while i < niter:\n dx = -f(x,argsf)/df(x,argsdf)\n x += dx\n i += 1\n return x\n\n @staticmethod\n def __fmoll(x,args):\n return 2.*x+np.sin(2.*x)-pi*np.sin(args)\n\n @staticmethod\n def __dfmoll(x,args):\n return 2.*(1.+np.cos(2.*x))\n\n @staticmethod\n def __lininterp(X,Y,x):\n idx = X.searchsorted(x)\n y = Y[idx-1] + (Y[idx]-Y[idx-1])/(X[idx]-X[idx-1]) * (x-X[idx-1])\n return y\n\n\nclass CartesianProj(SphericalProj):\n \"\"\"This class provides class methods for Cartesian projection.\n \"\"\"\n \n name = \"Cartesian\"\n\n def __init__(self, rot=None, coord=None, xsize=800, ysize=None, lonra=None, \n 
latra=None, **kwds):\n super(CartesianProj,self).__init__(rot=rot, coord=coord,\n xsize=xsize, ysize=ysize, lonra=lonra, latra=latra, **kwds)\n \n def set_proj_plane_info(self,xsize,ysize,lonra,latra):\n if lonra is None: lonra = [-180.,180.]\n if latra is None: latra = [-90.,90.]\n if (len(lonra)!=2 or len(latra)!=2 or lonra[0]<-180. or lonra[1]>180.\n or latra[0]<-90 or latra[1]>90 or lonra[0]>=lonra[1] or latra[0]>=latra[1]):\n raise TypeError(\"Wrong argument lonra or latra. Must be lonra=[a,b],latra=[c,d] \"\n \"with a<b, c<d, a>=-180, b<=180, c>=-90, d<=+90\")\n lonra = self._flip*np.float64(lonra)[::self._flip]\n latra = np.float64(latra)\n xsize = np.long(xsize)\n if ysize is None:\n ratio = (latra[1]-latra[0])/(lonra[1]-lonra[0])\n ysize = np.long(round(ratio*xsize))\n else:\n ysize = np.long(ysize)\n ratio = float(ysize)/float(xsize)\n super(CartesianProj,self).set_proj_plane_info(xsize=xsize, lonra=lonra, latra=latra, \n ysize=ysize, ratio=ratio)\n\n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n if not direct:\n theta,phi=R.vec2dir(self.rotator(vx,vy,vz))\n else:\n theta,phi=R.vec2dir(vx,vy,vz)\n flip = self._flip\n # set phi in [-pi,pi]\n x = flip*((phi+pi)%(2*pi)-pi)\n x /= dtor # convert in degree\n y = pi/2. - theta\n y /= dtor # convert in degree\n return x,y\n vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name,name)\n\n def xy2vec(self, x, y=None, direct=False):\n if y is None:\n x,y = np.asarray(x)\n else:\n x,y = np.asarray(x),np.asarray(y)\n flip = self._flip\n theta = pi/2.-y*dtor # convert in radian\n phi = flip*x*dtor # convert in radian\n # dir2vec does not support 2d arrays, so first use flatten and then\n # reshape back to previous shape\n if not direct: \n vec = self.rotator.I(R.dir2vec(theta.flatten(),phi.flatten()))\n else:\n vec = R.dir2vec(theta.flatten(),phi.flatten())\n vec = [v.reshape(theta.shape) for v in vec]\n return vec\n xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name,name)\n\n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n return self.vec2xy(R.dir2vec(theta,phi,lonlat=lonlat),direct=direct)\n ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n \n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n vec = self.xy2vec(x,y,direct=direct)\n return R.vec2dir(vec,lonlat=lonlat)\n xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name,name)\n\n\n def xy2ij(self, x, y=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize = self.arrayinfo['xsize']\n ysize = self.arrayinfo['ysize']\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n if y is None: x,y = np.asarray(x)\n else: x,y = np.asarray(x), np.asarray(y)\n j = np.around((x-lonra[0])/(lonra[1]-lonra[0])*(xsize-1)).astype(np.int64)\n i = np.around((y-latra[0])/(latra[1]-latra[0])*(ysize-1)).astype(np.int64)\n if len(x.shape) > 0:\n mask = ((i<0)|(i>=ysize)|(j<0)|(j>=xsize))\n if not mask.any(): mask=np.ma.nomask\n j=np.ma.array(j,mask=mask)\n i=np.ma.array(i,mask=mask)\n else:\n if j<0 or j>=xsize or i<0 or i>=ysize: i=j=None\n return i,j\n xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name,name)\n\n def ij2xy(self, i=None, j=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize = self.arrayinfo['xsize']\n ysize = self.arrayinfo['ysize']\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n if i is not None and j is None: i,j = np.asarray(i)\n elif i is 
not None and j is not None: i,j = np.asarray(i),np.asarray(j)\n if i is None and j is None:\n idx = np.outer(np.arange(ysize),np.ones(xsize))\n y = (float(latra[1]-latra[0])/(ysize-1.)) * idx\n y += latra[0]\n idx = np.outer(np.ones(ysize),np.arange(xsize))\n x = (float(lonra[1]-lonra[0])/(xsize-1.) * idx)\n x += lonra[0]\n x = np.ma.array(x)\n y = np.ma.array(y)\n elif i is not None and j is not None:\n y = (float(latra[1]-latra[0])/(ysize-1) ) * i \n y += latra[0]\n x = (float(lonra[1]-lonra[0])/(xsize-1)) * j \n x += lonra[0]\n if len(i.shape) > 0:\n mask = ((x<-180)|(x>180)|(y<-90)|(y>90))\n if not mask.any():\n mask = np.ma.nomask\n x = np.ma.array(x,mask=mask)\n y = np.ma.array(y,mask=mask)\n else:\n if x<-180 or x>180 or y<-90 or y>90:\n x = y = np.nan\n else:\n raise TypeError(\"i and j must be both given or both not given\")\n return x,y\n ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name,name)\n\n def get_extent(self):\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n return (lonra[0],lonra[1],latra[0],latra[1])\n get_extent.__doc__ = SphericalProj.get_extent.__doc__\n\n def get_fov(self):\n xsize = self.arrayinfo['xsize']\n ysize = self.arrayinfo['ysize']\n v1 = np.asarray(self.xy2vec(self.ij2xy(0,0), direct=True))\n v2 = np.asarray(self.xy2vec(self.ij2xy(ysize-1,xsize-1), direct=True))\n a = np.arccos((v1*v2).sum())\n return 2*a\n\n# def get_fov(self):\n# lonra = self.arrayinfo['lonra']\n# latra = self.arrayinfo['latra']\n# return np.sqrt((lonra[1]-lonra[0])**2+(latra[1]-latra[0])**2)\n \n def get_center(self,lonlat=False):\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n xc = 0.5*(lonra[1]+lonra[0])\n yc = 0.5*(latra[1]+latra[0])\n return self.xy2ang(xc,yc,lonlat=lonlat)\n get_center.__doc__ = SphericalProj.get_center.__doc__\n\n\nclass OrthographicProj(SphericalProj):\n \"\"\"This class provides methods for orthographic projection\n \"\"\"\n \n name = \"Orthographic\"\n \n def __init__(self, rot=None, coord=None, xsize=800, half_sky=False,**kwds):\n super(OrthographicProj,self).__init__(rot=rot, coord=coord,xsize=xsize,\n half_sky=half_sky,**kwds)\n \n def set_proj_plane_info(self,xsize,half_sky):\n super(OrthographicProj,self).set_proj_plane_info(xsize=xsize,\n half_sky=half_sky)\n \n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n if not direct:\n theta,phi=R.vec2dir(self.rotator(vx,vy,vz))\n else:\n theta,phi=R.vec2dir(vx,vy,vz)\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n half_sky = self.arrayinfo['half_sky']\n flip = self._flip\n # set phi in [-pi,pi]\n phi = flip*(phi+pi)%(2*pi)-pi\n lat = pi/2. 
- theta\n x = np.cos(lat)*np.sin(phi)\n if not half_sky: x -= 1.0\n y = np.sin(lat)\n # unfold back of sphere\n cosc = np.cos(lat)*np.cos(phi)\n if np.any(cosc<0):\n hmask = (cosc<0)\n if hasattr(x,'__len__'):\n if half_sky:\n x[hmask] = np.nan\n else:\n x[hmask] *= -1\n elif hmask:\n if half_sky:\n x = np.nan\n else:\n x *= -1\n if half_sky:\n mask = (np.asarray(x)**2+np.asarray(y)**2>1.0)\n else:\n mask = ((np.mod(np.asarray(x)+2.0,2.0)-1.0)**2 + \\\n np.asarray(y)**2>1.0)\n if mask.any():\n if not hasattr(x,'__len__'):\n x = np.nan\n y = np.nan\n else:\n x[mask] = np.nan\n y[mask] = np.nan\n return x,y\n vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name,name)\n \n def xy2vec(self, x, y=None, direct=False):\n if y is None:\n x,y = x\n if hasattr(x,'__len__'):\n x,y = np.asarray(x),np.asarray(y)\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n half_sky = self.arrayinfo['half_sky']\n flip = self._flip\n # re-fold back of sphere\n mask = None\n if not half_sky:\n if hasattr(x,'__len__'):\n if np.any(x>0.0):\n mask = (x>0.0)\n x[mask] *= -1\n elif x>0:\n mask = 0\n x = -x\n x+=1.0\n r = np.sqrt(x**2+y**2)\n if hasattr(r,'__len__'):\n r[r>1] = np.nan\n elif r>1: r = np.nan\n c = np.arcsin(r)\n if hasattr(y,'__len__'):\n y[np.abs(y)>1] = np.nan\n elif np.abs(y)>1: y = np.nan\n lat = np.arcsin(y)\n phi = np.arctan2(x,np.cos(c))\n phi *= flip\n if not mask is None:\n if hasattr(phi,'__len__'):\n phi[mask] = pi-phi[mask]\n else: phi = pi-phi\n theta = pi/2. - lat\n vec = R.dir2vec(theta,phi)\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name,name)\n \n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n return self.vec2xy(R.dir2vec(theta,phi,lonlat=lonlat),direct=direct)\n ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n \n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n return R.vec2dir(self.xy2vec(x,y,direct=direct),lonlat=lonlat)\n xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name,name)\n\n def xy2ij(self, x, y=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n xsize = self.arrayinfo['xsize']\n half_sky = self.arrayinfo['half_sky']\n if half_sky: ratio = 1.0\n else: ratio = 2.0\n ysize = xsize/ratio\n if y is None: x,y = np.asarray(x)\n else: x,y = np.asarray(x), np.asarray(y)\n xc,yc = (xsize-1.)/2., (ysize-1.)/2.\n if hasattr(x,'__len__'):\n if half_sky:\n mask = (x**2+y**2>1.0)\n else:\n mask = ((np.mod(x+2.0,2.0)-1.0)**2+y**2>1.0)\n if not mask.any(): mask = np.ma.nomask\n j=np.ma.array(np.around(x*xc/ratio+xc).astype(np.long),mask=mask)\n i=np.ma.array(np.around(yc+y*yc).astype(np.long),mask=mask)\n else:\n if ( half_sky and x**2+y**2>1.0 ) or \\\n ( not half_sky and (np.mod(x+2.0,2.0)-1.0)**2+y**2>1.0 ):\n i,j,=np.nan,np.nan\n else:\n j = np.around(x*xc/ratio+xc).astype(np.long)\n i = np.around(yc+y*yc).astype(np.long)\n return i,j\n xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name,name)\n \n def ij2xy(self, i=None, j=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n xsize = self.arrayinfo['xsize']\n half_sky = self.arrayinfo['half_sky']\n if half_sky: ratio = 1.0\n else: ratio = 2.0\n ysize=xsize/ratio\n xc,yc=(xsize-1.)/2.,(ysize-1.)/2.\n if i is None and j is None:\n idx = np.outer(np.arange(ysize),np.ones(xsize))\n y = 
(idx-yc)/yc\n idx = np.outer(np.ones(ysize),np.arange(xsize))\n x = ratio*(idx-xc)/xc\n elif i is not None and j is not None:\n y = (np.asarray(i)-yc)/yc\n x = ratio*(np.asarray(j)-xc)/xc\n # if np.mod(x,1.0)**2+y**2 > 1.0: x,y=np.nan,np.nan\n elif i is not None and j is None:\n i,j = i\n y=(np.asarray(i)-yc)/yc\n x=ratio*(np.asarray(j)-xc)/xc\n # if np.mod(x,1.0)**2.+y**2 > 1.: x,y=np.nan,np.nan\n else:\n raise TypeError(\"i and j must be both given or both not given\")\n if half_sky:\n mask = (x**2+y**2>1.)\n else:\n mask = ((np.mod(x+2.0,2.0)-1.0)**2+y**2 > 1.)\n if not mask.any(): mask=np.ma.nomask\n x = np.ma.array(x,mask=mask)\n y = np.ma.array(y,mask=mask)\n if len(x)==0: x = x[0]\n if len(y)==0: y = y[0]\n return x,y\n ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name,name)\n \n def get_extent(self):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n half_sky = self.arrayinfo['half_sky']\n if half_sky: ratio = 1.0\n else: ratio = 2.0\n return (-ratio,ratio,-1.0,1.0)\n get_extent.__doc__ = SphericalProj.get_extent.__doc__\n\nclass AzimuthalProj(SphericalProj):\n \"\"\"This class provides methods for Lambert azimuthal equal-area projection and\n azimuthal equidistant projection\n \"\"\"\n\n name = \"Azimuthal\"\n\n def __init__(self, rot=None, coord=None, xsize=None, ysize=None, reso=None, lamb=None, half_sky=None, **kwds):\n super(AzimuthalProj,self).__init__(rot=rot, coord=coord,xsize=xsize,ysize=ysize,reso=reso,lamb=lamb,half_sky=half_sky,**kwds)\n\n def set_proj_plane_info(self, xsize=800,ysize=None,reso=1.5,lamb=True,half_sky=False):\n if xsize is None: xsize=800\n if ysize is None: ysize=xsize\n if reso is None: reso=1.5\n if lamb is None: lamb=True\n if half_sky is None: half_sky=False\n super(AzimuthalProj,self).set_proj_plane_info(xsize=xsize,ysize=ysize,\n reso=reso,lamb=lamb,half_sky=half_sky)\n\n def vec2xy(self, vx, vy=None, vz=None, direct=False):\n if not direct:\n theta,phi=R.vec2dir(self.rotator(vx,vy,vz))\n else:\n theta,phi=R.vec2dir(vx,vy,vz)\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n flip = self._flip\n lamb = self.arrayinfo['lamb']\n half_sky = self.arrayinfo['half_sky']\n # set phi in [-pi,pi]\n phi = flip*((phi+pi)%(2*pi)-pi)\n lat = pi/2. - theta\n if lamb:\n kprime = np.sqrt (2. / (1. 
+ np.cos(lat) * np.cos(phi)))\n else:\n c = np.arccos(np.cos(lat) * np.cos(phi))\n kprime = c / np.sin(c)\n x = kprime * np.cos(lat) * np.sin(phi)\n y = kprime * np.sin(lat)\n if lamb: r2max = 4.\n else: r2max = pi**2\n if half_sky:\n if lamb: r2max /= 2.\n else: r2max /= 4.\n mask = (np.asarray(x)**2+np.asarray(y)**2 > r2max)\n if not hasattr(x,'__len__'):\n if mask is not np.ma.nomask:\n return np.nan,np.nan\n else:\n w = np.where(mask)\n x[w] = np.nan\n y[w] = np.nan \n return x,y\n vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name,name)\n\n def xy2vec(self, x, y=None, direct=False):\n if y is None:\n x,y = x\n if hasattr(x,'__len__'):\n x,y = np.asarray(x),np.asarray(y)\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for\"\n \" this projector\")\n flip = self._flip\n lamb = self.arrayinfo['lamb']\n half_sky = self.arrayinfo['half_sky']\n if lamb: r2max = 4.\n else: r2max = pi**2\n if half_sky:\n if lamb: r2max /= 2.\n else: r2max /= 4.\n mask = (np.asarray(x)**2+np.asarray(y)**2 > r2max)\n w=np.where(mask == False)\n if not mask.any(): mask = np.ma.nomask\n if not hasattr(x,'__len__'):\n if mask is not np.ma.nomask:\n return np.nan,np.nan,np.nan\n else:\n rho = np.sqrt(x**2 + y**2)\n if lamb:\n c = 2. * np.arcsin(rho/2.)\n else:\n c = rho\n lat = np.arcsin(y * np.sin(c)/rho)\n phi = np.arctan2(x * np.sin(c), (rho * np.cos(c)))\n phi *= flip\n vec = R.dir2vec(pi/2.-lat,phi)\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n else:\n vec = (np.zeros(x.shape)+np.nan,\n np.zeros(x.shape)+np.nan,\n np.zeros(x.shape)+np.nan)\n rho = np.sqrt(x[w]**2 + y[w]**2)\n if lamb:\n c = 2. * np.arcsin(rho/2.)\n else:\n c = rho\n lat = np.arcsin(y[w] * np.sin(c)/rho)\n phi = np.arctan2(x[w] * np.sin(c), (rho * np.cos(c)))\n phi *= flip\n vec[0][w] = np.cos(phi)*np.cos(lat)\n vec[1][w] = np.sin(phi)*np.cos(lat)\n vec[2][w] = np.sin(lat)\n if not direct:\n return self.rotator.I(vec)\n else:\n return vec\n xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name,name)\n\n def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n return self.vec2xy(R.dir2vec(theta,phi,lonlat=lonlat),direct=direct)\n ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name,name)\n\n def xy2ang(self, x, y=None, lonlat=False, direct=False):\n return R.vec2dir(self.xy2vec(x,y,direct=direct),lonlat=lonlat)\n xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name,name)\n\n def xy2ij(self, x, y=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n reso = self.arrayinfo['reso']\n lamb = self.arrayinfo['lamb']\n half_sky = self.arrayinfo['half_sky']\n if lamb: r2max = 4.\n else: r2max = pi**2\n if half_sky:\n if lamb: r2max /= 2.\n else: r2max /= 4.\n if y is None: x,y = x\n dx = reso/60. 
* dtor\n xc,yc = 0.5*(xsize-1), 0.5*(ysize-1)\n if hasattr(x,'__len__'):\n mask = (x**2+y**2>r2max)\n if not mask.any(): mask = np.ma.nomask\n j=np.ma.array(np.around(xc+x/dx).astype(np.long),mask=mask)\n i=np.ma.array(np.around(yc+y/dx).astype(np.long),mask=mask)\n else:\n if (x**2+y**2>r2max):\n i,j,=np.nan,np.nan\n else:\n j = np.around(xc+x/dx).astype(np.long)\n i = np.around(yc+y/dx).astype(np.long)\n return i,j\n xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name,name)\n\n def ij2xy(self, i=None, j=None):\n if self.arrayinfo is None:\n raise TypeError(\"No projection plane array information defined for \"\n \"this projector\")\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n reso = self.arrayinfo['reso']\n lamb = self.arrayinfo['lamb']\n half_sky = self.arrayinfo['half_sky']\n dx = reso/60. * dtor\n xc,yc = 0.5*(xsize-1), 0.5*(ysize-1)\n if lamb: r2max = 4.\n else: r2max = pi**2\n if half_sky:\n if lamb: r2max /= 2.\n else: r2max /= 4.\n if i is None and j is None:\n idx = np.outer(np.arange(ysize),np.ones(xsize))\n y = (idx-yc) * dx\n idx = np.outer(np.ones(ysize),np.arange(xsize))\n x = (idx-xc) * dx\n elif i is not None and j is not None:\n y = (np.asarray(i)-yc) * dx\n x = (np.asarray(j)-xc) * dx\n elif i is not None and j is None:\n i,j = i\n y=(np.asarray(i)-yc) * dx\n x=(np.asarray(j)-xc) * dx\n else:\n raise TypeError(\"i and j must be both given or both not given\")\n if hasattr(x,'__len__'):\n mask = (x**2+y**2 > r2max)\n if not mask.any(): mask=np.ma.nomask\n x = np.ma.array(x,mask=mask)\n y = np.ma.array(y,mask=mask)\n else:\n if (x**2+y**2>r2max): x,y=np.nan,np.nan\n return x,y\n ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name,name)\n\n def get_extent(self):\n xsize,ysize = self.arrayinfo['xsize'],self.arrayinfo['ysize']\n reso = self.arrayinfo['reso']\n dx = reso/60.0 * dtor\n xc,yc = 0.5*(xsize-1), 0.5*(ysize-1)\n left = -xc * dx\n bottom = -yc * dx\n right = (xsize-1-xc) * dx\n top = (ysize-1-yc) * dx\n return (left,right,bottom,top)\n get_extent.__doc__ = SphericalProj.get_extent.__doc__\n\n def get_fov(self):\n half_sky = self.arrayinfo['half_sky']\n vx,vy,vz = self.xy2vec(self.ij2xy(0,0), direct=True)\n a = np.arccos(vx)\n if np.isfinite(a):\n return 2.*a\n else:\n if half_sky: return pi\n else: return 2.*pi\n"
] | [
[
"numpy.matrix",
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"numpy.asarray",
"numpy.max",
"scipy.stats.gaussian_kde",
"numpy.int",
"numpy.mean",
"scipy.stats.norm.fit",
"numpy.sin",
"scipy.cluster.hierarchy.linkage",
"scipy.array",
"numpy.zeros",
"numpy.multiply",
"numpy.min",
"numpy.amin",
"numpy.atleast_2d",
"scipy.cluster.hierarchy.dendrogram",
"numpy.array",
"scipy.stats.norm.pdf",
"numpy.cos",
"numpy.percentile",
"numpy.tile",
"scipy.spatial.distance.pdist",
"numpy.vstack"
],
[
"numpy.long",
"numpy.sqrt",
"numpy.asarray",
"numpy.around",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.any",
"numpy.ma.array",
"numpy.where",
"numpy.arcsin",
"numpy.arange",
"numpy.sin",
"numpy.zeros",
"numpy.arccos",
"numpy.fromiter",
"numpy.maximum",
"numpy.abs",
"numpy.isfinite",
"numpy.cos",
"numpy.ones",
"numpy.float64",
"numpy.mod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vbod/text | [
"07c044b8b851ace1e9a033c9597cdb1bee2d69e0",
"07c044b8b851ace1e9a033c9597cdb1bee2d69e0"
] | [
"tensorflow_text/python/ops/sentencepiece_tokenizer_test.py",
"oss_scripts/model_server/save_models.py"
] | [
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for SentencePieceProcessor Tensorflow op.\"\"\"\n\nimport sys\nimport tempfile\nfrom absl.testing import parameterized\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer\n\n\ndef _utf8(tokens):\n if sys.version_info[0] == 2:\n return tokens\n if isinstance(tokens, list):\n return [_utf8(t) for t in tokens]\n else:\n return tokens.encode('utf-8')\n\n\nclass TestSavedModelModule(module.Module):\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[None], dtype=dtypes.string)\n ])\n def tokenize(self, inputs):\n return self.tokenizer.tokenize(inputs)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SentencepieceTokenizerOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def getTokenizerAndSetOptions(self, reverse, add_bos, add_eos, out_type):\n self.reverse = reverse\n self.add_bos = add_bos\n self.add_eos = add_eos\n self.out_type = out_type\n return SentencepieceTokenizer(\n self.model,\n reverse=reverse,\n add_bos=add_bos,\n add_eos=add_eos,\n out_type=out_type)\n\n def transformExpected(self, expected, is_offsets=False):\n bos = _utf8('<s>')\n eos = _utf8('</s>')\n if is_offsets:\n bos = 0\n eos = 0\n elif self.out_type == dtypes.int32:\n bos = 1\n eos = 2\n if not isinstance(expected[0], list):\n if self.add_bos:\n expected = [bos] + expected\n if self.add_eos:\n expected = expected + [eos]\n if self.reverse:\n expected = [x for x in reversed(expected)]\n else:\n return [self.transformExpected(x) for x in expected]\n return expected\n\n def setUp(self):\n super(SentencepieceTokenizerOpTest, self).setUp()\n sentencepiece_model_file = (\n 'tensorflow_text/python/ops/test_data/'\n 'test_oss_model.model')\n self.model = gfile.GFile(sentencepiece_model_file, 'rb').read()\n\n def testGetVocabSize(self):\n sp = SentencepieceTokenizer(self.model)\n self.assertAllEqual(1000, sp.vocab_size())\n\n def testIdToStringScalar(self):\n sp = SentencepieceTokenizer(self.model)\n result = sp.id_to_string(125)\n self.assertAllEqual('ve', result)\n\n def testIdToStringVector(self):\n sp = SentencepieceTokenizer(self.model)\n 
pieces = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])\n ids = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]]\n result = sp.id_to_string(ragged_factory_ops.constant(ids))\n self.assertAllEqual(pieces, result)\n\n def testIdToStringRagged(self):\n sp = SentencepieceTokenizer(self.model)\n pieces = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd', 's']]])\n ids = [[[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]],\n [[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n result = sp.id_to_string(ragged_factory_ops.constant(ids, dtypes.int32))\n self.assertAllEqual(pieces, result)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeScalar(self, reverse, add_bos, add_eos,\n out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentence = 'I love lamp.'\n expected = []\n if out_type == dtypes.int32:\n expected = [9, 169, 21, 125, 169, 579, 6]\n else:\n expected = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])\n expected = self.transformExpected(expected)\n result = sp.tokenize(sentence)\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentence), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeVec(self, reverse, add_bos, add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = ['I love carpet', 'I love desk.', 'I love lamp.']\n expected = []\n if out_type == dtypes.int32:\n expected = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]]\n else:\n expected = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])\n expected = self.transformExpected(expected)\n result = sp.tokenize(sentences)\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n 
@parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeUniformTensorMatrix(self, reverse, add_bos,\n add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = [['I love carpet', 'I love desk.'],\n ['I love lamp.', 'Never tell me the odds']]\n expected = []\n if out_type == dtypes.int32:\n expected = [[[9, 169, 21, 125, 78, 48, 132, 15],\n [9, 169, 21, 125, 727, 6]],\n [[9, 169, 21, 125, 169, 579, 6],\n [4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n else:\n expected = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.']],\n [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'],\n ['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',\n 's']]])\n expected = self.transformExpected(expected)\n result = sp.tokenize(constant_op.constant(sentences))\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeRaggedMatrix(self, reverse, add_bos, add_eos,\n out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n expected = []\n if out_type == dtypes.int32:\n expected = [[[9, 169, 21, 125, 78, 48, 132, 15],\n [9, 169, 21, 125, 727, 6], [9, 169, 21, 125, 169, 579, 6]],\n [[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n else:\n expected = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',\n 's']]])\n expected = self.transformExpected(expected)\n result = sp.tokenize(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, 
dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeWithOffsetsScalar(self, reverse, add_bos,\n add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentence = 'I love lamp.'\n expected_tok = []\n expected_starts = [0, 1, 3, 4, 6, 8, 11]\n expected_limits = [1, 3, 4, 6, 8, 11, 12]\n if out_type == dtypes.int32:\n expected_tok = [9, 169, 21, 125, 169, 579, 6]\n else:\n expected_tok = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])\n expected_tok = self.transformExpected(expected_tok)\n expected_starts = self.transformExpected(expected_starts, True)\n expected_limits = self.transformExpected(expected_limits, True)\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentence))\n self.assertAllEqual(expected_tok, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentence), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsSingleElementVector(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = ['I love lamp.']\n expected_tokens = [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[0, 1, 3, 4, 6, 8, 11]]\n expected_limits = [[1, 3, 4, 6, 8, 11, 12]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsVector(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = ['I love carpet.', 'I love desk.', 'I love lamp.']\n expected_tokens = [['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],\n [0, 1, 3, 4, 6, 8, 11]]\n expected_limits = [[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],\n [1, 3, 4, 6, 8, 11, 12]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsMatrix(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = [['I love carpet.', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n expected_tokens = [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [[\n '▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o',\n 'd', 'd', 's'\n ]]]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],\n [0, 1, 3, 4, 6, 8, 11]],\n [[0, 0, 1, 5, 10, 13, 17, 18, 19, 20, 21]]]\n expected_limits = [[[1, 
3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],\n [1, 3, 4, 6, 8, 11, 12]],\n [[0, 1, 5, 10, 13, 17, 18, 19, 20, 21, 22]]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (-1, 0.1, dtypes.int32),\n (64, 0.1, dtypes.int32),\n (0, 0.0, dtypes.int32),\n (-1, 0.1, dtypes.string),\n (64, 0.1, dtypes.string),\n (0, 0.0, dtypes.string),\n ])\n def testSampleTokenizeAndDetokenize(self, nbest_size, alpha, out_type):\n sp = SentencepieceTokenizer(\n self.model, nbest_size=nbest_size, alpha=alpha, out_type=out_type)\n sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n result = sp.tokenize(ragged_factory_ops.constant(sentences))\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testSavedModel(self):\n sp = SentencepieceTokenizer(self.model)\n test_module = TestSavedModelModule(sp)\n inputs = constant_op.constant(['hello world'])\n expected_result = test_module.tokenize(inputs)\n temp_dir = tempfile.mkdtemp(dir=test.get_temp_dir())\n save.save(test_module, temp_dir)\n restored_model = load.load(temp_dir)\n self.assertAllEqual(restored_model.tokenize(inputs), expected_result)\n file_io.delete_recursively(temp_dir)\n\n def testBasicPipeline(self):\n if not context.executing_eagerly():\n self.skipTest('testBasicPipeline only supported in eager mode.')\n\n sp = SentencepieceTokenizer(self.model)\n\n strings = ['hello', 'world']\n dataset = dataset_ops.Dataset.from_tensor_slices(strings)\n # Ensure we can map the tokenizer across the dataset.\n dataset1 = dataset.map(sp.tokenize)\n # Ensure there's no error with a second map call.\n dataset2 = dataset.map(sp.tokenize)\n\n expected = sp.tokenize(strings)\n for i, result in enumerate(dataset1):\n self.assertAllEqual(result, expected[i])\n for i, result in enumerate(dataset2):\n self.assertAllEqual(result, expected[i])\n\n def testEmptyModel(self):\n with self.cached_session():\n with self.assertRaises(errors.InvalidArgumentError):\n sp = SentencepieceTokenizer()\n result = sp.tokenize('whatever')\n result.eval()\n\n def testInvalidModel(self):\n with self.cached_session():\n with self.assertRaises(errors.InternalError):\n sp = SentencepieceTokenizer('invalid model')\n result = sp.tokenize('whatever')\n result.eval()\n\n\nif __name__ == '__main__':\n test.main()\n",
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Integration tests for TF.Text ops in model server.\"\"\"\n\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow_text as text\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'dest',\n ('third_party/tensorflow_serving/servables/tensorflow/testdata/'\n 'tf_text_regression/01'),\n 'Destination directory for the model.')\n\n\nclass TfTextOps(tf.Module):\n \"\"\"Module for saving TF Text concrete function.\"\"\"\n\n @tf.function\n def __call__(self, x):\n # Constrained sequence\n cs_scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]])\n cs_input = np.array([cs_scores, cs_scores, cs_scores], dtype=np.float32)\n cs_transition_weights = np.array([[-1.0, 1.0, -2.0, 2.0, 0.0],\n [3.0, -3.0, 4.0, -4.0, 0.0],\n [5.0, 1.0, 10.0, 1.0, 1.0],\n [-7.0, 7.0, -8.0, 8.0, 0.0],\n [0.0, 1.0, 2.0, 3.0, 0.0]],\n dtype=np.float32)\n cs_allowed_transitions = np.array([[True, True, True, True, True],\n [True, True, True, True, True],\n [True, False, True, False, False],\n [True, True, True, True, True],\n [True, False, True, True, True]])\n constrained_sequence = text.viterbi_constrained_sequence(\n cs_input, [2, 2, 2], allowed_transitions=cs_allowed_transitions,\n transition_weights=cs_transition_weights, use_log_space=True,\n use_start_and_end_states=True)\n # Max Spanning Tree\n mst_num_nodes = tf.constant([4, 3], tf.int32)\n mst_scores = tf.constant([[[0, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 2, 0, 0],\n [1, 2, 3, 4]],\n [[4, 3, 2, 9],\n [0, 0, 2, 9],\n [0, 0, 0, 9],\n [9, 9, 9, 9]]],\n tf.int32) # pyformat: disable\n (max_spanning_tree, _) = text.max_spanning_tree(mst_num_nodes, mst_scores)\n # Normalize\n normalized = text.case_fold_utf8(['A String'])\n normalized = text.normalize_utf8(normalized)\n # Regex split\n regex_split = text.regex_split(input=['Yo dawg!'],\n delim_regex_pattern=r'\\s')\n # Rouge-L\n rl_hypotheses = tf.ragged.constant(\n [['captain', 'of', 'the', 'delta', 'flight'],\n ['the', '1990', 'transcript']])\n rl_references = tf.ragged.constant(\n [['delta', 'air', 'lines', 'flight'],\n ['this', 'concludes', 'the', 'transcript']])\n (rouge_l, _, _) = text.metrics.rouge_l(rl_hypotheses, rl_references)\n # Sentence breaking version 1 (token dependent)\n sb_token_word = [['Welcome', 'to', 'the', 'U.S.', '!', 'Harry'],\n ['Wu', 'Tang', 'Clan', ';', 'ain\\'t', 'nothing']]\n sb_token_properties = [[0, 0, 0, 256, 0, 0], [0, 0, 0, 0, 0, 0]]\n sb_token_starts = []\n sb_token_ends = []\n for sentence in sb_token_word:\n sentence_string = ''\n sentence_start = []\n sentence_end = []\n for word in sentence:\n sentence_start.append(len(sentence_string))\n sentence_string = sentence_string.join([word, ' '])\n sentence_end.append(len(sentence_string))\n sb_token_starts.append(sentence_start)\n sb_token_ends.append(sentence_end)\n sb_token_starts = tf.constant(sb_token_starts, 
dtype=tf.int64)\n sb_token_ends = tf.constant(sb_token_ends, dtype=tf.int64)\n sb_token_properties = tf.ragged.constant(sb_token_properties,\n dtype=tf.int64)\n (sentence_breaking, _, _, _) = text.sentence_fragments(\n sb_token_word, sb_token_starts, sb_token_ends, sb_token_properties)\n # Sentence breaking version 2 (StateBasedSentenceBreaker)\n sbv2_text_input = [['Welcome to the U.S.! Harry'],\n ['Wu Tang Clan; ain\\'t nothing']]\n sentence_breaker_v2 = text.StateBasedSentenceBreaker()\n sbv2_fragment_text, _, _ = (\n sentence_breaker_v2.break_sentences_with_offsets(sbv2_text_input))\n # Sentencepiece tokenizer\n sp_model_file = (\n 'third_party/tensorflow_text/python/ops/test_data/test_oss_model.model')\n sp_model = open(sp_model_file, 'rb').read()\n sp_tokenizer = text.SentencepieceTokenizer(sp_model)\n sentencepiece = sp_tokenizer.tokenize(['A sentence of things.'])\n sentencepiece = sp_tokenizer.detokenize(sentencepiece)\n (sentencepiece, _, _) = sp_tokenizer.tokenize_with_offsets(sentencepiece)\n sentencepiece_size = sp_tokenizer.vocab_size()\n sentencepiece_id = sp_tokenizer.id_to_string(1)\n # Split merge tokenizer\n sm_tokenizer = text.SplitMergeTokenizer()\n split_merge = sm_tokenizer.tokenize(b'IloveFlume!',\n [0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0])\n # Split merge from logits tokenizer\n smfl_tokenizer = text.SplitMergeFromLogitsTokenizer()\n split_merge_from_logits = smfl_tokenizer.tokenize(\n b'IloveFlume!',\n # One pair of logits for each Unicode character from the text. Each\n # pair indicates a \"split\" action if the first component is greater than\n # the second one, and a \"merge\" otherwise.\n [\n [2.7, -0.3], # I: split\n [4.1, 0.82], # l: split\n [-2.3, 4.3], # o: merge\n [3.1, 12.2], # v: merge\n [-3.0, 4.7], # e: merge\n [2.7, -0.7], # F: split\n [0.7, 15.0], # l: merge\n [1.6, 23.0], # u: merge\n [2.1, 11.0], # m: merge\n [0.0, 20.0], # e: merge\n [18.0, 0.7], # !: split\n ])\n # Confirm TF unicode_script op that requires ICU works\n tf_unicode_script = tf.strings.unicode_script(\n [ord('a'), 0x0411, 0x82b8, ord(',')])\n # Unicode script tokenizer\n us_tokenizer = text.UnicodeScriptTokenizer()\n unicode_script = us_tokenizer.tokenize(['a string'])\n # Whitespace tokenizer\n ws_tokenizer = text.WhitespaceTokenizer()\n whitespace = ws_tokenizer.tokenize(['a string'])\n # Wordpiece tokenizer\n wp_initializer = tf.lookup.KeyValueTensorInitializer(\n ['i'], [1], key_dtype=tf.string, value_dtype=tf.int64)\n self.wp_vocab_table = tf.lookup.StaticHashTable(wp_initializer,\n default_value=-1)\n wp_tokenizer = text.WordpieceTokenizer(self.wp_vocab_table)\n wordpiece = wp_tokenizer.tokenize(['i am'])\n # Wordshape\n wordshapes = text.wordshape([u'a-b', u'a\\u2010b'.encode('utf-8')],\n text.WordShape.HAS_PUNCTUATION_DASH)\n\n # Assertion method\n def assert_check(tensor):\n return tf.assert_equal(tensor, tf.identity(tensor))\n\n # Assertions\n constrained_sequence_assert = assert_check(constrained_sequence.to_tensor())\n max_spanning_tree_assert = assert_check(max_spanning_tree)\n normalized_assert = assert_check(normalized)\n regex_split_assert = assert_check(regex_split.to_tensor())\n rouge_l_assert = assert_check(rouge_l)\n sentence_breaking_assert = assert_check(sentence_breaking.to_tensor())\n sentence_breaking_v2_assert = assert_check(sbv2_fragment_text.to_tensor())\n sentencepiece_assert = assert_check(sentencepiece.to_tensor())\n sentencepiece_id_assert = assert_check(sentencepiece_id)\n sentencepiece_size_assert = assert_check(sentencepiece_size)\n split_merge_assert = 
assert_check(split_merge)\n split_merge_from_logits_assert = assert_check(split_merge_from_logits)\n tf_unicode_script_assert = assert_check(tf_unicode_script)\n unicode_script_assert = assert_check(unicode_script.to_tensor())\n whitespace_assert = assert_check(whitespace.to_tensor())\n wordpiece_assert = assert_check(wordpiece.to_tensor())\n wordshapes_assert = assert_check(wordshapes)\n\n with tf.control_dependencies([constrained_sequence_assert,\n max_spanning_tree_assert,\n normalized_assert,\n regex_split_assert,\n rouge_l_assert,\n sentence_breaking_assert,\n sentence_breaking_v2_assert,\n sentencepiece_assert,\n sentencepiece_id_assert,\n sentencepiece_size_assert,\n split_merge_assert,\n split_merge_from_logits_assert,\n tf_unicode_script_assert,\n unicode_script_assert,\n whitespace_assert,\n wordpiece_assert,\n wordshapes_assert]):\n y = tf.add(x, [1])\n return {'y': y}\n\n\nmodule = TfTextOps()\nexport_path = tempfile.TemporaryDirectory()\nprint('Exporting saved model to ', export_path)\ncall = module.__call__.get_concrete_function(\n tf.TensorSpec([1], tf.float32, 'x'))\ntf.saved_model.save(module, export_path.name, call)\n\n# Copy files from temp directory\nprint('Moving files:')\nfor src_dir, dirs, files in os.walk(export_path.name):\n dst_dir = src_dir.replace(export_path.name, FLAGS.dest, 1)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n for file_ in files:\n print(file_)\n src_file = os.path.join(src_dir, file_)\n dst_file = os.path.join(dst_dir, file_)\n if os.path.exists(dst_file):\n # in case of the src and dst are the same file\n if os.path.samefile(src_file, dst_file):\n continue\n os.remove(dst_file)\n shutil.move(src_file, dst_dir)\n"
] | [
[
"tensorflow.python.saved_model.load.load",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.test.main",
"tensorflow.python.platform.test.get_temp_dir",
"tensorflow.python.lib.io.file_io.delete_recursively",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.compat.v1.lookup.StaticHashTable",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.lookup.KeyValueTensorInitializer",
"tensorflow.compat.v1.TensorSpec",
"tensorflow.compat.v1.ragged.constant",
"numpy.array",
"tensorflow.compat.v1.saved_model.save",
"tensorflow.compat.v1.add",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.identity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jmkinder1/code-samples | [
"9c6cd3c6f16579a6c1f5210779b8ec6ad53fbdba"
] | [
"projectile.py"
] | [
"# projectile.py\n# -----------------------------------------------------------------------------\n# Calculate how long an object is in the air when thrown from a specified height\n# with a range of initial speeds assuming constant acceleration due to gravity:\n# \t0.5 * g * t**2 - v0 * t - y0 = 0\n# ----------------------------------------------------------------------------- \nimport numpy as np\n\n#%% Initialization of variables.\ninitial_speed = 0.0\t\t\t# v0 = initial vertical speed of ball in [m/s]\nimpact_time = 0.0\t\t\t# t = time of impact in [s] (computed in loop)\n\n#%% Initialization of parameters.\ng = 9.8066\t\t\t\t\t# gravitational acceleration in [m/s^2]\ninitial_height = 2.0\t\t# y0 = height ball is thrown from in [m]\nspeed_increment = 5.0\t\t# how much to increase speed in [m/s] for each iteration\ncutoff_time = 10.0\t\t\t# stop computing after impact time exceeds cutoff\n\n#%% Calculate and display impact time. Increment initial speed each step.\n#\tRepeat until impact time exceeds cutoff.\nwhile impact_time < cutoff_time:\n\t# Use quadratic equation to solve kinematic equation for impact time:\n\timpact_time = (np.sqrt(initial_speed**2 + 2 * g * initial_height) + initial_speed) / g\n\tprint(\"speed= {} m/s; time= {:.1f} s\".format(initial_speed, impact_time))\n\tinitial_speed += speed_increment\nprint(\"Calculation complete.\")\n"
] | [
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yil8/GPN | [
"e0ccba70db6f1d3264f8d3dd38fc4c62bcebd7ad"
] | [
"model/functional.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.7):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n scores: (N) FloatTensor\n boxes: (N, 4) FloatTensor\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n Return:\n The indices of the kept boxes with respect to N.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(dim=0, descending=True) # sort in ascending order\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[0] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[1:] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n\n keep = keep[:count]\n\n return keep\n\n\ndef n_proposals(out_cls):\n vals, idcs = out_cls.view(-1, 2).max(1)\n n_proposals = idcs.eq(1).type(torch.cuda.FloatTensor).sum() / len(out_cls)\n\n return n_proposals\n\n\ndef acc(out_cls, labels):\n pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)\n out_cls_pos = torch.index_select(out_cls.view(-1, 2), 0, pos_idcs)\n prob_pos = F.softmax(out_cls_pos, dim=1)[:, 1]\n acc_pos = prob_pos.ge(0.5).type(\n torch.cuda.FloatTensor).sum() / len(prob_pos)\n\n neg_idcs = labels.view(-1).eq(0).nonzero().view(-1)\n out_cls_neg = torch.index_select(out_cls.view(-1, 2), 0, neg_idcs)\n prob_neg = F.softmax(out_cls_neg, dim=1)[:, 0]\n acc_neg = prob_neg.ge(0.5).type(\n torch.cuda.FloatTensor).sum() / len(prob_neg)\n\n return (acc_pos, acc_neg)\n\n\ndef angle_err(out_ellipse, labels, ellipse_targets):\n pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)\n out_ellipse_keep = torch.index_select(out_ellipse.view(-1, 5), 0, pos_idcs)\n ellipse_targets_keep = torch.index_select(ellipse_targets.view(-1, 5), 0,\n pos_idcs)\n\n out_tan = out_ellipse_keep[:, 4]\n out_angle = torch.atan(out_tan) * 180 / np.pi\n targets_tan = ellipse_targets_keep[:, 4]\n targets_angle = torch.atan(targets_tan) * 180 / np.pi\n\n err = torch.abs(out_angle - targets_angle).sum() / len(out_angle)\n\n return err\n"
] | [
[
"torch.abs",
"torch.nn.functional.softmax",
"torch.mul",
"torch.clamp",
"torch.index_select",
"torch.atan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AhmedElshaarany/RoboND-Rover-Project | [
"9dad356d4585bb567ee436062afdd82d9d7eb4de"
] | [
"code/supporting_functions.py"
] | [
"import numpy as np\nimport cv2\nfrom PIL import Image\nfrom io import BytesIO, StringIO\nimport base64\nimport time\n\n# Define a function to convert telemetry strings to float independent of decimal convention\ndef convert_to_float(string_to_convert):\n if ',' in string_to_convert:\n float_value = np.float(string_to_convert.replace(',','.'))\n else: \n float_value = np.float(string_to_convert)\n return float_value\n\ndef update_rover(Rover, data):\n # Initialize start time and sample positions\n if Rover.start_time == None:\n Rover.start_time = time.time()\n Rover.total_time = 0\n samples_xpos = np.int_([convert_to_float(pos.strip()) for pos in data[\"samples_x\"].split(';')])\n samples_ypos = np.int_([convert_to_float(pos.strip()) for pos in data[\"samples_y\"].split(';')])\n Rover.samples_pos = (samples_xpos, samples_ypos)\n Rover.samples_to_find = np.int(data[\"sample_count\"])\n # Or just update elapsed time\n else:\n tot_time = time.time() - Rover.start_time\n if np.isfinite(tot_time):\n Rover.total_time = tot_time\n # Print out the fields in the telemetry data dictionary\n print(data.keys())\n # The current speed of the rover in m/s\n Rover.vel = convert_to_float(data[\"speed\"])\n # The current position of the rover\n Rover.pos = [convert_to_float(pos.strip()) for pos in data[\"position\"].split(';')]\n # The current yaw angle of the rover\n Rover.yaw = convert_to_float(data[\"yaw\"])\n # The current yaw angle of the rover\n Rover.pitch = convert_to_float(data[\"pitch\"])\n # The current yaw angle of the rover\n Rover.roll = convert_to_float(data[\"roll\"])\n # The current throttle setting\n Rover.throttle = convert_to_float(data[\"throttle\"])\n # The current steering angle\n Rover.steer = convert_to_float(data[\"steering_angle\"])\n # Near sample flag\n Rover.near_sample = np.int(data[\"near_sample\"])\n # Picking up flag\n Rover.picking_up = np.int(data[\"picking_up\"])\n # Update number of rocks collected\n Rover.samples_collected = Rover.samples_to_find - np.int(data[\"sample_count\"])\n\n print('speed =',Rover.vel, 'position =', Rover.pos, 'throttle =', \n Rover.throttle, 'steer_angle =', Rover.steer, 'near_sample:', Rover.near_sample, \n 'picking_up:', data[\"picking_up\"], 'sending pickup:', Rover.send_pickup, \n 'total time:', Rover.total_time, 'samples remaining:', data[\"sample_count\"], \n 'samples collected:', Rover.samples_collected)\n # Get the current image from the center camera of the rover\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n Rover.img = np.asarray(image)\n\n # Return updated Rover and separate image for optional saving\n return Rover, image\n\n# Define a function to create display output given worldmap results\ndef create_output_images(Rover):\n\n # Create a scaled map for plotting and clean up obs/nav pixels a bit\n if np.max(Rover.worldmap[:,:,2]) > 0:\n nav_pix = Rover.worldmap[:,:,2] > 0\n navigable = Rover.worldmap[:,:,2] * (255 / np.mean(Rover.worldmap[nav_pix, 2]))\n else: \n navigable = Rover.worldmap[:,:,2]\n if np.max(Rover.worldmap[:,:,0]) > 0:\n obs_pix = Rover.worldmap[:,:,0] > 0\n obstacle = Rover.worldmap[:,:,0] * (255 / np.mean(Rover.worldmap[obs_pix, 0]))\n else:\n obstacle = Rover.worldmap[:,:,0]\n\n likely_nav = navigable >= obstacle\n obstacle[likely_nav] = 0\n plotmap = np.zeros_like(Rover.worldmap)\n plotmap[:, :, 0] = obstacle\n plotmap[:, :, 2] = navigable\n plotmap = plotmap.clip(0, 255)\n # Overlay obstacle and navigable terrain map with ground truth map\n map_add = 
cv2.addWeighted(plotmap, 1, Rover.ground_truth, 0.5, 0)\n\n # Check whether any rock detections are present in worldmap\n rock_world_pos = Rover.worldmap[:,:,1].nonzero()\n # If there are, we'll step through the known sample positions\n # to confirm whether detections are real\n samples_located = 0\n if rock_world_pos[0].any():\n \n rock_size = 2\n for idx in range(len(Rover.samples_pos[0])):\n test_rock_x = Rover.samples_pos[0][idx]\n test_rock_y = Rover.samples_pos[1][idx]\n rock_sample_dists = np.sqrt((test_rock_x - rock_world_pos[1])**2 + \\\n (test_rock_y - rock_world_pos[0])**2)\n # If rocks were detected within 3 meters of known sample positions\n # consider it a success and plot the location of the known\n # sample on the map\n if np.min(rock_sample_dists) < 3:\n samples_located += 1\n Rover.samples_located = samples_located\n map_add[test_rock_y-rock_size:test_rock_y+rock_size, \n test_rock_x-rock_size:test_rock_x+rock_size, :] = 255\n\n # Calculate some statistics on the map results\n # First get the total number of pixels in the navigable terrain map\n tot_nav_pix = np.float(len((plotmap[:,:,2].nonzero()[0])))\n # Next figure out how many of those correspond to ground truth pixels\n good_nav_pix = np.float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] > 0)).nonzero()[0]))\n # Next find how many do not correspond to ground truth pixels\n bad_nav_pix = np.float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] == 0)).nonzero()[0]))\n # Grab the total number of map pixels\n tot_map_pix = np.float(len((Rover.ground_truth[:,:,1].nonzero()[0])))\n # Calculate the percentage of ground truth map that has been successfully found\n perc_mapped = round(100*good_nav_pix/tot_map_pix, 1)\n # Calculate the number of good map pixel detections divided by total pixels \n # found to be navigable terrain\n if tot_nav_pix > 0:\n fidelity = round(100*good_nav_pix/(tot_nav_pix), 1)\n else:\n fidelity = 0\n # Flip the map for plotting so that the y-axis points upward in the display\n map_add = np.flipud(map_add).astype(np.float32)\n # Add some text about map and rock sample detection results\n cv2.putText(map_add,\"Time: \"+str(np.round(Rover.total_time, 1))+' s', (0, 10), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Mapped: \"+str(perc_mapped)+'%', (0, 25), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Fidelity: \"+str(fidelity)+'%', (0, 40), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Rocks\", (0, 55), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\" Located: \"+str(samples_located), (0, 70), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\" Collected: \"+str(Rover.samples_collected), (0, 85), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n # Convert map and vision image to base64 strings for sending to server\n pil_img = Image.fromarray(map_add.astype(np.uint8))\n buff = BytesIO()\n pil_img.save(buff, format=\"JPEG\")\n encoded_string1 = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n \n pil_img = Image.fromarray(Rover.vision_image.astype(np.uint8))\n buff = BytesIO()\n pil_img.save(buff, format=\"JPEG\")\n encoded_string2 = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n\n return encoded_string1, encoded_string2\n\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.isfinite",
"numpy.min",
"numpy.asarray",
"numpy.flipud",
"numpy.round",
"numpy.int",
"numpy.max",
"numpy.zeros_like",
"numpy.mean",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QinchengZhang/PathologySegmentation | [
"7a2c21346739a79c33e7a7ccc081018821868eb7"
] | [
"Training/pytorch/train.py"
] | [
"# -*- coding: utf-8 -*-\n'''\nAuthor: TJUZQC\nDate: 2020-10-26 10:26:51\nLastEditors: TJUZQC\nLastEditTime: 2020-11-20 19:23:55\nDescription: None\n'''\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport yaml\nfrom torch import optim\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom evaluation import eval_net\nfrom models import ChooseModel, init_weights\nfrom utils.dataset import BasicDataset\n\nconf = yaml.load(open(os.path.join(\n sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)\ndir_img = conf['DATASET']['IMGS_DIR']\ndir_mask = conf['DATASET']['MASKS_DIR']\ndir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']\n\n\ndef train_net(net,\n device,\n epochs=5,\n batch_size=16,\n lr=0.001,\n val_percent=0.1,\n save_cp=True,\n img_scale=0.5,\n use_apex=False,\n optimizer='adam',\n classes=2,\n lr_scheduler='steplr',\n lr_scheduler_cfgs: dict = {'step_size': 10}):\n\n dataset = BasicDataset(dir_img, dir_mask, img_scale,\n train=True, classes=classes)\n n_val = int(len(dataset) * val_percent)\n n_train = len(dataset) - n_val\n train, val = random_split(dataset, [n_train, n_val])\n train_loader = DataLoader(\n train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)\n val_loader = DataLoader(val, batch_size=batch_size,\n shuffle=False, num_workers=8, pin_memory=True)\n\n writer = SummaryWriter(\n comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')\n global_step = 0\n\n logging.info(f'''Starting training:\n Epochs: {epochs}\n Batch size: {batch_size}\n Learning rate: {lr}\n Training size: {n_train}\n Validation size: {n_val}\n Checkpoints: {save_cp}\n Device: {device.type}\n Images scaling: {img_scale}\n Use apex: {use_apex}\n ''')\n optimizers = {\n 'adadelta': optim.Adadelta,\n 'adagrad': optim.Adagrad,\n 'adam': optim.Adam,\n 'adamw': optim.AdamW,\n 'sparseadam': optim.SparseAdam,\n 'adamax': optim.Adamax,\n 'asgd': optim.ASGD,\n 'lbfgs': optim.LBFGS,\n 'rmsprop': optim.RMSprop,\n 'rprop': optim.Rprop,\n 'sgd': optim.SGD,\n }\n optimizer = optimizers.get(optimizer, None)(\n net.parameters(), lr=lr, weight_decay=1e-8)\n lr_scheduler_getter = {\n 'lambdalr': torch.optim.lr_scheduler.LambdaLR,\n 'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,\n 'steplr': torch.optim.lr_scheduler.StepLR,\n 'multisteplr': torch.optim.lr_scheduler.MultiStepLR,\n 'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,\n 'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,\n 'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,\n 'cycliclr': torch.optim.lr_scheduler.CyclicLR,\n 'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,\n }\n lr_scheduler = lr_scheduler_getter.get(\n lr_scheduler.lower(), None)(optimizer, **lr_scheduler_cfgs)\n if use_apex:\n try:\n from apex import amp\n net, optimizer = amp.initialize(net, optimizer, opt_level=\"O1\")\n except ImportError as e:\n print(e)\n use_apex = False\n\n if net.n_classes > 1:\n criterion = nn.CrossEntropyLoss()\n else:\n criterion = nn.BCEWithLogitsLoss()\n\n for epoch in range(epochs):\n net.train()\n\n epoch_loss = 0\n with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:\n for batch in train_loader:\n imgs = batch['image']\n true_masks = batch['mask']\n assert imgs.shape[1] == net.n_channels, \\\n f'Network has been defined with {net.n_channels} input channels, ' \\\n f'but loaded images have {imgs.shape[1]} 
channels. Please check that ' \\\n 'the images are loaded correctly.'\n\n imgs = imgs.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n if net.n_classes > 1:\n b, c, w, h = true_masks.shape\n true_masks = true_masks.view(b, w, h)\n masks_pred = net(imgs)\n loss = criterion(masks_pred, true_masks)\n epoch_loss += loss.item()\n writer.add_scalar('Loss/train', loss.item(), global_step)\n\n pbar.set_postfix(**{'loss (batch)': loss.item()})\n\n optimizer.zero_grad()\n if not use_apex:\n loss.backward()\n else:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n optimizer.step()\n\n pbar.update(imgs.shape[0])\n global_step += 1\n dataset_len = len(dataset)\n a1 = dataset_len // 10\n a2 = dataset_len / 10\n b1 = global_step % a1\n b2 = global_step % a2\n\n if global_step % (len(dataset) // (10 * batch_size)) == 0:\n dice_coeff, pA, oA, precision, recall, f1score = eval_net(\n net, val_loader, device, n_val)\n if net.n_classes > 1:\n logging.info(\n 'Validation cross entropy: {}'.format(dice_coeff))\n writer.add_scalar('Loss/test', dice_coeff, global_step)\n\n else:\n logging.info(\n 'Validation Dice Coeff: {}'.format(dice_coeff))\n writer.add_scalar('Dice/test', dice_coeff, global_step)\n logging.info(\n 'Validation Pixel Accuracy: {}'.format(pA))\n writer.add_scalar('pA/test', pA, global_step)\n logging.info(\n 'Validation Overall Accuracy: {}'.format(oA))\n writer.add_scalar('oA/test', oA, global_step)\n logging.info(\n 'Validation Precision: {}'.format(precision))\n writer.add_scalar('precision/test',\n precision, global_step)\n logging.info(\n 'Validation Recall: {}'.format(recall))\n writer.add_scalar('recall/test', recall, global_step)\n logging.info(\n 'Validation F1-score: {}'.format(f1score))\n writer.add_scalar(\n 'F1-score/test', f1score, global_step)\n\n writer.add_images('images', imgs, global_step)\n if net.n_classes == 1:\n writer.add_images(\n 'masks/true', true_masks, global_step)\n writer.add_images(\n 'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)\n lr_scheduler.step()\n\n if save_cp:\n try:\n os.mkdir(dir_checkpoint)\n logging.info('Created checkpoint directory')\n except OSError:\n pass\n torch.save(net.state_dict(),\n os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))\n logging.info(\n f'Checkpoint {epoch + 1} saved ! 
loss (batch) = ' + str(loss.item()))\n\n writer.close()\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-n', '--network', metavar='NETWORK', type=str,\n default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')\n parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],\n help='Number of epochs', dest='epochs')\n parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],\n help='Batch size', dest='batchsize')\n parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],\n help='Learning rate', dest='lr')\n parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],\n help='Load model from a .pth file')\n parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],\n help='Downscaling factor of the images')\n parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],\n help='Percent of the data that is used as validation (0-100)')\n parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],\n help='Init weights type')\n parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],\n help='Automatic Mixed Precision')\n parser.add_argument('-o', '--optimizer', dest='optimizer',\n type=str, default=conf['OPTIMIZER'], help='Optimizer type')\n parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',\n type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(levelname)s: %(message)s')\n args = get_args()\n device = torch.device('cuda' if torch.cuda.is_available(\n ) and conf['DEVICE'].lower() == 'cuda' else 'cpu')\n logging.info(f'Using device {device}')\n\n network = args.network.lower()\n # Change here to adapt to your data\n # n_channels=3 for RGB images\n # n_classes is the number of probabilities you want to get per pixel\n # - For 1 class and background, use n_classes=1\n # - For 2 classes, use n_classes=1\n # - For N > 2 classes, use n_classes=N\n net = ChooseModel(network)(\n n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])\n assert net is not None, f'check your argument --network'\n\n logging.info(f'Network:\\n'\n f'\\t{net.n_channels} input channels\\n'\n f'\\t{net.n_classes} output channels (classes)\\n'\n f'\\t{\"Bilinear\" if net.bilinear else \"Dilated conv\"} upscaling\\n'\n f'\\tApex is {\"using\" if args.use_apex == \"True\" else \"not using\"}')\n init_weights(net, args.init_type)\n if args.load:\n net.load_state_dict(\n torch.load(args.load, map_location=device)\n )\n logging.info(f'Model loaded from {args.load}')\n\n net.to(device=device)\n # faster convolutions, but more memory\n # cudnn.benchmark = True\n\n try:\n train_net(net=net,\n epochs=args.epochs,\n batch_size=args.batchsize,\n lr=args.lr,\n device=device,\n img_scale=args.scale,\n val_percent=args.val / 100,\n use_apex=(args.use_apex == \"True\"),\n optimizer=args.optimizer.lower(),\n classes=conf['DATASET']['NUM_CLASSES'],\n lr_scheduler=args.lr_scheduler,\n lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n logging.info('Saved interrupt')\n try:\n sys.exit(0)\n 
except SystemExit:\n os._exit(0)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.load",
"torch.utils.data.DataLoader",
"torch.utils.data.random_split",
"torch.nn.BCEWithLogitsLoss",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kastman/nipype | [
"15a8d6f57067494196fe639095253217a9235c3c"
] | [
"nipype/utils/misc.py"
] | [
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Miscellaneous utility functions\n\"\"\"\nfrom __future__ import (print_function, unicode_literals, division,\n absolute_import)\nfrom builtins import next, str\n\nimport os\nimport sys\nimport re\nfrom collections import Iterator\nfrom warnings import warn\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom future.utils import raise_from\nfrom future import standard_library\ntry:\n from textwrap import indent as textwrap_indent\nexcept ImportError:\n\n def textwrap_indent(text, prefix):\n \"\"\" A textwrap.indent replacement for Python < 3.3 \"\"\"\n if not prefix:\n return text\n splittext = text.splitlines(True)\n return prefix + prefix.join(splittext)\n\n\nstandard_library.install_aliases()\n\n\ndef human_order_sorted(l):\n \"\"\"Sorts string in human order (i.e. 'stat10' will go after 'stat2')\"\"\"\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n if isinstance(text, tuple):\n text = text[0]\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n return sorted(l, key=natural_keys)\n\n\ndef trim(docstring, marker=None):\n if isinstance(docstring, bytes):\n docstring = str(docstring, 'utf-8')\n\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n for line in lines[1:]:\n # replace existing REST marker with doc level marker\n stripped = line.lstrip().strip().rstrip()\n if marker is not None and stripped and \\\n all([s == stripped[0] for s in stripped]) and \\\n stripped[0] not in [':']:\n line = line.replace(stripped[0], marker)\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)\n\n\ndef find_indices(condition):\n \"Return the indices where ravel(condition) is true\"\n res, = np.nonzero(np.ravel(condition))\n return res\n\n\ndef is_container(item):\n \"\"\"Checks if item is a container (list, tuple, dict, set)\n\n Parameters\n ----------\n item : object\n object to check for .__iter__\n\n Returns\n -------\n output : Boolean\n True if container\n False if not (eg string)\n \"\"\"\n if isinstance(item, str):\n return False\n elif hasattr(item, '__iter__'):\n return True\n else:\n return False\n\n\ndef container_to_string(cont):\n \"\"\"Convert a container to a command line string.\n\n Elements of the container are joined with a space between them,\n suitable for a command line parameter.\n\n If the container `cont` is only a sequence, like a string and not a\n container, it is returned unmodified.\n\n Parameters\n ----------\n cont : container\n A container object like a list, tuple, dict, or a set.\n\n Returns\n -------\n cont_str : string\n Container elements joined into a string.\n\n \"\"\"\n if hasattr(cont, '__iter__') and not isinstance(cont, str):\n cont = ' '.join(cont)\n return str(cont)\n\n\n# Dependency checks. 
Copied this from Nipy, with some modificiations\n# (added app as a parameter).\ndef package_check(pkg_name,\n version=None,\n app=None,\n checker=LooseVersion,\n exc_failed_import=ImportError,\n exc_failed_check=RuntimeError):\n \"\"\"Check that the minimal version of the required package is installed.\n\n Parameters\n ----------\n pkg_name : string\n Name of the required package.\n version : string, optional\n Minimal version number for required package.\n app : string, optional\n Application that is performing the check. For instance, the\n name of the tutorial being executed that depends on specific\n packages. Default is *Nipype*.\n checker : object, optional\n The class that will perform the version checking. Default is\n distutils.version.LooseVersion.\n exc_failed_import : Exception, optional\n Class of the exception to be thrown if import failed.\n exc_failed_check : Exception, optional\n Class of the exception to be thrown if version check failed.\n\n Examples\n --------\n package_check('numpy', '1.3')\n package_check('scipy', '0.7', 'tutorial1')\n\n \"\"\"\n\n if app:\n msg = '%s requires %s' % (app, pkg_name)\n else:\n msg = 'Nipype requires %s' % pkg_name\n if version:\n msg += ' with version >= %s' % (version, )\n try:\n mod = __import__(pkg_name)\n except ImportError as e:\n raise_from(exc_failed_import(msg), e)\n if not version:\n return\n try:\n have_version = mod.__version__\n except AttributeError as e:\n raise_from(\n exc_failed_check('Cannot find version for %s' % pkg_name), e)\n if checker(have_version) < checker(version):\n raise exc_failed_check(msg)\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n lower = v.lower()\n if lower in (\"yes\", \"true\", \"t\", \"1\"):\n return True\n elif lower in (\"no\", \"false\", \"n\", \"f\", \"0\"):\n return False\n else:\n raise ValueError(\"%s cannot be converted to bool\" % v)\n\n\ndef flatten(S):\n if S == []:\n return S\n if isinstance(S[0], list):\n return flatten(S[0]) + flatten(S[1:])\n return S[:1] + flatten(S[1:])\n\n\ndef unflatten(in_list, prev_structure):\n if not isinstance(in_list, Iterator):\n in_list = iter(in_list)\n\n if not isinstance(prev_structure, list):\n return next(in_list)\n\n out = []\n for item in prev_structure:\n out.append(unflatten(in_list, item))\n return out\n\n\ndef normalize_mc_params(params, source):\n \"\"\"\n Normalize a single row of motion parameters to the SPM format.\n\n SPM saves motion parameters as:\n x Right-Left (mm)\n y Anterior-Posterior (mm)\n z Superior-Inferior (mm)\n rx Pitch (rad)\n ry Roll (rad)\n rz Yaw (rad)\n \"\"\"\n if source.upper() == 'FSL':\n params = params[[3, 4, 5, 0, 1, 2]]\n elif source.upper() in ('AFNI', 'FSFAST'):\n params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]\n params[3:] = params[3:] * np.pi / 180.\n elif source.upper() == 'NIPY':\n from nipy.algorithms.registration import to_matrix44, aff2euler\n matrix = to_matrix44(params)\n params = np.zeros(6)\n params[:3] = matrix[:3, 3]\n params[-1:2:-1] = aff2euler(matrix)\n\n return params\n\n\ndef dict_diff(dold, dnew, indent=0):\n \"\"\"Helper to log what actually changed from old to new values of\n dictionaries.\n\n typical use -- log difference for hashed_inputs\n \"\"\"\n # First check inputs, since they usually are lists of tuples\n # and dicts are required.\n if isinstance(dnew, list):\n dnew = dict(dnew)\n if isinstance(dold, list):\n dold = dict(dold)\n\n # Compare against hashed_inputs\n # Keys: should rarely differ\n new_keys = set(dnew.keys())\n old_keys = 
set(dold.keys())\n\n diff = []\n if new_keys - old_keys:\n diff += [\" * keys not previously seen: %s\" % (new_keys - old_keys)]\n\n if old_keys - new_keys:\n diff += [\" * keys not presently seen: %s\" % (old_keys - new_keys)]\n\n # Add topical message\n if diff:\n diff.insert(0, \"Dictionaries had differing keys:\")\n\n diffkeys = len(diff)\n\n # Values in common keys would differ quite often,\n # so we need to join the messages together\n for k in new_keys.intersection(old_keys):\n try:\n new, old = dnew[k], dold[k]\n same = new == old\n if not same:\n # Since JSON does not discriminate between lists and\n # tuples, we might need to cast them into the same type\n # as the last resort. And lets try to be more generic\n same = old.__class__(new) == old\n except Exception:\n same = False\n if not same:\n diff += [\" * %s: %r != %r\" % (k, dnew[k], dold[k])]\n\n if len(diff) > diffkeys:\n diff.insert(diffkeys, \"Some dictionary entries had differing values:\")\n\n return textwrap_indent('\\n'.join(diff), ' ' * indent)\n\n\ndef rgetcwd(error=True):\n \"\"\"\n Robust replacement for getcwd when folders get removed\n If error==True, this is just an alias for os.getcwd()\n \"\"\"\n if error:\n return os.getcwd()\n\n try:\n cwd = os.getcwd()\n except OSError as exc:\n # Changing back to cwd is probably not necessary\n # but this makes sure there's somewhere to change to.\n cwd = os.getenv('PWD')\n if cwd is None:\n raise OSError((\n exc.errno, 'Current directory does not exist anymore, '\n 'and nipype was not able to guess it from the environment'))\n warn('Current folder does not exist, replacing with \"%s\" instead.' % cwd)\n return cwd\n"
] | [
[
"numpy.asarray",
"numpy.ravel",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jscsmk/CenterNet | [
"d7c643bba2b373c15abfa3d25ffd5304a313fa49"
] | [
"src/lib/models/networks/msra_resnet.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\nBN_MOMENTUM = 0.1\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, heads, head_conv, **kwargs):\n self.inplanes = 64\n self.deconv_with_bias = False\n self.heads = heads\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = 
self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n 3,\n [256, 256, 256],\n [4, 4, 4],\n )\n # self.final_layer = []\n\n for head in sorted(self.heads):\n num_output = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(256, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, num_output, \n kernel_size=1, stride=1, padding=0))\n else:\n fc = nn.Conv2d(\n in_channels=256,\n out_channels=num_output,\n kernel_size=1,\n stride=1,\n padding=0)\n self.__setattr__(head, fc)\n\n # self.final_layer = nn.ModuleList(self.final_layer)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = x.float()\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n ret = {}\n for head in self.heads:\n ret[head] = self.__getattr__(head)(x)\n return [ret]\n\n def init_weights(self, num_layers, pretrained=True):\n if pretrained:\n # print('=> init resnet deconv weights from normal distribution')\n for _, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n # print('=> init {}.weight as normal(0, 0.001)'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n # print('=> init {}.weight as 1'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n # print('=> init final conv weights from normal distribution')\n for head in self.heads:\n final_layer = self.__getattr__(head)\n for i, m in enumerate(final_layer.modules()):\n if isinstance(m, nn.Conv2d):\n # 
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n # print('=> init {}.weight as normal(0, 0.001)'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n if m.weight.shape[0] == self.heads[head]:\n if 'hm' in head:\n nn.init.constant_(m.bias, -2.19)\n else:\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n #pretrained_state_dict = torch.load(pretrained)\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n else:\n print('=> imagenet pretrained model dose not exist')\n print('=> please download it first')\n raise ValueError('imagenet pretrained model does not exist')\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net(num_layers, heads, head_conv):\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, heads, head_conv=head_conv)\n #model.init_weights(num_layers, pretrained=True)\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1chimaruGin/Oject_classifier | [
"d27ca8f47d2d0af107582c25a0756dda15361c2e"
] | [
"objifier/data_loader.py"
] | [
"import torch\nfrom torchvision import datasets, transforms\nimport os\n\ntransform = {\n \"train\": transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]\n ),\n ]\n ),\n \"val\": transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]\n ),\n ]\n ),\n}\n\n\ndef get_loader(root, batch_size, num_workers):\n\n dataset = {\n x: datasets.ImageFolder(os.path.join(root, x), transform=transform[x])\n for x in [\"train\", \"val\"]\n }\n\n data_loader = {\n x: torch.utils.data.DataLoader(\n dataset[x], batch_size=batch_size, shuffle=(x == \"train\"),\n num_workers=num_workers,\n )\n for x in [\"train\", \"val\"]\n }\n\n dataset_size = {x: len(dataset[x]) for x in [\"train\", \"val\"]}\n\n return data_loader, dataset_size\n\n\ndef CIFAR10(batch_size, root=\"data/\"):\n dataset = {\n x: datasets.CIFAR10(\n root, train=(x == \"train\"), download=True, transform=transform[x]\n )\n for x in [\"train\", \"val\"]\n }\n\n data_loader = {\n x: torch.utils.data.DataLoader(\n dataset[x], batch_size=batch_size, shuffle=(x == \"train\")\n )\n for x in [\"train\", \"val\"]\n }\n\n dataset_size = {x: len(dataset[x]) for x in [\"train\", \"val\"]}\n\n return data_loader, dataset_size\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexweav/Deep-Learning | [
"f245708e40f36c4734ea0d4a7e6587624e4b116f"
] | [
"LearnyMcLearnface/Layers/AffineLayer.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 06 14:54:11 2016\n\n@author: Alexander Weaver\n\"\"\"\n\n\"\"\"\nPerforms an affine (fully connected) operation on its input\nAn affine layer with out_dim neurons takes a data array of size Nx(in_dim), x\nand returns a linearly transformed Nx(out_dim) data array\nThe transformation result, z, is determined by a (in_dim)x(out_dim) weight matrix, W, and\na (out_dim) bias vector, b. The transformation of any one data point (one row in x) is given by:\n z = Wx + b\nConstructing this object initializes the parameters following a gaussian random distribution with \nstandard deviation given by weight_scale. \nForward propagating this object performs the affine transformation on the given array, X.\nBackpropagating this object returns the derivatives of x, W, and b with respect to the final output of\nthe network.\n\"\"\"\n\nimport numpy as np\n\nclass AffineLayer(object):\n \n def __init__(self, in_dim, out_dim, weight_scale, data_type=np.float32):\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.weight_scale = weight_scale\n self.data_type = data_type\n self.W = np.random.randn(in_dim, out_dim) * weight_scale\n self.W = self.W.astype(self.data_type)\n self.b = np.zeros(out_dim)\n self.b = self.b.astype(self.data_type)\n \n def forward(self, x, W=None, b=None):\n if W is None:\n W = self.W\n if b is None:\n b = self.b\n N = x.shape[0]\n reshaped_x = x.reshape(N, np.prod(x.shape[1:]))\n out = reshaped_x.dot(W) + b\n self.cache_x = x\n return out\n \n def backward(self, dout):\n x = self.cache_x\n N = x.shape[0] \n reshaped_x = x.reshape(N, np.prod(x.shape[1:]))\n dx = dout.dot(np.transpose(self.W)).reshape(x.shape)\n self.dW = np.transpose(reshaped_x).dot(dout)\n self.db = np.sum(dout, axis=0)\n return dx"
] | [
[
"numpy.random.randn",
"numpy.prod",
"numpy.transpose",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Arushacked/tensorflow | [
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828"
] | [
"tensorflow/python/distribute/collective_all_reduce_strategy.py",
"tensorflow/python/ops/clip_ops.py",
"tensorflow/python/tpu/tpu_outside_compilation_test.py",
"tensorflow/python/debug/lib/check_numerics_callback.py",
"tensorflow/python/tpu/tpu_embedding_v2_test.py",
"tensorflow/python/distribute/input_lib_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport weakref\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# TODO(yuefengz): support in-graph replication.\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for synchronous training on multiple workers.\n\n This strategy implements synchronous distributed training across multiple\n workers, each with potentially multiple GPUs. Similar to\n `tf.distribute.MirroredStrategy`, it creates copies of all variables in the\n model on each device across all workers.\n\n It uses CollectiveOps's implementation of multi-worker all-reduce to\n to keep variables in sync. A collective op is a single op in the\n TensorFlow graph which can automatically choose an all-reduce algorithm in\n the TensorFlow runtime according to hardware, network topology and tensor\n sizes.\n\n By default it uses all local GPUs or CPU for single-worker training.\n\n When 'TF_CONFIG' environment variable is set, it parses cluster_spec,\n task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy\n which mirrored models on GPUs of all machines in a cluster. In the current\n implementation, it uses all GPUs in a cluster and it assumes all workers have\n the same number of GPUs.\n\n You can also pass a `distribute.cluster_resolver.ClusterResolver` instance\n when instantiating the strategy. The task_type, task_id etc. 
will be parsed\n from the resolver instance instead of from the `TF_CONFIG` env var.\n\n It supports both eager mode and graph mode. However, for eager mode, it has to\n set up the eager context in its constructor and therefore all ops in eager\n mode have to run after the strategy object is created.\n\n \"\"\"\n # TODO(anjalisridhar): Update our guides with examples showing how we can use\n # the cluster_resolver argument.\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO,\n cluster_resolver=None):\n \"\"\"Creates the strategy.\n\n Args:\n communication: optional Enum of type\n `distribute.experimental.CollectiveCommunication`. This provides a way\n for the user to override the choice of collective op communication.\n Possible values include `AUTO`, `RING`, and `NCCL`.\n cluster_resolver: optional `distribute.cluster_resolver.ClusterResolver`\n object. The default ClusterResolver that is used is the\n TFConfigClusterResolver which is instantiated from the TF_CONFIG env\n var.\n \"\"\"\n # TODO(b/150151677): consider move communication to CollectiveHints.\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication,\n cluster_resolver=cluster_resolver))\n\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended._num_gpus_per_worker)\n\n @classmethod\n def _from_local_devices(\n cls,\n devices,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n def scope(self): # pylint: disable=useless-super-delegation\n \"\"\"Returns a context manager selecting this Strategy as current.\n\n Inside a `with strategy.scope():` code block, this thread\n will use a variable creator set by `strategy`, and will\n enter its \"cross-replica context\".\n\n In `MultiWorkerMirroredStrategy`, all variables created inside\n `strategy.scope() will be mirrored on all replicas of each worker.\n Moreover, it also sets a default device scope so that ops without\n specified devices will end up on the correct worker.\n\n Returns:\n A context manager to use for creating variables with this strategy.\n \"\"\"\n return super(CollectiveAllReduceStrategy, self).scope()\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"]) # pylint: disable=missing-docstring\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO,\n cluster_resolver=None):\n \"\"\"Initializes the object.\"\"\"\n super(CollectiveAllReduceStrategyV1, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication,\n cluster_resolver=cluster_resolver))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n 
distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_gpu_per_worker\").set(self.extended._num_gpus_per_worker)\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n communication,\n cluster_resolver):\n self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n assert isinstance(\n communication,\n cross_device_ops_lib.CollectiveCommunication)\n self._communication = communication\n self._initialize_strategy(self._cluster_resolver)\n self._cfer_fn_cache = weakref.WeakKeyDictionary()\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local(self, cluster_resolver, devices=None):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. \"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if devices:\n local_devices = devices\n else:\n if num_gpus:\n local_devices = tuple(\"/device:GPU:%d\" % i for i in range(num_gpus))\n else:\n local_devices = (\"/device:CPU:0\",)\n\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices),\n collective_keys=self._collective_keys,\n communication=self._communication)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys,\n communication=cross_device_ops_lib.CollectiveCommunication.RING,\n )\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. 
Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\"Single-worker MultiWorkerMirroredStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices, self._communication)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = config_pb2.ConfigProto()\n config_proto = self._update_config_proto(config_proto)\n\n if hasattr(cluster_resolver, \"port\"):\n port = cluster_resolver.port\n else:\n port = 0\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\",\n port=port)\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. It\n # assumes all workers have the same number of GPUs. 
We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"%s/device:GPU:%d\" % (self._worker_device, i)\n for i in range(num_gpus))\n else:\n local_devices = (self._worker_device,)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices) * self._num_workers,\n collective_keys=self._collective_keys,\n communication=self._communication)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys,\n communication=cross_device_ops_lib.CollectiveCommunication.RING,\n )\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n host_device = device_util.get_host_for_device(self._worker_device)\n self._input_workers = input_lib.InputWorkers(\n [(host_device, self.worker_devices)])\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, \"\n \"task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type,\n task_id, self._num_workers, local_devices,\n self._communication)\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n if replica_id == 0: # First replica on each worker.\n assert device is not None\n assert primary_var is None\n\n def initial_value_fn(): # pylint: disable=g-missing-docstring\n # Only the first device participates in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([device])\n group_size = self._num_workers\n collective_instance_key = (\n self._collective_keys.get_variable_instance_key())\n\n with ops.device(device):\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value = initial_value()\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(\n initial_value, dtype=kwargs.get(\"dtype\", None))\n\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(initial_value.shape,\n initial_value.dtype,\n group_size, group_key,\n collective_instance_key)\n return initial_value\n\n return initial_value_fn\n else:\n return super(CollectiveAllReduceExtended,\n self)._get_variable_creator_initial_value(\n replica_id=replica_id,\n device=device,\n primary_var=primary_var,\n **kwargs)\n\n def _make_input_context(self):\n if self._cluster_spec is None:\n input_pipeline_id = 0\n else:\n 
input_pipeline_id = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=input_pipeline_id,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset, options):\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _experimental_distribute_datasets_from_function(self, dataset_fn,\n options):\n input_context = self._make_input_context()\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn=dataset_fn,\n input_workers=self._input_workers,\n input_contexts=[input_context],\n strategy=self._container_strategy())\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. 
Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if (not ops.executing_eagerly_outside_functions() and\n self._communication ==\n cross_device_ops_lib.CollectiveCommunication.NCCL):\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _get_cross_device_ops(self, value):\n # CollectiveAllReduce works on a predefined set of devices. In most cases\n # they should be the compute devices, but certain use cases may reduce host\n # tensors as well (e.g. early stopping). We infer the cross_device_ops to\n # use based on the number of devices, since inputs don't always have device\n # annotations. The compute devices one is preferred since we can potentially\n # leverage NCCL.\n if isinstance(value, values.DistributedValues):\n num_devices = len(value._values) # pylint: disable=protected-access\n else:\n num_devices = 1\n if num_devices == len(self.worker_devices):\n return self._cross_device_ops\n else:\n return self._host_cross_device_ops\n\n def _reduce_to(self, reduce_op, value, destinations, experimental_hints):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. 
For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, len(self.worker_devices))\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n experimental_hints=experimental_hints)\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication ==\n cross_device_ops_lib.CollectiveCommunication.NCCL) and\n self._num_gpus_per_worker == 0):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return self._num_workers > 1\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Operations for clipping (gradient, weight) tensors to min/max values.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"clip_by_value\")\[email protected]_dispatch_support\ndef clip_by_value(t, clip_value_min, clip_value_max,\n name=None):\n \"\"\"Clips tensor values to a specified min and max.\n\n Given a tensor `t`, this operation returns a tensor of the same type and\n shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\n Any values less than `clip_value_min` are set to `clip_value_min`. Any values\n greater than `clip_value_max` are set to `clip_value_max`.\n\n Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for\n correct results.\n\n For example:\n\n Basic usage passes a scalar as the min and max value.\n\n >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])\n >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)\n >>> t2.numpy()\n array([[-1., -1., 0.],\n [ 0., 1., 1.]], dtype=float32)\n\n The min and max can be the same size as `t`, or broadcastable to that size.\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[2],[1]]\n >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n >>> t3.numpy()\n array([[ 2., 2., 10.],\n [ 1., 1., 10.]], dtype=float32)\n\n Broadcasting fails, intentionally, if you would expand the dimensions of `t`\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[[2, 1]]] # Has a third axis\n >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]\n\n It throws a `TypeError` if you try to clip an `int` to a `float` value\n (`tf.cast` the input to `float` first).\n\n >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)\n >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)\n Traceback (most recent call last):\n ...\n TypeError: Cannot convert ...\n\n\n Args:\n t: A `Tensor` or `IndexedSlices`.\n clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n clip_value_max: The minimum value to clip to. 
A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array\n broadcasting that would make the returned tensor larger than the input.\n TypeError: If dtype of the input is `int32` and dtype of\n the `clip_value_min` or `clip_value_max` is `float32`\n \"\"\"\n with ops.name_scope(name, \"clip_by_value\",\n [t, clip_value_min, clip_value_max]) as name:\n values = ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t, name=\"t\")\n\n # Go through list of tensors, for each value in each tensor clip\n t_min = math_ops.minimum(values, clip_value_max)\n # Assert that the shape is compatible with the initial shape,\n # to prevent unintentional broadcasting.\n _ = values.shape.merge_with(t_min.shape)\n\n t_max = math_ops.maximum(t_min, clip_value_min, name=name)\n _ = values.shape.merge_with(t_max.shape)\n\n if isinstance(t, ops.IndexedSlices):\n t_max = ops.IndexedSlices(t_max, t.indices, t.dense_shape)\n\n return t_max\n # TODO(scottzhu): switch to use new implementation in 2 weeks.\n # return gen_math_ops.clip_by_value(\n # t, clip_value_min, clip_value_max, name=name)\n\n\n# TODO(scottzhu): switch to use new implementation in 2 weeks.\n# @ops.RegisterGradient(\"ClipByValue\")\ndef _clip_by_value_grad(op, grad):\n \"\"\"Returns grad of clip_by_value.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n z = op.inputs[2]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n sz = array_ops.shape(z)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xymask = math_ops.less(x, y)\n xzmask = math_ops.greater(x, z)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n rx, rz = gen_array_ops.broadcast_gradient_args(sx, sz)\n xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)\n ygrad = array_ops.where(xymask, grad, zeros)\n zgrad = array_ops.where(xzmask, grad, zeros)\n gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)\n return (gx, gy, gz)\n\n\n@tf_export(\"clip_by_norm\")\[email protected]_dispatch_support\ndef clip_by_norm(t, clip_norm, axes=None, name=None):\n \"\"\"Clips tensor values to a maximum L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,\n along the dimensions given in `axes`. Specifically, in the default case\n where all dimensions are used for calculation, if the L2-norm of `t` is\n already less than or equal to `clip_norm`, then `t` is not modified. If\n the L2-norm is greater than `clip_norm`, then this operation returns a\n tensor of the same type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm(t)`\n\n In this case, the L2-norm of the output tensor is `clip_norm`.\n\n As another example, if `t` is a matrix and `axes == [1]`, then each row\n of the output will have L2-norm less than or equal to `clip_norm`. 
If\n `axes == [0]` instead, each column of the output will be clipped.\n\n Code example:\n\n >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32)\n >>> tf.clip_by_norm(some_nums, 2.0).numpy()\n array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]],\n dtype=float32)\n\n This operation is typically used to clip gradients before applying them with\n an optimizer. Most gradient data is a collection of different shaped tensors\n for different parts of the model. Thus, this is a common usage:\n\n ```\n # Get your gradients after training\n loss_value, grads = grad(model, features, labels)\n\n # Apply some clipping\n grads = [tf.clip_by_norm(g, norm)\n for g in grads]\n\n # Continue on with training\n optimizer.apply_gradients(grads)\n ```\n\n Args:\n t: A `Tensor` or `IndexedSlices`. This must be a floating point type.\n clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also\n floating point\n axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions\n to use for computing the L2-norm. If `None` (the default), uses all\n dimensions.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n ValueError: If the clip_norm tensor is not a 0-D scalar tensor.\n TypeError: If dtype of the input is not a floating point or\n complex type.\n \"\"\"\n with ops.name_scope(name, \"clip_by_norm\", [t, clip_norm]) as name:\n values = ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t, name=\"t\")\n\n # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm\n l2sum = math_ops.reduce_sum(values * values, axes, keepdims=True)\n pred = l2sum > 0\n # Two-tap tf.where trick to bypass NaN gradients\n l2sum_safe = array_ops.where(pred, l2sum, array_ops.ones_like(l2sum))\n l2norm = array_ops.where(pred, math_ops.sqrt(l2sum_safe), l2sum)\n intermediate = values * clip_norm\n # Assert that the shape is compatible with the initial shape,\n # to prevent unintentional broadcasting.\n _ = values.shape.merge_with(intermediate.shape)\n values_clip = array_ops.identity(\n intermediate / math_ops.maximum(l2norm, clip_norm), name=name)\n\n if isinstance(t, ops.IndexedSlices):\n return ops.IndexedSlices(values_clip, t.indices, t.dense_shape)\n\n return values_clip\n\n\n@tf_export(\"linalg.global_norm\", v1=[\"linalg.global_norm\", \"global_norm\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"global_norm\")\ndef global_norm(t_list, name=None):\n \"\"\"Computes the global norm of multiple tensors.\n\n Given a tuple or list of tensors `t_list`, this operation returns the\n global norm of the elements in all tensors in `t_list`. 
The global norm is\n computed as:\n\n `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`\n\n Any entries in `t_list` that are of type None are ignored.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n name: A name for the operation (optional).\n\n Returns:\n A 0-D (scalar) `Tensor` of type `float`.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n \"\"\"\n if (not isinstance(t_list, collections_abc.Sequence) or\n isinstance(t_list, six.string_types)):\n raise TypeError(\"t_list should be a sequence\")\n t_list = list(t_list)\n with ops.name_scope(name, \"global_norm\", t_list) as name:\n values = [\n ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t,\n name=\"t_%d\" % i)\n if t is not None else t\n for i, t in enumerate(t_list)]\n half_squared_norms = []\n for v in values:\n if v is not None:\n with ops.colocate_with(v):\n half_squared_norms.append(gen_nn_ops.l2_loss(v))\n\n half_squared_norm = math_ops.reduce_sum(array_ops.stack(half_squared_norms))\n\n norm = math_ops.sqrt(\n half_squared_norm *\n constant_op.constant(2.0, dtype=half_squared_norm.dtype),\n name=\"global_norm\")\n\n return norm\n\n\n@tf_export(\"clip_by_global_norm\")\[email protected]_dispatch_support\ndef clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):\n \"\"\"Clips values of multiple tensors by the ratio of the sum of their norms.\n\n Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,\n this operation returns a list of clipped tensors `list_clipped`\n and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,\n if you've already computed the global norm for `t_list`, you can specify\n the global norm with `use_norm`.\n\n To perform the clipping, the values `t_list[i]` are set to:\n\n t_list[i] * clip_norm / max(global_norm, clip_norm)\n\n where:\n\n global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))\n\n If `clip_norm > global_norm` then the entries in `t_list` remain as they are,\n otherwise they're all shrunk by the global ratio.\n\n If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`\n to signal that an error occurred.\n\n Any of the entries of `t_list` that are of type `None` are ignored.\n\n This is the correct way to perform gradient clipping (Pascanu et al., 2012).\n\n However, it is slower than `clip_by_norm()` because all the parameters must be\n ready before the clipping operation can be performed.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.\n use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global\n norm to use. 
If not provided, `global_norm()` is used to compute the norm.\n name: A name for the operation (optional).\n\n Returns:\n list_clipped: A list of `Tensors` of the same type as `list_t`.\n global_norm: A 0-D (scalar) `Tensor` representing the global norm.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n\n References:\n On the difficulty of training Recurrent Neural Networks:\n [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html)\n ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))\n \"\"\"\n if (not isinstance(t_list, collections_abc.Sequence) or\n isinstance(t_list, six.string_types)):\n raise TypeError(\"t_list should be a sequence\")\n t_list = list(t_list)\n if use_norm is None:\n use_norm = global_norm(t_list, name)\n\n with ops.name_scope(name, \"clip_by_global_norm\",\n t_list + [clip_norm]) as name:\n # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm\n scale_for_finite = clip_norm * math_ops.minimum(\n 1.0 / use_norm,\n constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)\n # If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,\n # this will make scale NaN.\n scale = scale_for_finite + (use_norm - use_norm)\n\n values = [\n ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t,\n name=\"t_%d\" % i)\n if t is not None else t\n for i, t in enumerate(t_list)]\n\n values_clipped = []\n for i, v in enumerate(values):\n if v is None:\n values_clipped.append(None)\n else:\n with ops.colocate_with(v):\n values_clipped.append(\n array_ops.identity(v * scale, name=\"%s_%d\" % (name, i)))\n\n list_clipped = [\n ops.IndexedSlices(c_v, t.indices, t.dense_shape)\n if isinstance(t, ops.IndexedSlices)\n else c_v\n for (c_v, t) in zip(values_clipped, t_list)]\n\n return list_clipped, use_norm\n\n\[email protected](\n date=None,\n instructions=\"clip_by_average_norm is deprecated in TensorFlow 2.0. Please \"\n \"use clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32), name) \"\n \"instead.\")\n@tf_export(v1=[\"clip_by_average_norm\"])\[email protected]_dispatch_support\ndef clip_by_average_norm(t, clip_norm, name=None):\n \"\"\"Clips tensor values to a maximum average L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its average L2-norm is less than or equal to\n `clip_norm`. Specifically, if the average L2-norm is already less than or\n equal to `clip_norm`, then `t` is not modified. If the average L2-norm is\n greater than `clip_norm`, then this operation returns a tensor of the same\n type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm_avg(t)`\n\n In this case, the average L2-norm of the output tensor is `clip_norm`.\n\n This operation is typically used to clip gradients before applying them with\n an optimizer.\n\n Args:\n t: A `Tensor`.\n clip_norm: A 0-D (scalar) `Tensor` > 0. 
A maximum clipping value.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"clip_by_average_norm\", [t, clip_norm]) as name:\n t = ops.convert_to_tensor(t, name=\"t\")\n\n # Calculate L2-norm per element, clip elements by ratio of clip_norm to\n # L2-norm per element\n n_element = math_ops.cast(array_ops.size(t), dtypes.float32)\n l2norm_inv = math_ops.rsqrt(\n math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))\n tclip = array_ops.identity(\n t * clip_norm * math_ops.minimum(\n l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),\n name=name)\n\n return tclip\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TPU outside compilation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import tpu_strategy as tpu_lib\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_strategy_util\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string(\"tpu\", \"\", \"Name of TPU to connect to.\")\nflags.DEFINE_string(\"project\", None, \"Name of GCP project with TPU.\")\nflags.DEFINE_string(\"zone\", None, \"Name of GCP zone with TPU.\")\n\n\ndef get_tpu_cluster_resolver():\n resolver = tpu_cluster_resolver.TPUClusterResolver(\n tpu=FLAGS.tpu,\n zone=FLAGS.zone,\n project=FLAGS.project,\n )\n return resolver\n\n\ndef get_tpu_strategy():\n resolver = get_tpu_cluster_resolver()\n remote.connect_to_cluster(resolver)\n tpu_strategy_util.initialize_tpu_system(resolver)\n return tpu_lib.TPUStrategy(resolver)\n\n\nclass TpuOutsideCompilationTest(test.TestCase):\n\n def testResourceVariableAssignOnHost(self):\n strategy = get_tpu_strategy()\n with strategy.scope():\n v = variables.Variable(\n 0.0, aggregation=variables.VariableAggregation.MEAN)\n v2 = variables.Variable(0.0, aggregation=variables.VariableAggregation.MEAN)\n\n def assign_fn():\n v2.assign_add(4.0)\n\n @def_function.function\n def train_step():\n\n def assign_add():\n v.assign_add(2.0)\n tpu.outside_compilation(assign_fn)\n v.assign_add(3.0)\n\n strategy.run(assign_add)\n return\n\n train_step()\n self.assertAllEqual(4.0 * strategy.num_replicas_in_sync, v2.numpy())\n self.assertAllEqual(5.0, v.numpy())\n\n def testHostInputOnly(self):\n strategy = get_tpu_strategy()\n\n def outside_fn(x):\n logging_ops.print_v2(\"Outside compiled\", x)\n\n @def_function.function\n def train_step():\n\n def tpu_fn(x):\n x2 = x + 5.0\n tpu.outside_compilation(outside_fn, x2)\n return x2 + 5.0\n\n return strategy.run(tpu_fn, args=(25.0,))\n\n self.assertAllEqual(\n strategy.experimental_local_results(train_step()),\n constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))\n\n def testHostInputOutput(self):\n strategy = get_tpu_strategy()\n\n def outside_fn(x):\n logging_ops.print_v2(\"Outside compiled\", x)\n return x + 6.0\n\n @def_function.function\n def train_step():\n\n def tpu_fn(x):\n x2 = x + 5.0\n output = tpu.outside_compilation(outside_fn, x2)\n return output\n\n return strategy.run(tpu_fn, args=(25.0,))\n\n self.assertAllEqual(\n 
strategy.experimental_local_results(train_step()),\n constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))\n\n def testOutsideCompilationControlFlowIf(self):\n strategy = get_tpu_strategy()\n\n def outside_fn(x):\n logging_ops.print_v2(\"Outside compiled\", x)\n return x + 6.0\n\n @def_function.function\n def train_step():\n\n def tpu_fn(x):\n x2 = x + 5.0\n if x < 50.0:\n return tpu.outside_compilation(outside_fn, x2)\n else:\n return x2\n\n return strategy.run(tpu_fn, args=(25.0,))\n\n self.assertAllEqual(\n strategy.experimental_local_results(train_step()),\n constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))\n\n def testOutsideCompilationControlFlowWhile(self):\n strategy = get_tpu_strategy()\n\n def outside_fn(x):\n logging_ops.print_v2(\"Outside compiled\", x)\n return x + 6.0\n\n @def_function.function\n def train_step():\n\n def tpu_fn(x):\n x2 = x + 5.0\n while x2 < 50.0:\n x2 = tpu.outside_compilation(outside_fn, x2)\n return x2 + 4.0\n\n return strategy.run(tpu_fn, args=(25.0,))\n\n self.assertAllEqual(\n strategy.experimental_local_results(train_step()),\n constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Eager-graph unified check numerics callback.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import debug_event_pb2\nfrom tensorflow.python.debug.lib import op_callbacks_common\nfrom tensorflow.python.debug.lib import source_utils\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import op_callbacks\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_debug_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Many ops have benign NaN outputs, and running them with check_numerics\n# on will create unwanted errors\n# TODO(b/142497024): Replace this whitelist with function decorators in the ops\nIGNORE_OP_OUTPUTS = (\n # For FusedBatchNorm, if the input tensor is empty then batch_mean and\n # batch_variance will be NaN. reserve_space holds intermediate values\n # derived from batch_mean and batch_variance used for gradient calculation\n (b\"FusedBatchNorm\", 1), # batch_mean\n (b\"FusedBatchNorm\", 2), # batch_variance\n (b\"FusedBatchNorm\", 3), # reserve_space_1\n (b\"FusedBatchNorm\", 4), # reserve_space_2\n\n # Same as above\n (b\"FusedBatchNormV2\", 1), # batch_mean\n (b\"FusedBatchNormV2\", 2), # batch_variance\n (b\"FusedBatchNormV2\", 3), # reserve_space_1\n (b\"FusedBatchNormV2\", 4), # reserve_space_2\n\n # Same as above, but reserve_space_3 holds additional intermediate values\n (b\"FusedBatchNormV3\", 1), # batch_mean\n (b\"FusedBatchNormV3\", 2), # batch_variance\n (b\"FusedBatchNormV3\", 3), # reserve_space_1\n (b\"FusedBatchNormV3\", 4), # reserve_space_2\n (b\"FusedBatchNormV3\", 5), # reserve_space_3\n)\n\n# Some frequently used ops are generally safe and we can skip them to reduce\n# overhead. 
NOTE: This list is compiled by observing operations called by\n# models in practice and is not a comprehensive list of safe operations.\nSAFE_OPS = (\n b\"Concat\",\n b\"ConcatV2\",\n b\"ExpandDims\",\n b\"Fill\",\n b\"Gather\",\n b\"Maximum\",\n b\"Minimum\",\n b\"Reshape\",\n b\"Slice\",\n b\"Squeeze\",\n b\"Stack\",\n b\"StridedSlice\",\n b\"StridedSliceGrad\",\n b\"TensorListConcatV2\",\n b\"TensorListGather\",\n b\"TensorListGetItem\",\n b\"TensorListPopBack\",\n b\"TensorListStack\",\n b\"Transpose\",\n b\"Unpack\",\n)\n\n_state = threading.local()\n\n_check_numerics_callback_create_counter = monitoring.Counter(\n \"/tensorflow/api/python/debugging/check_numerics_callback_create_counter\",\n \"Counter for number of times the check_numerics op callback is created.\")\n\n\ndef limit_string_length(string, max_len=50):\n \"\"\"Limit the length of input string.\n\n Args:\n string: Input string.\n max_len: (int or None) If int, the length limit. If None, no limit.\n\n Returns:\n Possibly length-limited string.\n \"\"\"\n if max_len is None or len(string) <= max_len:\n return string\n else:\n return \"...\" + string[len(string) - max_len:]\n\n\n# A dictionary that supports looking up the original input tensor names.\n_CHECK_NUMERICS_INPUT_LOOKUP = collections.defaultdict(dict)\n\n\ndef _maybe_lookup_original_input_tensor(graph, tensor):\n if (graph and\n graph in _CHECK_NUMERICS_INPUT_LOOKUP and\n tensor.name in _CHECK_NUMERICS_INPUT_LOOKUP[graph]):\n return _CHECK_NUMERICS_INPUT_LOOKUP[graph][tensor.name]\n else:\n return tensor\n\n\ndef get_check_numerics_error_message(slot,\n num_outputs,\n op_type,\n tensor,\n inputs,\n graph=None,\n traceback=None,\n stack_height_limit=30,\n path_length_limit=50):\n \"\"\"Create a meaningful and user-friendly error message about offending tensor.\n\n The error message reveals the following info about the op that outputs\n NaN/Infinity: dtype, shape (to the extent known at graph-construction time),\n input tensors, stack trace for op creation (if is graph mode).\n\n Args:\n slot: (int) slot index of the tensor output.\n num_outputs: (int) total number of outputs of the op.\n op_type: (str) Type of the that generates `tensor`.\n tensor: (Tensor) the offending tensor, i.e., the tensor that contains\n Infinities or NaNs.\n inputs: (array of Tensor) inputs to the op that generates `tensor`.\n graph: (tf.Graph) the graph object that `tensor` belongs to. Available only\n under graph mode.\n traceback: (list of trace frames) the stack trace of the op's creation.\n Available only under graph model.\n stack_height_limit: (int or None) If int, limit to the height of the stack\n trace printed in the error message. If None, no limit to the height.\n path_length_limit: (int or None) Length limit for file paths included in the\n formatted stack trace.\n\n Returns:\n (str) A formatted error message.\n \"\"\"\n eager_vs_graph_qualifier = \"graph\" if graph else \"eagerly-executing\"\n message = \"\\n\"\n message += (\n \"\\n!!! Detected Infinity or NaN in output %d of \"\n \"%s op \\\"%s\\\" (# of outputs: %d) !!!\\n\" %\n (slot, eager_vs_graph_qualifier, op_type, num_outputs))\n\n message += \" dtype: %s\\n\" % tensor.dtype\n message += \" shape: %s\\n\" % (tensor.shape,)\n\n if not graph:\n # This is an eager tensor. 
We can get its numpy value and count\n # NaNs and Infs.\n is_inf = np.isinf(tensor)\n\n num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.), is_inf))\n num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.), is_inf))\n num_nan = np.sum(np.isnan(tensor))\n if num_neg_inf > 0:\n message += \" # of -Inf elements: %s\\n\" % num_neg_inf\n if num_pos_inf > 0:\n message += \" # of +Inf elements: %s\\n\" % num_pos_inf\n if num_nan:\n message += \" # of +NaN elements: %s\\n\" % num_nan\n\n if len(inputs) > 1:\n message += \"\\n Input tensors (%d):\\n\" % len(inputs)\n for slot, input_tensor in enumerate(inputs):\n message += \" %d: %s\\n\" % (\n slot, _maybe_lookup_original_input_tensor(graph, input_tensor))\n elif len(inputs) == 1:\n message += \"\\n Input tensor: %s\\n\" % (\n _maybe_lookup_original_input_tensor(graph, inputs[0]))\n if graph and hasattr(graph, \"name\") and graph.name:\n message += \" Graph name: \\\"%s\\\"\\n\" % graph.name\n\n # Format the stack trace for the op's creation. We omit files that\n # belong to tensorflow itself.\n if graph and traceback:\n message += (\n \"\\n Stack trace of op's creation (\\\"->\\\": inferred user code):\\n\")\n if stack_height_limit is not None and len(traceback) > stack_height_limit:\n num_omitted_frames = len(traceback) - stack_height_limit\n message += \" + ... (Omitted %d frames)\\n\" % num_omitted_frames\n for filepath, lineno, function_name, source_line in traceback[\n -stack_height_limit:]:\n user_code_indicator = \" \"\n if not source_utils.guess_is_tensorflow_py_library(filepath):\n user_code_indicator = \" -> \"\n\n message += \" + %s (L%d) %s\\n\" % (\n limit_string_length(filepath, path_length_limit), lineno,\n function_name)\n if source_line is not None:\n message += \"%s| %s\\n\" % (user_code_indicator, source_line)\n message += \"\\n\"\n return message\n\n\ndef _debug_summary(x):\n return gen_debug_ops.debug_numeric_summary_v2(\n x,\n tensor_debug_mode=(\n debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS))\n\n\nclass CheckNumericsCallback(object):\n \"\"\"Wrapper for the numerics-checking callback for thread locality.\"\"\"\n\n def __init__(self, stack_height_limit, path_length_limit):\n self._stack_height_limit = stack_height_limit\n self._path_length_limit = path_length_limit\n # A dict mapping Placeholder tensors to their instrumenting debug tensors.\n # Used only under V1 graph mode, where we can't rely on auto control\n # dependency to execute the debug tensors and hence need to attach the debug\n # tensors as control dependencies of the ops that consume the Placeholder.\n self._placeholder_to_debug_tensor = dict()\n\n def callback(self,\n op_type,\n inputs,\n attrs,\n outputs,\n op_name=None,\n graph=None):\n \"\"\"Eager-function unified callback for checking numerics.\"\"\"\n del attrs, op_name # Unused\n op_type_bytes = compat.as_bytes(op_type)\n is_v1_graph_mode = not ops.executing_eagerly_outside_functions()\n if (op_type_bytes in op_callbacks_common.OP_CALLBACK_SKIP_OPS or\n op_type_bytes in SAFE_OPS):\n return None\n if graph:\n # Under graph mode. 
Insert check_numerics op.\n instrumented_outputs = []\n if is_v1_graph_mode:\n for input_tensor in inputs:\n if input_tensor in self._placeholder_to_debug_tensor and outputs:\n outputs[0].op._add_control_input( # pylint: disable=protected-access\n self._placeholder_to_debug_tensor[input_tensor].op)\n for slot, output in enumerate(outputs):\n if (output.dtype.is_floating and\n (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):\n checked_output = array_ops.check_numerics_v2(\n # TF v2 has automatic control dependencies added to stateful async\n # ops, which allows us to run check_numerics asynchronously.\n # In the above case we use debug_summary to reduce all output\n # tensors asynchronously from the op being checked and then\n # process the tensor summary with check_numerics.\n output if is_v1_graph_mode else _debug_summary(output),\n get_check_numerics_error_message(\n slot,\n len(outputs),\n op_type,\n output,\n inputs,\n graph=graph,\n traceback=output.op.traceback,\n stack_height_limit=self._stack_height_limit,\n path_length_limit=self._path_length_limit))\n _CHECK_NUMERICS_INPUT_LOOKUP[graph][checked_output.name] = output\n instrumented_outputs.append(self._get_output_tensor(\n op_type_bytes, output, checked_output, is_v1_graph_mode))\n else:\n instrumented_outputs.append(output)\n return instrumented_outputs\n else:\n if op_type_bytes == b\"CheckNumericsV2\":\n # TODO(b/140334369): Remove this special casing logic once op_callback.\n # automatically prevents infinite recursion in eager mode.\n return None\n # Under eager mode. Eagerly execute check_numerics op.\n for slot, output in enumerate(outputs):\n if (output.dtype.is_floating and\n (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):\n array_ops.check_numerics_v2(\n output,\n get_check_numerics_error_message(\n slot, len(outputs), op_type, output, inputs,\n stack_height_limit=self._stack_height_limit,\n path_length_limit=self._path_length_limit))\n\n def _get_output_tensor(self,\n op_type,\n tensor,\n checked_tensor,\n is_v1_graph_mode):\n \"\"\"Determine what tensor to output from callback.\n\n Args:\n op_type: Type of the op that outputs the original symbolic tensor, as\n `bytes`.\n tensor: The original output symbolic tensor.\n checked_tensor: The debugger-instrumented, numerics-checking tensor.\n is_v1_graph_mode: Whether the debugged proggram is running under V1 graph\n mode.\n\n Returns:\n A symbolic tensor to be returned by the dumping op_callback.\n \"\"\"\n if is_v1_graph_mode:\n # Placeholders need special treatment under V1 graph mode. The\n # callback can't simply override the Placeholder tensor to the debug\n # tensor, as that would cause the Placeholder op to lack a value.\n # The debug tensor is remembered and will be attached as control\n # inputs to ops that consumer the Placeholders later.\n if op_type == b\"Placeholder\":\n self._placeholder_to_debug_tensor[tensor] = checked_tensor\n return tensor\n else:\n return checked_tensor\n else:\n # Under non-v1 graph mode, rely on auto control dependency to run the\n # checked tensor.\n return tensor\n\n\n@tf_export(\"debugging.enable_check_numerics\")\ndef enable_check_numerics(stack_height_limit=30,\n path_length_limit=50):\n r\"\"\"Enable tensor numerics checking in an eager/graph unified fashion.\n\n The numerics checking mechanism will cause any TensorFlow eager execution or\n graph execution to error out as soon as an op's output tensor contains\n infinity or NaN.\n\n This method is idempotent. 
Calling it multiple times has the same effect\n as calling it once.\n\n This method takes effect only on the thread in which it is called.\n\n When an op's float-type output tensor contains any Infinity or NaN, a\n `tf.errors.InvalidArgumentError` will be thrown, with an error message that\n reveals the following information:\n - The type of the op that generated the tensor with bad numerics.\n - Data type (dtype) of the tensor.\n - Shape of the tensor (to the extent known at the time of eager execution\n or graph construction).\n - Name of the containing graph (if available).\n - (Graph mode only): The stack trace of the intra-graph op's creation,\n with a stack-height limit and a path-length limit for visual clarity.\n The stack frames that belong to the user's code (as opposed to\n tensorflow's internal code) are highlighted with a text arrow (\"->\").\n - (Eager mode only): How many of the offending tensor's elements are\n `Infinity` and `NaN`, respectively.\n\n Once enabled, the check-numerics mechanism can be disabled by using\n `tf.debugging.disable_check_numerics()`.\n\n Example usage:\n\n 1. Catching infinity during the execution of a `tf.function` graph:\n\n ```py\n import tensorflow as tf\n\n tf.debugging.enable_check_numerics()\n\n @tf.function\n def square_log_x_plus_1(x):\n v = tf.math.log(x + 1)\n return tf.math.square(v)\n\n x = -1.0\n\n # When the following line runs, a function graph will be compiled\n # from the Python function `square_log_x_plus_1()`. Due to the\n # `enable_check_numerics()` call above, the graph will contain\n # numerics checking ops that will run during the function graph's\n # execution. The function call generates an -infinity when the Log\n # (logarithm) op operates on the output tensor of the Add op.\n # The program errors out at this line, printing an error message.\n y = square_log_x_plus_1(x)\n z = -y\n ```\n\n 2. Catching NaN during eager execution:\n\n ```py\n import numpy as np\n import tensorflow as tf\n\n tf.debugging.enable_check_numerics()\n\n x = np.array([[0.0, -1.0], [4.0, 3.0]])\n\n # The following line executes the Sqrt op eagerly. Due to the negative\n # element in the input array, a NaN is generated. Due to the\n # `enable_check_numerics()` call above, the program errors immediately\n # at this line, printing an error message.\n y = tf.math.sqrt(x)\n z = tf.matmul(y, y)\n ```\n\n NOTE: If your code is running on TPUs, be sure to call\n `tf.config.set_soft_device_placement(True)` before calling\n `tf.debugging.enable_check_numerics()` as this API uses automatic outside\n compilation on TPUs. For example:\n\n ```py\n tf.config.set_soft_device_placement(True)\n tf.debugging.enable_check_numerics()\n\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n strategy = tf.distribute.experimental.TPUStrategy(resolver)\n with strategy.scope():\n # ...\n ```\n\n Args:\n stack_height_limit: Limit to the height of the printed stack trace.\n Applicable only to ops in `tf.function`s (graphs).\n path_length_limit: Limit to the file path included in the printed stack\n trace. 
Applicable only to ops in `tf.function`s (graphs).\n \"\"\"\n if not hasattr(_state, \"check_numerics_callback\"):\n _state.check_numerics_callback = CheckNumericsCallback(\n stack_height_limit, path_length_limit)\n op_callbacks.add_op_callback(_state.check_numerics_callback.callback)\n\n logging.info(\n \"Enabled check-numerics callback in thread %s\",\n threading.current_thread().name)\n _check_numerics_callback_create_counter.get_cell().increase_by(1)\n\n\n@tf_export(\"debugging.disable_check_numerics\")\ndef disable_check_numerics():\n \"\"\"Disable the eager/graph unified numerics checking mechanism.\n\n This method can be used after a call to `tf.debugging.enable_check_numerics()`\n to disable the numerics-checking mechanism that catches infinity and NaN\n values output by ops executed eagerly or in tf.function-compiled graphs.\n\n This method is idempotent. Calling it multiple times has the same effect\n as calling it once.\n\n This method takes effect only on the thread in which it is called.\n \"\"\"\n if not hasattr(_state, \"check_numerics_callback\"):\n return\n try:\n op_callbacks.remove_op_callback(_state.check_numerics_callback.callback)\n delattr(_state, \"check_numerics_callback\")\n logging.info(\n \"Disabled check-numerics callback in thread %s\",\n threading.current_thread().name)\n except KeyError:\n # Tolerate disabling the check numerics callback without\n # enable_check_numerics() being called first.\n pass\n",
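As a quick end-to-end illustration of the enable/disable pair defined above, the short sketch below (assuming a standard TensorFlow 2.x installation; it relies only on the public `tf.debugging` API shown in this file, not on the internal `CheckNumericsCallback` class) provokes a -Inf inside a `tf.function`, catches the resulting `tf.errors.InvalidArgumentError`, and then turns the mechanism back off for the current thread.

```py
import tensorflow as tf

# Turn the numerics-checking callback on for the current thread.
tf.debugging.enable_check_numerics()

@tf.function
def unstable(x):
  # log(0.) yields -Inf, so the instrumented graph raises
  # tf.errors.InvalidArgumentError when this op executes.
  return tf.math.log(x)

try:
  unstable(tf.constant([1.0, 0.0]))
except tf.errors.InvalidArgumentError as e:
  print("caught bad numerics:", e.message.splitlines()[0])
finally:
  # Symmetric tear-down; both calls are idempotent and thread-local.
  tf.debugging.disable_check_numerics()
```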
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TPU Embeddings mid level API on TPU.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport itertools\nimport os\n\nfrom absl import flags\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.compat import v2_compat\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import init_ops_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.tpu import tpu_embedding\nfrom tensorflow.python.tpu import tpu_embedding_v2\nfrom tensorflow.python.tpu import tpu_embedding_v2_utils\nfrom tensorflow.python.tpu import tpu_strategy_util\nfrom tensorflow.python.training import checkpoint_utils\nfrom tensorflow.python.training.tracking import util\nfrom tensorflow.python.util import nest\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')\nflags.DEFINE_string('project', None, 'Name of GCP project with TPU.')\nflags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')\nflags.DEFINE_string('model_dir', os.environ.get('TEST_TMPDIR'),\n 'A temporary directory.')\n\n\nclass TPUEmbeddingCheckpointTest(parameterized.TestCase, test.TestCase):\n\n def setUp(self):\n super(TPUEmbeddingCheckpointTest, self).setUp()\n self.resolver = tpu_cluster_resolver.TPUClusterResolver(\n tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)\n remote.connect_to_cluster(self.resolver)\n tpu_strategy_util.initialize_tpu_system(self.resolver)\n self.strategy = tpu_strategy.TPUStrategy(self.resolver)\n self.num_rows = self.strategy.num_replicas_in_sync\n\n # These tests use two mid level API objects, initialized with different\n # values. 
These have the same sizes.\n with self.strategy.scope():\n self.first_mid_level_contents = np.ones((self.num_rows, 4))\n self.first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(\n learning_rate=0.1)\n self.first_mid_level = self.build_mid_level(\n self.first_mid_level_contents, self.first_mid_level_optimizer)\n\n self.second_mid_level_contents = np.ones((self.num_rows, 4)) * 2\n self.second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(\n learning_rate=0.1)\n self.second_mid_level = self.build_mid_level(\n self.second_mid_level_contents, self.second_mid_level_optimizer,\n initialize_tpu_embedding=False)\n\n self.cpu_mid_level_optimizer = tpu_embedding_v2_utils.SGD(\n learning_rate=0.1)\n self.cpu_mid_level = self.build_mid_level(\n self.second_mid_level_contents, self.cpu_mid_level_optimizer)\n\n def tearDown(self):\n tpu_strategy_util.shutdown_tpu_system(self.resolver)\n super(TPUEmbeddingCheckpointTest, self).tearDown()\n\n def test_checkpoint_save_retrieves(self):\n # Ensure that the variables from the first model are loaded.\n self.first_mid_level._load_variables()\n\n self.assertAllClose(\n self.first_mid_level_contents,\n self.make_checkpoint_and_get_embedding('before_load',\n self.first_mid_level),\n msg='Checkpoint should contain values from the first api object.')\n\n self.second_mid_level._load_variables()\n\n # When we load the variables from the second mid level API object to the TPU\n # we expect that checkpointing the first mid level API object will now\n # retrieve the values from the TPU which are now different from the current\n # variables in the first mid level.\n self.assertAllClose(\n self.second_mid_level_contents,\n self.make_checkpoint_and_get_embedding('after_load',\n self.first_mid_level),\n msg='Checkpoint should contain values from the second api object.')\n\n def test_checkpoint_restore_loads(self):\n\n def get_values(mid):\n return ops.convert_to_tensor(\n mid._variables['table']['parameters'].variables[0])\n\n self.first_mid_level._load_variables()\n\n first_checkpoint = util.Checkpoint(model=self.first_mid_level)\n first_checkpoint.save(_get_tmpdir('restore', 'save'))\n\n # Checkpoint now has values from first_mid_level. 
See first assert in\n # test_checkpoint_save_retrieves.\n\n self.second_mid_level._load_variables()\n\n self.assertAllClose(\n self.second_mid_level_contents,\n get_values(self.second_mid_level),\n msg='Second mid level api should contain its initial values.',\n )\n\n # We restore the checkpoint of our first model into our second model.\n # This should load the first mid level API object onto the TPU.\n second_checkpoint = util.Checkpoint(model=self.second_mid_level)\n second_checkpoint.restore(_get_tmpdir('restore', 'save-1'))\n\n # Call retrieve here as a way to check what the TPU contains.\n # Calling the retrieve ops directly might make for a cleaner separation of\n # test and module, though.\n self.second_mid_level._retrieve_variables()\n\n self.assertAllClose(\n self.first_mid_level_contents,\n get_values(self.second_mid_level),\n msg='Second mid level api should have retrieved the first model values.'\n )\n\n def test_checkpoint_restore_before_variable_creation(self):\n\n class TestModule(module.Module):\n\n def __init__(self, initializer, rows):\n self._initializer = initializer\n self._rows = rows\n\n def create_embedding(self):\n table = tpu_embedding_v2_utils.TableConfig(\n vocabulary_size=self._rows, dim=4, initializer=self._initializer,\n combiner='sum', name='table')\n feature_config = (tpu_embedding_v2_utils.FeatureConfig(\n table=table, name='feature'),)\n optimizer = tpu_embedding_v2_utils.SGD()\n\n self.tpu_embedding = tpu_embedding_v2.TPUEmbedding(\n feature_config, self._rows, optimizer)\n\n # We need to clear the already loaded config provided by setUp method.\n tpu_strategy_util.initialize_tpu_system(self.resolver)\n\n with self.strategy.scope():\n module1 = TestModule(init_ops_v2.Ones(),\n self.strategy.num_replicas_in_sync * 2)\n module1.create_embedding()\n\n checkpoint = util.Checkpoint(test_module=module1)\n checkpoint.save(_get_tmpdir('restore_before_create', 'save'))\n\n tpu_strategy_util.initialize_tpu_system(self.resolver)\n\n with self.strategy.scope():\n module2 = TestModule(init_ops_v2.Zeros(),\n self.strategy.num_replicas_in_sync * 2)\n\n checkpoint = util.Checkpoint(test_module=module2)\n checkpoint.restore(_get_tmpdir('restore_before_create', 'save-1'))\n\n with self.strategy.scope():\n module2.create_embedding()\n\n def get_values(mid):\n return mid._variables['table']['parameters'].variables[0].numpy()\n\n self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),\n get_values(module2.tpu_embedding))\n\n # Fetch the values from the TPU to check that they are the same.\n module2.tpu_embedding._retrieve_variables()\n\n self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),\n get_values(module2.tpu_embedding))\n\n def build_mid_level(self, embedding_values, optimizer,\n initialize_tpu_embedding=True):\n \"\"\"Creates an embedding api object initialized to embedding_values.\"\"\"\n initializer = init_ops_v2.Constant(embedding_values)\n\n table = tpu_embedding_v2_utils.TableConfig(\n vocabulary_size=self.num_rows, dim=4, initializer=initializer,\n combiner='sum', name='table')\n feature_config = (tpu_embedding_v2_utils.FeatureConfig(\n table=table, name='feature'),)\n\n # batch_size here does not matter as we aren't training in any of these\n # tests.\n return tpu_embedding_v2.TPUEmbedding(\n feature_config, 64, optimizer,\n initialize_tpu_embedding=initialize_tpu_embedding)\n\n def make_checkpoint_and_get_embedding(self, name, model):\n \"\"\"Saves model to checkpoint name, retrieves embedding variables.\"\"\"\n 
checkpoint = util.Checkpoint(model=model)\n checkpoint.save(_get_tmpdir(name, 'save'))\n\n # Get the name of the parameters variable which should be the only\n # [self.num_rows, 4] shaped tensor in the checkpoint. Note that we do this\n # as the key can change.\n variables = checkpoint_utils.list_variables(_get_tmpdir(name))\n variables = [name for name, size in variables if size == [self.num_rows, 4]]\n if len(variables) != 1:\n raise RuntimeError('Found {} copies of the parameter variable in the '\n 'checkpoint. Exactly one copy exported.'.format(\n len(variables)))\n return checkpoint_utils.load_variable(_get_tmpdir(name), variables[0])\n\n def test_model_export_cpu(self):\n self.first_mid_level._load_variables()\n\n tpu_checkpoint = util.Checkpoint(model=self.first_mid_level)\n tpu_checkpoint.save(_get_tmpdir('export_cpu', 'save'))\n\n # We restore the checkpoint of our tpu mid level onto our cpu mid level.\n cpu_checkpoint = util.Checkpoint(model=self.cpu_mid_level)\n cpu_checkpoint.restore(_get_tmpdir('export_cpu', 'save-1'))\n\n @def_function.function\n def serve_tensors(features):\n features = tpu_embedding_v2.cpu_embedding_lookup(\n features, None, self.cpu_mid_level.embedding_tables,\n self.cpu_mid_level._feature_config)\n return features[0]\n\n signatures = {\n 'serving_default':\n serve_tensors.get_concrete_function(\n (tensor_spec.TensorSpec(\n shape=(2,), dtype=dtypes.int32, name='feature'),))}\n save.save(self.cpu_mid_level,\n export_dir=_get_tmpdir('export_cpu', 'exported_model'),\n signatures=signatures)\n\n imported = load.load(_get_tmpdir('export_cpu', 'exported_model'))\n predict_fn = imported.signatures['serving_default']\n\n input_feature_value = np.array([1, 0])\n input_batch = (constant_op.constant(input_feature_value,\n dtype=dtypes.int32),)\n prediction = predict_fn(*input_batch)['output_0']\n self.assertAllClose(prediction.numpy(),\n self.first_mid_level_contents[input_feature_value])\n\n @parameterized.parameters(tpu_embedding_v2_utils.SGD,\n tpu_embedding_v2_utils.Adagrad,\n tpu_embedding_v2_utils.Adam)\n def test_check_checkpoint_variable_names_are_same_on_cpu_and_tpu(self,\n optimizer):\n # Reinitialize the TPU so that we can re-initialize the embeddings with the\n # given optimizer.\n tpu_strategy_util.initialize_tpu_system(self.resolver)\n optimizer = optimizer(learning_rate=0.1)\n\n with self.strategy.scope():\n tpu_mid_level = self.build_mid_level(\n self.first_mid_level_contents, optimizer)\n\n tpu_checkpoint = util.Checkpoint(model=tpu_mid_level)\n tpu_checkpoint.save(_get_tmpdir('save-tpu', 'save'))\n tpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-tpu'))\n\n cpu_mid_level = self.build_mid_level(\n self.first_mid_level_contents, optimizer)\n\n cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)\n cpu_checkpoint.save(_get_tmpdir('save-cpu', 'save'))\n cpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-cpu'))\n\n self.assertAllEqual(tpu_variables, cpu_variables)\n\n\nclass TPUEmbeddingTest(parameterized.TestCase, test.TestCase):\n\n def setUp(self):\n super(TPUEmbeddingTest, self).setUp()\n self.embedding_values = np.array(list(range(32)), dtype=np.float64)\n self.initializer = init_ops_v2.Constant(self.embedding_values)\n # Embedding for video initialized to\n # 0 1 2 3\n # 4 5 6 7\n # ...\n self.table_video = tpu_embedding_v2_utils.TableConfig(\n vocabulary_size=8,\n dim=4,\n initializer=self.initializer,\n combiner='sum',\n name='video')\n # Embedding for user initialized to\n # 0 1\n # 2 3\n # 4 5\n # 6 7\n # ...\n 
self.table_user = tpu_embedding_v2_utils.TableConfig(\n vocabulary_size=16,\n dim=2,\n initializer=self.initializer,\n combiner='mean',\n name='user')\n self.feature_config = (\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_video, name='watched'),\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_video, name='favorited'),\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_user, name='friends'))\n\n self.batch_size = 2\n self.data_batch_size = 4\n\n # One (global) batch of inputs\n # sparse tensor for watched:\n # row 0: 0\n # row 1: 0, 1\n # row 2: 0, 1\n # row 3: 1\n self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],\n [2, 0], [2, 1], [3, 0]]\n self.feature_watched_values = [0, 0, 1, 0, 1, 1]\n self.feature_watched_row_lengths = [1, 2, 2, 1]\n # sparse tensor for favorited:\n # row 0: 0, 1\n # row 1: 1\n # row 2: 0\n # row 3: 0, 1\n self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],\n [2, 0], [3, 0], [3, 1]]\n self.feature_favorited_values = [0, 1, 1, 0, 0, 1]\n self.feature_favorited_row_lengths = [2, 1, 1, 2]\n # sparse tensor for friends:\n # row 0: 3\n # row 1: 0, 1, 2\n # row 2: 3\n # row 3: 0, 1, 2\n self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],\n [2, 0], [3, 0], [3, 1], [3, 2]]\n self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]\n self.feature_friends_row_lengths = [1, 3, 1, 3]\n self.resolver = None\n\n def tearDown(self):\n if self.resolver:\n tpu_strategy_util.shutdown_tpu_system(self.resolver)\n super(TPUEmbeddingTest, self).tearDown()\n\n def test_tables_with_same_name(self):\n with self.assertRaisesRegex(\n ValueError, 'Multiple tables with name table found.'):\n with self._get_strategy().scope():\n tpu_embedding_v2.TPUEmbedding(\n (tpu_embedding_v2_utils.FeatureConfig(\n table=tpu_embedding_v2_utils.TableConfig(\n name='table',\n vocabulary_size=4,\n dim=2,\n initializer=self.initializer,),\n name='watched'),\n tpu_embedding_v2_utils.FeatureConfig(\n table=tpu_embedding_v2_utils.TableConfig(\n name='table',\n vocabulary_size=4,\n dim=2,\n initializer=self.initializer),\n name='favorited')),\n self.batch_size,\n tpu_embedding_v2_utils.SGD(learning_rate=0.1))\n\n def test_unsupported_optimizer(self):\n with self.assertRaisesRegex(\n ValueError, 'is an unsupported optimizer class.'):\n with self._get_strategy().scope():\n tpu_embedding_v2.TPUEmbedding(\n self.feature_config, self.batch_size,\n tpu_embedding.AdagradParameters(learning_rate=0.1))\n\n def test_pass_non_tensor_to_apply_gradients(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n @def_function.function\n def test_apply():\n mid_level_api.apply_gradients((1, 2, 3))\n\n with self.assertRaisesRegex(ValueError, 'Expected Tensor.'):\n strategy.run(test_apply)\n\n def test_pass_different_structure_to_apply_gradients(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n @def_function.function\n def test_apply():\n # This should be a tuple as feature_config is a tuple of 3 configs.\n mid_level_api.apply_gradients([1, 2, 3])\n\n with self.assertRaisesRegex(\n TypeError,\n 'The two structures don\\'t have the same nested structure.'):\n strategy.run(test_apply)\n\n def test_pass_none_to_apply_gradients(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n dataset = self._create_sparse_dataset(strategy)\n data = next(iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))))\n\n 
@def_function.function\n def embedding_and_set_gradients(data):\n mid_level_api.enqueue(data)\n def tpu_fn():\n results = mid_level_api.dequeue()\n mid_level_api.apply_gradients((None, None,\n array_ops.ones_like(results[2])))\n return results\n return strategy.run(tpu_fn)\n\n @def_function.function\n def embedding_only(data):\n mid_level_api.enqueue(data, training=False)\n def tpu_fn():\n return mid_level_api.dequeue()\n return strategy.run(tpu_fn)\n\n first = self._get_replica_numpy(\n embedding_and_set_gradients(data), strategy, 0)\n second = self._get_replica_numpy(embedding_only(data), strategy, 0)\n\n # First two features should be the same as None gradient was applied.\n # Third feature had gradient of 1 passed in from each core.\n # Each core received the same ids per core and returned the following batch:\n # [ row 3, row 0 + row 1 + row 2 ]\n # so gradient update was (learning rate = 0.1):\n # row 0: -1/3*0.1\n # row 1: -1/3*0.1\n # row 2: -1/3*0.1\n # row 3: -1*0.1\n # There is a factor of num_replicas because each replica gave an update.\n\n num_replicas = strategy.num_replicas_in_sync\n update = ([[0.0]], [[0.0]],\n [[0.1 * num_replicas], [0.1 / 3 * num_replicas]])\n golden = tuple([feature-np.array(up) for feature, up in zip(first, update)])\n\n self.assertAllClose(golden, second)\n\n def _get_strategy(self):\n self.resolver = tpu_cluster_resolver.TPUClusterResolver(\n tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)\n remote.connect_to_cluster(self.resolver)\n tpu_strategy_util.initialize_tpu_system(self.resolver)\n return tpu_strategy.TPUStrategy(self.resolver)\n\n def test_dequeue_on_cpu(self):\n mid_level_api = self._create_mid_level()\n with self.assertRaises(RuntimeError):\n mid_level_api.dequeue()\n\n def test_enqueue_on_cpu(self):\n mid_level_api = self._create_mid_level()\n features = {\n 'watched': sparse_tensor.SparseTensor(\n indices=self.feature_watched_indices,\n values=self.feature_watched_values,\n dense_shape=[2, 2])}\n with self.assertRaises(RuntimeError):\n mid_level_api.enqueue(features)\n\n def test_apply_gradients_on_cpu(self):\n mid_level_api = self._create_mid_level()\n with self.assertRaises(RuntimeError):\n mid_level_api.enqueue(None)\n\n def test_get_embedding_tables_on_cpu(self):\n mid_level_api = self._create_mid_level()\n self.assertEqual(\n set(mid_level_api.embedding_tables.keys()),\n set([self.table_video, self.table_user]))\n\n def test_get_embedding_tables_on_tpu(self):\n with self._get_strategy().scope():\n mid_level_api = self._create_mid_level()\n with self.assertRaises(RuntimeError):\n mid_level_api.embedding_tables()\n\n def test_enqueue_weight_for_dense_tensor(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n input_fn = self._create_dense_input_fn(strategy, include_weights=True)\n dist = strategy.experimental_distribute_datasets_from_function(\n input_fn,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))\n dist_iter = iter(dist)\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n features, weights = next(dist_iter)\n mid_level_api.enqueue(features, weights=weights, training=False)\n return strategy.run(step)\n\n with self.assertRaisesRegex(ValueError, 'Weight specified for dense input'):\n test_fn()\n\n def test_enqueue_wrong_weight_type_for_sparse_tensor(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy)\n ragged = 
self._create_ragged_dataset(strategy, include_weights=True)\n sparse_iter = iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n ragged_iter = iter(strategy.experimental_distribute_dataset(\n ragged,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n features = next(sparse_iter)\n _, weights = next(ragged_iter)\n mid_level_api.enqueue(features, weights=weights, training=False)\n return strategy.run(step)\n\n with self.assertRaisesRegex(\n ValueError, 'which does not match type input which is SparseTensor.'):\n test_fn()\n\n def test_enqueue_wrong_weight_type_for_ragged_tensor(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy, include_weights=True)\n ragged = self._create_ragged_dataset(strategy)\n sparse_iter = iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n ragged_iter = iter(strategy.experimental_distribute_dataset(\n ragged,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n _, weights = next(sparse_iter)\n features = next(ragged_iter)\n mid_level_api.enqueue(features, weights=weights, training=False)\n return strategy.run(step)\n\n with self.assertRaisesRegex(\n ValueError, 'which does not match type input which is RaggedTensor.'):\n test_fn()\n\n def test_enqueue_sparse_and_ragged(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy)\n ragged = self._create_ragged_dataset(strategy)\n sparse_iter = iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n ragged_iter = iter(strategy.experimental_distribute_dataset(\n ragged,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n sparse_features = next(sparse_iter)\n ragged_features = next(ragged_iter)\n features = (sparse_features[0], ragged_features[1], sparse_features[2])\n mid_level_api.enqueue(features, training=False)\n return strategy.run(step)\n\n with self.assertRaisesRegex(\n ValueError, 'Found both SparseTensors and RaggedTensors'):\n test_fn()\n\n def test_enqueue_incorrect_structure_for_features(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy)\n sparse_iter = iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n features = next(sparse_iter)\n features = (features[0],)\n mid_level_api.enqueue(features, training=False)\n return strategy.run(step)\n\n # The error here is raised from nest.assert_same_structure\n with self.assertRaises(ValueError):\n test_fn()\n\n def test_enqueue_incorrect_structure_for_weights(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy, include_weights=True)\n sparse_iter = 
iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n features, weights = next(sparse_iter)\n weights = (weights[0],)\n mid_level_api.enqueue(features, weights=weights, training=False)\n return strategy.run(step)\n\n # The error here is raised from nest.assert_same_structure\n with self.assertRaises(ValueError):\n test_fn()\n\n def test_enqueue_ragged_tensor(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n sparse = self._create_sparse_dataset(strategy)\n ragged = self._create_ragged_dataset(strategy)\n sparse_iter = iter(strategy.experimental_distribute_dataset(\n sparse,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n ragged_iter = iter(strategy.experimental_distribute_dataset(\n ragged,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def test_fn():\n def get_activations():\n return mid_level_api.dequeue()\n\n sparse_features = next(sparse_iter)\n ragged_features = next(ragged_iter)\n mid_level_api.enqueue(sparse_features, training=False)\n sparse_activations = strategy.run(get_activations)\n mid_level_api.enqueue(ragged_features, training=False)\n ragged_activations = strategy.run(get_activations)\n return sparse_activations, ragged_activations\n\n sparse_activations, ragged_activations = test_fn()\n\n # Extract per core numpy arrays and check that both sparse and ragged have\n # the same results.\n sparse0 = self._get_replica_numpy(sparse_activations, strategy, 0)\n ragged0 = self._get_replica_numpy(ragged_activations, strategy, 0)\n self.assertAllClose(sparse0, ragged0)\n\n def test_enqueue_cpu_tensor(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n input_fn = self._create_dense_input_fn(strategy)\n sparse_iter = iter(strategy.experimental_distribute_datasets_from_function(\n input_fn))\n\n @def_function.function\n def test_fn():\n def get_activations():\n return mid_level_api.dequeue()\n\n features = next(sparse_iter)\n mid_level_api.enqueue(features, training=False)\n activations = strategy.run(get_activations)\n return activations\n\n with self.assertRaisesRegex(ValueError, 'which is on a TPU input device'):\n test_fn()\n\n @parameterized.parameters([True, False])\n def test_enqueue_cpu_tensor_with_outside_compilation(self, use_mlir):\n if use_mlir:\n config.enable_mlir_bridge()\n\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n input_fn = self._create_dense_input_fn(strategy)\n sparse_iter = iter(strategy.experimental_distribute_datasets_from_function(\n input_fn))\n\n @def_function.function\n def test_fn():\n def get_activations(features):\n mid_level_api.enqueue(features, training=False)\n return mid_level_api.dequeue()\n\n activations = strategy.run(get_activations, args=(next(sparse_iter),))\n return activations\n\n with self.assertRaisesRegex(ValueError, 'which is on a TPU input device'):\n test_fn()\n\n @parameterized.parameters(True, False)\n def test_enqueue_with_weights(self, ragged):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n weight = 0.5\n if ragged:\n dataset = self._create_ragged_dataset(strategy, include_weights=True,\n weight=weight)\n else:\n dataset = self._create_sparse_dataset(strategy, include_weights=True,\n weight=weight)\n\n dataset_iter = 
iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def enqueue_and_get(features, weights):\n def get_activations():\n return mid_level_api.dequeue()\n mid_level_api.enqueue(features, weights=weights, training=False)\n return strategy.run(get_activations)\n\n features, weights = next(dataset_iter)\n # Replace the weight for the second feature by None to test.\n weights = (weights[0], None, weights[2])\n\n no_weights_activations = enqueue_and_get(features, weights=None)\n weights_activations = enqueue_and_get(features, weights=weights)\n\n # Extract per core numpy arrays.\n no_weights0 = self._get_replica_numpy(no_weights_activations, strategy, 0)\n weights0 = self._get_replica_numpy(weights_activations, strategy, 0)\n # videos table has sum combiner and users table has mean combiner.\n # i.e. users table lookups aren't affected by the weights as all the weights\n # are the same.\n # Tuple entry 0 and 1 are the watched and favorited features from the videos\n # table and entry 2 is the friends feature from the users table.\n # Note that None was passed as a weight for entry 1 so weight should have no\n # effect.\n weight = (0.5, 1.0, 1.0)\n golden = tuple([no_weight * w for no_weight, w in zip(no_weights0, weight)])\n\n self.assertAllClose(golden, weights0)\n\n @parameterized.parameters([True, False])\n def test_enqueue_with_outside_compilation(self, use_mlir):\n if use_mlir:\n config.enable_mlir_bridge()\n\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n dataset = self._create_sparse_dataset(strategy)\n dataset_iter = iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def enqueue_with_outside_compilation(data):\n def get_activations(features):\n mid_level_api.enqueue(features, training=False)\n return mid_level_api.dequeue()\n return strategy.run(get_activations, args=(data,))\n\n @def_function.function\n def enqueue_without_outside_compilation(data):\n def get_activations():\n return mid_level_api.dequeue()\n mid_level_api.enqueue(data, training=False)\n return strategy.run(get_activations)\n\n features = next(dataset_iter)\n\n activations_oc = enqueue_with_outside_compilation(features)\n activations = enqueue_without_outside_compilation(features)\n\n # Extract per core numpy arrays.\n activations_oc0 = self._get_replica_numpy(activations_oc, strategy, 0)\n activations0 = self._get_replica_numpy(activations, strategy, 0)\n\n self.assertAllClose(activations_oc0, activations0)\n\n @parameterized.parameters(True, False)\n def test_enqueue_with_outside_compilation_in_control_flow(self, use_mlir):\n if use_mlir:\n config.enable_mlir_bridge()\n\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n dataset = self._create_sparse_dataset(strategy)\n dataset_iter = iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n # This is one way to force the enqueue in some control flow. @tf.functions\n # aren't inlined in the calling tf.function. 
An alternative would be to\n # place the enqueue in a switch_v2 or something similar.\n @def_function.function\n def enqueue_fn(features):\n mid_level_api.enqueue(features, training=False)\n\n @def_function.function\n def enqueue_with_outside_compilation():\n def get_activations(features):\n enqueue_fn(features)\n return mid_level_api.dequeue()\n return strategy.run(get_activations, args=(next(dataset_iter),))\n\n with self.assertRaisesRegex(\n RuntimeError,\n 'does not match graph which contains TPUReplicateContext'):\n enqueue_with_outside_compilation()\n\n def test_enqueue_with_outside_compilation_non_direct_input(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n dataset = self._create_sparse_dataset(strategy)\n dataset_iter = iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def enqueue_with_outside_compilation():\n def get_activations(features):\n # This inserts a mul operation on the TPU to trigger the direct input\n # error.\n features = (features[0]*2, features[1]*2, features[2]*2)\n mid_level_api.enqueue(features, training=False)\n return mid_level_api.dequeue()\n return strategy.run(get_activations, args=(next(dataset_iter),))\n\n with self.assertRaisesRegex(\n ValueError, 'which does not have the `_tpu_input_identity` attr'):\n enqueue_with_outside_compilation()\n\n def test_enqueue_with_outside_compilation_auto_mode(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n dataset = self._create_sparse_dataset(strategy)\n dataset_iter = iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False)))\n\n @def_function.function\n def enqueue_with_no_gradient_apply(data):\n def get_activations(features):\n # Note the lack of setting training=False, so training defaults to true\n # here even though we don't have apply gradients.\n # We detect the correct mode based on which ops exist that share the\n # same 'name'.\n mid_level_api.enqueue(features, name='call1')\n return mid_level_api.dequeue(name='call1')\n return strategy.run(get_activations, args=(data,))\n\n @def_function.function\n def enqueue_with_gradient_apply(data):\n def get_activations(features):\n mid_level_api.enqueue(features, name='call2')\n activations = mid_level_api.dequeue(name='call2')\n # Apply an all ones gradient\n gradients = nest.map_structure(array_ops.ones_like, activations)\n mid_level_api.apply_gradients(gradients, name='call2')\n return activations\n return strategy.run(get_activations, args=(data,))\n\n data = next(dataset_iter)\n before_gradient_apply = enqueue_with_gradient_apply(data)\n after_gradient_apply = enqueue_with_no_gradient_apply(data)\n before_gradient_apply0 = self._get_replica_numpy(before_gradient_apply,\n strategy, 0)\n after_gradient_apply0 = self._get_replica_numpy(after_gradient_apply,\n strategy, 0)\n\n num_replicas = strategy.num_replicas_in_sync\n # We are passing a gradient of 1 for all lookups, optimizer is SGD with a\n # learning rate of 0.1. Feature 0 and 1 are looked up with a sum combiner\n # with the following ids:\n # Feature 0: [0, 0, 1], [0, 1, 1], ... repeated over num_replicas\n # Feature 1: [0, 1, 1], [0, 0, 1], ... repeated over num_replicas\n # i.e. 
Row 0 and 1 were looked up 3*num_replicas times over all cores and as\n # the gradient is 1, the accumulated gradient is 3*num_replicas for each\n # position in row 0 and 1 in table.\n #\n # See comments in test_pass_none_to_apply_gradients for the update to\n # Feature 2 and its table.\n # The *2 in the next tests are because those rows have 2 lookups vs\n # the 1 lookup in the other row.\n update = ([[0.3 * num_replicas], [0.3 * num_replicas * 2]],\n [[0.3 * num_replicas * 2], [0.3 * num_replicas]],\n [[0.1 * num_replicas], [0.1 / 3 * num_replicas]])\n golden = tuple([before - np.array(up) for before, up in\n zip(before_gradient_apply0, update)])\n\n self.assertAllClose(golden, after_gradient_apply0)\n\n def _create_strategy_and_mid_level(self, optimizer_name):\n strategy = self._get_strategy()\n\n with strategy.scope():\n if optimizer_name == 'sgd':\n optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)\n elif optimizer_name == 'adagrad':\n optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)\n elif optimizer_name == 'adam':\n optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)\n else:\n raise ValueError('optimizer is not recognized: ', optimizer_name)\n mid_level_api = self._create_mid_level(optimizer=optimizer)\n\n return strategy, mid_level_api, optimizer\n\n @parameterized.parameters(\n *itertools.product(\n ['sgd', 'adagrad', 'adam'],\n [True, False]))\n def test_embedding(self, optimizer_name, training):\n strategy, mid_level_api, optimizer = (\n self._create_strategy_and_mid_level(optimizer_name))\n\n dataset = self._create_sparse_dataset(strategy)\n dist = strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))\n dist_iter = iter(dist)\n\n @def_function.function\n def test_fn():\n\n def step():\n \"\"\"Create and run computation that returns the embedding activations.\"\"\"\n if not training:\n activations = mid_level_api.dequeue()\n total_loss = _get_total_loss_tensor(activations)\n ret_val = [total_loss] + list(activations)\n return ret_val\n else:\n with backprop.GradientTape() as tape:\n activations = mid_level_api.dequeue()\n tape.watch(activations)\n total_loss = _get_total_loss_tensor(activations)\n loss_per_replica = total_loss / strategy.num_replicas_in_sync\n gradients = tape.gradient(loss_per_replica, activations)\n mid_level_api.apply_gradients(gradients)\n ret_val = [total_loss] + list(activations)\n return ret_val\n\n mid_level_api.enqueue(next(dist_iter), training=training)\n result = strategy.run(step)\n return result\n\n # Run model.\n shard_out_val = test_fn()\n\n # Retrieve TPU weights to CPU.\n mid_level_api._retrieve_variables()\n\n # Compute sparse tensors for global batch.\n input_data = next(iter(self._create_sparse_dataset(strategy)))\n\n # Check results.\n self._check_results(strategy, shard_out_val, training, input_data,\n mid_level_api._variables,\n optimizer)\n\n def _create_mid_level(self, optimizer=None):\n # Create `TPUEmbedding` object.\n if optimizer is None:\n optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)\n\n num_replicas = (\n distribution_strategy_context.get_strategy().num_replicas_in_sync)\n return tpu_embedding_v2.TPUEmbedding(\n feature_config=self.feature_config,\n batch_size=self.batch_size * num_replicas,\n optimizer=optimizer)\n\n def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):\n # Create dataset for enqueue operation\n sparse_features = (\n sparse_tensor.SparseTensor(\n 
indices=self.feature_watched_indices,\n values=self.feature_watched_values,\n dense_shape=[self.data_batch_size, 2]),\n sparse_tensor.SparseTensor(\n indices=self.feature_favorited_indices,\n values=self.feature_favorited_values,\n dense_shape=[self.data_batch_size, 2]),\n sparse_tensor.SparseTensor(\n indices=self.feature_friends_indices,\n values=self.feature_friends_values,\n dense_shape=[self.data_batch_size, 3]))\n if include_weights:\n weights = []\n for sparse in sparse_features:\n values = (\n array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)\n weights.append(sparse_tensor.SparseTensor(\n indices=sparse.indices,\n values=values,\n dense_shape=sparse.dense_shape))\n sparse_features = (sparse_features, tuple(weights))\n\n dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)\n\n # Data is batched to self.data_batch_size, rebatch to global batch size.\n return dataset.unbatch().repeat().batch(\n self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)\n\n def _create_ragged_dataset(self, strategy, include_weights=False, weight=0.5):\n # Create dataset for enqueue operation\n ragged_features = (\n ragged_tensor.RaggedTensor.from_row_lengths(\n row_lengths=self.feature_watched_row_lengths,\n values=self.feature_watched_values),\n ragged_tensor.RaggedTensor.from_row_lengths(\n row_lengths=self.feature_favorited_row_lengths,\n values=self.feature_favorited_values),\n ragged_tensor.RaggedTensor.from_row_lengths(\n row_lengths=self.feature_friends_row_lengths,\n values=self.feature_friends_values))\n if include_weights:\n weights = []\n for ragged in ragged_features:\n weights.append(ragged.with_values(\n array_ops.ones_like(ragged.values, dtype=dtypes.float32) * weight))\n ragged_features = (ragged_features, tuple(weights))\n\n dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)\n\n # Data is batched to self.data_batch_size, rebatch to global batch size.\n return dataset.unbatch().repeat().batch(\n self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)\n\n def _create_dense_input_fn(self, strategy, include_weights=False, weight=0.5):\n\n def input_fn(ctx):\n del ctx\n features = (\n constant_op.constant(self.feature_watched_values[-2:],\n dtype=dtypes.int32),\n constant_op.constant(self.feature_favorited_values[-2:],\n dtype=dtypes.int32),\n constant_op.constant(self.feature_friends_values[-2:],\n dtype=dtypes.int32))\n if include_weights:\n weights = [array_ops.ones_like(t, dtype=dtypes.float32) * weight\n for t in features]\n features = (features, tuple(weights))\n return dataset_ops.DatasetV2.from_tensors(features).repeat()\n\n return input_fn\n\n def _check_results(self, strategy, shard_out_val, training, input_data,\n table_to_variable, optimizer):\n num_replicas = strategy.num_replicas_in_sync\n\n # Unpack the values `strategy.run()` returns.\n loss = _unpack(strategy, shard_out_val[0])\n activation_watched = _unpack(strategy, shard_out_val[1])\n activation_favorited = _unpack(strategy, shard_out_val[2])\n activation_friends = _unpack(strategy, shard_out_val[3])\n\n # Core 0:\n # Calculate the values of embedding activations.\n activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])\n activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])\n # Second row of `activation_friends_gold0` is the mean of the following.\n # row 0: 0 1\n # row 1: 2 3\n # row 2: 4 5\n activation_friends_gold0 = np.array([[6, 7], [2, 3]])\n\n loss_gold0 = _compute_loss(activation_watched_gold0,\n 
activation_favorited_gold0,\n activation_friends_gold0)\n\n # Add on values from other cores:\n # Activations for watched are an alternating sequence of\n # activation_watched_gold0 and activation_favorited_gold0.\n # For favorited it is the same but in the opposite order.\n activation_watched_gold = np.concatenate(\n (np.concatenate((np.expand_dims(activation_watched_gold0, axis=0),) *\n (num_replicas // 2)),\n np.concatenate((np.expand_dims(activation_favorited_gold0, axis=0),) *\n (num_replicas // 2))),\n axis=1).reshape([self.batch_size * num_replicas, 4])\n activation_favorited_gold = np.concatenate(\n (activation_watched_gold[self.batch_size:,],\n activation_watched_gold[0:self.batch_size,]))\n activation_friends_gold = np.concatenate(\n (activation_friends_gold0,) * num_replicas)\n\n loss_gold = [loss_gold0] * num_replicas\n\n # Test values.\n self.assertAllClose(activation_watched_gold, activation_watched)\n self.assertAllClose(activation_favorited_gold, activation_favorited)\n self.assertAllClose(activation_friends_gold, activation_friends)\n\n self.assertAllClose(loss_gold, loss)\n\n embedding_table_video_before = np.copy(\n np.reshape(self.embedding_values, [8, 4]))\n embedding_table_user_before = np.copy(\n np.reshape(self.embedding_values, [16, 2]))\n\n global_batch_size = self.batch_size * num_replicas\n if training:\n gradient_wrt_watched_gold = (2 * activation_watched_gold /\n global_batch_size)\n gradient_wrt_favorited_gold = (2 * activation_favorited_gold /\n global_batch_size)\n gradient_wrt_friends_gold = (2 * activation_friends_gold /\n global_batch_size)\n\n # Calculate gradients wrt embedding tables.\n gradients_wrt_user = (\n _compute_gradients_wrt_embedding_table(\n global_batch_size, gradient_wrt_friends_gold,\n embedding_table_user_before, input_data[2].indices.numpy(),\n input_data[2].values.numpy(), self.table_user.combiner))\n gradients_wrt_video = (\n _compute_gradients_wrt_embedding_table(\n global_batch_size, gradient_wrt_favorited_gold,\n embedding_table_video_before, input_data[1].indices.numpy(),\n input_data[1].values.numpy(), self.table_video.combiner) +\n _compute_gradients_wrt_embedding_table(\n global_batch_size, gradient_wrt_watched_gold,\n embedding_table_video_before, input_data[0].indices.numpy(),\n input_data[0].values.numpy(), self.table_video.combiner))\n\n self._check_embedding_and_slot_variables(embedding_table_user_before,\n gradients_wrt_user,\n embedding_table_video_before,\n gradients_wrt_video,\n optimizer,\n table_to_variable)\n\n def _check_embedding_and_slot_variables(self, embedding_table_user_before,\n gradients_wrt_user,\n embedding_table_video_before,\n gradients_wrt_video,\n optimizer,\n table_to_variable):\n if isinstance(optimizer, tpu_embedding_v2_utils.SGD):\n check_fn = self._check_embedding_and_slot_variables_for_sgd\n elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):\n check_fn = self._check_embedding_and_slot_variables_for_adagrad\n elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):\n check_fn = self._check_embedding_and_slot_variables_for_adam\n else:\n raise ValueError('optimizer is not recognized: ', type(optimizer))\n check_fn(embedding_table_user_before, gradients_wrt_user,\n optimizer, table_to_variable[self.table_user.name])\n check_fn(embedding_table_video_before, gradients_wrt_video,\n optimizer, table_to_variable[self.table_video.name])\n\n def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,\n gradients,\n optimizer,\n variables):\n embedding_table = 
np.copy(embedding_table_before)\n embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)\n self.assertAllClose(_get_variable(variables['parameters']).numpy(),\n embedding_table)\n\n def _check_embedding_and_slot_variables_for_adagrad(self,\n embedding_table_before,\n gradients,\n optimizer,\n variable):\n embedding_table = np.copy(embedding_table_before)\n accumulator = (\n optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)\n embedding_table -= (\n optimizer.learning_rate * np.sum(gradients, axis=0) /\n np.sqrt(accumulator))\n self.assertAllClose(_get_variable(variable['parameters']).numpy(),\n embedding_table)\n self.assertAllClose(_get_variable(variable['accumulators']).numpy(),\n accumulator)\n\n def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,\n gradients,\n optimizer,\n variable):\n embedding_table = np.copy(embedding_table_before)\n g = np.sum(gradients, axis=0)\n v = g**2 * (1 - optimizer.beta_2)\n m = g * (1 - optimizer.beta_1)\n epsilon = optimizer.epsilon\n # TPU Embeddings don't have the LR decay factor for Adam.\n lr_modifier = 1\n embedding_table -= (\n m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))\n self.assertAllClose(_get_variable(variable['parameters']).numpy(),\n embedding_table, rtol=1e-4)\n self.assertAllClose(_get_variable(variable['momenta']).numpy(),\n m, rtol=1e-4)\n self.assertAllClose(_get_variable(variable['velocities']).numpy(),\n v, rtol=1e-4)\n\n def _get_replica_numpy(self, structured, strategy, replica_id):\n def select_replica(x):\n x = strategy.experimental_local_results(x)\n if len(x) == 1:\n return x[0].numpy()\n return x[replica_id].numpy()\n return nest.map_structure(select_replica, structured)\n\n def test_dense_lookup(self):\n strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')\n\n input_fn = self._create_dense_input_fn(strategy)\n dist = strategy.experimental_distribute_datasets_from_function(\n input_fn,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))\n dist_iter = iter(dist)\n\n @def_function.function\n def test_fn():\n def step():\n return mid_level_api.dequeue()\n\n mid_level_api.enqueue(next(dist_iter), training=False)\n return strategy.run(step)\n\n # Run model.\n shard0 = self._get_replica_numpy(test_fn(), strategy, 0)\n\n # embedding_values is a linear list, so we reshape to match the correct\n # shape of the corresponding table before performing the lookup.\n numpy_videos = np.reshape(self.embedding_values, (8, 4))\n numpy_users = np.reshape(self.embedding_values, (16, 2))\n golden = ((numpy_videos[self.feature_watched_values[-2:]],\n numpy_videos[self.feature_favorited_values[-2:]],\n numpy_users[self.feature_friends_values[-2:]]))\n self.assertAllClose(shard0, golden)\n\n def test_variable_learning_rate(self):\n num_steps = 10\n num_steps_float = float(num_steps)\n starting_lr = 1.0\n ending_lr = 0.5\n\n strategy = self._get_strategy()\n num_replicas = strategy.num_replicas_in_sync\n\n # Create model with Keras.\n with strategy.scope():\n step_counter = tf_variables.Variable(0.0, dtypes.float32)\n\n def lr_function():\n return gen_math_ops.maximum(\n ending_lr,\n starting_lr + ((ending_lr - starting_lr) * step_counter) /\n num_steps_float)\n\n optimizer = tpu_embedding_v2_utils.SGD(learning_rate=lr_function)\n table_config = tpu_embedding_v2_utils.TableConfig(\n vocabulary_size=num_replicas,\n dim=4,\n initializer=init_ops_v2.Constant(np.zeros((num_replicas, 4))),\n combiner='sum', name='table')\n 
mid_level_api = tpu_embedding_v2.TPUEmbedding(\n feature_config={\n 'feature': tpu_embedding_v2_utils.FeatureConfig(\n table=table_config, name='feature')},\n batch_size=num_replicas,\n optimizer=optimizer)\n\n feature = {'feature': constant_op.constant([0], dtype=dtypes.int32)}\n\n def input_fn(ctx):\n del ctx\n return dataset_ops.DatasetV2.from_tensors(feature).repeat()\n dist = strategy.experimental_distribute_datasets_from_function(\n input_fn,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))\n dist_iter = iter(dist)\n\n @def_function.function\n def test_fn():\n def step():\n with backprop.GradientTape() as tape:\n activations = mid_level_api.dequeue()\n tape.watch(activations)\n result = math_ops.reduce_sum(activations['feature'])\n loss = result / num_replicas\n grads = tape.gradient(loss, activations)\n mid_level_api.apply_gradients(grads)\n return activations['feature']\n\n mid_level_api.enqueue(next(dist_iter), training=True)\n return strategy.run(step)\n\n # Run model.\n results = []\n for _ in range(num_steps):\n result = test_fn()\n results.append(_unpack(strategy, result))\n step_counter.assign_add(1.0)\n\n # Table is 2 elements wide, per-replica batch size of 1, with id 0.\n # Loss for the gradient is the sum of the entries divided by the number of\n # replicas. Thus the per replica gradient is 1/#of replicas for row 0 and no\n # other updates. The reduced gradient is therefore 1.\n # Learning rate schedule over num_steps steps:\n # 1.0 0.95 0.9 0.85 0.8 ...\n # Since we use SGD and the gradient is one, the first row of the table is\n # [0, 0] [-1.0, -1.0] [-1.95, -1.95] [-2.85, -2.85] ... (the negative\n # partial sums of the above).\n\n learning_rates = [starting_lr - (starting_lr - ending_lr) / num_steps * j\n for j in range(num_steps)]\n cumsum = [sum(learning_rates[0:j]) for j in range(num_steps)]\n goldens = [[[-cumsum[i]] * table_config.dim] * num_replicas\n for i in range(10)]\n self.assertAllClose(results, goldens)\n\n @parameterized.parameters([True, False])\n def test_optimizer_with_slot_creation_fn(self, use_tpu):\n def slot_creation_fn(table, slot_names):\n slots = {}\n for slot in slot_names:\n slots[slot] = tf_variables.Variable(\n name='{}_{}'.format(table.name, slot),\n initial_value=functools.partial(\n init_ops_v2.Zeros(), shape=table.shape, dtype=dtypes.float32),\n trainable=False)\n return slots\n optimizer = tpu_embedding_v2_utils.Adagrad(\n learning_rate=0.1,\n slot_variable_creation_fn=slot_creation_fn)\n if use_tpu:\n strategy = self._get_strategy()\n else:\n strategy = distribution_strategy_context.get_strategy()\n num_replicas = strategy.num_replicas_in_sync\n with strategy.scope():\n mid_level = tpu_embedding_v2.TPUEmbedding(\n feature_config=self.feature_config,\n batch_size=self.batch_size * num_replicas,\n optimizer=optimizer)\n video_accumulator = mid_level._variables['video']['accumulators']\n user_accumulator = mid_level._variables['user']['accumulators']\n if use_tpu:\n # To check the table contents (ensure that it is zero rather than the\n # normal initial accumulator value specified in the optimizer config),\n # we need to select the underlying table variable on TPU.\n # We only have one shard on Forge.\n video_accumulator = video_accumulator.variables[0]\n user_accumulator = user_accumulator.variables[0]\n\n self.assertAllClose(video_accumulator.numpy(),\n np.zeros((self.table_video.vocabulary_size,\n self.table_video.dim)))\n self.assertAllClose(user_accumulator.numpy(),\n 
np.zeros((self.table_user.vocabulary_size,\n self.table_user.dim)))\n\n def test_optimizer_with_slot_creation_fn_non_partial(self):\n def slot_creation_fn(table, slot_names):\n slots = {}\n for slot in slot_names:\n # Note that we don't pass functools.partial here, so on TPU we can't\n # extract the shape. We expect the error below.\n slots[slot] = tf_variables.Variable(\n name='{}_{}'.format(table.name, slot),\n initial_value=init_ops_v2.Zeros()(shape=table.shape,\n dtype=dtypes.float32),\n trainable=False)\n return slots\n optimizer = tpu_embedding_v2_utils.Adagrad(\n learning_rate=0.1,\n slot_variable_creation_fn=slot_creation_fn)\n strategy = self._get_strategy()\n num_replicas = strategy.num_replicas_in_sync\n with strategy.scope():\n with self.assertRaisesRegex(ValueError,\n 'Unable to extract initializer function'):\n tpu_embedding_v2.TPUEmbedding(\n feature_config=self.feature_config,\n batch_size=self.batch_size*num_replicas,\n optimizer=optimizer)\n\n def test_sequence_embeddings(self):\n feature_config = (\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_video, name='watched',\n max_sequence_length=2),\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_video, name='favorited',\n max_sequence_length=2),\n tpu_embedding_v2_utils.FeatureConfig(\n table=self.table_user, name='friends',\n max_sequence_length=3))\n optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)\n strategy = self._get_strategy()\n num_replicas = strategy.num_replicas_in_sync\n with strategy.scope():\n mid_level = tpu_embedding_v2.TPUEmbedding(\n feature_config=feature_config,\n batch_size=self.batch_size * num_replicas,\n optimizer=optimizer)\n\n dataset = self._create_sparse_dataset(strategy)\n data = next(iter(strategy.experimental_distribute_dataset(\n dataset,\n options=distribute_lib.InputOptions(\n experimental_prefetch_to_device=False))))\n\n @def_function.function\n def embedding_and_set_gradients(data):\n def tpu_fn():\n activations = mid_level.dequeue()\n mid_level.apply_gradients(nest.map_structure(array_ops.ones_like,\n activations))\n return activations\n mid_level.enqueue(data)\n return strategy.run(tpu_fn)\n\n @def_function.function\n def embedding_only(data):\n def tpu_fn():\n return mid_level.dequeue()\n mid_level.enqueue(data)\n return strategy.run(tpu_fn)\n\n # Only check core 0.\n before_update = self._get_replica_numpy(\n embedding_and_set_gradients(data), strategy, 0)\n after_update = self._get_replica_numpy(embedding_only(data), strategy, 0)\n\n # For videos table, row 0 and row 1 are looked up 3*num_replicas times as\n # they occur 3 times per replica (considering the features 0 and 1 which are\n # both looked up in the videos table).\n # Feature 0 has ids [0, 0, 1], [0, 1, 1], ... repeated over num_replicas\n # Feature 1 has ids [0, 1, 1], [0, 0, 1], ... repeated over num_replicas\n # This means that both rows 0 and 1 get a -0.1*3*num_replicas update\n # For users table, each row is looked up twice:\n # Feature 2 has ids [3, 0, 1, 2], .. 
repeated over num_replicas\n # This means that we get a -0.1*num_replicas update to the third feature.\n\n # In general this means that after the update, if we lookup feature 0 and 1\n # the values will be 0.3*num_replicas lower per entry and for feature 2 they\n # will be 0.1*num_replicas lower.\n # The one issue that that these lookups contain padding values.\n # For core 0, we get the first 2 elements of the 4 element batch.\n # For feature 0, the indices are [[0, 0], [1, 0], [1, 1]] with max sequence\n # length of 2, which means that [0, 1] will be 0s.\n # For feature 1, the indices are [[0, 0], [0, 1], [1, 0]] with max sequence\n # length of 2, which means that [1, 1] will be 0s.\n # For feature 2, the indices are [[0, 0], [1, 0], [1, 1], [1, 2]] with max\n # sequence length of 3, which means that [0, 1], [0, 2] will be 0s.\n # The following masks represent that so that we only apply the above updates\n # to the non-padding rows:\n masks = (\n np.array([[[1], [0]], [[1], [1]]]),\n np.array([[[1], [1]], [[1], [0]]]),\n np.array([[[1], [0], [0]], [[1], [1], [1]]]))\n\n per_row_update = (0.3 * num_replicas,\n 0.3 * num_replicas,\n 0.1 * num_replicas)\n golden = tuple([before - update * mask for before, update, mask in\n zip(before_update, per_row_update, masks)])\n self.assertAllClose(golden, after_update)\n\n\ndef _compute_gradients_wrt_embedding_table(batch_size,\n gradient_wrt_activation,\n embedding_table,\n feature_indices,\n feature_values,\n combiner,\n max_sequence_length=0):\n \"\"\"Compute gradients wrt embedding_table.\n\n Args:\n batch_size: `int`, batch size.\n gradient_wrt_activation: `np.array` with shape `batch_size` by\n embedding `dimension`.\n embedding_table: `np.array` with shape `vocabulary_size` by embedding\n `dimension`.\n feature_indices: `indices` as used to construct `SparseTensor`.\n feature_values: `values` as used to construct `SparseTensor`.\n combiner: `String`, 'mean' or 'sum'.\n max_sequence_length: If non-zero, a sequence feature with the given length.\n\n Returns:\n Gradients wrt `embedding_table`, an `np.array`s with shape\n `batch_size` by `vocabulary_size` by\n embedding `dimension`.\n\n Raises:\n ValueError: if `combiner` is not one of 'mean' or 'sum'.\n \"\"\"\n if combiner not in ('mean', 'sum'):\n raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))\n grads = []\n for i in range(batch_size):\n grad = np.zeros_like(embedding_table)\n count = 0\n for (batch_i, seq_index), vocabulary_id in zip(feature_indices,\n feature_values):\n if batch_i == i:\n count += 1\n if max_sequence_length > 0:\n if seq_index < max_sequence_length:\n grad[vocabulary_id, :] += gradient_wrt_activation[i, seq_index, :]\n else:\n grad[vocabulary_id, :] += gradient_wrt_activation[i, :]\n if combiner == 'mean' and not max_sequence_length:\n grad = grad / count\n grads.append(grad)\n return np.stack(grads)\n\n\ndef _unpack(strategy, per_replica_output):\n per_replica_output = strategy.experimental_local_results(per_replica_output)\n per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()\n return per_replica_output\n\n\ndef _get_total_loss_tensor(activations):\n losses = []\n for activation in activations:\n losses.append(\n math_ops.reduce_mean(\n math_ops.reduce_sum(\n gen_math_ops.squared_difference(activation, 0), 1)))\n total_loss = array_ops.expand_dims_v2(sum(losses), 0)\n return total_loss\n\n\ndef _compute_loss(activation_watched, activation_favorited, activation_friends):\n watched_loss = 
np.mean(np.sum(activation_watched**2, axis=1))\n if len(activation_favorited.shape) == 2:\n favorited_loss = np.mean(np.sum(activation_favorited**2, axis=1))\n else:\n favorited_loss = np.mean(np.sum(activation_favorited**2, axis=(1, 2)))\n if len(activation_friends.shape) == 2:\n friends_loss = np.mean(np.sum(activation_friends**2, axis=1))\n else:\n friends_loss = np.mean(np.sum(activation_friends**2, axis=(1, 2)))\n loss = watched_loss + favorited_loss + friends_loss\n return loss\n\n\ndef _get_tmpdir(name, subdir=''):\n segments = [FLAGS.model_dir, name] + ([subdir] if subdir else [])\n return os.path.join(*segments)\n\n\ndef _get_variable(variable):\n if isinstance(variable, tpu_embedding_v2.TPUShardedVariable):\n return variable.variables[0]\n return variable\n\n\nif __name__ == '__main__':\n v2_compat.enable_v2_behavior()\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the input_lib library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport threading\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute import parameter_server_strategy\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor as ragged_tensor_lib\nfrom tensorflow.python.util import nest\n\n\nclass DistributedIteratorTestBase(test.TestCase):\n\n # The passed input_context is to create a sharded dataset in between-graph\n # case.\n def _wrap_iterator(self,\n input_type,\n dataset_or_input_fn,\n input_workers,\n devices,\n split_batch_by,\n strategy,\n input_context=None):\n # The `input_context` passed in is to shard dataset for\n # MultiWorkerMirroredStrategy. 
It doesn't apply to in-graph case where\n # multiple InputContexts are needed.\n if input_type == \"input_fn\":\n self.assertIsNone(\n input_context,\n msg=(\"`The input_context` arg is only used to shard dataset in \"\n \"`MultiWorkerMirroredStrategy` when the input type is dataset.\"))\n\n input_contexts = []\n for i in range(input_workers.num_workers):\n input_contexts.append(\n distribute_lib.InputContext(\n # Note: `input_workers.num_workers` is always 1 in between-graph\n # case.\n num_input_pipelines=input_workers.num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=len(devices)))\n\n iterator = input_lib.InputFunctionIterator(\n dataset_or_input_fn,\n input_workers,\n input_contexts,\n strategy)\n else:\n iterator = input_lib.DatasetIterator(\n dataset_or_input_fn,\n input_workers,\n strategy,\n split_batch_by=split_batch_by,\n input_context=input_context)\n return iterator\n\n def _wrap_dataset(self,\n input_type,\n dataset,\n input_workers,\n split_batch_by,\n strategy,\n input_context=None):\n if input_type == \"dataset\":\n if tf2.enabled():\n return input_lib.DistributedDataset(\n dataset,\n input_workers,\n strategy,\n split_batch_by=split_batch_by,\n input_context=input_context)\n else:\n return input_lib.DistributedDatasetV1(\n dataset,\n input_workers,\n strategy,\n split_batch_by=split_batch_by,\n input_context=input_context)\n else:\n return strategy.experimental_distribute_datasets_from_function(dataset)\n\n def _test_input_iteration(self,\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n strategy,\n sess=None,\n split_batch_by=None,\n input_context=None):\n if iteration_type == \"for_loop\" and not context.executing_eagerly():\n self.skipTest(\"unsupported test combination.\")\n\n if api_type == \"wrap_into_iterator\" and iteration_type == \"for_loop\":\n self.skipTest(\"unsupported test combination.\")\n\n if api_type == \"wrap_into_iterator\" and input_type == \"input_fn\":\n self.skipTest(\"unsupported test combination.\")\n\n devices = nest.flatten([ds for _, ds in worker_device_pairs])\n input_workers = input_lib.InputWorkers(worker_device_pairs)\n\n if api_type == \"wrap_into_iterator\":\n iterator = self._wrap_iterator(\n input_type,\n dataset_or_input_fn,\n input_workers,\n devices,\n split_batch_by,\n strategy,\n input_context=input_context)\n else:\n # wrapping into a dataset:\n dataset = self._wrap_dataset(\n input_type,\n dataset_or_input_fn,\n input_workers,\n split_batch_by,\n strategy,\n input_context=input_context)\n\n if ops.executing_eagerly_outside_functions():\n iterator = iter(dataset)\n else:\n if isinstance(dataset, input_lib.DistributedDatasetV1):\n iterator = dataset.make_initializable_iterator()\n else:\n self.skipTest(\"unsupported test combination\")\n\n if isinstance(iterator, composite_tensor.CompositeTensor):\n nest.assert_same_structure(iterator, iterator._type_spec,\n expand_composites=True)\n\n if iteration_type == \"get_next\":\n evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)\n if not ops.executing_eagerly_outside_functions():\n evaluate(control_flow_ops.group(iterator.initializer))\n\n for expected_value in expected_values:\n next_element = iterator.get_next()\n computed_value = evaluate(\n [distribute_utils.select_replica(r, next_element)\n for r in range(len(devices))])\n self.assertEqual(len(expected_value), len(computed_value))\n for i in range(len(expected_value)):\n self.assertAllEqual(expected_value[i], computed_value[i])\n\n with 
self.assertRaises(errors.OutOfRangeError):\n next_element = iterator.get_next()\n evaluate(\n [distribute_utils.select_replica(r, next_element)\n for r in range(len(devices))])\n\n # After re-initializing the iterator, should be able to iterate again.\n if not ops.executing_eagerly_outside_functions():\n evaluate(control_flow_ops.group(iterator.initializer))\n else:\n if api_type == \"wrap_into_iterator\":\n self.skipTest(\"unsupported test combination\")\n else:\n iterator = iter(dataset)\n\n for expected_value in expected_values:\n next_element = iterator.get_next()\n computed_value = evaluate(\n [distribute_utils.select_replica(r, next_element)\n for r in range(len(devices))])\n self.assertEqual(len(expected_value), len(computed_value))\n for i in range(len(expected_value)):\n self.assertAllEqual(expected_value[i], computed_value[i])\n\n if iteration_type == \"for_loop\" and context.executing_eagerly():\n actual_values = []\n for x in dataset:\n computed_value = self.evaluate(\n [distribute_utils.select_replica(r, x)\n for r in range(len(devices))])\n actual_values.append(computed_value)\n for i, expected_value in enumerate(expected_values):\n self.assertEqual(len(expected_value), len(actual_values[i]))\n for j in range(len(expected_value)):\n self.assertAllEqual(expected_value[j], actual_values[i][j])\n\n def _create_dataset_or_input_fn(self, input_type, input_fn):\n if input_type == \"input_fn\":\n return input_fn\n else:\n return input_fn(distribute_lib.InputContext())\n\n\nclass DistributedIteratorSingleWorkerTest(DistributedIteratorTestBase,\n parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu\n ]))\n def testDisablingOwnedIteratorsInTF2(self, distribution, input_type):\n if not tf2.enabled():\n self.skipTest(\"unsupported test combination\")\n\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:CPU:0\"])]\n input_workers = input_lib.InputWorkers(worker_device_pairs)\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n input_workers = input_lib.InputWorkers(worker_device_pairs)\n if input_type == \"dataset\":\n dist_dataset = input_lib.get_distributed_dataset(dataset_or_input_fn,\n input_workers,\n distribution)\n else:\n dist_dataset = input_lib.get_distributed_datasets_from_function(\n dataset_or_input_fn, input_workers, [distribute_lib.InputContext()],\n distribution)\n\n # Default Iterator types in TF2.\n iterator = iter(dist_dataset)\n self.assertIsInstance(iterator, input_lib.DistributedIterator)\n self.assertIsInstance(iterator._iterators[0],\n input_lib._SingleWorkerOwnedDatasetIterator)\n\n # Disable creating owned iterators by setting a property on the strategy.\n distribution._enable_legacy_iterators = True\n iterator = iter(dist_dataset)\n self.assertIsInstance(iterator, input_lib.DistributedIteratorV1)\n self.assertIsInstance(iterator._iterators[0],\n input_lib._SingleWorkerDatasetIterator)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu\n ]))\n def testMultiDeviceIterInitialize(self, distribution):\n if tf2.enabled():\n self.skipTest(\"Only V1 is supported.\")\n worker_device_pairs = [(\"/device:CPU:0\", 
[\"/device:GPU:0\",\n \"/device:CPU:0\"])]\n dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)\n\n input_workers = input_lib.InputWorkers(worker_device_pairs)\n\n dist_dataset = input_lib.get_distributed_dataset(\n dataset_fn(distribute_lib.InputContext()), input_workers, distribution)\n\n iterator = dataset_ops.make_one_shot_iterator(dist_dataset)\n\n @def_function.function\n def init_func_for_iter():\n self.evaluate(iterator.initializer)\n\n init_func_for_iter()\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu\n ],\n enable_get_next_as_optional=[True, False]))\n def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,\n enable_get_next_as_optional):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:CPU:0\"])]\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)\n else:\n dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n expected_values = [[i] for i in range(10)]\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu\n ],\n enable_get_next_as_optional=[True, False]))\n def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type,\n distribution, enable_get_next_as_optional):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:GPU:0\",\n \"/device:CPU:0\"])]\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(10)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n expected_values = [[i, i+1] for i in range(0, 10, 2)]\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n distribution=[strategy_combinations.tpu_strategy],\n enable_get_next_as_optional=[True, False]))\n def testTPU(self, input_type, api_type, iteration_type, distribution,\n enable_get_next_as_optional):\n worker_device_pairs = collections.OrderedDict()\n for tpu_device in distribution.extended.worker_devices:\n host_device = device_util.get_host_for_device(tpu_device)\n worker_device_pairs.setdefault(host_device, [])\n worker_device_pairs[host_device].append(tpu_device)\n worker_device_pairs = worker_device_pairs.items()\n if tf2.enabled():\n dataset_fn = 
lambda _: dataset_ops.DatasetV2.range(10)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(10)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n expected_values = [[i, i + 1] for i in range(0, 10, 2)]\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu\n ],\n enable_get_next_as_optional=[True, False]))\n def testTupleDataset(self, input_type, api_type, iteration_type, distribution,\n enable_get_next_as_optional):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:GPU:0\",\n \"/device:CPU:0\"])]\n\n def dataset_fn(ctx):\n del ctx\n if tf2.enabled():\n dataset1 = dataset_ops.DatasetV2.range(10)\n dataset2 = dataset_ops.DatasetV2.range(10).map(lambda x: x**2)\n return dataset_ops.DatasetV2.zip((dataset1, dataset2))\n else:\n dataset1 = dataset_ops.Dataset.range(10)\n dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)\n return dataset_ops.Dataset.zip((dataset1, dataset2))\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n expected_values = [[(i, i**2), (i+1, (i+1)**2)] for i in range(0, 10, 2)]\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu\n ]))\n def testIterableIterator(self, distribution):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:CPU:0\"])]\n input_workers = input_lib.InputWorkers(worker_device_pairs)\n\n dataset = dataset_ops.DatasetV2.range(10)\n dist_dataset = input_lib.get_distributed_dataset(dataset, input_workers,\n distribution)\n\n iterator = iter(dist_dataset)\n for i, element in enumerate(iterator):\n self.assertEqual(i, element.numpy())\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n drop_remainder=[True, False],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu\n ]))\n def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,\n drop_remainder, distribution):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:GPU:0\",\n \"/device:CPU:0\"])]\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch( # pylint: disable=g-long-lambda\n 2, drop_remainder=drop_remainder)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch( # pylint: disable=g-long-lambda\n 2, drop_remainder=drop_remainder)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n # 
The last global batch only contains data for one replica.\n if drop_remainder:\n expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]\n else:\n expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]\n distribution.extended.experimental_enable_get_next_as_optional = True\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\", \"eager\"],\n input_type=[\"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n split_batch_by=[None, 2],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu\n ],\n enable_get_next_as_optional=[True, False]))\n def testBatchSplitting(self, input_type, api_type, iteration_type,\n split_batch_by, distribution,\n enable_get_next_as_optional):\n worker_device_pairs = [(\"/device:CPU:0\", [\"/device:GPU:0\",\n \"/device:CPU:0\"])]\n batch_size = 10\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(100).batch(batch_size)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n updated_batch_size = (\n batch_size // split_batch_by if split_batch_by else batch_size)\n expected_values = [[range(i, i+updated_batch_size),\n range(i+updated_batch_size, i+2*updated_batch_size)]\n for i in range(0, 100, updated_batch_size*2)]\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_device_pairs,\n expected_values,\n distribution,\n sess=None,\n split_batch_by=split_batch_by)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n ))\n def testCacheAcrossIteration(self, distribution):\n if not tf2.enabled():\n self.skipTest(\"Only V2 is supported.\")\n\n dataset = dataset_ops.Dataset.range(10).shuffle(10).cache().batch(2)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n\n first_epoch = list(\n distribution.experimental_local_results(x) for x in dist_dataset)\n second_epoch = list(\n distribution.experimental_local_results(x) for x in dist_dataset)\n\n self.assertAllEqual(first_epoch, second_epoch)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ],\n reshuffle=[True, False]))\n def testShuffleAcrossIterations(self, distribution, reshuffle):\n if not tf2.enabled():\n self.skipTest(\"Only V2 is supported.\")\n\n if not reshuffle and not compat.forward_compatible(2020, 5, 22):\n self.skipTest(\"Functionality currently not supported.\")\n\n dataset = dataset_ops.Dataset.range(10).shuffle(\n 10, reshuffle_each_iteration=reshuffle).batch(2)\n 
dist_dataset = distribution.experimental_distribute_dataset(dataset)\n\n first_epoch = list(\n distribution.experimental_local_results(x) for x in dist_dataset)\n second_epoch = list(\n distribution.experimental_local_results(x) for x in dist_dataset)\n\n if reshuffle:\n self.assertNotAllEqual(first_epoch, second_epoch)\n else:\n self.assertAllEqual(first_epoch, second_epoch)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy,\n strategy_combinations.central_storage_strategy_with_two_gpus,\n ]))\n def testGetNextOptionalShape(self, distribution):\n batch_size = 8\n dataset = dataset_ops.DatasetV2.from_tensor_slices({\n \"feature\": array_ops.ones([batch_size, 10]),\n \"label\": array_ops.ones([batch_size]),\n })\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n per_replica_batch_size = batch_size // distribution.num_replicas_in_sync\n\n @def_function.function\n def train_fn():\n for data in dist_dataset:\n data = nest.map_structure(distribution.experimental_local_results, data)\n feature = data[\"feature\"]\n label = data[\"label\"]\n\n # Asser the shapes are still staic from all replicas.\n for replica_id in range(distribution.num_replicas_in_sync):\n self.assertEqual([per_replica_batch_size, 10],\n feature[replica_id].shape)\n self.assertEqual([per_replica_batch_size], label[replica_id].shape)\n\n train_fn()\n\n\nclass DistributedIteratorTensorTypeTest(DistributedIteratorTestBase,\n parameterized.TestCase):\n \"\"\"Tests for DistributedDataset with non-dense tensors.\"\"\"\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu,\n ],\n input_type=[\"dataset\", \"input_fn\"],\n drop_remainder=[False, True],\n defun_type=[\"lambda\", \"tf_function\"],\n ))\n def testRaggedSparse(self, distribution, input_type, drop_remainder,\n defun_type):\n \"\"\"Test with `RaggedTensor`s and `SparseTensor`s.\"\"\"\n if not tf2.enabled():\n self.skipTest(\"Only V2 is supported.\")\n\n defun = {\"lambda\": lambda f: f,\n \"tf_function\": def_function.function}[defun_type]\n distribution.extended.experimental_enable_get_next_as_optional = True\n global_batch_size = 8\n\n def dataset_fn(ctx=None):\n ctx = ctx or distribute_lib.InputContext()\n batch_size = ctx.get_per_replica_batch_size(global_batch_size)\n # Use 20 which isn't divisible by 8 to test partial batch behavior.\n row_lengths = np.mod(np.arange(20), 4).astype(np.int64)\n ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(\n np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)\n dataset = dataset_ops.DatasetV2.from_tensor_slices({\n \"dense\": ragged_tensor.to_tensor(),\n \"ragged\": ragged_tensor,\n \"sparse\": ragged_tensor.to_sparse(),\n })\n dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)\n return dataset.batch(batch_size, drop_remainder=drop_remainder)\n\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n dataset = self._wrap_dataset(input_type, dataset_or_input_fn,\n distribution.extended._input_workers,\n len(distribution.extended.worker_devices),\n distribution)\n # Assert that the 
tensors are rebatched and sparsity is preserved.\n per_replica_batch = defun(lambda x: next(iter(x)))(dataset)\n self.assertAllEqual(\n distribute_utils.select_replica(0, per_replica_batch[\"dense\"]),\n [[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])\n self.assertAllEqual(\n distribute_utils.select_replica(1, per_replica_batch[\"dense\"]),\n [[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])\n # Transitively check the ragged and sparse tensors by densification.\n for i in range(2):\n self.assertLen(\n distribute_utils.select_replica(i,\n per_replica_batch[\"ragged\"]).values,\n 6)\n self.assertAllEqual(\n distribute_utils.select_replica(\n i, per_replica_batch[\"ragged\"]).to_tensor(),\n distribute_utils.select_replica(i, per_replica_batch[\"dense\"]))\n self.assertLen(\n distribute_utils.select_replica(i,\n per_replica_batch[\"sparse\"]).indices,\n 6)\n self.assertAllEqual(\n sparse_ops.sparse_tensor_to_dense(\n distribute_utils.select_replica(i, per_replica_batch[\"sparse\"])),\n distribute_utils.select_replica(i, per_replica_batch[\"dense\"]))\n # Iterate through all the batches and sum them up.\n def sum_batch(per_replica_features):\n \"\"\"Sums the `PerReplica` values in the `per_replica_features` map.\"\"\"\n\n def map_fn(per_replica_values):\n per_replica_sums = distribution.run(\n (lambda x: math_ops.reduce_sum(x.values)) if all(\n map(sparse_tensor.is_sparse, per_replica_values.values)) else\n math_ops.reduce_sum, (per_replica_values,))\n return distribution.reduce(\n reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)\n\n return nest.map_structure(map_fn, per_replica_features)\n\n def _reduce(state, batch):\n sums = sum_batch(batch)\n return {name: value + sums[name] for name, value in state.items()}\n\n def sum_for_loop(dataset):\n sums = {\"dense\": 0., \"ragged\": 0., \"sparse\": 0.}\n for batch in dataset:\n sums = _reduce(sums, batch)\n return sums\n\n def sum_while_loop(iterator, reduce_fn):\n sums = {\"dense\": 0., \"ragged\": 0., \"sparse\": 0.}\n while True:\n try:\n sums = reduce_fn(sums, iterator)\n except (StopIteration, errors.OutOfRangeError):\n return sums\n\n while_sums = sum_while_loop(\n iter(dataset),\n defun(lambda state, iterator: _reduce(state, next(iterator))))\n self.assertAllEqual(\n nest.flatten(while_sums),\n # When there's no partial batch, the sum is smaller.\n [200. if drop_remainder else 310.] 
* 3)\n for_sums = defun(sum_for_loop)(dataset)\n # For loops always call get next as optional inside tf functions, so we\n # expect 310 here when using an input function (as there are 5 batches of\n # size 4 round robined over 2 replicas.\n expected_for_sum = 200.\n if (not drop_remainder or (\n defun_type == \"tf_function\" and input_type == \"input_fn\")):\n expected_for_sum = 310.\n self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"eager\"],\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.central_storage_strategy_with_gpu_and_cpu,\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_one_cpu\n ],\n input_type=[\"dataset\", \"input_fn\"],\n drop_remainder=[False, True],\n tensor_type=[\"sparse\", \"ragged\"],\n enable_get_next_as_optional=[True, False]\n ))\n def testRaggedSparseGetNextAsOptional(\n self, distribution, input_type, drop_remainder, tensor_type,\n enable_get_next_as_optional):\n \"\"\"Test with `RaggedTensor`s and `SparseTensor`s.\"\"\"\n if not tf2.enabled():\n self.skipTest(\"Only V2 is supported.\")\n\n distribution.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n global_batch_size = 8\n\n def dataset_fn(ctx=None):\n ctx = ctx or distribute_lib.InputContext()\n batch_size = ctx.get_per_replica_batch_size(global_batch_size)\n # Use 20 which isn't divisible by 8 to test partial batch behavior.\n row_lengths = np.mod(np.arange(20), 4).astype(np.int64)\n ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(\n np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)\n dataset = dataset_ops.DatasetV2.from_tensor_slices({\n tensor_type: (ragged_tensor if tensor_type == \"ragged\" else\n ragged_tensor.to_sparse()),\n })\n dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)\n return dataset.batch(batch_size, drop_remainder=drop_remainder)\n\n if input_type == \"dataset\":\n ds = distribution.experimental_distribute_dataset(\n dataset_fn(distribute_lib.InputContext()))\n else:\n ds = distribution.experimental_distribute_datasets_from_function(\n dataset_fn)\n iterator = iter(ds)\n\n self.assertEqual(iterator._enable_get_next_as_optional,\n (not drop_remainder) and enable_get_next_as_optional)\n\n\nclass DistributedIteratorMultiWorkerTest(\n multi_worker_test_base.MultiWorkerTestBase, DistributedIteratorTestBase,\n parameterized.TestCase):\n\n def _cpu_devices(self):\n return [\n (\"/job:worker/replica:0/task:0\",\n [\"/job:worker/replica:0/task:0/device:CPU:0\"]),\n (\"/job:worker/replica:0/task:1\",\n [\"/job:worker/replica:0/task:1/device:CPU:0\"])]\n\n def _cpu_and_one_gpu_devices(self):\n return [\n (\"/job:worker/replica:0/task:0\", [\n \"/job:worker/replica:0/task:0/device:GPU:0\",\n \"/job:worker/replica:0/task:0/device:CPU:0\"\n ]),\n (\"/job:worker/replica:0/task:1\", [\n \"/job:worker/replica:0/task:1/device:GPU:0\",\n \"/job:worker/replica:0/task:1/device:CPU:0\"\n ])\n ]\n\n @combinations.generate(combinations.combine(\n mode=[\"graph\"],\n input_type=[\"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))\n def testAutoshardingOption(self, input_type, api_type, iteration_type,\n auto_shard_policy):\n ds_option = dataset_ops.Options()\n ds_option.experimental_distribute.auto_shard_policy = 
auto_shard_policy\n if tf2.enabled():\n dataset_fn = (\n lambda _: dataset_ops.DatasetV2.range(4).with_options(ds_option))\n else:\n dataset_fn = (\n lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 1))\n worker_devices = self._cpu_devices()\n with context.graph_mode(), self.cached_session() as sess:\n if auto_shard_policy == AutoShardPolicy.AUTO:\n expected_values = [[0, 1], [2, 3]]\n else:\n expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]]\n self._test_input_iteration(input_type, api_type, iteration_type,\n dataset_or_input_fn, worker_devices,\n expected_values, strategy, sess)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n enable_get_next_as_optional=[True, False]))\n def testOneDevicePerWorker(self, input_type, api_type, iteration_type,\n enable_get_next_as_optional):\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(4)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 1))\n worker_devices = self._cpu_devices()\n with context.graph_mode(), strategy.scope(), self.cached_session() as sess:\n\n if input_type == \"dataset\":\n # Autosharded\n expected_values = [[0, 1], [2, 3]]\n else:\n expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]]\n strategy.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_devices,\n expected_values,\n strategy,\n sess=sess)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n enable_get_next_as_optional=[True, False],\n required_gpus=1))\n def testTwoDevicesPerWorker(self, input_type, api_type, iteration_type,\n enable_get_next_as_optional):\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(4)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_and_one_gpu_devices()[0][1] +\n self._cpu_and_one_gpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 2))\n worker_devices = self._cpu_and_one_gpu_devices()\n with context.graph_mode(), strategy.scope(), self.cached_session() as sess:\n\n if input_type == \"dataset\":\n # Autosharded\n expected_values = [[0, 2, 1, 3]]\n else:\n expected_values = [[0, 1, 0, 1], [2, 3, 2, 3]]\n strategy.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n 
input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_devices,\n expected_values,\n strategy,\n sess=sess)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n enable_get_next_as_optional=[True, False]))\n def testTupleDataset(self, input_type, api_type, iteration_type,\n enable_get_next_as_optional):\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 1))\n worker_devices = self._cpu_devices()\n\n def dataset_fn(ctx):\n del ctx\n if tf2.enabled():\n dataset1 = dataset_ops.DatasetV2.range(4)\n dataset2 = dataset_ops.DatasetV2.range(4).map(lambda x: x**2)\n return dataset_ops.DatasetV2.zip((dataset1, dataset2))\n else:\n dataset1 = dataset_ops.Dataset.range(4)\n dataset2 = dataset_ops.Dataset.range(4).map(lambda x: x**2)\n return dataset_ops.Dataset.zip((dataset1, dataset2))\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n with context.graph_mode(), strategy.scope(), self.cached_session() as sess:\n\n if input_type == \"dataset\":\n # Autosharded\n expected_values = [[(0, 0), (1, 1)], [(2, 4), (3, 9)]]\n else:\n expected_values = [[(i, i**2), (i, i**2)] for i in range(0, 4)]\n strategy.extended.experimental_enable_get_next_as_optional = (\n enable_get_next_as_optional)\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_devices,\n expected_values,\n strategy,\n sess=sess)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n required_gpus=1))\n def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_and_one_gpu_devices()[0][1] +\n self._cpu_and_one_gpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 2))\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n worker_devices = self._cpu_and_one_gpu_devices()\n with context.graph_mode(), strategy.scope(), self.cached_session() as sess:\n if input_type == \"dataset\":\n # Autosharded\n expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]]\n else:\n expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],\n [[4, 5], [6, 7], [4, 5], [6, 7]], [[8], [], [8], []]]\n strategy.extended.experimental_enable_get_next_as_optional = True\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_devices,\n expected_values,\n strategy,\n sess=sess)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"],\n input_type=[\"input_fn\", \"dataset\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\"],\n strategy_cls=[\n collective_all_reduce_strategy.CollectiveAllReduceStrategy,\n parameter_server_strategy.ParameterServerStrategy,\n ],\n required_gpus=0))\n def 
testUnevenDatasetBatchesBetweenGraph(self, input_type, api_type,\n iteration_type, strategy_cls):\n if api_type == \"wrap_into_dataset\" and input_type == \"input_fn\":\n self.skipTest(\"unsupported test combination.\")\n if tf2.enabled():\n # The V2 tests are skipped since we don't support creating an\n # iterator for DistributedDataset in graph mode.\n self.skipTest(\"unsupported test combination\")\n # Environment variable is global, we need locking when patching TF_CONFIG.\n lock = threading.Lock()\n\n def _worker_fn(task_type, task_id, num_gpus):\n del num_gpus\n tf_config = {\n \"cluster\": self._cluster_spec,\n \"task\": {\n \"type\": task_type,\n \"index\": task_id\n }\n }\n with context.graph_mode(), lock, test.mock.patch.dict(\n \"os.environ\", {\"TF_CONFIG\": json.dumps(tf_config)}):\n strategy = strategy_cls()\n with context.graph_mode(), strategy.scope(), self.cached_session(\n target=\"grpc://\" + self._cluster_spec[task_type][task_id]) as sess:\n if tf2.enabled():\n dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2)\n else:\n dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n if (input_type == \"dataset\" and strategy_cls is\n collective_all_reduce_strategy.CollectiveAllReduceStrategy):\n # Autosharded\n if task_id == 0:\n expected_values = [[[0, 1]], [[4]]]\n else:\n expected_values = [[[2, 3]], [[]]]\n\n # input_context is for between-graph auto-sharding.\n input_context = distribute_lib.InputContext(\n num_input_pipelines=2,\n input_pipeline_id=task_id,\n num_replicas_in_sync=2)\n else:\n expected_values = [[[0, 1]], [[2, 3]], [[4]]]\n input_context = None\n\n strategy.extended.experimental_enable_get_next_as_optional = True\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n [(\"/job:%s/task:%d\" %\n (task_type, task_id), strategy.extended.worker_devices)],\n expected_values,\n strategy,\n sess=sess,\n input_context=input_context)\n\n self._run_between_graph_clients(_worker_fn, self._cluster_spec, 0)\n\n @combinations.generate(\n combinations.combine(\n mode=[\"graph\"], input_type=[\"input_fn\"],\n api_type=[\"wrap_into_iterator\", \"wrap_into_dataset\"],\n iteration_type=[\"get_next\", \"for_loop\"],\n required_gpus=1))\n def testDifferentDatasets(self, input_type, api_type, iteration_type):\n def dataset_fn(ctx):\n if ctx.input_pipeline_id == 0:\n return dataset_ops.Dataset.range(8).batch(2)\n else:\n return dataset_ops.Dataset.range(9).batch(2)\n dataset_or_input_fn = self._create_dataset_or_input_fn(\n input_type, dataset_fn)\n\n strategy = mirrored_strategy.MirroredStrategy(\n devices=(self._cpu_and_one_gpu_devices()[0][1] +\n self._cpu_and_one_gpu_devices()[1][1]),\n cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(\n [\"/job:worker/task:0\", \"/job:worker/task:1\"], 2))\n worker_devices = self._cpu_and_one_gpu_devices()\n with context.graph_mode(), strategy.scope(), self.cached_session() as sess:\n\n expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],\n [[4, 5], [6, 7], [4, 5], [6, 7]], [[], [], [8], []]]\n strategy.extended.experimental_enable_get_next_as_optional = True\n self._test_input_iteration(\n input_type,\n api_type,\n iteration_type,\n dataset_or_input_fn,\n worker_devices,\n expected_values,\n strategy,\n sess=sess)\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.core.protobuf.config_pb2.ConfigProto"
],
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_nn_ops.l2_loss",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.gen_array_ops.broadcast_gradient_args",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.test.main",
"tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver",
"tensorflow.python.tpu.tpu.outside_compilation",
"tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.logging_ops.print_v2",
"tensorflow.python.eager.remote.connect_to_cluster",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.python.distribute.tpu_strategy.TPUStrategy",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.greater",
"numpy.isnan",
"numpy.less",
"tensorflow.python.framework.op_callbacks.remove_op_callback",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.op_callbacks.add_op_callback",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.ops.gen_debug_ops.debug_numeric_summary_v2",
"tensorflow.python.eager.monitoring.Counter",
"tensorflow.python.debug.lib.source_utils.guess_is_tensorflow_py_library",
"numpy.isinf"
],
[
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"numpy.expand_dims",
"tensorflow.python.tpu.tpu_embedding.AdagradParameters",
"numpy.sqrt",
"tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver",
"tensorflow.python.tpu.tpu_embedding_v2_utils.Adagrad",
"tensorflow.python.tpu.tpu_embedding_v2.TPUEmbedding",
"tensorflow.python.tpu.tpu_embedding_v2.cpu_embedding_lookup",
"numpy.concatenate",
"tensorflow.python.ops.variables.Variable",
"numpy.zeros_like",
"tensorflow.python.eager.remote.connect_to_cluster",
"tensorflow.python.ops.init_ops_v2.Constant",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.distribute.distribute_lib.InputOptions",
"tensorflow.python.ops.gen_math_ops.maximum",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensors",
"numpy.reshape",
"tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system",
"numpy.stack",
"numpy.copy",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.gen_math_ops.squared_difference",
"tensorflow.python.util.nest.map_structure",
"numpy.zeros",
"tensorflow.python.ops.init_ops_v2.Ones",
"tensorflow.python.tpu.tpu_embedding_v2_utils.TableConfig",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_lengths",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.config.enable_mlir_bridge",
"tensorflow.python.ops.init_ops_v2.Zeros",
"numpy.array",
"tensorflow.python.tpu.tpu_strategy_util.shutdown_tpu_system",
"numpy.sum",
"tensorflow.python.distribute.tpu_strategy.TPUStrategy",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.tpu.tpu_embedding_v2_utils.SGD",
"numpy.ones",
"tensorflow.python.tpu.tpu_embedding_v2_utils.Adam",
"tensorflow.python.tpu.tpu_embedding_v2_utils.FeatureConfig",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.distribute.cross_device_ops.MultiWorkerAllReduce",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.distribute.input_lib.DatasetIterator",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.data.ops.dataset_ops.DatasetV1.range",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.distribute.input_lib.DistributedDataset",
"numpy.arange",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.distribute.input_lib.get_distributed_dataset",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.distribute.distribute_utils.select_replica",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.zip",
"tensorflow.python.eager.test.main",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.range",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.distribute.input_lib.InputFunctionIterator",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip",
"tensorflow.python.tf2.enabled",
"tensorflow.python.distribute.input_lib.DistributedDatasetV1",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.nest.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.5",
"2.6",
"2.4"
]
}
] |
hirakjyoti08/jina | [
"437943dd2dab87e22b0662b2081f13250918ec01",
"437943dd2dab87e22b0662b2081f13250918ec01"
] | [
"tests/unit/clients/python/test_on_err.py",
"tests/distributed/test_rolling_update_container_runtime/test_rolling_update.py"
] | [
"from typing import Optional\n\nimport aiohttp\nimport grpc\n\nfrom jina.excepts import BadClientCallback\nfrom jina import Flow, Client\n\nimport numpy as np\nimport pytest\nfrom docarray import DocumentArray\nfrom docarray.document.generators import from_ndarray\n\n\ndef validate(x):\n raise NotImplementedError\n\n\[email protected](\n reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'\n)\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_error(protocol):\n # In this particular test, when you write two tests in a row, you are testing the following case:\n #\n # You are testing exception in client's callback, not error in client's request generator\n # 1. The exception breaks the `async for req in stub.Call(req_iter)` on the client\n # 2. Server probably has something hold in the stream\n # 3. Restart the client, keep server untouched.\n # 4. Now, server stucks (because it considers the last connection wasn't end yet)\n def validate(x):\n raise NotImplementedError\n\n with Flow(protocol=protocol).add() as f:\n t = 0\n try:\n f.index(\n from_ndarray(np.random.random([5, 4])),\n on_done=validate,\n continue_on_error=False,\n )\n except BadClientCallback:\n # bad client callback will break the `async for req in stub.Call(req_iter)`\n t = 1\n # now query the gateway again, make sure gateway's channel is still usable\n f.index(\n from_ndarray(np.random.random([5, 4])),\n on_done=validate,\n continue_on_error=True,\n )\n assert t == 1\n\n\[email protected](\n 'protocol,exception',\n [\n ('websocket', aiohttp.ClientError),\n ('grpc', grpc.aio._call.AioRpcError),\n ('http', aiohttp.ClientError),\n ],\n)\ndef test_client_on_error_call(protocol, exception):\n\n with pytest.raises(exception):\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n )\n\n\[email protected](\n 'protocol,exception',\n [\n ('websocket', aiohttp.client_exceptions.ClientConnectorError),\n ('grpc', grpc.aio._call.AioRpcError),\n ('http', aiohttp.client_exceptions.ClientConnectorError),\n ],\n)\ndef test_client_on_error_raise_exception(protocol, exception):\n class OnError:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response, exception_param: Optional[Exception] = None):\n self.is_called = True\n assert type(exception_param) == exception\n\n on_error = OnError()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n on_error=on_error,\n )\n\n assert on_error.is_called\n\n\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_error_deprecation(protocol):\n class OnError:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response): # this is deprecated\n self.is_called = True\n\n on_error = OnError()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n on_error=on_error,\n )\n\n assert on_error.is_called\n\n\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_always_after_exception(protocol):\n class OnAlways:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response):\n self.is_called = True\n\n on_always = OnAlways()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n 
inputs=DocumentArray.empty(10),\n on_always=on_always,\n )\n\n assert on_always.is_called\n",
"import os\nimport time\n\nimport numpy as np\nimport pytest\nfrom daemon.models.id import DaemonID\n\nfrom jina import Document, Client, __default_host__\nfrom jina.logging.logger import JinaLogger\nfrom daemon.clients import JinaDClient\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\ncompose_yml = os.path.join(cur_dir, 'docker-compose.yml')\n\nHOST = __default_host__\nJINAD_PORT = 8003\nREST_PORT_DBMS = 9000\nREST_PORT_QUERY = 9001\nDUMP_PATH = '/jinad_workspace/dump'\n\nlogger = JinaLogger('test-dump')\nclient = JinaDClient(host=HOST, port=JINAD_PORT)\n\nSHARDS = 3\nEMB_SIZE = 10\n\n\[email protected]\ndef executor_images():\n import docker\n\n client = docker.from_env()\n\n dbms_dir = os.path.join(cur_dir, 'deployments', 'dbms')\n query_dir = os.path.join(cur_dir, 'deployments', 'query')\n client.images.build(path=dbms_dir, tag='dbms-executor')\n client.images.build(path=query_dir, tag='query-executor')\n client.close()\n yield\n time.sleep(2)\n client = docker.from_env()\n client.containers.prune()\n client.close()\n\n\ndef _create_flows():\n workspace_id = client.workspaces.create(paths=[cur_dir])\n dbms_flow_id = client.flows.create(\n workspace_id=workspace_id,\n filename='flow_dbms.yml',\n envs={'JINAD_WORKSPACE': f'/tmp/jinad/{workspace_id}'},\n )\n query_flow_id = client.flows.create(\n workspace_id=workspace_id,\n filename='flow_query.yml',\n envs={'JINAD_WORKSPACE': f'/tmp/jinad/{workspace_id}'},\n )\n return dbms_flow_id, query_flow_id, workspace_id\n\n\[email protected]('docker_compose', [compose_yml], indirect=['docker_compose'])\ndef test_dump_dbms_remote(executor_images, docker_compose):\n nr_docs = 100\n nr_search = 1\n docs = list(_get_documents(nr=nr_docs, index_start=0, emb_size=EMB_SIZE))\n\n dbms_flow_id, query_flow_id, workspace_id = _create_flows()\n\n # check that there are no matches in Query Flow\n r = Client(\n host=HOST, port=REST_PORT_QUERY, protocol='http', return_responses=True\n ).search(inputs=[doc for doc in docs[:nr_search]])\n assert r[0].data.docs[0].matches is None or len(r[0].data.docs[0].matches) == 0\n\n # index on DBMS flow\n Client(\n host=HOST, port=REST_PORT_DBMS, protocol='http', return_responses=True\n ).index(inputs=docs)\n\n # dump data for DBMS flow\n Client(host=HOST, port=REST_PORT_DBMS, protocol='http', return_responses=True).post(\n on='/dump',\n parameters={'shards': SHARDS, 'dump_path': DUMP_PATH},\n target_executor='indexer_dbms',\n )\n\n # rolling_update on Query Flow\n assert (\n DaemonID(\n client.flows.rolling_update(\n id=query_flow_id,\n deployment_name='indexer_query',\n uses_with={'dump_path': DUMP_PATH},\n )\n )\n == DaemonID(query_flow_id)\n )\n\n # validate that there are matches now\n r = Client(\n host=HOST, port=REST_PORT_QUERY, protocol='http', return_responses=True\n ).search(\n inputs=[doc for doc in docs[:nr_search]],\n parameters={'top_k': 10},\n )\n for doc in r[0].data.docs:\n assert len(doc.matches) == 10\n\n assert client.flows.delete(dbms_flow_id)\n assert client.flows.delete(query_flow_id)\n assert client.workspaces.delete(workspace_id)\n\n\ndef _get_documents(nr=10, index_start=0, emb_size=7):\n for i in range(index_start, nr + index_start):\n yield Document(\n id=f'I am document {i}',\n text=f'hello world {i}',\n embedding=np.random.random(emb_size),\n tags={'tag_field': f'tag data {i}'},\n )\n"
] | [
[
"numpy.random.random"
],
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thundergolfer/catboost | [
"60942dee40f1407466d0b1e486f0a1d445e6aa91"
] | [
"catboost/pytest/test.py"
] | [
"from itertools import permutations\nimport yatest.common\nfrom yatest.common import ExecutionTimeoutError, ExecutionError\nimport pytest\nimport os\nimport filecmp\nimport numpy as np\nimport pandas as pd\nimport timeit\nimport json\n\nimport catboost\n\nfrom catboost_pytest_lib import (\n apply_catboost,\n compare_evals_with_precision,\n compare_fit_evals_with_precision,\n compare_evals,\n data_file,\n execute_catboost_fit,\n execute_dist_train,\n format_crossvalidation,\n generate_concatenated_random_labeled_dataset,\n get_catboost_binary_path,\n get_limited_precision_dsv_diff_tool,\n local_canonical_file,\n permute_dataset_columns,\n remove_time_from_json,\n)\n\nCATBOOST_PATH = yatest.common.binary_path(\"catboost/app/catboost\")\n\nBOOSTING_TYPE = ['Ordered', 'Plain']\nGROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']\nBOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),\n ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]\n\nPREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']\n\nBINCLASS_LOSSES = ['Logloss', 'CrossEntropy']\nMULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']\nCLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES\nREGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'RMSEWithUncertainty', 'LogLinQuantile', 'Lq']\nPAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']\nGROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']\nRANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES\nALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES\n\nSAMPLING_UNIT_TYPES = ['Object', 'Group']\n\nOVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']\n\nLOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',\n 'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']\n\nLEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']\n\n# test both parallel in and non-parallel modes\n# default block size (5000000) is too big to run in parallel on these tests\nSCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']\nSCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']\n\nSEPARATOR_TYPES = [\n 'ByDelimiter',\n 'BySense',\n]\n\nTEXT_FEATURE_ESTIMATORS = [\n 'BoW',\n 'NaiveBayes',\n 'BM25',\n 'BoW,NaiveBayes',\n 'BoW,NaiveBayes,BM25'\n]\n\nROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE = data_file('rotten_tomatoes_small_with_embeddings', 'train')\nROTTEN_TOMATOES_WITH_EMBEDDINGS_CD_BINCLASS_FILE = data_file(\n 'rotten_tomatoes_small_with_embeddings',\n 'cd_binclass'\n)\nROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE = data_file(\n 'rotten_tomatoes_small_with_embeddings',\n 'cd_binclass_only_embeddings'\n)\n\n\ndef diff_tool(threshold=None):\n return get_limited_precision_dsv_diff_tool(threshold, True)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_multiregression(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiRMSE',\n '-f', data_file('multiregression', 'train'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--cv-rand', '42',\n '--eval-file', output_eval_path,\n )\n 
execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--boost-from-average', '0'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression_single(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train_single.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--boost-from-average', '0'))))]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('n_trees', [100, 500])\ndef test_multiregression(boosting_type, grow_policy, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\[email protected]('target_count', [1, 2, 3])\ndef test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):\n np.random.seed(42)\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = 
lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n evals = []\n for perm in permutations(range(target_count)):\n inv_perm = range(target_count)\n for i, j in enumerate(perm):\n inv_perm[j] = i\n\n np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test[:, perm], x_test]), delimiter='\\t')\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1)).reshape((-1, target_count))\n evals.append(eval[:, inv_perm])\n\n for eva in evals:\n assert np.allclose(eva, evals[0])\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [10, 100, 1000])\[email protected]('target_count', [1, 2, 3])\ndef test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):\n np.random.seed(42)\n ERR_PERC = 0.1\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\\t')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n\n for i in range(target_count):\n with open(get_cd_path(i), 'w') as cd:\n cd.write(''.join((('{}\\tTarget\\n'.format(j) if j == i else '{}\\tAuxiliary\\n'.format(j)) for j in range(target_count))))\n\n rmse_fit_cmd = (\n '--loss-function', 'RMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(i),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(i),\n '--eval-file', get_eval_path(i),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', rmse_fit_cmd)\n\n multirmse_eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1))\n rmse_eval = 
np.stack([\n np.loadtxt(get_eval_path(i), delimiter='\\t', skiprows=1, usecols=1)\n for i in range(target_count)\n ], axis=1)\n\n # cannot compare approxes because they are very different due to different boosting algorithms\n multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)\n rmse_loss = np.mean((rmse_eval - y_test)**2)\n\n assert rmse_loss.shape == multi_rmse_loss.shape\n assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\ndef test_multiregression_single(boosting_type, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\ndef test_multiregression_with_cat_features(boosting_type, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train_with_cat_features.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', 
output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):\n newton_eval_path = yatest.common.test_output_path('newton.eval')\n gradient_eval_path = yatest.common.test_output_path('gradient.eval')\n\n def run_catboost(eval_path, leaf_estimation_method):\n cmd = [\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--leaf-estimation-method', leaf_estimation_method,\n '-i', '20',\n '-T', '4',\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(newton_eval_path, 'Newton')\n run_catboost(gradient_eval_path, 'Gradient')\n assert filecmp.cmp(newton_eval_path, gradient_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_pool_with_QueryId(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.query_id'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_averagegain(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n 
'--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_queryauc(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'QueryAUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_queryaverage(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])\[email protected]('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])\ndef test_stochastic_filter(sigma, num_estimations):\n model_path = yatest.common.test_output_path('model.bin')\n cd_path = yatest.common.test_output_path('pool.cd')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n\n prng = np.random.RandomState(seed=0)\n\n n_samples_by_query = 20\n n_features = 10\n n_queries = 50\n\n n_samples = n_samples_by_query * n_queries\n\n features = prng.uniform(0, 1, size=(n_samples, n_features))\n weights = prng.uniform(0, 1, size=n_features)\n\n labels = np.dot(features, weights)\n query_ids = np.arange(0, n_samples) // n_queries\n money = (n_queries - np.arange(0, n_samples) % n_queries) * 10\n\n labels = labels.reshape((n_samples, 1))\n query_ids = query_ids.reshape((n_samples, 1))\n money = money.reshape((n_samples, 1))\n\n features = np.hstack((labels, query_ids, money, features))\n\n n_learn = int(0.7 * n_samples)\n learn = features[:n_learn, :]\n test = features[n_learn:, :]\n np.savetxt(train_path, learn, fmt='%.5f', delimiter='\\t')\n np.savetxt(test_path, test, fmt='%.5f', delimiter='\\t')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')\n test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')\n loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations\n\n cmd = [\n 
'--loss-function', loss_description,\n '--leaf-estimation-backtracking', 'No',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-m', model_path,\n '--use-best-model', 'false',\n ]\n\n cmd_one_thread = cmd + [\n '--learn-err-log', learn_error_one_thread_path,\n '--test-err-log', test_error_one_thread_path,\n '-T', '1'\n ]\n\n cmd_four_thread = cmd + [\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '-T', '4'\n ]\n execute_catboost_fit('CPU', cmd_one_thread)\n execute_catboost_fit('CPU', cmd_four_thread)\n\n compare_evals(learn_error_one_thread_path, learn_error_path)\n compare_evals(test_error_one_thread_path, test_error_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('metric', ['DCG', 'NDCG'])\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('dcg_type', ['Base', 'Exp'])\n@pytest.mark.parametrize('denominator', ['Position', 'LogPosition'])\ndef test_stochastic_rank(metric, top, dcg_type, denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(\n metric, top, dcg_type, denominator)\n\n cmd = (\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound_with_many_ones(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n np.random.seed(0)\n train_with_ones = yatest.common.test_output_path('train_with_ones')\n TARGET_COLUMN = 2\n with open(data_file('querywise', 'train')) as fin:\n with open(train_with_ones, 'w') as fout:\n for line in fin.readlines():\n if np.random.random() < 0.25:\n parts = line.split('\\t')\n parts[TARGET_COLUMN] = '1.0'\n line = '\\t'.join(parts)\n fout.write(line)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', train_with_ones,\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\n
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('top', [2, 100])\ndef test_averagegain_with_query_weights(boosting_type, top):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('top_size', [2, 5, 10, -1])\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('cd_file', ['train.cd', 'train.cd.subgroup_id'])\ndef test_pfound(top_size, boosting_type, cd_file):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_params_ordering():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n learn_error_reversed_path = yatest.common.test_output_path('learn_error_reversed.tsv')\n test_error_path = yatest.common.test_output_path('ignored.tsv')\n\n def get_cmd(custom_metric, learn_error_path):\n return (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '--custom-metric', custom_metric,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', get_cmd(\"PFound:top=1;decay=0.6;hints=skip_train~false\", learn_error_path))\n execute_catboost_fit('CPU', get_cmd(\"PFound:decay=0.6;top=1;hints=skip_train~false\", learn_error_reversed_path))\n\n with open(learn_error_path) as f:\n assert 'PFound:top=1;decay=0.6' in f.read()\n with open(learn_error_reversed_path) as f:\n assert 'PFound:decay=0.6;top=1' in f.read()\n\n\ndef test_recall_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'RecallAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), 
local_canonical_file(test_error_path)]\n\n\ndef test_precision_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'PrecisionAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mapk(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'MAP:top={}'.format(10),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ndcg_power_mode', ['Base', 'Exp'])\[email protected]('metric_type', ['DCG', 'NDCG'])\[email protected]('ndcg_denominator', ['None', 'LogPosition', 'Position'])\ndef test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n denominator = '' if ndcg_denominator == 'None' else ';denominator={}'.format(ndcg_denominator)\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', '{}:top={};type={};hints=skip_train~false{}'.format(metric_type, 10, ndcg_power_mode, denominator),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_queryrmse_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairlogit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path 
= yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path, learn_pairs):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', learn_pairs),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path, 'train.pairs')\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation_with_max_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit:max_pairs=30',\n '--eval-metric', 'PairLogit:max_pairs=30',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--fstr-file', output_fstr_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path),\n local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_pairlogit_no_target(boosting_type):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_pairlogit_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogitPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_pairwise_reproducibility(loss_function):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 
loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '5',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\ndef test_pairs_vs_grouped_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n\n def run_catboost(learn_pairs_path_with_scheme, test_pairs_path_with_scheme, eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', learn_pairs_path_with_scheme,\n '--test-pairs', test_pairs_path_with_scheme,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n eval_path_ungrouped = yatest.common.test_output_path('test_eval_ungrouped')\n run_catboost(\n data_file('querywise', 'train.pairs'),\n data_file('querywise', 'test.pairs'),\n eval_path_ungrouped\n )\n\n eval_path_grouped = yatest.common.test_output_path('test_eval_grouped')\n run_catboost(\n 'dsv-grouped://' + data_file('querywise', 'train.grouped_pairs'),\n 'dsv-grouped://' + data_file('querywise', 'test.grouped_pairs'),\n eval_path_grouped\n )\n\n assert filecmp.cmp(eval_path_ungrouped, eval_path_grouped)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank:permutations=5;decay=0.9',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_pairwise(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRankPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return 
[local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ('YetiRank', 'YetiRankPairwise'))\ndef test_yetirank_default_metric(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--model-file', output_model_path,\n '--boosting-type', 'Plain',\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path)]\n\n\[email protected]('eval_metric', ['MRR', 'MRR:top=1', 'ERR', 'ERR:top=1'])\ndef test_reciprocal_rank_metrics(eval_metric):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'YetiRank',\n '--eval-metric', eval_metric,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.query_id'),\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\nNAN_MODE = ['Min', 'Max']\n\n\[email protected]('nan_mode', NAN_MODE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode(nan_mode, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult_nan', 'train_small'),\n '-t', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', nan_mode,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('border_count', [64, 255, 350, 1000, 2500])\ndef test_different_border_count(border_count):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '20',\n '-T', '4',\n '-x', str(border_count),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n 
'--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode_forbidden(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', 'Forbidden',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_iter(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_inc_to_dec(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-pval', '0.5',\n '--od-type', 'IncToDec',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)\ndef test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n snapshot_path = yatest.common.test_output_path('snapshot')\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--leaf-estimation-iterations', '10',\n 
'--max-ctr-complexity', '4',\n '--snapshot-file', snapshot_path,\n '--od-type', overfitting_detector_type\n )\n if overfitting_detector_type == 'IncToDec':\n cmd_prefix += (\n '--od-wait', '2',\n '--od-pval', '0.5'\n )\n elif overfitting_detector_type == 'Iter':\n cmd_prefix += ('--od-wait', '2')\n\n cmd_first = cmd_prefix + ('-i', '10')\n execute_catboost_fit('CPU', cmd_first)\n\n cmd_second = cmd_prefix + ('-i', '2000')\n execute_catboost_fit('CPU', cmd_second)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\ndef test_per_object_approx_on_full_history(leaf_estimation_method):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '--approx-on-full-history',\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-w', '0.5',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '20',\n '--use-best-model', 'false')\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_shrink_model(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '1',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--use-best-model', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '2',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n 
'--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'column_name',\n ['doc_id', 'sample_id'],\n ids=['column_name=doc_id', 'column_name=sample_id']\n)\ndef test_sample_id(loss_function, column_name):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n column_description = data_file('adult_' + column_name, 'train.cd')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('adult_doc_id', 'train'),\n '-t', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nPOOLS = ['amazon', 'adult']\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_apply_missing_vals(boosting_type, grow_policy):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('test_adult_missing_val.tsv'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_crossentropy(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'CrossEntropy',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n 
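    # The adult_crossentropy pool stores probability-valued targets rather than 0/1 labels,
    # which is what the CrossEntropy loss consumes; the resulting eval file is canonized below.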
execute_catboost_fit('CPU', cmd)

    return [local_canonical_file(output_eval_path)]


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_permutation_block(boosting_type, dev_score_calc_obj_block_size):
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--fold-permutation-block', '239',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)

    return [local_canonical_file(output_eval_path)]


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_ignored_features(boosting_type):
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '-I', '0:1:3:5-7:10000',
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]


def test_ignored_features_names():
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    cmd = (
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]


def test_ignored_features_not_read():
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('adult', 'train.cd')
    cd_path = yatest.common.test_output_path('train.cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # Corrupt some features by making them 'Num'
            if cd_line.split() == ['5', 'Categ']: # column 5 --> feature 4
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['7', 'Categ']: # column 7 --> feature 6
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', cd_path,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '-I', '4:6', # Ignore the corrupted features
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    # Not needed: return [local_canonical_file(output_eval_path)]


def test_ignored_features_not_read_names():
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('black_friday', 'cd')
    cd_path = yatest.common.test_output_path('cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # Corrupt two categorical features by re-labelling them as 'Num'
            if cd_line.split() == ['2', 'Categ', 'Gender']:
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['10', 'Categ', 'Product_Category_3']:
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-I', 'Gender:Product_Category_3',
    )
    execute_catboost_fit('CPU', cmd)


@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_baseline(boosting_type, grow_policy):
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)

    formula_predict_path = yatest.common.test_output_path('predict_test.eval')

    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(calc_cmd)
    assert(compare_evals(output_eval_path, formula_predict_path))
    return [local_canonical_file(output_eval_path)]


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline(boosting_type, loss_function):
    labels = ['0', '1', '2', '3']

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')
    cmd = (
        '--loss-function', loss_function,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
        '--classes-count', '4'
    )
    execute_catboost_fit('CPU', cmd)

    formula_predict_path =
yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(eval_path, formula_predict_path))\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_lost_class(boosting_type, loss_function):\n labels = [0, 1, 2, 3]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--bootstrap-type', 'No',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n 
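    # '--bootstrap-type No' disables bagging, so the only per-object weighting that enters
    # score calculation here comes from the weight column of the adult_weight pool.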
execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Gradient'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_not_binarized', 'train_small'),\n '-t', data_file('adult_not_binarized', 'test_small'),\n '--column-description', data_file('adult_not_binarized', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--target-border', '0.5',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_without_test = yatest.common.test_output_path('model_without_test.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n base_cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '--counter-calc-method', 'SkipTest', # TODO(kirillovs): remove after setting SkipTest as default type\n '-w', '0.03',\n '-T', '4',\n )\n\n train_with_test_cmd = base_cmd + (\n '-t', data_file('adult', 'test_small'),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', train_with_test_cmd)\n\n train_without_test_cmd = base_cmd + (\n '-m', output_model_path_without_test,\n )\n execute_catboost_fit('CPU', train_without_test_cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n formula_predict_without_test_path = 
yatest.common.test_output_path('predict_without_test.eval')\n\n base_calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--prediction-type', 'RawFormulaVal'\n )\n calc_cmd = base_calc_cmd + (\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n )\n calc_cmd_without_test = base_calc_cmd + (\n '-m', output_model_path_without_test,\n '--output-path', formula_predict_without_test_path,\n )\n yatest.common.execute(calc_cmd)\n yatest.common.execute(calc_cmd_without_test)\n if loss_function == 'MAPE':\n # TODO(kirillovs): uncomment this after resolving MAPE problems\n # assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path), local_canonical_file(formula_predict_path)]\n else:\n assert(compare_evals(output_eval_path, formula_predict_path))\n assert(filecmp.cmp(formula_predict_without_test_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_cv(is_inverted, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_query(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_pairs(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('bad_cv_params', ['XX', 'YY', 'XY'])\ndef test_multiple_cv_spec(bad_cv_params):\n 
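    # Each parametrization supplies two --cv specifications (two Classical, two Inverted,
    # or a Classical/Inverted mix); catboost must reject the duplicated option, so the fit
    # below is expected to terminate with an ExecutionError.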
output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if bad_cv_params == 'XX':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=False, n=4, k=7))\n elif bad_cv_params == 'XY':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n elif bad_cv_params == 'YY':\n cmd += ('--cv', format_crossvalidation(is_inverted=True, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n else:\n raise Exception('bad bad_cv_params value:' + bad_cv_params)\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('error_type', ['0folds', 'fold_idx_overflow'])\ndef test_bad_fold_cv_spec(is_inverted, error_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n ('--cv:Inverted' if is_inverted else '--cv:Classical'),\n {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type],\n '--eval-file', output_eval_path,\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_empty_eval(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_time(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--has-time',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 
'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', 
output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '40',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--leaf-estimation-iterations', '7',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_priors(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,'\n 'Counter:Prior=0',\n '--per-feature-ctr', '4:Borders:Prior=0.444,Counter:Prior=0.444;'\n '6:Borders:Prior=0.666,Counter:Prior=0.666;'\n '8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Buckets'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 
'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-len-multiplier', '1.5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nFSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues', 'PredictionDiff']\nDATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr(fstr_type, boosting_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr_with_text_features(fstr_type, boosting_type, grow_policy):\n pool = 'rotten_tomatoes'\n\n separator_type = 'ByDelimiter'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train'),\n cd_path=data_file(pool, 'cd_binclass'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--text-processing', json.dumps(text_processing)) +\n (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', ['LossFunctionChange', 'ShapValues'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr_with_text_features_shap(fstr_type, boosting_type, grow_policy):\n pool = 'rotten_tomatoes'\n\n separator_type = 'ByDelimiter'\n feature_estimators = 'NaiveBayes'\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': 
feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train'),\n cd_path=data_file(pool, 'cd_binclass'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--random-strength', '0', '--text-processing', json.dumps(text_processing)) +\n (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_normalized_model(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=True,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_target_border(fstr_type, grow_policy):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--target-border', '0.4')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_weights(fstr_type, grow_policy):\n return do_test_fstr(\n fstr_type,\n loss_function='RMSE',\n input_path=data_file('querywise', 'train'),\n cd_path=data_file('querywise', 'train.cd.weight'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_class_weights(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\ndef test_fstr_with_target_border_and_class_weights(fstr_type):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy='SymmetricTree',\n normalize=False,\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\ndef do_test_fstr(\n fstr_type,\n loss_function,\n input_path,\n cd_path,\n boosting_type,\n grow_policy,\n normalize,\n additional_train_params=()\n):\n 
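    # Shared driver for the fstr tests above: train a small model, optionally normalize it,
    # then run the standalone 'fstr' mode and canonize its output. When the model has been
    # normalized, the fstr call is expected to fail unless fstr_type is
    # 'PredictionValuesChange', or 'InternalFeatureImportance' with a non-ranking loss.
    # A minimal sketch of how the tests above drive this helper (mirrors test_fstr with an
    # illustrative boosting_type/grow_policy choice, not an additional test case):
    #   do_test_fstr(
    #       'ShapValues',
    #       loss_function='Logloss',
    #       input_path=data_file('adult', 'train_small'),
    #       cd_path=data_file('adult', 'train.cd'),
    #       boosting_type='Plain',
    #       grow_policy='SymmetricTree',
    #       normalize=False,
    #       additional_train_params=('--max-ctr-complexity', '1'),
    #   )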
model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', input_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '-m', model_path\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n if fstr_type == 'PredictionDiff':\n with open(input_path) as input:\n fstr_pool_path = yatest.common.test_output_path('input.tsv')\n with open(fstr_pool_path, \"w\") as output:\n output.write(input.readline())\n output.write(input.readline())\n input_path = fstr_pool_path\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', input_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', output_fstr_path,\n '--fstr-type', fstr_type\n )\n\n if normalize:\n make_model_normalized(model_path)\n if not(\n fstr_type == 'PredictionValuesChange' or\n fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES\n ):\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\ndef make_model_normalized(model_path):\n yatest.common.execute([\n CATBOOST_PATH,\n 'normalize-model',\n '--model-path', model_path,\n '--output-model', model_path,\n '--set-scale', '0.5',\n '--set-bias', '0.125',\n ])\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_loss_change_fstr(loss_function):\n return do_test_loss_change_fstr(loss_function, normalize=False)\n\n\ndef test_loss_change_fstr_normalized():\n return do_test_loss_change_fstr('QueryRMSE', normalize=True)\n\n\ndef do_test_loss_change_fstr(loss_function, normalize):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')\n\n def add_loss_specific_params(cmd, fstr_mode):\n if loss_function in ['PairLogit', 'PairLogitPairwise']:\n cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))\n if fstr_mode:\n cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--column-description', data_file('querywise', 'train.cd'))\n return cmd\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '--learn-set', data_file('querywise', 'train'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', model_path\n )\n cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd_prefix = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)\n if normalize:\n make_model_normalized(model_path)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n 
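    # The LossFunctionChange table written during training via --fstr-file must agree with
    # the one produced by the standalone 'fstr' mode (checked with np.allclose, rtol=1e-6).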
fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fit_output, fstr_output, rtol=1e-6))\n\n return [local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ranking_parameters', [\n {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},\n {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}\n])\ndef test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):\n model_path = yatest.common.test_output_path('model.bin')\n fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')\n fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')\n internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')\n internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')\n\n pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'\n pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')\n cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')\n has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()\n\n cmd = (\n '--use-best-model', 'false',\n '--learn-set', pool_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path,\n '--loss-function', ranking_parameters['loss-function']\n ) + has_header_suffix\n\n if ranking_parameters['loss-function'] == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_0,\n '--fstr-internal-file', internal_fstr_path_0,\n '--fstr-type', 'FeatureImportance')\n )\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_1,\n '--fstr-internal-file', internal_fstr_path_1,\n '--fstr-type', ranking_parameters['fstr-type'])\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', pool_path,\n '--column-description', cd_path,\n '--model-file', model_path,\n ) + has_header_suffix\n\n yatest.common.execute(\n fstr_cmd + ('--output-path', fstr_path_1,\n '--fstr-type', 'FeatureImportance')\n )\n yatest.common.execute(\n fstr_cmd + ('--output-path', internal_fstr_path_1,\n '--fstr-type', 'InternalFeatureImportance')\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loss_change_fstr_without_pairs(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path\n\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n\n try:\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', 
data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--model-file', model_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n except:\n return [local_canonical_file(output_fstr_path)]\n\n assert False\n\n\ndef test_loss_change_fstr_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')\n output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')\n train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', get_pool_path('train', True),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '-i', '10',\n '-T', '4',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train'),\n '--column-description', cd_file,\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_dsv_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train', True),\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_quantized_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\\t')\n fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\\t')\n train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))\n assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('grow_policy', GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '25',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\nBORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']\n\n\[email protected]('border_type', BORDER_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef 
test_feature_border_types(border_type, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--feature-border-type', border_type,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('depth', [4, 8])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_deep_tree_classification(depth, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--depth', str(depth),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n '--l2-leaf-reg', '5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nREG_LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',\n 'Huber:delta=1.0']\n\n\[email protected]('loss_function', REG_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', 
cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n\n if boosting_type == 'Plain':\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nBORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']\n\n\[email protected](\n 'border_type',\n BORDER_TYPES,\n ids=lambda border_type: 'border_type=%s' % border_type\n)\[email protected](\n 'border_count',\n [1, 3, 10],\n ids=lambda border_count: 'border_count=%d' % border_count\n)\[email protected](\n 'boosting_type',\n BOOSTING_TYPE,\n ids=lambda boosting_type: 'boosting_type=%s' % boosting_type\n)\ndef test_ctr_target_quantization(border_type, border_count, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Borders:TargetBorderType=' + border_type,\n '--ctr-target-border-count', str(border_count)\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCOUNTER_METHODS = ['Full', 'SkipTest']\n\n\[email protected]('counter_calc_method', COUNTER_METHODS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_counter_calc(counter_calc_method, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', 
data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '60',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--counter-calc-method', counter_calc_method\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']\n\n\[email protected]('ctr_type', CTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', ctr_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_overfitting_detector_metric(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC:hints=skip_train~false',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_same_metric_skip_different(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path_with_custom_metric = yatest.common.test_output_path('test_error_with_custom_metric.tsv')\n learn_error_path_with_custom_metric = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n ]\n\n cmd_without_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n ]\n cmd_with_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~true',\n '--custom-metric', 
'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path_with_custom_metric,\n '--test-err-log', test_error_path_with_custom_metric,\n ]\n\n execute_catboost_fit('CPU', cmd_without_custom_metric)\n execute_catboost_fit('CPU', cmd_with_custom_metric)\n\n assert filecmp.cmp(learn_error_path_with_custom_metric, learn_error_path)\n\n\[email protected]('loss_function', BINCLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_classification(loss_function, boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n custom_metrics = [\n metric for metric in\n [\n 'AUC:hints=skip_train~false',\n 'Logloss',\n 'CrossEntropy',\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'BalancedAccuracy',\n 'BalancedErrorRate',\n 'Kappa',\n 'WKappa',\n 'BrierScore',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss',\n 'NormalizedGini'\n ]\n if metric != loss_function\n ]\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', ','.join(custom_metrics),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loglikelihood_of_prediction(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'LogLikelihoodOfPrediction',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path, diff_tool(1e-7)), local_canonical_file(test_error_path, diff_tool(1e-7))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_multiclassification(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric',\n 'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',\n '--learn-err-log', learn_error_path,\n '--test-err-log', 
        test_error_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_prediction_type(boosting_type):
    model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
    )
    execute_catboost_fit('CPU', cmd)

    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_path,
        '--output-path', output_eval_path,
        '--prediction-type', 'Probability'
    )
    yatest.common.execute(calc_cmd)

    return local_canonical_file(output_eval_path)


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_no_target(boosting_type):
    model_path = yatest.common.test_output_path('adult_model.bin')
    fit_output_eval_path = yatest.common.test_output_path('fit_test.eval')
    calc_output_eval_path = yatest.common.test_output_path('calc_test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--counter-calc-method', 'SkipTest',
        '--eval-file', fit_output_eval_path
    )
    execute_catboost_fit('CPU', cmd)

    # Apply the trained model with a column description that has no target column;
    # predictions must match the eval produced during fitting.
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('train_notarget.cd'),
        '-m', model_path,
        '--output-path', calc_output_eval_path
    )
    yatest.common.execute(calc_cmd)

    assert(compare_evals(fit_output_eval_path, calc_output_eval_path))


@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_classification_progress_restore(boosting_type):

    def run_catboost(iters, model_path, eval_path, additional_params=None):
        import random
        import shutil
        import string
        letters = string.ascii_lowercase
        train_random_name = ''.join(random.choice(letters) for _ in range(8))
        shutil.copy(data_file('adult', 'train_small'), train_random_name)
        cmd = [
            '--loss-function', 'Logloss',
            '--learning-rate', '0.5',
            '-f', train_random_name,
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '--boosting-type', boosting_type,
            '-i', str(iters),
            '-T', '4',
            '-m', model_path,
            '--eval-file', eval_path,
        ]
        if additional_params:
            cmd += additional_params
        execute_catboost_fit('CPU', cmd)

    # Train 30 iterations in one run, then 15 + 15 through a snapshot file;
    # the resulting evals must be identical.
    canon_model_path = yatest.common.test_output_path('canon_model.bin')
    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    run_catboost(30, canon_model_path, canon_eval_path)
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    run_catboost(15, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    assert filecmp.cmp(canon_eval_path, eval_path)
    # TODO(kirillovs): make this active when
progress_file parameter will be deleted from json params\n # assert filecmp.cmp(canon_model_path, model_path)\n\n\[email protected]('loss_function', CLASSIFICATION_LOSSES)\[email protected]('prediction_type', PREDICTION_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_prediction_type(prediction_type, loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--prediction-type', prediction_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_const_feature(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = yatest.common.test_output_path('train_small')\n test_path = yatest.common.test_output_path('test_small')\n train_dataset = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\\t')\n test_dataset = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\\t')\n train_dataset[:, 14] = '0'\n test_dataset[:, 14] = '0'\n np.savetxt(train_path, train_dataset, fmt='%s', delimiter='\\t')\n np.savetxt(test_path, test_dataset[:10, :], fmt='%s', delimiter='\\t')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nQUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']\n\n\[email protected]('loss_function', QUANTILE_LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_quantile_targets(loss_function, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function + ':alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_targets_exact(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult', 
'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_weights(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('higgs', 'train_small'),\n '-t', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_categorical(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_quantile_exact_distributed():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MAE',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n other_options=(\n '--leaf-estimation-method', 'Exact',\n '--boost-from-average', 'False'\n )\n )))]\n\n\nCUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',\n 'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',\n 'FairLoss:smoothness=0.9']\n\n\[email protected]('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss(custom_loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '50',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric', custom_loss_function,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n eps = 0 if 'MSLE' not in custom_loss_function else 1e-9\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool(eps)),\n local_canonical_file(test_error_path, 
diff_tool=diff_tool(eps))]\n\n\ndef test_train_dir():\n output_model_path = 'model.bin'\n output_eval_path = 'test.eval'\n train_dir_path = 'trainDir'\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--train-dir', train_dir_path,\n '--fstr-file', 'fstr.tsv',\n '--fstr-internal-file', 'ifstr.tsv'\n )\n execute_catboost_fit('CPU', cmd)\n outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path, output_eval_path, 'fstr.tsv', 'ifstr.tsv']\n for output in outputs:\n assert os.path.isfile(train_dir_path + '/' + output)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n borders_file = yatest.common.test_output_path('borders.tsv')\n borders_file_output = borders_file + '.out'\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--output-borders-file': borders_file_output,\n }\n\n params_binarized = dict(params)\n params_binarized['--input-borders-file'] = borders_file_output\n params_binarized['--output-borders-file'] = borders_file\n params_binarized['-m'] = output_model_path_binarized\n\n execute_catboost_fit(task_type='CPU', params=params)\n\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n execute_catboost_fit(\n task_type='CPU',\n params=params_binarized,\n )\n\n apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)\n apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)\n\n assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))\n assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(predictions_path_test),\n local_canonical_file(predictions_path_learn),\n local_canonical_file(borders_file)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_id_fstr(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_fstr_path = 
yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train_with_id.cd'),\n '-m', model_path,\n '-o', output_fstr_path,\n )\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-names', '1,0'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,1.,0.25,0.75'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass_last_class_missed(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,0.25,0.75,1.',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', 
data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_params_from_file(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '6',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--params-file', data_file('params.json')\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_lost_class(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_with_lost_class(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--class-weights', '0.5,2,2',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 
'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_one_hot(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--one-hot-max-size', '10'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', calc_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(output_eval_path, calc_eval_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_random_strength(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--random-strength', '100'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult_all_categorical.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path 
= yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('used_ram_limit', ['1Kb', '4Gb'])\[email protected](\n 'dev_score_calc_obj_block_size',\n ['600', '5000000'],\n ids=['calc_block=600', 'calc_block=5000000']\n)\ndef test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--used-ram-limit', used_ram_limit,\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '--depth', '7',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'ignored_features',\n [True, False],\n ids=['ignored_features=True', 'ignored_features=False']\n)\ndef test_apply_with_permuted_columns(ignored_features):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if ignored_features:\n cmd += ('--ignore-features', '0:2:5')\n\n execute_catboost_fit('CPU', cmd)\n\n permuted_test_path, permuted_cd_path = permute_dataset_columns(\n data_file('airlines_5K', 'test'),\n data_file('airlines_5K', 'cd'),\n seed=123)\n\n permuted_predict_path = yatest.common.test_output_path('permuted_predict.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', permuted_test_path,\n '--has-header',\n '--column-description', permuted_cd_path,\n '-m', output_model_path,\n '--output-path', permuted_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal,Label'\n )\n yatest.common.execute(calc_cmd)\n assert filecmp.cmp(output_eval_path, permuted_predict_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTreeLevel',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n if grow_policy == 'Lossguide':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n else:\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--bagging-temperature', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return 
local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_plain(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--boosting-type', 'Plain',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n bootstrap_option = {\n 'no': ('--bootstrap-type', 'No',),\n 'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),\n 'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)\n }\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n )\n for bootstrap in bootstrap_option:\n model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')\n eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')\n execute_catboost_fit('CPU', cmd + ('-m', model_path, '--eval-file', eval_path,) + bootstrap_option[bootstrap])\n\n ref_eval_path = yatest.common.test_output_path('test_no.eval')\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_json_logging():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_json_logging_metric_period():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n 
'-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n '--metric-period', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_output_columns_format():\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n # Intentionally skipped: -t ...\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--output-columns', 'SampleId,RawFormulaVal,#2,Label',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path, formula_predict_path)\n\n\ndef test_eval_period():\n model_path = yatest.common.test_output_path('adult_model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--eval-period', '2'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(formula_predict_path)\n\n\ndef test_weights_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_from_file_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 
'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--learn-baseline', data_file('higgs', 'train_baseline'),\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '-i', '10',\n '--ignore-features', '0', # baseline column\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_1_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_group_weight_output():\n model_path = yatest.common.test_output_path('model.bin')\n fit_eval_path = yatest.common.test_output_path('test_0.eval')\n calc_eval_path = yatest.common.test_output_path('test_1.eval')\n\n fit_cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'QueryRMSE',\n '--learn-set', data_file('querywise', 'train'),\n '--test-set', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '-i', '10',\n '-m', model_path,\n '--eval-file', fit_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(fit_cmd)\n fit_eval = pd.read_csv(fit_eval_path, sep='\\t')\n test_group_weight = pd.read_csv(data_file('querywise', 'test'), sep='\\t', header=None)[0]\n assert 'GroupWeight' in fit_eval.columns\n assert np.allclose(fit_eval['GroupWeight'], test_group_weight)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--output-path', calc_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(calc_cmd)\n calc_eval = pd.read_csv(calc_eval_path, sep='\\t')\n assert 'GroupWeight' in calc_eval.columns\n assert np.allclose(calc_eval['GroupWeight'], test_group_weight)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_from_file(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path_0 = yatest.common.test_output_path('test_0.eval')\n output_eval_path_1 = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path_0,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 
'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.,0.25,0.5,0.75',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n\n try:\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.5,0.25,0.75.,0.',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n except:\n return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]\n\n assert False\n\n\ndef test_baseline_from_file_output_on_quantized_pool():\n output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n )\n execute_catboost_fit('CPU', cmd + ('-i', '10'))\n execute_catboost_fit('CPU', cmd + (\n '-i', '10',\n '--learn-baseline', eval_0_path,\n '--test-baseline', eval_0_path,\n '--eval-file', eval_0_path))\n\n execute_catboost_fit('CPU', cmd + (\n '-i', '20',\n '--eval-file', eval_1_path))\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_query_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_subgroup_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.subgroup_id'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef 
test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-w', '0.1',\n '--one-hot-max-size', '102',\n '--bootstrap-type', 'No',\n '--random-strength', '0',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):\n pool_path = schema + data_file(pool, train)\n test_path = test_schema + data_file(pool, test)\n cd_path = data_file(pool, cd)\n cmd = (\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--random-strength', '0',\n '--has-time',\n '--bootstrap-type', 'No',\n '--boosting-type', 'Plain',\n )\n if dev_score_calc_obj_block_size:\n cmd += ('--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size)\n return cmd + other_options\n\n\ndef run_dist_train(cmd, output_file_switch='--eval-file'):\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n execute_catboost_fit('CPU', cmd + (output_file_switch, eval_0_path,))\n\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n execute_dist_train(cmd + (output_file_switch, eval_1_path,))\n\n eval_0 = np.loadtxt(eval_0_path, dtype='float', delimiter='\\t', skiprows=1)\n eval_1 = np.loadtxt(eval_1_path, dtype='float', delimiter='\\t', skiprows=1)\n assert(np.allclose(eval_0, eval_1, atol=1e-5))\n return eval_1_path\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef 
test_dist_train_multiclass(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='test_small',\n cd='train.cd',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\[email protected]('target', ['PairLogitPairwise', 'QuerySoftMax'])\ndef test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=target,\n pool='querywise',\n train='train_x128_greedylogsum_aqtaa.bin',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-pairs', data_file('querywise', pairs_file)))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train.quantized',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-group-weights', data_file('querywise', 'train.group_weights')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='train_small_x128_greedylogsum.bin',\n cd='train_baseline.cd',\n schema='quantized://',\n test_schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '--learn-baseline', data_file('higgs', 'train_baseline')))))]\n\n\[email protected](\n 
'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_queryrmse(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_subgroup(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--eval-metric', 'PFound')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_pairlogit(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogit',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.query_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))\n )))]\n\n\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_dist_train_pairlogitpairwise(pairs_file):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogitPairwise',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd',\n other_options=('--learn-pairs', data_file('querywise', pairs_file))\n )))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_querysoftmax(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QuerySoftMax',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n other_options=('--eval-metric', 'AUC')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc_weight(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](reason='Boost from average for distributed training')\[email protected]('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])\ndef test_dist_train_snapshot(schema, train):\n train_cmd = make_deterministic_train_cmd(\n loss_function='RMSE',\n pool='higgs',\n train=train,\n test='test_small',\n schema=schema,\n cd='train.cd')\n\n eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')\n 
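    # Snapshot/resume consistency check: a single-node run produces the 10-tree
    # canonical eval, then distributed training is run for 5 trees with
    # --snapshot-file and resumed up to 10 trees; the resumed eval must match
    # the one-shot 10-tree eval byte for byte.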
execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))\n\n snapshot_path = yatest.common.test_output_path('snapshot')\n execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))\n\n eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')\n execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))\n\n assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))\n return [local_canonical_file(eval_5_plus_5_trees_path)]\n\n\ndef test_dist_train_yetirank():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='YetiRank',\n pool='querywise',\n train='repeat_same_query_8_times',\n test='repeat_same_query_8_times',\n cd='train.cd'\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected](\n 'one_hot_max_size',\n [2, 255],\n ids=['one_hot_max_size=2', 'one_hot_max_size=255']\n)\ndef test_dist_train_with_cat_features(dev_score_calc_obj_block_size, one_hot_max_size):\n cmd = make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='adult',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--one-hot-max-size', str(one_hot_max_size))\n )\n\n if one_hot_max_size == 2:\n with pytest.raises(yatest.common.ExecutionError):\n run_dist_train(cmd)\n else:\n return [local_canonical_file(run_dist_train(cmd))]\n\n\ndef test_no_target():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n pairs_path = yatest.common.test_output_path('pairs')\n\n np.savetxt(train_path, [[0], [1], [2], [3], [4]], delimiter='\\t', fmt='%.4f')\n np.savetxt(cd_path, [('0', 'Num')], delimiter='\\t', fmt='%s')\n np.savetxt(pairs_path, [[0, 1], [0, 2], [0, 3], [2, 4]], delimiter='\\t', fmt='%i')\n\n cmd = (\n '-f', train_path,\n '--cd', cd_path,\n '--learn-pairs', pairs_path\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('loss_function', ALL_LOSSES)\ndef test_const_target(loss_function):\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n np.savetxt(\n train_path,\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 2],\n [0, 0, 3],\n [0, 0, 4]],\n delimiter='\\t',\n fmt='%.4f'\n )\n np.savetxt(cd_path, [('0', 'Target'), ('1', 'GroupId')], delimiter='\\t', fmt='%s')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_negative_weights():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write('0\\tNum\\n1\\tWeight\\n2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, -1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_zero_learning_rate():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write(\n '0\\tNum\\n'\n '1\\tNum\\n'\n '2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, 1, 1]], delimiter='\\t', 
fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n '--learning-rate', '0.0',\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=(), additional_eval_params=()):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n ) + additional_eval_params\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])\ndef test_eval_metrics(metric, metric_period):\n if metric == 'PFound':\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'QueryRMSE'\n elif metric == 'PairAccuracy':\n # note: pairs are autogenerated\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'PairLogitPairwise'\n else:\n train, test, cd, loss_function = data_file('adult', 'train_small'), data_file('adult', 'test_small'), data_file('adult', 'train.cd'), 'Logloss'\n\n return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)\n\n\ndef test_eval_metrics_with_target_border():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_eval_metrics_with_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult', 'train_small'),\n test=data_file('adult', 'test_small'),\n cd=data_file('adult', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_eval_metrics_with_target_border_and_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef 
test_eval_metrics_with_boost_from_average_and_model_shrinkage(config):\n mode, rate, lr = config\n train = data_file('higgs', 'train_small')\n test = data_file('higgs', 'test_small')\n cd = data_file('higgs', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Logloss',\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', '10',\n '--learn-err-log', learn_error_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--boost-from-average', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n test_eval_path = yatest.common.test_output_path('test_output.tsv')\n learn_eval_path = yatest.common.test_output_path('learn_output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', train,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', learn_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', test_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n test_first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1:], 8)\n test_second_metrics = np.round(np.loadtxt(test_eval_path, skiprows=1)[:, 1:], 8)\n learn_first_metrics = np.round(np.loadtxt(learn_error_path, skiprows=1)[:, 1:], 8)\n learn_second_metrics = np.round(np.loadtxt(learn_eval_path, skiprows=1)[:, 1:], 8)\n assert test_first_metrics[-1] == test_second_metrics[-1]\n assert learn_first_metrics[-1] == learn_second_metrics[-1]\n\n\[email protected]('metrics', ['AUC', 'AUC,Precision'])\ndef test_eval_metrics_with_binarized_target(metrics):\n train = data_file('adult', 'train_small')\n test = data_file('adult', 'test_small')\n cd = data_file('adult', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--target-border', '0.25',\n '--custom-metric', metrics,\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metrics,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])\[email protected]('loss_function', 
MULTICLASS_LOSSES)\[email protected]('dataset', ['cloudness_small', 'cloudness_lost_class'])\ndef test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):\n if metric in MULTICLASS_LOSSES and metric != loss_function:\n # MultiClass and MultiClassOneVsAll are incompatible\n return\n\n train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '3',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n start_index = 1 if metric == loss_function else 2\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\ndef test_eval_metrics_class_names():\n labels = ['a', 'b', 'c', 'd']\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'MultiClass',\n '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--class-names', ','.join(labels),\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats'\n )\n execute_catboost_fit('CPU', cmd)\n yatest.common.execute(eval_cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy', 'AUC:type=Ranking'])\ndef 
test_eval_metrics_with_baseline(metric_period, metric):\n train = data_file('adult_weight', 'train_weight')\n test = data_file('adult_weight', 'test_weight')\n cd = data_file('train_adult_baseline.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy'])\ndef test_eval_metrics_multiclass_with_baseline(metric_period, metric):\n labels = [0, 1, 2, 3]\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '--eval-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n 
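# fit a Logloss model on the adult pool with --ctr-leaf-count-limit 10; the resulting eval file is canonized\n 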
'--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr-leaf-count-limit', '10',\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\n@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])\ndef test_boost_from_average(boosting_type, grow_policy, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')\n output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')\n output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')\n baselined_train = yatest.common.test_output_path('baselined_train')\n baselined_test = yatest.common.test_output_path('baselined_test')\n baselined_cd = yatest.common.test_output_path('baselined.cd')\n\n train_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n original_cd = data_file('adult', 'train.cd')\n\n # use float32 because we use float in C++\n sum_target = np.float32(0)\n obj_count = np.float32(0)\n with open(train_path) as train_f:\n for line in train_f:\n obj_count += 1\n sum_target += np.float32(line.split()[1])\n\n mean_target = sum_target / obj_count\n if loss_function in ['Logloss', 'CrossEntropy']:\n mean_target = -np.log(1 / mean_target - 1)\n mean_target_str = str(mean_target)\n\n def append_baseline_to_pool(source, target):\n with open(source) as source_f, open(target, 'w') as target_f:\n for line in source_f:\n target_f.write(line.rstrip('\\n') + '\\t' + mean_target_str + '\\n')\n\n append_baseline_to_pool(train_path, baselined_train)\n append_baseline_to_pool(test_path, baselined_test)\n\n with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:\n for line in cd_input:\n cd_output.write(line)\n cd_output.write('18\\tBaseline\\n')\n\n base_cmd = (\n '--loss-function', loss_function,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '30',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', base_cmd + (\n '-f', baselined_train,\n '-t', baselined_test,\n '--boost-from-average', '0',\n '--column-description', baselined_cd,\n '--eval-file', output_eval_path_with_baseline,\n ))\n execute_catboost_fit('CPU', base_cmd + (\n '-f', train_path,\n '-t', test_path,\n '--boost-from-average', '1',\n '--column-description', original_cd,\n '--eval-file', output_eval_path_with_avg,\n ))\n yatest.common.execute((\n CATBOOST_PATH, 'calc',\n '--cd', original_cd,\n '--input-path', test_path,\n '-m', output_model_path,\n '-T', '1',\n '--output-path', output_calc_eval_path,\n ))\n\n assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)\n assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)\n return [local_canonical_file(output_eval_path_with_avg)]\n\n\n@pytest.mark.parametrize('eval_period', ['1', '2'])\ndef test_eval_non_additive_metric(eval_period):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n 
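# fit a small Logloss model on adult; AUC (a non-additive metric) is then recomputed via eval-metrics with and without --calc-on-parts and the results are compared\n 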
'--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_path,\n '--eval-period', eval_period,\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n output_eval_in_parts = yatest.common.test_output_path('eval_in_parts.eval')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_in_parts,\n '--eval-period', eval_period,\n '--calc-on-parts',\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.loadtxt(output_eval_path, skiprows=1)\n second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('max_ctr_complexity', [1, 2])\ndef test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):\n one_hot_max_size = 2\n cd_path = yatest.common.test_output_path('cd.txt')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n model_path = yatest.common.test_output_path('model.bin')\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n np.savetxt(cd_path, [['0', 'Target'],\n ['1', 'Categ'],\n ['2', 'Categ']\n ], fmt='%s', delimiter='\\t')\n np.savetxt(train_path, [['1', 'A', 'X'],\n ['1', 'B', 'Y'],\n ['1', 'C', 'Y'],\n ['0', 'A', 'Z'],\n ['0', 'B', 'Z'],\n ], fmt='%s', delimiter='\\t')\n np.savetxt(test_path, [['1', 'A', 'Y'],\n ['1', 'D', 'U'],\n ['1', 'D', 'U']\n ], fmt='%s', delimiter='\\t')\n cmd_fit = ('--loss-function', 'Logloss',\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--cd', cd_path,\n '-f', train_path,\n '-t', test_path,\n '-m', model_path,\n '--eval-file', test_eval_path,\n '-i', '5',\n '-T', '1',\n '--max-ctr-complexity', str(max_ctr_complexity),\n '--one-hot-max-size', str(one_hot_max_size),\n )\n cmd_calc = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_path,\n '-m', model_path,\n '-T', '1',\n '--output-path', calc_eval_path,\n )\n execute_catboost_fit('CPU', cmd_fit)\n yatest.common.execute(cmd_calc)\n assert(compare_evals(test_eval_path, calc_eval_path))\n\n\ndef do_test_object_importances(pool, loss_function, additional_train_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n object_importances_path = yatest.common.test_output_path('object_importances.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '10',\n '--boosting-type', 'Plain',\n '-T', '4',\n '-m', output_model_path,\n '--use-best-model', 'false'\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'ostr',\n 
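# 'ostr' mode computes object importances of the training documents for the given test set using the fitted model\n 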
'-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path,\n '-o', object_importances_path,\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(object_importances_path)]\n\n\[email protected]('loss_function', ['RMSE', 'Logloss', 'Poisson'])\[email protected]('leaf_estimation_iteration', ['1', '2'])\ndef test_object_importances(loss_function, leaf_estimation_iteration):\n additional_train_params = (\n '--leaf-estimation-method', 'Gradient',\n '--leaf-estimation-iterations', leaf_estimation_iteration\n )\n return do_test_object_importances(\n pool='adult',\n loss_function=loss_function,\n additional_train_params=additional_train_params\n )\n\n\ndef test_object_importances_with_target_border():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_object_importances_with_class_weights():\n return do_test_object_importances(\n pool='adult',\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_object_importances_with_target_border_and_class_weights():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\n# Create `num_tests` test files from `test_input_path`.\ndef split_test_to(num_tests, test_input_path):\n test_input_lines = open(test_input_path).readlines()\n test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]\n for testno in range(num_tests):\n test_path = test_paths[testno]\n test_lines = test_input_lines[testno::num_tests]\n open(test_path, 'wt').write(''.join(test_lines))\n return test_paths\n\n\n# Create a few shuffles from list of test files, for use with `-t` option.\ndef create_test_shuffles(test_paths, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n num_tests = len(test_paths)\n num_shuffles = num_tests # if num_tests < 3 else num_tests * (num_tests - 1)\n test_shuffles = set()\n while len(test_shuffles) < num_shuffles:\n test_shuffles.add(tuple(prng.permutation(test_paths)))\n return [','.join(shuffle) for shuffle in test_shuffles]\n\n\ndef fit_calc_cksum(fit_stem, calc_stem, test_shuffles):\n import hashlib\n last_cksum = None\n for i, shuffle in enumerate(test_shuffles):\n model_path = yatest.common.test_output_path('model{}.bin'.format(i))\n eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))\n execute_catboost_fit('CPU', fit_stem + (\n '-t', shuffle,\n '-m', model_path,\n ))\n yatest.common.execute(calc_stem + (\n '-m', model_path,\n '--output-path', eval_path,\n ))\n cksum = hashlib.md5(open(eval_path).read()).hexdigest()\n if last_cksum is None:\n last_cksum = cksum\n continue\n assert(last_cksum == cksum)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_order_independent(boosting_type, num_tests):\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = (\n '--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (\n CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', 
test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n fit_stem = (\n '--loss-function', 'QueryRMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\ndef test_multiple_eval_sets_no_empty():\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = ('--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n test0_path = yatest.common.test_output_path('test0.txt')\n open(test0_path, 'wt').write('')\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', fit_stem + (\n '-t', ','.join((test_input_path, test0_path))\n ))\n\n\[email protected]('loss_function', ['RMSE', 'QueryRMSE'])\ndef test_multiple_eval_sets(loss_function):\n num_tests = 5\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n eval_path = yatest.common.test_output_path('test.eval')\n test_paths = list(reversed(split_test_to(num_tests, test_input_path)))\n cmd = ('--loss-function', loss_function,\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(eval_path)]\n\n\ndef test_multiple_eval_sets_err_log():\n num_tests = 3\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n test_err_log_path = yatest.common.test_output_path('test-err.log')\n json_log_path = yatest.common.test_output_path('json.log')\n test_paths = reversed(split_test_to(num_tests, test_input_path))\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_err_log_path,\n '--json-log', json_log_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(test_err_log_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\n# Cast<float>(CityHash('Quvena')) is QNaN\n# Cast<float>(CityHash('Sineco')) is SNaN\[email protected]('cat_value', ['Normal', 'Quvena', 'Sineco'])\ndef test_const_cat_feature(cat_value):\n\n def make_a_set(nrows, value, seed=20181219, prng=None):\n if prng is None:\n prng = 
np.random.RandomState(seed=seed)\n label = prng.randint(0, nrows, [nrows, 1])\n feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))\n return np.concatenate([label, feature], axis=1)\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=20181219)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_model_metadata():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-w', '0.1',\n '--set-metadata-from-freeargs',\n 'A', 'A',\n 'BBB', 'BBB',\n 'CCC', 'A'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(output_model_path)\n\n assert 'A' == py_catboost.get_metadata()['A']\n assert 'BBB' == py_catboost.get_metadata()['BBB']\n assert 'CCC' == py_catboost.get_metadata()['CCC']\n\n\ndef test_fit_multiclass_with_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--use-best-model', 'false',\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', eval_path\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_extract_multiclass_labels_from_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n 
np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-T', '4',\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])\ndef test_save_class_labels_from_data(loss_function):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n if loss_function in MULTICLASS_LOSSES:\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n elif loss_function == 'Logloss':\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n else:\n assert 'class_params' not in py_catboost.get_metadata()\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_apply_multiclass_labels_from_data(prediction_type):\n 
labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type,\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n if prediction_type in ['Probability', 'RawFormulaVal']:\n with open(eval_path, \"rt\") as f:\n for line in f:\n assert line[:-1] == 'SampleId\\t{}:Class=0.0\\t{}:Class=7.0\\t{}:Class=9999.0\\t{}:Class=10000000.0' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n break\n else: # Class\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in labels\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', loss_function,\n '--classes-count', '4',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert 
json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type\n )\n\n yatest.common.execute(calc_cmd)\n\n if prediction_type == 'RawFormulaVal':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity\n\n if prediction_type == 'Probability':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert (abs(float(line[:-1].split()[1])) < 1e-307\n and abs(float(line[:-1].split()[4])) < 1e-307) # fictitious probabilities must be virtually zero\n\n if prediction_type == 'Class':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_set_class_names_implicitly():\n INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']\n SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3, 4]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == SAVED_CLASS_LABELS\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n yatest.common.execute(calc_cmd)\n\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert 
line[:-1] == 'SampleId\\t{}:Class=19.2\\t{}:Class=7.\\t{}:Class=8.0\\t{}:Class=a\\t{}:Class=bc\\tClass' \\\n .format(*(['RawFormulaVal'] * 5))\n else:\n label = line[:-1].split()[-1]\n assert label in SAVED_CLASS_LABELS\n\n return [local_canonical_file(eval_path)]\n\n\nCANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_multiclass_model_backward_compatibility(prediction_type):\n model = catboost.CatBoost()\n model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)\n\n assert 'class_params' not in model.get_metadata()\n\n pool = catboost.Pool(data_file('cloudness_small', 'train_small'),\n column_description=data_file('cloudness_small', 'train.cd'))\n model.predict(data=pool, prediction_type='Class')\n model.eval_metrics(data=pool, metrics=['Accuracy'])\n\n output_path = yatest.common.test_output_path('out.txt')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'train_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,\n '--prediction-type', prediction_type,\n '--output-path', output_path,\n )\n\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(output_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('use_best_model', ['true', 'false'])\ndef test_learning_rate_auto_set(boosting_type, use_best_model):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', use_best_model,\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_paths_with_dsv_scheme():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', 'dsv://' + data_file('querywise', 'train'),\n '-t', 'dsv://' + data_file('querywise', 'test'),\n '--column-description', 'dsv://' + data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_skip_train():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n json_log_path = yatest.common.test_output_path('json_log.json')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--json-log', json_log_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n 
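# the JSON log is passed through remove_time_from_json so time-dependent fields do not break the canonical comparison\n 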
local_canonical_file(remove_time_from_json(json_log_path))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd', output_eval_path_first)\n run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['QueryRMSE', 'RMSE'])\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path_first)\n run_catboost('train', 'test', 'train.cd.weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n\ndef test_snapshot_without_random_seed():\n\n def run_catboost(iters, eval_path, additional_params=None):\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', str(iters),\n '-T', '4',\n '--use-best-model', 'False',\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n tmpfile = 'test_data_dumps'\n with open(tmpfile, 'w') as f:\n execute_catboost_fit('CPU', cmd, stdout=f)\n with open(tmpfile, 'r') as output:\n line_count = sum(1 for line in output)\n return line_count\n\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = 
yatest.common.test_output_path('test.cbp')\n additional_params = ['--snapshot-file', progress_path, '-m', model_path]\n\n first_line_count = run_catboost(15, eval_path, additional_params=additional_params)\n second_line_count = run_catboost(30, eval_path, additional_params=additional_params)\n third_line_count = run_catboost(45, eval_path, additional_params=additional_params)\n assert first_line_count == second_line_count == third_line_count\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n cb_model = catboost.CatBoost()\n cb_model.load_model(model_path)\n random_seed = cb_model.random_seed_\n run_catboost(45, canon_eval_path, additional_params=['-r', str(random_seed)])\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_interval():\n\n def run_with_timeout(cmd, timeout):\n try:\n execute_catboost_fit('CPU', cmd, timeout=timeout)\n except ExecutionTimeoutError:\n return True\n return False\n\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n ]\n\n measure_time_iters = 100\n exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', cmd + ['-i', str(measure_time_iters)]), number=1)\n\n SNAPSHOT_INTERVAL = 1\n TIMEOUT = 5\n TOTAL_TIME = 25\n iters = int(TOTAL_TIME / (exec_time / measure_time_iters))\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n canon_params = cmd + ['--eval-file', canon_eval_path, '-i', str(iters)]\n execute_catboost_fit('CPU', canon_params)\n\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n model_path = yatest.common.test_output_path('model.bin')\n params = cmd + ['--snapshot-file', progress_path,\n '--snapshot-interval', str(SNAPSHOT_INTERVAL),\n '-m', model_path,\n '--eval-file', eval_path,\n '-i', str(iters)]\n\n was_timeout = False\n while run_with_timeout(params, TIMEOUT):\n was_timeout = True\n assert was_timeout\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_different_params():\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n '-i', '10',\n '--snapshot-file', 'snapshot.cbp'\n ]\n\n cmd_1 = cmd + ['--eval-metric', 'Logloss']\n cmd_2 = cmd + ['--eval-metric', 'Accuracy']\n execute_catboost_fit('CPU', cmd_1)\n try:\n execute_catboost_fit('CPU', cmd_2)\n except ExecutionError:\n return\n\n assert False\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QuerySoftMax',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--leaf-estimation-method', leaf_estimation_method,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n 
'-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_shap_verbose():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n output_log = yatest.common.test_output_path('log')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '12',\n '--fstr-type', 'ShapValues',\n '-T', '4',\n '-m', output_model_path,\n ]\n with open(output_log, 'w') as log:\n yatest.common.execute(cmd_shap, stdout=log)\n with open(output_log, 'r') as log:\n line_count = sum(1 for line in log)\n assert line_count == 5\n\n\ndef test_shap_approximate():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Approximate',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\ndef test_shap_exact():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Exact',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', 
sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bernoulli_bootstrap(subsample, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':\n return pytest.xfail(reason='MLTOOLS-1801')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n 
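# NOTE (editorial annotation): the pairwise losses exercised here (YetiRankPairwise,\n        # PairLogitPairwise) also consume explicit pair files, supplied just below\n        # through --learn-pairs / --test-pairs.\n        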
'--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd, env=dict(MKL_CBWR='SSE4_2'))\n eps = 0 if yatest.common.context.sanitize is None else 0.1\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool(eps))]\n\n\[email protected]('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])\[email protected]('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])\ndef test_bad_metrics_combination(loss_function, metric):\n BAD_PAIRS = {\n 'Logloss': ['RMSE', 'MultiClass'],\n 'RMSE': ['Logloss', 'MultiClass'],\n 'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],\n 'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],\n 'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],\n 'YetiRank': ['Logloss', 'RMSE', 'MultiClass']\n }\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\\t')\n\n data = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, data, fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, data, fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '4',\n '-T', '4',\n )\n\n try:\n execute_catboost_fit('CPU', cmd)\n except Exception:\n assert metric in BAD_PAIRS[loss_function]\n return\n\n assert metric not in BAD_PAIRS[loss_function]\n\n\[email protected]('metric', [('good', ',AUC,'), ('bad', ',')])\ndef test_extra_commas(metric):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', metric[1]\n )\n if metric[0] == 'good':\n execute_catboost_fit('CPU', cmd)\n if metric[0] == 'bad':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,\n border_count=128, other_options=()):\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--cd', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-x', str(border_count),\n '--feature-border-type', 'GreedyLogSum',\n '-m', model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd + other_options)\n\n\ndef test_quantized_pool():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = 
yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_features():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path,\n other_options=('-I', '5',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path,\n other_options=('-I', '5',)\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_groupid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_during_quantization():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n other_options=('-I', '18-36',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_quantized_test():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),\n 
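# NOTE (editorial annotation): the 'quantized://' prefix makes catboost read a\n        # pre-quantized binary pool; the assertion below checks that training on the\n        # quantized learn/test pair reproduces the eval file obtained from the raw pools.\n        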
cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_with_large_grid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n border_count=1024\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),\n test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_learn_without_header_eval_with_header():\n train_path = yatest.common.test_output_path('airlines_without_header')\n with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:\n with open(train_path, 'w') as without_header_file:\n without_header_file.writelines(with_header_file.readlines()[1:])\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd_fit = (\n '--loss-function', 'Logloss',\n '-f', train_path,\n '--cd', data_file('airlines_5K', 'cd'),\n '-i', '10',\n '-m', model_path\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('airlines_5K', 'test'),\n '--cd', data_file('airlines_5K', 'cd'),\n '-m', model_path,\n '--has-header'\n )\n yatest.common.execute(cmd_calc)\n\n\ndef test_group_weights_file():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, cd_file, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.cd', True)\n run_catboost(second_eval_path, 'train.cd.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_group_weights_file_quantized():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, train, test, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', 'quantized://' + data_file('querywise', train),\n '-t', 'quantized://' + data_file('querywise', test),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.quantized', 'test.quantized', 
True)\n run_catboost(second_eval_path, 'train.quantized.group_weight', 'test.quantized.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_mode_roc():\n eval_path = yatest.common.test_output_path('eval.tsv')\n output_roc_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--counter-calc-method', 'SkipTest',\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n roc_cmd = (\n CATBOOST_PATH,\n 'roc',\n '--eval-file', eval_path,\n '--output-path', output_roc_path\n )\n yatest.common.execute(roc_cmd)\n\n return local_canonical_file(output_roc_path)\n\n\[email protected]('pool', ['adult', 'higgs', 'adult_nan'])\ndef test_convert_model_to_json(pool):\n output_model_path = yatest.common.test_output_path('model')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '-m', output_model_path,\n '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',\n '--model-format', 'CatboostBinary,Json'\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path_bin = yatest.common.test_output_path('predict_test_bin.eval')\n formula_predict_path_json = yatest.common.test_output_path('predict_test_json.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.json',\n '--model-format', 'Json',\n '--output-path', formula_predict_path_json\n )\n yatest.common.execute(calc_cmd)\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.bin',\n '--output-path', formula_predict_path_bin\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_bin))\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_json))\n\n\nLOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']\n\n\[email protected]('loss_function', LOSS_FUNCTIONS_NO_MAPE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_adult_pool(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')\n quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', quantized_train_file,\n '-t', quantized_test_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n cd_file = data_file('quantized_adult', 'pool.cd')\n test_file = data_file('quantized_adult', 'test_small.tsv')\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return 
[local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_with_one_thread(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n quantized_train_file = 'quantized://' + data_file('querywise', 'train.quantized')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', quantized_train_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '1',\n '-m', output_model_path,\n '--target-border', '0.5',\n )\n print(cmd)\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_eval_result_on_different_pool_type():\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def run_catboost(train, test, eval_path):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--border-count', '128',\n '-f', train,\n '-t', test,\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--eval-file', eval_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n run_catboost(get_pool_path('train'), get_pool_path('test'), output_eval_path)\n run_catboost(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path)\n\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_apply_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', get_pool_path('train', True),\n '--test-set', get_pool_path('test', True),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test'),\n '--column-description', cd_file,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test', True),\n '--model-file', output_model_path,\n '--output-path', output_quantized_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n\n\ndef test_apply_output_column_by_idx():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n learn = data_file('black_friday', 'train')\n test = data_file('black_friday', 'test')\n cd = data_file('black_friday', 'cd')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '--learn-set', learn,\n '--test-set', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '--model-file', output_model_path,\n '--has-header'\n )\n 
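# NOTE (editorial annotation): the calc command assembled below requests extra output\n    # columns by source index using the '#<index>:<alias>' syntax, e.g.\n    # --output-columns RawFormulaVal,GroupId,SampleId,#0:Gender,...,Label\n    # so each echoed column can be compared one-to-one with the input test file.\n    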
execute_catboost_fit('CPU', cmd)\n\n column_names = [\n 'Gender',\n 'Age',\n 'Occupation',\n 'City_Category',\n 'Stay_In_Current_City_Years',\n 'Marital_Status',\n 'Product_Category_1',\n 'Product_Category_2',\n 'Product_Category_3',\n ]\n output_columns = ['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)]\n output_columns = ['RawFormulaVal'] + ['GroupId', 'SampleId'] + output_columns + ['Label']\n output_columns = ','.join(output_columns)\n\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', test,\n '--column-description', cd,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--output-columns', output_columns,\n '--has-header'\n )\n yatest.common.execute(cmd)\n\n with open(output_eval_path, 'r') as f:\n f.readline()\n eval_lines = f.readlines()\n with open(test, 'r') as f:\n f.readline()\n test_lines = f.readlines()\n\n assert len(eval_lines) == len(test_lines)\n for i in range(len(eval_lines)):\n eval_line = eval_lines[i].split('\\t')[1:] # skip RawFormulaVal\n test_line = test_lines[i].split('\\t')\n\n for eval_column, test_column in zip(eval_line, test_line):\n assert eval_column == test_column\n\n\[email protected](\n 'dataset_name,loss_function,has_pairs,has_group_weights',\n [\n ('adult_small_broken_features', 'Logloss', False, False),\n ('querywise_broken_pairs', 'RMSE', True, False),\n ('querywise_broken_group_weights', 'RMSE', False, True),\n ]\n)\ndef test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n # iterations and threads are specified just to finish fast if test is xpass\n cmd = (\n '--loss-function', loss_function,\n '--learn-set', data_file('broken_format', dataset_name, 'train'),\n '--test-set', data_file('broken_format', dataset_name, 'test'),\n '--column-description', data_file('broken_format', dataset_name, 'train.cd'),\n '-i', '1',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if has_pairs:\n cmd += (\n '--learn-pairs', data_file('broken_format', dataset_name, 'train.pairs'),\n '--test-pairs', data_file('broken_format', dataset_name, 'test.pairs'),\n )\n if has_group_weights:\n cmd += (\n '--learn-group-weights', data_file('broken_format', dataset_name, 'train.group_weights'),\n '--test-group-weights', data_file('broken_format', dataset_name, 'test.group_weights'),\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]_fixtures('compressed_data')\[email protected](\n 'loss_function,eval_metric,boosting_type',\n [\n ('QueryRMSE', 'NDCG', 'Plain'),\n ('QueryRMSE', 'NDCG', 'Ordered'),\n # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise\n ('YetiRankPairwise', 'NDCG', 'Plain'),\n ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ],\n ids=[\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',\n 'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',\n 
'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'\n ]\n)\ndef test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', os.path.join(compressed_data.name, 'mslr_web1k', 'train'),\n '-t', os.path.join(compressed_data.name, 'mslr_web1k', 'test'),\n '--column-description', os.path.join(compressed_data.name, 'mslr_web1k', 'cd.with_cat_features'),\n '--boosting-type', boosting_type,\n '-i', '100',\n '-T', '8',\n '--eval-metric', eval_metric,\n '--metric-period', '100',\n '--use-best-model', 'false',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]\n\n\ndef test_gradient_walker():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n '--max-ctr-complexity', '4',\n '--leaf-estimation-iterations', '10',\n '--leaf-estimation-backtracking', 'AnyImprovement',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\n# training with pairwise scoring with categorical features on CPU does not yet support one-hot features\n# so they are disabled by default, explicit non-default specification should be an error\[email protected](\n 'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],\n ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']\n)\ndef test_groupwise_with_bad_one_hot_max_size(loss_function):\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-T', '4',\n '--eval-metric', 'NDCG',\n '--one_hot_max_size', '10'\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_load_quantized_pool_with_double_baseline():\n # Dataset with 3 random columns, first column is Target, seconds columns is Num, third column\n # is Baseline.\n #\n # There are only 10 rows in dataset.\n cmd = (\n '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),\n '-i', '10')\n\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_write_predictions_to_streams():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval')\n\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--eval-file', output_eval_path,\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', output_model_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stdout',\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stdout:\n yatest.common.execute(calc_cmd, stdout=catboost_stdout)\n\n assert 
compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stderr'\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stderr:\n yatest.common.execute(calc_cmd, stderr=catboost_stderr)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mvs_bootstrap(boosting_type):\n def run_catboost(eval_path, mvs_sample_rate):\n cmd = [\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--bootstrap-type', 'MVS',\n '--subsample', mvs_sample_rate,\n '-i', '50',\n '-w', '0.03',\n '-T', '6',\n '-r', '0',\n '--leaf-estimation-iterations', '10',\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n ref_eval_path = yatest.common.test_output_path('test.eval')\n run_catboost(ref_eval_path, '0.5')\n\n for sample_rate in ('0.1', '0.9'):\n eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))\n run_catboost(eval_path, sample_rate)\n assert (filecmp.cmp(ref_eval_path, eval_path) is False)\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_simple_ctr():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n simple_ctr = ','.join((\n 'Borders:TargetBorderCount=15',\n 'Buckets:TargetBorderCount=15',\n 'Borders:TargetBorderType=MinEntropy',\n 'Counter:CtrBorderCount=20',\n ))\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--simple-ctr', simple_ctr,\n ))\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_output_options():\n output_options_path = 'training_options.json'\n train_dir = 'catboost_info'\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--train-dir', train_dir,\n '--training-options-file', output_options_path,\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(os.path.join(train_dir, output_options_path))\n\n\ndef test_target_border():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--target-border', '0.3'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_monotonic_constraint():\n train_pool = catboost.Pool(\n data_file('higgs', 'train_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n test_pool = catboost.Pool(\n data_file('higgs', 'test_small'),\n 
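# NOTE (editorial annotation): the loop further below perturbs each constrained\n        # feature across its border grid and asserts that the sign of the prediction\n        # deltas agrees with the corresponding entry of monotone_constraints.\n        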
column_description=data_file('higgs', 'train.cd')\n )\n monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]\n model = catboost.CatBoostRegressor(\n n_estimators=100,\n learning_rate=0.2,\n monotone_constraints=monotone_constraints,\n verbose=False\n ).fit(train_pool, eval_set=test_pool)\n\n dummy_data = np.zeros((1, test_pool.num_col()))\n dummy_target = np.zeros(len(dummy_data))\n feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)\n for feature_index, feature_name in enumerate(model.feature_names_):\n monotonicity = monotone_constraints[feature_index]\n if monotonicity == 0:\n continue\n feature_borders = feature_stats[feature_name]['borders']\n if len(feature_borders) == 0:\n continue\n mid_values = (feature_borders[:-1] + feature_borders[1:]) / 2\n min_value = feature_borders[0] - 1\n max_value = feature_borders[-1] + 1\n feature_values = np.array([min_value] + list(mid_values) + [max_value])\n for obj in test_pool.get_features():\n obj_variations = np.zeros((len(feature_values), test_pool.num_col()))\n obj_variations[:] = obj.reshape((1, -1))\n obj_variations[:, feature_index] = feature_values\n model_predicts = model.predict(obj_variations)\n prediction_deltas = model_predicts[1:] - model_predicts[:-1]\n assert np.all(prediction_deltas * monotonicity >= 0)\n\n\ndef test_different_formats_of_monotone_constraints():\n eval_path = yatest.common.test_output_path('eval.tsv')\n eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')\n eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train_with_id.cd'),\n '-i', '20'\n ]\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])\n assert not filecmp.cmp(eval_path_with_monotone1, eval_path)\n\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n params_file = yatest.common.test_output_path(\"params.json\")\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:\n json.dump({'monotone_constraints': constraints}, open(params_file, 'w'))\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n\nclass TestModelWithoutParams(object):\n\n @pytest.fixture(\n params=[\n ('cut-info', 'RMSE'),\n ('cut-params', 'RMSE'),\n ('cut-info', 'QueryRMSE'),\n ('cut-params', 'QueryRMSE'),\n ],\n ids=lambda param: '-'.join(param),\n )\n def model_etc(self, request):\n cut, loss = request.param\n model_json = yatest.common.test_output_path('model.json')\n learn_set = data_file('querywise', 'train')\n test_set = data_file('querywise', 'test')\n cd = data_file('querywise', 'train.cd')\n cmd = (\n '--loss-function', loss,\n '--learn-set', learn_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--iterations', '10',\n '--model-file', model_json,\n '--model-format', 'Json',\n '--use-best-model', 'false'\n )\n execute_catboost_fit('CPU', cmd)\n model = 
json.load(open(model_json))\n if cut == 'cut-info':\n model.pop('model_info')\n if cut == 'cut-params':\n model['model_info'].pop('params')\n json.dump(model, open(model_json, 'wt'))\n return model_json, learn_set, test_set, cd\n\n def test_ostr(self, model_etc):\n model_json, train_set, test_set, cd = model_etc\n ostr_result = yatest.common.test_output_path('result.txt')\n ostr_cmd = (\n CATBOOST_PATH, 'ostr',\n '--learn-set', train_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', ostr_result,\n )\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(ostr_cmd)\n\n @pytest.mark.parametrize('should_fail,fstr_type', [\n (False, 'FeatureImportance'),\n (False, 'PredictionValuesChange'),\n (True, 'LossFunctionChange'),\n (False, 'ShapValues'),\n ])\n def test_fstr(self, model_etc, fstr_type, should_fail):\n model_json, train_set, _, cd = model_etc\n fstr_result = yatest.common.test_output_path('result.txt')\n fstr_cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', train_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', fstr_result,\n '--fstr-type', fstr_type,\n )\n if should_fail:\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n else:\n yatest.common.execute(fstr_cmd)\n\n\ndef test_equal_feature_names():\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.equal_names'),\n ))\n\n\ndef enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):\n if eval_mode == 'OneVsOthers':\n baseline = 'Baseline_set_{set_idx}_fold_{fold_idx}'\n else:\n baseline = 'Baseline_fold_{fold_idx}'\n if not only_baseline:\n testing = 'Testing_set_{set_idx}_fold_{fold_idx}'\n dirs = []\n for set_idx in range(set_count):\n for fold_idx in range(offset, offset + fold_count):\n fold = baseline.format(fold_idx=fold_idx, set_idx=set_idx)\n if fold not in dirs:\n dirs += [fold]\n if not only_baseline:\n fold = testing.format(fold_idx=fold_idx, set_idx=set_idx)\n dirs += [fold]\n return dirs\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature(eval_mode, features_to_eval, offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in 
enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('offset', [0, 2])\ndef test_eval_feature_empty_feature_set(offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n eval_mode = 'OneVsNone'\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = 1\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=True):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('fold_size_unit', ['Object', 'Group'])\ndef test_eval_feature_timesplit(eval_mode, fold_size_unit):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n features_to_eval = '2-5;10-15'\n offset = 2\n fold_size = 500\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', fold_size_unit,\n '--fold-size', str(fold_size),\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n '--learn-timestamps', data_file('querywise', 'train.timestamps'),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\[email protected]('fstr_mode', ['fstr', 'model'])\ndef test_eval_feature_snapshot(eval_mode, features_to_eval, offset, fstr_mode):\n test_err_log = 
'test_error.log'\n fstr_file = 'fstrs'\n model_file = 'model.bin'\n fold_count = 2\n snapshot_interval = 1\n\n def make_cmd(summary, train_dir):\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '200',\n '-T', '4',\n '-w', '0.1',\n '--boost-from-average', 'False',\n '--permutations', '1',\n '--snapshot-interval', str(snapshot_interval),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '--feature-eval-output-file', summary,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n )\n if fstr_mode == 'fstr':\n cmd += ('--fstr-file', fstr_file,)\n else:\n cmd += (\n '--model-file', model_file,\n '--use-best-model', 'False',\n )\n return cmd\n\n reference_summary = yatest.common.test_output_path('reference_feature.eval')\n reference_dir = yatest.common.test_output_path('reference')\n yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))\n\n snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')\n snapshot_dir = yatest.common.test_output_path('snapshot')\n snapshot = yatest.common.test_output_path('eval_feature.snapshot')\n eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n resume_from_snapshot_count = 15\n for idx in range(resume_from_snapshot_count):\n timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1\n stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)\n yatest.common.execute(['rm', '-rf', snapshot_dir])\n yatest.common.execute(eval_with_snapshot_cmd)\n\n assert filecmp.cmp(reference_summary, snapshot_summary)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))\n if fstr_mode == 'fstr':\n assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))\n else:\n def load_json_model(model_path):\n model = catboost.CatBoost()\n model.load_model(model_path)\n model.save_model(model_path + '.json', format='json')\n with open(model_path + '.json') as json_model_file:\n json_model = json.load(json_model_file)\n json_model[\"model_info\"][\"output_options\"] = \"\"\n json_model[\"model_info\"][\"train_finish_time\"] = \"\"\n json_model[\"model_info\"][\"model_guid\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"snapshot_file\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"save_snapshot\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"train_dir\"] = \"\"\n return json_model\n assert load_json_model(pj(reference_dir, output_dir, model_file)) == load_json_model(pj(snapshot_dir, output_dir, model_file))\n\n\ndef test_eval_feature_snapshot_wrong_options():\n summary = yatest.common.test_output_path('eval_feature_summary')\n snapshot = yatest.common.test_output_path('eval_feature_snapshot')\n\n def make_cmd(fold_size):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n 
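# NOTE (editorial annotation): the test below first interrupts a run that writes the\n            # snapshot with --fold-size 40, then expects the resumed run with --fold-size 20\n            # to fail, i.e. resuming is rejected when the fold size differs from the snapshot.\n            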
'--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', str(fold_size),\n '--snapshot-file', snapshot\n )\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n stop_after_timeout(cmd=make_cmd(fold_size=40), timeout=3)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd(fold_size=20))\n\n\ndef test_eval_feature_parse_timestamps():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd(timestamps_file):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--learn-timestamps', data_file('querywise', timestamps_file),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(make_cmd('train.timestamps'))\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd('train.group_weights'))\n\n\ndef test_eval_feature_relative_fold_size():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd():\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '100',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--relative-fold-size', '0.1',\n )\n\n yatest.common.execute(make_cmd())\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd() + ('--fold-size', '40',))\n\n\nTEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']\n\n\[email protected]('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])\[email protected]('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('eval_metric_use_weights', [True, False, None],\n ids=['eval_weights=' + str(mode) for mode in [True, False, None]])\[email protected]('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('custom_metric_use_weights', [True, False, None],\n ids=['custom_weights=' + str(mode) for mode in [True, False, None]])\ndef test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n if dataset_has_weights:\n train_pool_filename = data_file('adult_weight', 'train_weight')\n test_pool_filename = 
data_file('adult_weight', 'test_weight')\n pool_cd_filename = data_file('adult_weight', 'train.cd')\n else:\n train_pool_filename = data_file('adult', 'train_small')\n test_pool_filename = data_file('adult', 'test_small')\n pool_cd_filename = data_file('adult', 'train.cd')\n\n eval_metric = eval_metric_loss\n if eval_metric == 'AUC':\n eval_metric += ':hints=skip_train~false'\n if eval_metric_use_weights is not None:\n eval_metric += ';' if eval_metric_loss == 'AUC' else ':'\n eval_metric += 'use_weights=' + str(eval_metric_use_weights)\n\n custom_metric = custom_metric_loss\n if custom_metric == 'AUC':\n custom_metric += ':hints=skip_train~false'\n if custom_metric_use_weights is not None:\n custom_metric += ';' if custom_metric_loss == 'AUC' else ':'\n custom_metric += 'use_weights=' + str(custom_metric_use_weights)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', train_pool_filename,\n '-t', test_pool_filename,\n '--cd', pool_cd_filename,\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-metric', eval_metric,\n '--custom-metric', custom_metric,\n )\n should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)\n try:\n execute_catboost_fit('CPU', cmd)\n except ExecutionError:\n assert should_fail\n return\n for filename in [learn_error_path, test_error_path]:\n with open(filename, 'r') as f:\n metrics_descriptions = f.readline().split('\\t')[1:] # without 'iter' column\n metrics_descriptions[-1] = metrics_descriptions[-1][:-1] # remove '\\n' symbol\n unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])\n assert len(metrics_descriptions) == len(unique_metrics_descriptions)\n expected_objective_metric_description = 'Logloss'\n\n if dataset_has_weights:\n expected_eval_metric_description = \\\n eval_metric_loss if eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)\n\n if custom_metric_loss == 'AUC':\n expected_custom_metrics_descriptions = \\\n ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]\n else:\n expected_custom_metrics_descriptions = (\n [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']\n if custom_metric_use_weights is None\n else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])\n else:\n expected_eval_metric_description = eval_metric_loss\n expected_custom_metrics_descriptions = [custom_metric_loss]\n assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_leafwise_scoring():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', cmd)\n learn_errors_log = open(learn_error_path).read()\n execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])\n new_learn_errors_log = open(learn_error_path).read()\n assert new_learn_errors_log == learn_errors_log\n\n\ndef test_group_features():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_predictions_path = 
yatest.common.test_output_path('test_predictions.tsv')\n model_path = yatest.common.test_output_path('model.bin')\n fit_cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '-m', model_path,\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', fit_cmd)\n calc_cmd = [\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', test_predictions_path,\n '--output-columns', 'Probability'\n ]\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_predictions_path)]\n\n\ndef test_model_sum():\n model_path = yatest.common.test_output_path('model.bin')\n model_eval = yatest.common.test_output_path('model_eval.txt')\n execute_catboost_fit('CPU', [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', model_path,\n '-t', data_file('adult', 'test_small'),\n '--eval-file', model_eval,\n '--output-columns', 'SampleId,RawFormulaVal',\n ])\n\n sum_path = yatest.common.test_output_path('sum.bin')\n yatest.common.execute([\n CATBOOST_PATH,\n 'model-sum',\n '--model-with-weight', '{}={}'.format(model_path, 0.75),\n '--model-with-weight', '{}={}'.format(model_path, 0.25),\n '--output-path', sum_path,\n ])\n\n sum_eval = yatest.common.test_output_path('sum_eval.txt')\n yatest.common.execute([\n CATBOOST_PATH,\n 'calc',\n '-m', sum_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', sum_eval,\n ])\n yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])\n\n\ndef test_external_feature_names():\n fstr_cd_with_id_path = yatest.common.test_output_path('fstr_cd_with_id.tsv')\n fstr_cd_without_id_path = yatest.common.test_output_path('fstr_cd_without_id.tsv')\n\n for cd_has_feature_names in [False, True]:\n if cd_has_feature_names:\n cd_file = data_file('adult', 'train_with_id.cd')\n fstr_path = fstr_cd_with_id_path\n else:\n cd_file = data_file('adult', 'train.cd')\n fstr_path = fstr_cd_without_id_path\n\n cmd = (\n '--loss-function', 'Logloss',\n '--target-border', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--feature-names-path', data_file('adult', 'feature_names'),\n '--fstr-type', 'FeatureImportance',\n '--fstr-file', fstr_path\n )\n execute_catboost_fit('CPU', cmd)\n\n assert filecmp.cmp(fstr_cd_with_id_path, fstr_cd_without_id_path)\n\n return [local_canonical_file(fstr_cd_with_id_path)]\n\n\ndef test_diffusion_temperature():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--langevin', 'True',\n '--diffusion-temperature', '1000',\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef test_model_shrink_correct(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', 
data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])\ndef test_model_shrink_incorrect(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('average', ['Macro', 'Micro', 'Weighted'])\ndef test_total_f1_params(average):\n return do_test_eval_metrics(\n metric='TotalF1:average=' + average,\n metric_period='1',\n train=data_file('cloudness_small', 'train_small'),\n test=data_file('cloudness_small', 'test_small'),\n cd=data_file('cloudness_small', 'train.cd'),\n loss_function='MultiClass'\n )\n\n\ndef test_eval_metrics_with_pairs():\n do_test_eval_metrics(\n metric='PairAccuracy',\n metric_period='1',\n train=data_file('querywise', 'train'),\n test=data_file('querywise', 'test'),\n cd=data_file('querywise', 'train.cd'),\n loss_function='PairLogit',\n additional_train_params=(\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs')\n ),\n additional_eval_params=(\n '--input-pairs', data_file('querywise', 'test.pairs')\n )\n )\n\n\ndef test_tweedie():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--loss-function', 'Tweedie:variance_power=1.5',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '-i', '100',\n '--learning-rate', '0.5',\n '--learn-err-log', learn_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\ndef test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = 
data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [\n local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(test_eval_path)\n ]\n\n\[email protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n return [\n local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(test_eval_path)\n ]\n\n\[email protected]('grow_policy', GROW_POLICIES)\ndef test_shrink_model_with_text_features(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n loss_function = 'MultiClass'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n\n dictionaries = 
[{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--text-processing', json.dumps(text_processing),\n '--grow-policy', grow_policy,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'true',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss'])\ndef test_virtual_ensembles(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train_small')\n test_path = data_file('querywise', 'test') if loss_function in REGRESSION_LOSSES else data_file('adult', 'test_small')\n cd_path = data_file('querywise', 'train.cd') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train.cd')\n test_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '--posterior-sampling', 'true',\n '--eval-file', test_eval_path,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n ]\n if loss_function == 'RMSEWithUncertainty':\n cmd += ['--prediction-type', 'RMSEWithUncertainty']\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--virtual-ensembles-count', '1',\n '--prediction-type', 'VirtEnsembles',\n )\n yatest.common.execute(calc_cmd)\n assert compare_evals(test_eval_path, formula_predict_path, skip_header=True)\n\n\[email protected]('virtual_ensembles_count', ['1', '10'])\[email protected]('prediction_type', ['TotalUncertainty', 'VirtEnsembles'])\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MultiClass'])\ndef test_uncertainty_prediction(virtual_ensembles_count, prediction_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n pool_names = {\n 'RMSE' : 'querywise',\n 'RMSEWithUncertainty' : 'querywise',\n 'Logloss' : 'adult',\n 'MultiClass' : 'cloudness_small'\n }\n pool_name = pool_names[loss_function]\n train_path = data_file(pool_name, 'train') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train_small')\n test_path = data_file(pool_name, 'test') if 
loss_function in REGRESSION_LOSSES else data_file(pool_name, 'test_small')\n cd_path = data_file(pool_name, 'train.cd') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '--posterior-sampling', 'true',\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--virtual-ensembles-count', virtual_ensembles_count,\n '--prediction-type', prediction_type,\n )\n yatest.common.execute(calc_cmd)\n\n model = catboost.CatBoost()\n model.load_model(output_model_path)\n pool = catboost.Pool(test_path, column_description=cd_path)\n py_preds = model.virtual_ensembles_predict(\n pool,\n prediction_type=prediction_type,\n virtual_ensembles_count=int(virtual_ensembles_count))\n\n cli_preds = np.genfromtxt(\n formula_predict_path,\n delimiter='\\t',\n dtype=float,\n skip_header=True)\n assert(np.allclose(py_preds.reshape(-1,), cli_preds[:, 1:].reshape(-1,), rtol=1e-10))\n\n return local_canonical_file(formula_predict_path)\n\n\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty'])\ndef test_uncertainty_prediction_requirements(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'VirtEnsembles'\n )\n try:\n yatest.common.execute(calc_cmd)\n except:\n return\n # assert replaced to warning\n # assert False\n\n\nDICTIONARIES_OPTIONS = [\n {\n \"Simple\": \"token_level_type=Word:occurrence_lower_bound=50\"\n },\n {\n \"UniGramOccur5\": \"occurrence_lower_bound=5:token_level_type=Letter\",\n \"BiGramOccur2\": \"occurrence_lower_bound=2:gram_order=2:token_level_type=Letter\",\n \"WordDictOccur1\": \"occurrence_lower_bound=1:token_level_type=Word\",\n \"WordDictOccur2\": \"occurrence_lower_bound=2:token_level_type=Word\",\n \"WordDictOccur3\": \"occurrence_lower_bound=3:token_level_type=Word\"\n },\n {\n \"Unigram\": \"gram_order=1:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Bigram\": \"gram_order=2:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Trigram\": \"gram_order=3:token_level_type=Letter:occurrence_lower_bound=50\"\n },\n {\n \"Letter\": \"token_level_type=Letter:occurrence_lower_bound=50\",\n \"Word\": \"token_level_type=Word:occurrence_lower_bound=50\"\n }\n]\n\n\[email protected]('dictionaries', DICTIONARIES_OPTIONS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_text_processing_options(dictionaries, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = 
yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])\n feature_estimators = 'BM25,BoW,NaiveBayes'\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--dictionaries', dictionaries,\n '--feature-calcers', feature_estimators,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fit_with_per_feature_text_options(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n text_processing = {\n 'tokenizers': [\n {'tokenizer_id': 'Space', 'delimiter': ' '},\n {'tokenizer_id': 'Comma', 'delimiter': ','},\n ],\n 'dictionaries': [\n {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},\n ],\n 'feature_processing': {\n '0': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n '1': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},\n ],\n '2': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n }\n }\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, 
output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_embeddings_train(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n '-t', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n '--column-description', ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(\n output_model_path,\n ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,\n calc_eval_path,\n output_columns=['RawFormulaVal']\n )\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_dump_options():\n snapshot_path = yatest.common.test_output_path('snapshot.bin')\n key = 'summary'\n value = '{\"key1\":\"value1\", \"key2\":\"value2\"}'\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--snapshot-file', snapshot_path,\n '--use-best-model', 'false',\n '--set-metadata-from-freeargs', '--', key, value,\n )\n execute_catboost_fit('CPU', cmd)\n\n options_path = yatest.common.test_output_path('options.json')\n dump_options_cmd = (\n get_catboost_binary_path(),\n 'dump-options',\n '--input', snapshot_path,\n '--output', options_path\n )\n yatest.common.execute(dump_options_cmd)\n with open(options_path) as options:\n options_json = json.load(options)\n assert options_json['metadata'][key] == value\n\n\ndef prepare_pool_metainfo_with_feature_tags():\n pool_metainfo = {\n 'tags': {\n 'A': {\n 'features': [0, 1, 2, 3, 4, 5, 6, 7]\n },\n 'B': {\n 'features': [12, 13, 14, 15, 16]\n },\n 'C': {\n 'features': [5, 6, 7, 8, 9, 10, 11, 12, 13]\n }\n }\n }\n pool_metainfo_path = yatest.common.test_output_path('pool_metainfo.json')\n with open(pool_metainfo_path, 'w') as f:\n json.dump(pool_metainfo, f)\n\n return pool_metainfo, pool_metainfo_path\n\n\ndef test_feature_tags_in_ignore_features():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n )\n\n for ignored_tags in (['A'], ['A', 'B'], ['B', 'C']):\n output_eval_path_1 = yatest.common.test_output_path('1_test.eval')\n ignored_features = sum((pool_metainfo['tags'][tag]['features'] for tag in ignored_tags), [])\n cmd_1 = base_cmd + (\n '--eval-file', output_eval_path_1,\n '--ignore-features', ':'.join(map(str, ignored_features)),\n )\n\n output_eval_path_2 = 
yatest.common.test_output_path('2_test.eval')\n cmd_2 = base_cmd + (\n '--eval-file', output_eval_path_2,\n '--ignore-features', ':'.join('#{}'.format(tag) for tag in ignored_tags),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_eval_path_1, output_eval_path_2)\n\n\ndef test_feature_tags_in_features_for_select():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'select-features',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n '--num-features-to-select', '3',\n '--features-selection-algorithm', 'RecursiveByPredictionValuesChange',\n '--features-selection-steps', '2',\n '--train-final-model',\n )\n\n for selection_tags in (['A', 'B'], ['A', 'C'], ['B', 'C'], ['A', 'B', 'C']):\n output_summary_path_1 = yatest.common.test_output_path('1_summary.json')\n features_for_select = sum((pool_metainfo['tags'][tag]['features'] for tag in selection_tags), [])\n cmd_1 = base_cmd + (\n '--features-selection-result-path', output_summary_path_1,\n '--features-for-select', ','.join(map(str, features_for_select)),\n )\n\n output_summary_path_2 = yatest.common.test_output_path('2_summary.json')\n cmd_2 = base_cmd + (\n '--features-selection-result-path', output_summary_path_2,\n '--features-for-select', ','.join('#{}'.format(tag) for tag in selection_tags),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_summary_path_1, output_summary_path_2)\n\n\ndef test_feature_tags_in_features_to_evaluate():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--feature-eval-mode', 'OneVsAll',\n '-i', '30',\n '-T', '4',\n '--fold-count', '2',\n '--fold-size-unit', 'Object',\n '--fold-size', '50'\n )\n\n features_to_evaluate_1 = []\n features_to_evaluate_2 = []\n for tags_set in (['A'], ['A', 'B'], ['B', 'C']):\n features_set = sum((pool_metainfo['tags'][tag]['features'] for tag in tags_set), [])\n features_to_evaluate_1.append(','.join(map(str, features_set)))\n features_to_evaluate_2.append(','.join('#{}'.format(tag) for tag in tags_set))\n\n output_eval_path_1 = yatest.common.test_output_path('1_feature.eval')\n cmd_1 = base_cmd + (\n '--feature-eval-output-file', output_eval_path_1,\n '--features-to-evaluate', ';'.join(map(str, features_to_evaluate_1)),\n )\n\n output_eval_path_2 = yatest.common.test_output_path('2_feature.eval')\n cmd_2 = base_cmd + (\n '--feature-eval-output-file', output_eval_path_2,\n '--features-to-evaluate', ';'.join(features_to_evaluate_2),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_eval_path_1, output_eval_path_2)\n\n\ndef test_feature_tags_in_options_file():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n training_options_path = yatest.common.test_output_path('training_options.json')\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n 
'--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n '--pool-metainfo-path', pool_metainfo_path,\n '--training-options-file', training_options_path,\n )\n yatest.common.execute(cmd)\n\n with open(training_options_path) as f:\n options = json.load(f)\n assert options['pool_metainfo_options'] == pool_metainfo\n"
] | [
[
"numpy.dot",
"numpy.hstack",
"pandas.read_csv",
"numpy.log",
"numpy.allclose",
"numpy.random.seed",
"numpy.random.random",
"numpy.arange",
"numpy.genfromtxt",
"numpy.all",
"numpy.concatenate",
"numpy.random.randn",
"numpy.mean",
"numpy.float32",
"numpy.savetxt",
"numpy.array",
"numpy.random.RandomState",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hz512/Smart-Parking-Enforcement-System | [
"e990903de545693ad6e2536bf167c69ab672d16a"
] | [
"utils/callbacks.py"
] | [
"import tensorflow.keras as tfk\r\nimport tensorflow as tf\r\nimport tensorflow.keras.layers as layers\r\nimport json\r\nimport collections\r\nfrom datetime import datetime\r\nimport os\r\n\r\n\r\nclass LrStepDecay(tfk.callbacks.Callback):\r\n def __init__(self,\r\n decay_rate,\r\n decay_at):\r\n super(LrStepDecay, self).__init__()\r\n self.decay_rate = decay_rate\r\n self.decay_at = decay_at\r\n self.counter = 0\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n if self.counter >= len(self.decay_at):\r\n return\r\n\r\n if epoch >= self.decay_at[self.counter]:\r\n self.counter += 1\r\n new_lr = float(tfk.backend.get_value(self.model.optimizer.learning_rate)) * self.decay_rate\r\n tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)\r\n print(\"\\nEpoch %05d: Learning rate is %3.6f.\" % (epoch, new_lr))\r\n\r\n\r\nclass Logger(tfk.callbacks.Callback):\r\n\r\n def __init__(self,\r\n name,\r\n log_dir):\r\n super(Logger, self).__init__()\r\n self.name = name\r\n self.log_dir = log_dir\r\n self.log = collections.defaultdict(list)\r\n self.start_time = datetime.now()\r\n if not os.path.isdir(self.log_dir):\r\n os.mkdir(self.log_dir)\r\n\r\n def on_epoch_begin(self, epoch, logs=None):\r\n self.start_time = datetime.now()\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n file = open('{}/{}.json'.format(self.log_dir, self.name), 'w')\r\n for key in logs:\r\n self.log[key].append(logs[key])\r\n self.log['epoch'].append(epoch)\r\n self.log['walltime'].append((datetime.now() - self.start_time).seconds)\r\n json.dump(self.log, file)\r\n file.close()"
] | [
[
"tensorflow.keras.backend.get_value",
"tensorflow.keras.backend.set_value"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
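Editorial aside (not part of the dataset row above): the record's utils/callbacks.py defines two Keras callbacks, LrStepDecay and Logger, but ships no usage code. The sketch below shows one plausible way to wire them into model.fit, assuming a TF 2.x environment consistent with the row's listed TensorFlow versions and that the file is importable as utils.callbacks; the model, data, and ./logs directory are placeholders invented for illustration.

# Illustrative sketch only -- model, data and paths are placeholders, not dataset content.
import numpy as np
import tensorflow as tf
from utils.callbacks import LrStepDecay, Logger   # assumes the row's file is on the import path

x = np.random.rand(256, 8).astype("float32")       # dummy features
y = np.random.randint(0, 2, size=(256, 1))         # dummy binary labels

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss="binary_crossentropy")

model.fit(
    x, y,
    epochs=30,
    batch_size=32,
    callbacks=[
        LrStepDecay(decay_rate=0.1, decay_at=[10, 20]),  # lr multiplied by 0.1 at epochs 10 and 20
        Logger(name="demo_run", log_dir="./logs"),        # per-epoch metrics written to ./logs/demo_run.json
    ],
    verbose=0,
)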
CenIII/pose-ae-train | [
"8780ba9f3d80ca3a724bbee7b815073adc3d3e6e"
] | [
"data/coco_pose/ref.py"
] | [
"import numpy as np\nimport pickle\nimport h5py\nfrom scipy.misc import imread\nimport os \nfrom pycocotools.coco import COCO\nfrom pycocotools import mask \n\ndata_dir = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/images'\nann_path = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/annotations/person_keypoints_train2014.json'\n\nref_dir = os.path.dirname(__file__)\n\nassert os.path.exists(data_dir)\nassert os.path.exists(ann_path)\ncoco, img_ids, num_examples = None, None, None\n\nwith open(ref_dir + '/valid_id', 'r') as f:\n valid_id = list(map(lambda x:int(x.strip()), f.readlines()))\nvalid_id_set = set(valid_id)\n\ndef init():\n global coco, img_ids, num_examples\n ann_file = os.path.join(ann_path)\n coco = COCO(ann_file)\n img_ids = coco.getImgIds()\n num_examples = len(img_ids)\n\n# num_parts = 17\n# part_mask = np.array([0,0,0,0,0,0,0,1,1,1,1,0,0,1,1,1,1])\n# part_ref = {'ankle':[15,16],'knee':[13,14],'hip':[11,12],\n# 'wrist':[9,10],'elbow':[7,8],'shoulder':[5,6],\n# 'face':[0,1,2],'ears':[3,4]}\n# part_labels = ['nose','eye_l','eye_r','ear_l','ear_r',\n# 'sho_l','sho_r','elb_l','elb_r','wri_l','wri_r',\n# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']\n# basic_order = ['sho_l','sho_r', 'nose', 'eye_l','eye_r','ear_l',\n# 'ear_r','elb_l','elb_r','wri_l','wri_r',\n# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']\n# pairRef = [\n# [1,2],[2,3],[1,3],\n# [6,8],[8,10],[12,14],[14,16],\n# [7,9],[9,11],[13,15],[15,17],\n# [6,7],[12,13],[6,12],[7,13]\n# ]\n# pairRef = np.array(pairRef) - 1\n\nflipRef = [i-1 for i in [1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16] ]\n\n# part_idx = {b:a for a, b in enumerate(part_labels)}\n# basic_order = [part_idx[i] for i in basic_order]\n\n\ndef initialize(opt):\n return\n\ndef image_path(idx):\n img_info = coco.loadImgs(img_ids[idx])[0]\n path = img_info['file_name'].split('_')[1] + '/' + img_info['file_name']\n return os.path.join(data_dir, path)\n\ndef load_image(idx):\n return imread(image_path(idx),mode='RGB')\n\n\ndef num_objects(idx, anns=None, should_greater_than_1 = False):\n if anns is None: anns = get_anns(idx)\n return len(anns)\n\ndef setup_val_split(opt = None):\n if coco is None:\n return [], []\n\n tmp_idxs = []\n for i in range(num_examples):\n if num_objects(i, None) > 0:\n tmp_idxs += [i]\n ref_idxs = np.array(tmp_idxs,dtype=int) #39935 images that # of ppl > 0\n ### choose image_id from valid_id_set\n\n valid = {}\n train = []\n for i in ref_idxs:\n if img_ids[i] in valid_id_set:\n valid[ img_ids[i] ]=i\n else:\n train.append(i)\n return np.array(train), np.array([valid[i] for i in valid_id if i in valid])\n\ndef get_anns(idx):\n ann_ids = coco.getAnnIds(imgIds=img_ids[idx])\n tmp_ann = coco.loadAnns(ann_ids)\n # Filter tmp_ann for people with no keypoints annotated\n return [tmp_ann[i] for i in range(len(tmp_ann)) if tmp_ann[i]['num_keypoints'] > 0]\n\ndef get_mask(idx):\n ann_ids = coco.getAnnIds(imgIds=img_ids[idx])\n anns = coco.loadAnns(ann_ids)\n img = coco.loadImgs(img_ids[idx])[0]\n m = np.zeros((img['height'], img['width']))\n for j in anns:\n if j['iscrowd']:\n rle = mask.frPyObjects(j['segmentation'], img['height'], img['width'])\n m += mask.decode(rle)\n return m < 0.5\n\ndef get_keypoints(idx, anns=None):\n if anns is None: anns = get_anns(idx)\n num_people = num_objects(idx, anns)\n kps = np.zeros((num_people, 17, 3))\n for i in range(num_people):\n kps[i] = np.array(anns[i]['keypoints']).reshape([-1,3])\n return kps\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
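Editorial aside (not part of the dataset row above): data/coco_pose/ref.py is a helper module with no driver code, and it hard-codes absolute COCO paths plus a bundled valid_id file, so it only runs on a machine laid out like the original author's. The sketch below is a hedged illustration of how its functions fit together, assuming those paths exist and pycocotools is installed; the import path simply mirrors the row's file_path.

# Illustrative sketch only -- depends on the absolute COCO paths hard-coded in ref.py.
from data.coco_pose import ref

ref.init()                                     # builds the COCO handle and img_ids list
train_idx, valid_idx = ref.setup_val_split()   # index split driven by the bundled valid_id file

idx = int(train_idx[0])
anns = ref.get_anns(idx)                       # people with at least one annotated keypoint
kps = ref.get_keypoints(idx, anns)             # (num_people, 17, 3) array of x, y, visibility
crowd_mask = ref.get_mask(idx)                 # boolean mask, False over crowd regions
img = ref.load_image(idx)                      # H x W x 3 RGB image via scipy.misc.imread

print(kps.shape, crowd_mask.shape, img.shape)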
Sujit-O/dyngem | [
"a879bf362d1e9409faa4e1186c345337ad6d0189",
"a879bf362d1e9409faa4e1186c345337ad6d0189"
] | [
"dynamicgem/test/test_dynRNN.py",
"dynamicgem/embedding/dynAERNN.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module is for testing dynRNN\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nfrom dynamicgem.embedding.dynRNN import DynRNN\nfrom dynamicgem.graph_generation import dynamic_SBM_graph as sbm\nfrom dynamicgem.visualization import plot_dynamic_sbm_embedding\nfrom time import time\n\n\ndef test_dynRNN():\n # Parameters for Stochastic block model graph\n # Todal of 1000 nodes\n node_num = 100\n # Test with two communities\n community_num = 2\n # At each iteration migrate 10 nodes from one community to the another\n node_change_num = 2\n # Length of total time steps the graph will dynamically change\n length = 7\n # output directory for result\n outdir = './output'\n intr = './intermediate'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n if not os.path.exists(intr):\n os.mkdir(intr)\n testDataType = 'sbm_cd'\n # Generate the dynamic graph\n dynamic_sbm_series = list(sbm.get_community_diminish_series_v2(node_num,\n community_num,\n length,\n 1, # comminity ID to perturb\n node_change_num))\n graphs = [g[0] for g in dynamic_sbm_series]\n # parameters for the dynamic embedding\n # dimension of the embedding\n dim_emb = 8\n lookback = 2\n\n # dynRNN\n embedding = DynRNN(d=dim_emb,\n beta=5,\n n_prev_graphs=lookback,\n nu1=1e-6,\n nu2=1e-6,\n n_enc_units=[500, 300],\n n_dec_units=[500, 300],\n rho=0.3,\n n_iter=2,\n xeta=1e-3,\n n_batch=100,\n modelfile=['./intermediate/enc_model_dynRNN.json',\n './intermediate/dec_model_dynRNN.json'],\n weightfile=['./intermediate/enc_weights_dynRNN.hdf5',\n './intermediate/dec_weights_dynRNN.hdf5'],\n savefilesuffix=\"testing\")\n embs = []\n t1 = time()\n for temp_var in range(lookback + 1, length + 1):\n emb, _ = embedding.learn_embeddings(graphs[:temp_var])\n embs.append(emb)\n print(embedding._method_name + ':\\n\\tTraining time: %f' % (time() - t1))\n plt.figure()\n plt.clf()\n plot_dynamic_sbm_embedding.plot_dynamic_sbm_embedding_v2(embs[-5:-1], dynamic_sbm_series[-5:])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom keras.layers import Input, Subtract\nfrom keras.models import Model, model_from_json\nfrom keras.optimizers import SGD, Adam\nfrom keras.callbacks import TensorBoard, EarlyStopping\nfrom keras import backend as KBack\n\nimport tensorflow as tf\nfrom time import time\n\nfrom dynamicgem.embedding.dynamic_graph_embedding import DynamicGraphEmbedding\nfrom dynamicgem.utils.dnn_utils import *\n\n\n\n\nclass DynAERNN(DynamicGraphEmbedding):\n \"\"\"Dynamic AutoEncoder with Recurrent Neural Network\n \n dyngraph2vecAERNN or DynAERNN is a dynamic graph embedding algorithm \n which combines the auto-encoder with the recurrent neural network\n to perform the embedding for the temporally evolving graphs.\n\n Args:\n d (int): dimension of the embedding\n beta (float): penalty parameter in matrix B of 2nd order objective\n n_prev_graphs (int): Lookback (number of previous graphs to be considered) for the dynamic graph embedding\n nu1 (float): L1-reg hyperparameter\n nu2 (float): L2-reg hyperparameter\n K (float): number of hidden layers in encoder/decoder\n rho (float): bounding ratio for number of units in consecutive layers (< 1)\n n_aeunits (list) = List of embedding dimension for auto encoder layers\n n_lstmunits= List of embedding dimension for lstm layers\n n_iter (int): number of sgd iterations for first embedding (const)\n xeta (float): sgd step size parameter\n n_batch (int): minibatch size for SGD\n modelfile (str): Files containing previous encoder and decoder models\n weightfile (str): Files containing previous encoder and decoder weights\n \n Examples:\n >>> from dynamicgem.embedding.dynAERNN import DynAERNN\n >>> from dynamicgem.graph_generation import dynamic_SBM_graph\n >>> node_num = 1000\n >>> community_num = 2\n >>> node_change_num = 10\n >>> length =5\n >>> dynamic_sbm_series = dynamic_SBM_graph.get_community_diminish_series_v2(node_num,\n community_num,\n length,\n 1,\n node_change_num)\n >>> embedding = DynAERNN(d=dim_emb,\n beta=5,\n n_prev_graphs=lookback,\n nu1=1e-6,\n nu2=1e-6,\n n_units=[500, 300, ],\n rho=0.3,\n n_iter=epochs,\n xeta=args.learningrate,\n n_batch=args.batch,\n modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],\n weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5'],\n savefilesuffix=\"testing\")\n\n >>> graphs = [g[0] for g in dynamic_sbm_series]\n >>> embs = []\n\n >>> for temp_var in range(length):\n >>> emb, _ = embedding.learn_embeddings(graphs[temp_var])\n >>> embs.append(emb)\n \"\"\"\n\n def __init__(self, d, *hyper_dict, **kwargs):\n self._d = d\n hyper_params = {\n 'method_name': 'dynAERNN',\n 'actfn': 'relu',\n 'modelfile': None,\n 'weightfile': None,\n 'savefilesuffix': None\n }\n hyper_params.update(kwargs)\n for key in hyper_params.keys():\n self.__setattr__('_%s' % key, hyper_params[key])\n for dictionary in hyper_dict:\n for key in dictionary:\n self.__setattr__('_%s' % key, dictionary[key])\n\n def get_method_name(self):\n \"\"\"Function to return the method name.\n \n Returns:\n String: Name of the method.\n \"\"\"\n return self._method_name\n\n def get_method_summary(self):\n \"\"\"Function to return the summary of the algorithm. 
\n \n Returns:\n String: Method summary\n \"\"\"\n return '%s_%d' % (self._method_name, self._d)\n\n def learn_embeddings(self, graphs):\n \"\"\"Learns the embedding of the nodes.\n \n Attributes:\n graph (Object): Networkx Graph Object\n\n Returns:\n List: Node embeddings and time taken by the algorithm\n \"\"\"\n self._node_num = graphs[0].number_of_nodes()\n t1 = time()\n ###################################\n # TensorFlow wizardry\n config = tf.ConfigProto()\n # Don't pre-allocate memory; allocate as-needed\n config.gpu_options.allow_growth = True\n # Only allow a total of half the GPU memory to be allocated\n config.gpu_options.per_process_gpu_memory_fraction = 0.2\n # Create a session to pass the above configuration\n # sess=tf.Session(config=config)\n # Create a tensorflow debugger wrapper\n # sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n # Create a session with the above options specified.\n KBack.tensorflow_backend.set_session(tf.Session(config=config))\n # KBack.tensorflow_backend.set_session(sess)\n ###################################\n\n # Generate encoder, decoder and autoencoder\n self._num_iter = self._n_iter\n self._aeencoders = [None] * self._n_prev_graphs\n for i in range(self._n_prev_graphs):\n self._aeencoders[i] = get_encoder_dynaernn(\n self._node_num,\n self._d,\n self._n_aeunits,\n self._nu1,\n self._nu2,\n self._actfn\n )\n self._aeencoders[i].name = \"ae_encoder_%d\" % i\n self._lstmencoder = get_lstm_encoder(\n self._d,\n self._n_prev_graphs,\n self._d,\n self._n_lstmunits,\n self._actfn,\n None,\n None,\n None,\n False\n )\n self._lstmencoder.name = \"lstm_encoder\"\n self._aedecoder = get_decoder_dynaernn(\n self._node_num,\n self._d,\n self._n_aeunits,\n self._nu1,\n self._nu2,\n self._actfn\n )\n self._aedecoder.name = \"decoder\"\n self._autoencoder = get_aelstm_autoencoder(\n self._aeencoders,\n self._lstmencoder,\n self._aedecoder\n )\n # Initialize self._model\n # Input\n x_in = Input(\n shape=(self._n_prev_graphs * self._node_num,),\n name='x_in'\n )\n x_pred = Input(\n shape=(self._node_num,),\n name='x_pred'\n )\n\n [x_hat, y] = self._autoencoder(x_in)\n # Outputs\n x_diff = Subtract()([x_hat, x_pred])\n\n # Objectives\n def weighted_mse_x(y_true, y_pred):\n ''' Hack: This fn doesn't accept additional arguments.\n We use y_true to pass them.\n y_pred: Contains x_hat - x_pred\n y_true: Contains b\n '''\n return KBack.sum(\n KBack.square(y_pred * y_true[:, 0:self._node_num]),\n axis=-1\n )\n\n # Model\n self._model = Model(input=[x_in, x_pred], output=x_diff)\n sgd = SGD(lr=self._xeta, decay=1e-5, momentum=0.99, nesterov=True)\n adam = Adam(lr=self._xeta, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n # self._model.compile(optimizer=sgd, loss=weighted_mse_x)\n self._model.compile(optimizer=adam, loss=weighted_mse_x)\n\n # tensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)\n history = self._model.fit_generator(\n generator=batch_generator_dynaernn(\n graphs,\n self._beta,\n self._n_batch,\n self._n_prev_graphs,\n True\n ),\n nb_epoch=self._num_iter,\n samples_per_epoch=(\n graphs[0].number_of_nodes() * self._n_prev_graphs\n ) // self._n_batch,\n verbose=1\n # callbacks=[tensorboard]\n )\n loss = history.history['loss']\n # Get embedding for all points\n if loss[0] == np.inf or np.isnan(loss[0]):\n print('Model diverged. 
Assigning random embeddings')\n self._Y = np.random.randn(self._node_num, self._d)\n else:\n self._Y, self._next_adj = model_batch_predictor_dynaernn(\n self._autoencoder,\n graphs[len(graphs) - self._n_prev_graphs:],\n self._n_batch\n )\n t2 = time()\n # Save the autoencoder and its weights\n if self._weightfile is not None:\n pass\n # saveweights(self._encoder, self._weightfile[0])\n # saveweights(self._decoder, self._weightfile[1])\n if self._modelfile is not None:\n pass\n # savemodel(self._encoder, self._modelfile[0])\n # savemodel(self._decoder, self._modelfile[1])\n if self._savefilesuffix is not None:\n pass\n # saveweights(self._encoder,\n # 'encoder_weights_' + self._savefilesuffix + '.hdf5')\n # saveweights(self._decoder,\n # 'decoder_weights_' + self._savefilesuffix + '.hdf5')\n # savemodel(self._encoder,\n # 'encoder_model_' + self._savefilesuffix + '.json')\n # savemodel(self._decoder,\n # 'decoder_model_' + self._savefilesuffix + '.json')\n # # Save the embedding\n # np.savetxt('embedding_' + self._savefilesuffix + '.txt',\n # self._Y)\n # np.savetxt('next_pred_' + self._savefilesuffix + '.txt',\n # self._next_adj)\n # sess.close()\n return self._Y, (t2 - t1)\n\n def get_embeddings(self):\n \"\"\"Function to return the embeddings\"\"\"\n return self._Y\n\n def get_edge_weight(self, i, j, embed=None, filesuffix=None):\n \"\"\"Function to get edge weight.\n \n Attributes:\n i (int): source node for the edge.\n j (int): target node for the edge.\n embed (Matrix): Embedding values of all the nodes.\n filesuffix (str): File suffix to be used to load the embedding.\n\n Returns:\n Float: Weight of the given edge.\n \"\"\"\n if embed is None:\n if filesuffix is None:\n embed = self._Y\n else:\n embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n if i == j:\n return 0\n else:\n S_hat = self.get_reconst_from_embed(embed[(i, j), :], filesuffix)\n return (S_hat[i, j] + S_hat[j, i]) / 2\n\n def get_reconstructed_adj(self, embed=None, node_l=None, filesuffix=None):\n \"\"\"Function to reconstruct the adjacency list for the given node.\n \n Attributes:\n node_l (int): node for which the adjacency list will be created.\n embed (Matrix): Embedding values of all the nodes.\n filesuffix (str): File suffix to be used to load the embedding.\n\n Returns:\n List : Adjacency list of the given node.\n \"\"\"\n if embed is None:\n if filesuffix is None:\n embed = self._Y\n else:\n embed = np.loadtxt('embedding_' + filesuffix + '.txt')\n S_hat = self.get_reconst_from_embed(embed, filesuffix)\n return graphify(S_hat)\n\n def get_reconst_from_embed(self, embed, filesuffix=None):\n \"\"\"Function to reconstruct the graph from the embedding.\n \n Attributes:\n node_l (int): node for which the adjacency list will be created.\n embed (Matrix): Embedding values of all the nodes.\n filesuffix (str): File suffix to be used to load the embedding.\n\n Returns:\n List: REconstructed graph for the given nodes.\n \"\"\"\n if filesuffix is None:\n return self._decoder.predict(embed, batch_size=self._n_batch)\n else:\n try:\n decoder = model_from_json(open('./intermediate/decoder_model_' + filesuffix + '.json').read())\n except:\n print('Error reading file: {0}. Cannot load previous model'.format(\n 'decoder_model_' + filesuffix + '.json'))\n exit()\n try:\n decoder.load_weights('./intermediate/decoder_weights_' + filesuffix + '.hdf5')\n except:\n print('Error reading file: {0}. 
Cannot load previous weights'.format(\n 'decoder_weights_' + filesuffix + '.hdf5'))\n exit()\n return decoder.predict(embed, batch_size=self._n_batch)\n\n def predict_next_adj(self, node_l=None):\n \"\"\"Function to predict the next adjacency for the given node.\n \n Attributes:\n node_l (int): node for which the adjacency list will be created.\n\n Returns:\n List: Reconstructed adjancey list.\n \"\"\"\n if node_l is not None:\n return self._next_adj[node_l]\n else:\n return self._next_adj\n"
] | [
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
kobeeraveendran/hackfsu5 | [
"5614d832423f56913bd35d96e2472068a106b376",
"5614d832423f56913bd35d96e2472068a106b376"
] | [
"fft_prototype.py",
"fft_new.py"
] | [
"import matplotlib.pyplot as plt\r\nfrom scipy.io import wavfile # get the api\r\nfrom scipy.fftpack import fft\r\nfrom pylab import *\r\n\r\ndef f(filename):\r\n # song files are in ogg... we need it to be in wav.\r\n fs, data = wavfile.read(filename) \r\n \r\n # songs have multiple channels, but we only need one channel\r\n a = data.T[0]\r\n \r\n # this is 8-bit track, b is now normalized on [-1,1)\r\n #b=[(ele/2**16)*2-1 for ele in a] \r\n\r\n # create a list of complex number\r\n c = fft(a)\r\n\r\n # only need half of the fft list (because the internet says so)\r\n d = len(c)//2 \r\n\r\n #bam, it is plotted and saved. \r\n #plt.plot(abs(c[:(d-1)]),'r')\r\n #savefig(filename+'.png',bbox_inches='tight')\r\n\t\r\n return c\r\n\r\nguitar = f(\"auldlangguitar.wav\")\r\nviolin = f(\"auldlangviolin.wav\")\r\nharmon = f(\"auldlangharmonica.wav\")\r\ncombine= f(\"combined.wav\")\r\ncut = combine[:-14]\r\ncombined2 = guitar + violin\r\n\r\nplt.plot(np.abs(guitar), 'r')\r\n#plt.show()\r\nsavefig('guitarplot.png',bbox_inches='tight')\r\n\r\ngc = np.dot(guitar, combined2)\r\nvc = np.dot(violin, combined2)\r\nhc = np.dot(harmon, combined2)\r\n\r\nng = guitar #/ np.linalg.norm(guitar)\r\nnv = violin #/ np.linalg.norm(violin)\r\nnh = harmon #/ np.linalg.norm(harmon)\r\nnc = combined2 #/ np.linalg.norm(cut)\r\n\r\na = np.column_stack((ng, nv, nh))\r\n\r\nx, res, rank, s = np.linalg.lstsq(a, nc)\r\nplt.plot(np.abs(ng * x[0]), 'r')\r\n#plt.show()\r\nsavefig('decompguitarplot.png',bbox_inches='tight')\r\ndecompGuitar = np.fft.ifft(ng * 1 + nv *1)\r\nprint(\"X\\n\")\r\nprint(x)\r\n\r\n\r\nprint(\"decomp real\")\r\nprint(np.real(decompGuitar))\r\ntest = np.fft.ifft(guitar)\r\n\r\ndecompreal = (decompGuitar)\r\ndecompreal = decompreal #/ np.min(np.abs(decompreal[np.nonzero(decompreal)]))\r\n\r\n\r\norigfs, origdata = wavfile.read(\"auldlangguitar.wav\")\r\nb = np.column_stack((decompGuitar.astype(origdata.dtype), decompGuitar.astype(origdata.dtype)))\r\nwavfile.write(\"decompguitar.wav\", origfs, b)\r\nnp.savetxt(\"guitar.csv\", test.astype(uint8) , delimiter= \",\")\r\nnp.savetxt(\"combined.csv\", combine, delimiter= \",\")\r\nnp.savetxt(\"channel2.csv\", decompreal.astype(uint8), delimiter= \",\")\r\nprint(\"decomp orig\")\r\nprint(np.min(decompreal[np.nonzero(decompreal)]))\r\n",
"import matplotlib.pyplot as plt\r\nfrom scipy.io import wavfile # get the api\r\nfrom scipy.fftpack import fft\r\nfrom pylab import *\r\n\r\ndef f(filename):\r\n # song files are in ogg... we need it to be in wav.\r\n fs, data = wavfile.read(filename) \r\n \r\n # songs have multiple channels, but we only need one channel\r\n a = data.T[0]\r\n \r\n # this is 8-bit track, b is now normalized on [-1,1)\r\n #b=[(ele/2**16)*2-1 for ele in a] \r\n\r\n # create a list of complex number\r\n c = fft(a)\r\n\r\n # only need half of the fft list (because the internet says so)\r\n d = len(c)//2 \r\n\r\n #bam, it is plotted and saved. \r\n #plt.plot(abs(c[:(d-1)]),'r')\r\n #savefig(filename+'.png',bbox_inches='tight')\r\n\t\r\n return c\r\n\r\nguitar = f(\"auldlangguitar.wav\")\r\nviolin = f(\"auldlangviolin.wav\")\r\nharmon = f(\"auldlangharmonica.wav\")\r\ncombine= f(\"combined.wav\")\r\ncut = combine[:-14]\r\ncombined2 = guitar + violin\r\n\r\nplt.plot(np.abs(guitar), 'r')\r\n#plt.show()\r\nsavefig('guitarplot.png',bbox_inches='tight')\r\n\r\ngc = np.dot(guitar, combined2)\r\nvc = np.dot(violin, combined2)\r\nhc = np.dot(harmon, combined2)\r\n\r\nng = guitar - (np.dot(guitar, violin) * (violin / len(violin))) - (np.dot(guitar, violin) * (harmon / len(harmon))) #/ np.linalg.norm(guitar)\r\nnv = violin - (np.dot(violin, guitar) * (guitar / len(guitar))) - (np.dot(violin, harmon) * (harmon / len(harmon)))#/ np.linalg.norm(violin)\r\nnh = harmon - (np.dot(harmon, guitar) * (guitar / len(guitar))) - (np.dot(harmon, violin) * (violin / len(violin)))#/ np.linalg.norm(harmon)\r\nnc = combined2 #/ np.linalg.norm(cut)\r\n \r\na = np.column_stack((ng, nv, nh))\r\na2 = np.column_stack((ng, nv))\r\nx, res, rank, s = np.linalg.lstsq(a, nc)\r\nx2, res2, rank2, s2 = np.linalg.lstsq(a2, nc)\r\nplt.plot(np.abs(nv * x2[0]), 'r')\r\n#plt.show()\r\nsavefig('decompguitarplot.png',bbox_inches='tight')\r\ndecompGuitar = np.fft.ifft(ng * x[0] + nv * x[1] + nh * x[2])\r\nprint(\"X\\n\")\r\nprint(x)\r\n\r\nprint(\"decomp real\")\r\nprint(np.real(decompGuitar))\r\n\r\ndecompreal = np.real(decompGuitar)\r\ndecompreal = decompreal #/ np.min(np.abs(decompreal[np.nonzero(decompreal)]))\r\n\r\nb = np.column_stack((decompreal.astype(uint8), decompreal.astype(uint8)))\r\n\r\norigfs, origdata = wavfile.read(\"auldlangguitar.wav\")\r\nwavfile.write(\"decompguitar.wav\", origfs, b)\r\nnp.savetxt(\"guitar.csv\", guitar, delimiter= \",\")\r\nnp.savetxt(\"combined.csv\", combine, delimiter= \",\")\r\nnp.savetxt(\"channel2.csv\", b, delimiter= \",\")\r\nprint(\"decomp orig\")\r\nprint(np.min(decompreal[np.nonzero(decompreal)]))\r\n"
] | [
[
"scipy.fftpack.fft",
"scipy.io.wavfile.write",
"scipy.io.wavfile.read"
],
[
"scipy.fftpack.fft",
"scipy.io.wavfile.write",
"scipy.io.wavfile.read"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
rathbird/Birds_of_Prey_CNN_Classifier | [
"13ceb78db2408709804263395175482cff6c6973"
] | [
"src/googlexfr.py"
] | [
"# import the necessary packages\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom minigooglenet import minigooglenet_functional\n\n#set param values\n#classes (eagles, vultures)\nn_categories = 2\ndir_train = '.'\ntrain_size = 2183\ntest_size = 501\nbatch_size = 16\nEPOCHS = 6\n\n#train data - 2 classes, 1000 per class\ndatagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n#test data, no transformation\ndatagen_validation = ImageDataGenerator(preprocessing_function=preprocess_input)\n\n#load images while model is running\ntrain_generator = datagen_train.flow_from_directory(\n directory='./data/train/', \n target_size=(100,100),\n color_mode='rgb',\n batch_size=32,\n class_mode='categorical',\n shuffle=True,\n seed=42)\n\nvalid_generator = datagen_validation.flow_from_directory(\n directory=\"./data/test/\",\n target_size=(100, 100),\n color_mode=\"rgb\",\n batch_size=1,\n class_mode=\"categorical\",\n shuffle=False,\n seed=42)\n\n#create model\ngoogle = minigooglenet_functional(100, 100, 3, n_categories)\n\n#compile model with very slow learning rate\ngoogle.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])\n\nhistory = google.fit(train_generator, steps_per_epoch=train_size//batch_size, epochs=1, validation_data=valid_generator, validation_steps= test_size//batch_size)\n\n#save model\ngoogle.save('models/googlexfr')\n\n#analyze results\n\n#Confution Matrix and Classification Report\nY_pred = google.predict(valid_generator, test_size // batch_size+1)\ny_pred = np.argmax(Y_pred, axis=1)\n\nprint('Confusion Matrix')\nprint(confusion_matrix(valid_generator.classes, y_pred))\nprint('Classification Report')\ntarget_names = ['eagle', 'vulture']\nprint(classification_report(valid_generator.classes, y_pred, target_names=target_names))\n\n\n\n"
] | [
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.optimizers.Adam",
"numpy.argmax",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
az2104nas/sztnb302alsr2bs21on | [
"6084c82c59a4a89498a191d96c231f47df10317d"
] | [
"naslib/search_spaces/nasbench1shot1/search_spaces/search_space_1.py"
] | [
"import itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nasbench import api\n\nfrom naslib.search_spaces.nasbench1shot1.search_space import SearchSpace\nfrom naslib.search_spaces.nasbench1shot1.utils import upscale_to_nasbench_format, OUTPUT_NODE, INPUT, CONV1X1, OUTPUT\nfrom naslib.search_spaces.nasbench1shot1.wrappers import Model, Architecture, NasbenchWrapper\n\n\nclass SearchSpace1(SearchSpace):\n def __init__(self):\n super(SearchSpace1, self).__init__(search_space_number=1, num_intermediate_nodes=4)\n \"\"\"\n SEARCH SPACE 1\n \"\"\"\n self.num_parents_per_node = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 2,\n '4': 2,\n '5': 2\n }\n if sum(self.num_parents_per_node.values()) > 9:\n raise ValueError('Each nasbench cell has at most 9 edges.')\n\n self.test_min_error = 0.05448716878890991\n self.valid_min_error = 0.049278855323791504\n\n def create_nasbench_adjacency_matrix(self, parents, **kwargs):\n adjacency_matrix = self._create_adjacency_matrix(parents, adjacency_matrix=np.zeros([6, 6]),\n node=OUTPUT_NODE - 1)\n # Create nasbench compatible adjacency matrix\n return upscale_to_nasbench_format(adjacency_matrix)\n\n def create_nasbench_adjacency_matrix_with_loose_ends(self, parents):\n return upscale_to_nasbench_format(self._create_adjacency_matrix_with_loose_ends(parents))\n\n def generate_adjacency_matrix_without_loose_ends(self):\n for adjacency_matrix in self._generate_adjacency_matrix(adjacency_matrix=np.zeros([6, 6]),\n node=OUTPUT_NODE - 1):\n yield upscale_to_nasbench_format(adjacency_matrix)\n\n def objective_function(self, nasbench, config, budget=108):\n adjacency_matrix, node_list = super(SearchSpace1, self).convert_config_to_nasbench_format(config)\n # adjacency_matrix = upscale_to_nasbench_format(adjacency_matrix)\n node_list = [INPUT, *node_list, CONV1X1, OUTPUT]\n adjacency_list = adjacency_matrix.astype(np.int).tolist()\n model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)\n nasbench_data = nasbench.query(model_spec, epochs=budget)\n\n # record the data to history\n architecture = Model()\n arch = Architecture(adjacency_matrix=adjacency_matrix,\n node_list=node_list)\n architecture.update_data(arch, nasbench_data, budget)\n self.run_history.append(architecture)\n\n return nasbench_data['validation_accuracy'], nasbench_data['training_time']\n\n def generate_with_loose_ends(self):\n for _, parent_node_3, parent_node_4, output_parents in itertools.product(\n *[itertools.combinations(list(range(int(node))), num_parents) for node, num_parents in\n self.num_parents_per_node.items()][2:]):\n parents = {\n '0': [],\n '1': [0],\n '2': [0, 1],\n '3': parent_node_3,\n '4': parent_node_4,\n '5': output_parents\n }\n adjacency_matrix = self.create_nasbench_adjacency_matrix_with_loose_ends(parents)\n yield adjacency_matrix\n\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
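Editorial aside (not part of the dataset row above): the record's SearchSpace1 class is defined without any usage code, and its behaviour depends on parent-class helpers (_create_adjacency_matrix_with_loose_ends, upscale_to_nasbench_format) that are not part of this record. The sketch below therefore only illustrates the call pattern implied by generate_with_loose_ends; the parent sets are made-up values that respect num_parents_per_node, and running it requires the naslib and nasbench packages.

# Illustrative sketch only -- relies on naslib/nasbench internals not shown in this record.
from naslib.search_spaces.nasbench1shot1.search_spaces.search_space_1 import SearchSpace1

space = SearchSpace1()

# Parent sets follow the format used in generate_with_loose_ends(): node '5' is the output node
# and each node keeps the parent count declared in num_parents_per_node.
parents = {
    '0': [],
    '1': [0],
    '2': [0, 1],
    '3': [0, 2],
    '4': [1, 2],
    '5': [3, 4],
}

adj = space.create_nasbench_adjacency_matrix_with_loose_ends(parents)
print(adj.shape)   # adjacency matrix upscaled to the format expected by nasbench api.ModelSpec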
apapaion/menpowidgets | [
"237a39ddf4e65c57e8165f8a87f25a25f34d4698"
] | [
"menpowidgets/base.py"
] | [
"from collections import Sized, OrderedDict\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as mc\nimport numpy as np\n\nimport ipywidgets\nimport IPython.display as ipydisplay\n\nfrom menpo.base import name_of_callable\nfrom menpo.image import MaskedImage, Image\nfrom menpo.image.base import _convert_patches_list_to_single_array\nfrom menpo.shape import TriMesh, ColouredTriMesh, TexturedTriMesh\nfrom menpo.visualize import print_dynamic\nfrom menpo.landmark import LandmarkManager\n\nfrom .options import (RendererOptionsWidget, TextPrintWidget,\n SaveMatplotlibFigureOptionsWidget, AnimationOptionsWidget,\n ImageOptionsWidget, LandmarkOptionsWidget,\n PlotMatplotlibOptionsWidget, PatchOptionsWidget,\n LinearModelParametersWidget, CameraSnapshotWidget,\n Shape2DOptionsWidget, Shape3DOptionsWidget,\n SaveMayaviFigureOptionsWidget, Mesh3DOptionsWidget)\nfrom .tools import LogoWidget, SwitchWidget\nfrom .utils import (extract_group_labels_from_landmarks,\n extract_groups_labels_from_image, render_image,\n render_patches)\nfrom .checks import check_n_parameters\nfrom .style import map_styles_to_hex_colours\n\n\ndef menpowidgets_src_dir_path():\n r\"\"\"\n The path to the top of the menpowidgets package.\n\n Useful for locating where the logos folder is stored.\n\n Returns\n -------\n path : ``pathlib.Path``\n The full path to the top of the Menpo package\n \"\"\"\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n return Path(os.path.abspath(__file__)).parent\n\n\ndef visualize_shapes_2d(shapes, figure_size=(7, 7), browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n 2D shapes. The supported objects are:\n\n ================================== =\n Object\n ================================== =\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n `menpo.shape.TriMesh`\n ================================== =\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n shapes : `list`\n The `list` of objects to be visualized. It can contain a combination of\n\n ================================== =\n Object\n ================================== =\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n `menpo.shape.TriMesh`\n ================================== =\n\n or subclasses of those.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 2D shape\n and returns a list of custom messages to be printed about it. 
Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that shapes is a list even with one member\n if not isinstance(shapes, Sized):\n shapes = [shapes]\n\n # Get the number of shapes\n n_shapes = len(shapes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected shape index\n i = shape_number_wid.selected_values if n_shapes > 1 else 0\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options['image_view'] = shape_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(shapes[i], 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n idx = shapes[i].labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_face_colour.append(options['marker_face_colour'][idx])\n marker_edge_colour.append(options['marker_edge_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = shapes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Update info text widget\n update_info(shapes[i], custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(shape, custom_info_callback=None):\n min_b, max_b = shape.bounds()\n rang = shape.range()\n cm = shape.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(shape)),\n \"> {} points\".format(shape.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H\".format(\n min_b[0], max_b[0], min_b[1], max_b[1]),\n \"> Range: {0:.1f}W, {1:.1f}H\".format(rang[0], rang[1]),\n \"> Centre of mass: ({0:.1f}, {1:.1f})\".format(cm[0], cm[1]),\n \"> Norm: {0:.2f}\".format(shape.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(shape):\n 
text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # If the object is a LabelledPointUndirectedGraph, grab the labels\n labels = None\n if hasattr(shapes[0], 'labels'):\n labels = shapes[0].labels\n\n # Create widgets\n shape_options_wid = Shape2DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=None, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_shapes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get current shape and check if it has labels\n i = change['new']\n labels = None\n if hasattr(shapes[i], 'labels'):\n labels = shapes[i].labels\n\n # Update shape options\n shape_options_wid.set_widget_state(labels=labels,\n allow_callback=True)\n\n # Shape selection slider\n index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}\n shape_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_shapes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_shapes_3d(shapes, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n 3D shapes. The supported objects are:\n\n ==================================\n Object\n ==================================\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n ==================================\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n shapes : `list`\n The `list` of objects to be visualized. 
It can contain a combination of\n\n ==================================\n Object\n ==================================\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n ==================================\n\n or subclasses of those.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 2D shape\n and returns a list of custom messages to be printed about it. Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print_dynamic('Initializing...')\n\n # Make sure that shapes is a list even with one member\n if not isinstance(shapes, Sized):\n shapes = [shapes]\n\n # Get the number of shapes\n n_shapes = len(shapes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # Get selected shape index\n i = shape_number_wid.selected_values if n_shapes > 1 else 0\n\n # Update info text widget\n update_info(shapes[i], custom_info_callback=custom_info_callback)\n\n # Create options dictionary\n options = dict()\n if isinstance(shapes[i], TriMesh):\n # Note that 3D TriMesh has a totally different set of options\n # compared to any other PointCloud or PointGraph. However, in order\n # for visualize_shapes_3d to support TriMeshes, we simply use the\n # options that are common. 
This means that most of the widget's\n # options will have no effect on rendering...\n options['mesh_type'] = 'wireframe'\n if shape_options_wid.selected_values['markers']['render_markers']:\n options['mesh_type'] = 'fancymesh'\n options['line_width'] = \\\n shape_options_wid.selected_values['lines']['line_width']\n options['colour'] = \\\n shape_options_wid.selected_values['lines']['line_colour'][0]\n options['marker_style'] = \\\n shape_options_wid.selected_values['markers']['marker_style']\n options['marker_size'] = \\\n shape_options_wid.selected_values['markers']['marker_size']\n options['marker_resolution'] = \\\n shape_options_wid.selected_values['markers']['marker_resolution']\n options['step'] = \\\n shape_options_wid.selected_values['markers']['step']\n else:\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n\n # Correct options based on the type of the shape\n if hasattr(shapes[i], 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = shapes[i].labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Render shape with selected options\n save_figure_wid.renderer = shapes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n alpha=1.0, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(shape, custom_info_callback=None):\n min_b, max_b = shape.bounds()\n rang = shape.range()\n cm = shape.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(shape)),\n \"> {} points\".format(shape.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(min_b[0], max_b[0], min_b[1], max_b[1],\n min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(shape.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(shape):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # If the object is a LabelledPointUndirectedGraph, grab the labels\n labels = None\n if hasattr(shapes[0], 'labels'):\n labels = shapes[0].labels\n\n # Create widgets\n shape_options_wid = Shape3DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['numbering_mayavi'], labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_shapes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # 
Get current shape and check if it has labels\n i = change['new']\n labels = None\n if hasattr(shapes[i], 'labels'):\n labels = shapes[i].labels\n\n # Update shape options\n shape_options_wid.set_widget_state(labels=labels,\n allow_callback=True)\n\n # Shape selection slider\n index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}\n shape_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_shapes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef visualize_landmarks_2d(landmarks, figure_size=(7, 7),\n browser_style='buttons', custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n `menpo.landmark.LandmarkManager` (or subclass) objects. 
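A typical (purely illustrative) use is to pass the landmark managers of a set of imported images, e.g. ``[im.landmarks for im in images]``. 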
The landmark\n managers can have a combination of different attributes, e.g.\n landmark groups and labels etc.\n\n Parameters\n ----------\n landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass\n The `list` of landmark managers to be visualized.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts a landmark group and\n returns a list of custom messages to be printed per landmark group.\n Each custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that landmarks is a list even with one landmark manager member\n if isinstance(landmarks, LandmarkManager):\n landmarks = [landmarks]\n\n # Get the number of landmark managers\n n_landmarks = len(landmarks)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = landmark_number_wid.selected_values if n_landmarks > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n if landmark_options_wid.selected_values['landmarks']['render_landmarks']:\n # get shape\n shape = landmarks[i][g]\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options['image_view'] = landmark_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(shape, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n landmark_options_wid.selected_values['landmarks']['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = shape.labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = shape.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n 
figure_size=new_figure_size, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n else:\n ipydisplay.clear_output()\n\n # update info text widget\n update_info(landmarks[i], g, custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(landmarks, group, custom_info_callback=None):\n if group is not None:\n min_b, max_b = landmarks[group].bounds()\n rang = landmarks[group].range()\n cm = landmarks[group].centre()\n text_per_line = [\n \"> {} landmark points\".format(landmarks[group].n_points),\n \"> {}\".format(name_of_callable(landmarks[group])),\n \"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H\".format(\n min_b[0], max_b[0], min_b[1], max_b[1]),\n \"> Range: {0:.1f}W, {1:.1f}H\".format(rang[0], rang[1]),\n \"> Centre of mass: ({0:.1f}, {1:.1f})\".format(cm[0], cm[1]),\n \"> Norm: {0:.2f}\".format(landmarks[group].norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(landmarks[group]):\n text_per_line.append('> {}'.format(msg))\n else:\n text_per_line = [\"No landmarks available.\"]\n\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])\n first_label = labels_keys[0] if labels_keys else None\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=first_label, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_landmarks > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels\n i = landmark_number_wid.selected_values\n g_keys, l_keys = extract_group_labels_from_landmarks(\n landmarks[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=True)\n\n # Landmark selection slider\n index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}\n landmark_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, landmark_options_wid, renderer_options_wid,\n save_figure_wid])\n tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_landmarks > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n 
final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_landmarks_3d(landmarks, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n `menpo.landmark.LandmarkManager` (or subclass) objects. The landmark\n managers can have a combination of different attributes, e.g.\n landmark groups and labels etc.\n\n Parameters\n ----------\n landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass\n The `list` of landmark managers to be visualized.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts a landmark group and\n returns a list of custom messages to be printed per landmark group.\n Each custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that landmarks is a list even with one landmark manager member\n if not isinstance(landmarks, list):\n landmarks = [landmarks]\n\n # Get the number of landmark managers\n n_landmarks = len(landmarks)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = landmark_number_wid.selected_values if n_landmarks > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # update info text widget\n update_info(landmarks[i], g, custom_info_callback=custom_info_callback)\n\n if landmark_options_wid.selected_values['landmarks']['render_landmarks']:\n # get shape\n shape = landmarks[i][g]\n\n options = dict()\n if isinstance(shape, TriMesh):\n # Note that 3D TriMesh has a totally different set of options\n # compared to any other PointCloud or PointGraph. However, in\n # order for visualize_landmarks_3d to support TriMeshes, we\n # simply use the options that are common. 
This means that most\n # of the widget's options will have no effect on rendering...\n options['mesh_type'] = 'wireframe'\n if landmark_options_wid.selected_values['markers'][\n 'render_markers']:\n options['mesh_type'] = 'fancymesh'\n options['line_width'] = \\\n landmark_options_wid.selected_values['lines']['line_width']\n options['colour'] = \\\n landmark_options_wid.selected_values['lines']['line_colour'][0]\n options['marker_style'] = \\\n landmark_options_wid.selected_values['markers']['marker_style']\n options['marker_size'] = \\\n landmark_options_wid.selected_values['markers']['marker_size']\n options['marker_resolution'] = \\\n landmark_options_wid.selected_values['markers'][\n 'marker_resolution']\n options['step'] = \\\n landmark_options_wid.selected_values['markers']['step']\n else:\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n\n # Correct options based on the type of the shape\n if hasattr(shape, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n landmark_options_wid.selected_values['landmarks']['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = shape.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Render shape with selected options\n save_figure_wid.renderer = shape.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n alpha=1.0, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n else:\n ipydisplay.clear_output()\n\n # Define function that updates the info text\n def update_info(landmarks, group, custom_info_callback=None):\n if group is not None:\n min_b, max_b = landmarks[group].bounds()\n rang = landmarks[group].range()\n cm = landmarks[group].centre()\n text_per_line = [\n \"> {} landmark points\".format(landmarks[group].n_points),\n \"> {}\".format(name_of_callable(landmarks[group])),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(\n min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(landmarks[group].norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(landmarks[group]):\n text_per_line.append('> {}'.format(msg))\n else:\n text_per_line = [\"No landmarks available.\"]\n\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])\n first_label = labels_keys[0] if labels_keys else None\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='3D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n 
options_tabs=['numbering_mayavi'], labels=first_label,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_landmarks > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels\n i = landmark_number_wid.selected_values\n g_keys, l_keys = extract_group_labels_from_landmarks(\n landmarks[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=True)\n\n # Landmark selection slider\n index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}\n landmark_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, landmark_options_wid, renderer_options_wid,\n save_figure_wid])\n tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_landmarks > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef visualize_meshes_3d(meshes, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of 3D meshes. The supported\n objects are:\n\n ==================================\n Object\n ==================================\n `menpo.shape.TriMesh`\n `menpo.shape.ColouredTriMesdh`\n `menpo.shape.TexturedTriMesh`\n ==================================\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n meshes : `list`\n The `list` of objects to be visualized. It can contain a combination of\n\n ==================================\n Object\n ==================================\n `menpo.shape.TriMesh`\n `menpo.shape.ColouredTriMesdh`\n `menpo.shape.TexturedTriMesh`\n ==================================\n\n or subclasses of those.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 3D mesh\n and returns a list of custom messages to be printed about it. 
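For illustration only, a minimal (hypothetical) callback named ``extra_info`` could be::\n\n def extra_info(mesh):\n # return one string per extra line shown in the 'Info' tab\n return ['Number of triangles: {}'.format(mesh.n_tris)]\n\n 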
Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!!\n from menpowidgets.utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that meshes is a list even with one member\n if not isinstance(meshes, Sized):\n meshes = [meshes]\n\n # Get the number of meshes\n n_meshes = len(meshes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(_):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected mesh index\n i = mesh_number_wid.selected_values if n_meshes > 1 else 0\n\n # Update info text widget\n update_info(meshes[i], custom_info_callback=custom_info_callback)\n\n # Render instance\n save_figure_wid.renderer = meshes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **mesh_options_wid.selected_values)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(mesh, custom_info_callback=None):\n min_b, max_b = mesh.bounds()\n rang = mesh.range()\n cm = mesh.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(mesh)),\n \"> {} points\".format(mesh.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(\n min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(mesh.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(mesh):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n mesh_options_wid = Mesh3DOptionsWidget(\n textured=(isinstance(meshes[0], ColouredTriMesh) or\n isinstance(meshes[0], TexturedTriMesh)),\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_meshes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n i = change['new']\n\n # Update shape options\n mesh_options_wid.set_widget_state(\n textured=(isinstance(meshes[i], ColouredTriMesh) or\n isinstance(meshes[i], TexturedTriMesh)),\n allow_callback=True)\n\n # selection slider\n index = {'min': 0, 'max': n_meshes-1, 'step': 1, 'index': 0}\n mesh_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Mesh', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, mesh_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab([info_wid, mesh_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Mesh', 'Export']\n for (k, tl) in enumerate(tab_titles):\n 
options_box.set_title(k, tl)\n if n_meshes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n with out:\n print_dynamic('')\n\n\ndef visualize_images(images, figure_size=(7, 7), browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of `menpo.image.Image` (or\n subclass) objects. The images can have a combination of different\n attributes, e.g. masked or not, landmarked or not, without multiple\n landmark groups and labels etc.\n\n Parameters\n ----------\n images : `list` of `menpo.image.Image` or subclass\n The `list` of images to be visualized.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts an image and returns\n a list of custom messages to be printed per image. Each custom message\n will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that images is a list even with one member\n if not isinstance(images, Sized):\n images = [images]\n\n # Get the number of images\n n_images = len(images)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = image_number_wid.selected_values if n_images > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # check if image is masked\n image_is_masked = isinstance(images[i], MaskedImage)\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n options.update(renderer_options_wid.selected_values['legend'])\n options.update(image_options_wid.selected_values)\n options.update(landmark_options_wid.selected_values['landmarks'])\n\n # Correct options based on the type of the shape\n if (images[i].has_landmarks and\n hasattr(images[i].landmarks[g], 'labels')):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = images[i].landmarks[g].labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is 
PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = render_image(\n image=images[i], renderer=save_figure_wid.renderer,\n image_is_masked=image_is_masked, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(images[i], image_is_masked, g,\n custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(img, image_is_masked, group, custom_info_callback=None):\n # Prepare masked (or non-masked) string\n masked_str = 'Masked Image' if image_is_masked else 'Image'\n # Get image path, if available\n path_str = img.path if hasattr(img, 'path') else 'No path available'\n # Create text lines\n text_per_line = [\n \"> {} of size {} with {} channel{}\".format(\n masked_str, img._str_shape(), img.n_channels,\n 's' * (img.n_channels > 1)),\n \"> Path: '{}'\".format(path_str)]\n if image_is_masked:\n text_per_line.append(\n \"> {} masked pixels (attached mask {:.1%} true)\".format(\n img.n_true_pixels(), img.mask.proportion_true()))\n text_per_line.append(\"> min={:.3f}, max={:.3f}\".format(\n img.pixels.min(), img.pixels.max()))\n if img.has_landmarks:\n text_per_line.append(\"> {} landmark points\".format(\n img.landmarks[group].n_points))\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(img):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_groups_labels_from_image(images[0])\n first_label = labels_keys[0] if labels_keys else None\n image_options_wid = ImageOptionsWidget(\n n_channels=images[0].n_channels,\n image_is_masked=isinstance(images[0], MaskedImage),\n render_function=render_function)\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=first_label, axes_x_limits=None, axes_y_limits=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_images > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels, then update landmark options\n i = image_number_wid.selected_values\n g_keys, l_keys = extract_groups_labels_from_image(images[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=images[i].n_channels,\n image_is_masked=isinstance(images[i], MaskedImage),\n allow_callback=True)\n\n # Image selection slider\n index = {'min': 0, 'max': n_images-1, 'step': 1, 'index': 0}\n image_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n 
interval=0.2, description='Image', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, image_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, image_options_wid, landmark_options_wid,\n renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Image', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_images > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_patches(patches, patch_centers, figure_size=(7, 7),\n browser_style='buttons', custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of patch-based images.\n\n The patches argument can have any of the two formats that are returned from\n the `extract_patches()` and `extract_patches_around_landmarks()` methods\n of `menpo.image.Image`. Specifically it can be:\n\n 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`\n 2. `list` of ``n_center * n_offset`` `menpo.image.Image` objects\n\n The patches can have a combination of different attributes, e.g. number of\n centers, number of offsets, number of channels etc.\n\n Parameters\n ----------\n patches : `list`\n The `list` of patch-based images to be visualized. It can consist of\n objects with any of the two formats that are returned from the\n `extract_patches()` and `extract_patches_around_landmarks()` methods.\n Specifically, it can either be an\n ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a\n `list` of ``n_center * n_offset`` `menpo.image.Image` objects.\n patch_centers : `list` of `menpo.shape.PointCloud`\n The centers to set the patches around. If the `list` has only one\n `menpo.shape.PointCloud` then this will be used for all patches members.\n Otherwise, it needs to have the same length as patches.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts an image and returns\n a list of custom messages to be printed per image. 
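Note that, as implemented in this widget, the callback is invoked on the selected patches array rather than on a `menpo.image.Image`. For illustration only, a minimal (hypothetical) callback named ``extra_info`` could be::\n\n def extra_info(patches):\n # return one string per extra line shown in the 'Info' tab\n return ['Mean patch value: {:.3f}'.format(patches.mean())]\n\n 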
Each custom message\n will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that patches is a list even with one member\n if (isinstance(patches, list) and isinstance(patches[0], Image)) or \\\n not isinstance(patches, list):\n patches = [patches]\n\n # Make sure that patch_centers is a list even with one shape\n if not isinstance(patch_centers, list):\n patch_centers = [patch_centers] * len(patches)\n elif isinstance(patch_centers, list) and len(patch_centers) == 1:\n patch_centers *= len(patches)\n\n # Make sure all patch-based images are in the single array format\n for i in range(len(patches)):\n if isinstance(patches[i], list):\n patches[i] = _convert_patches_list_to_single_array(\n patches[i], patch_centers[i].n_points)\n\n # Get the number of patch_based images\n n_patches = len(patches)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index\n i = image_number_wid.selected_values if n_patches > 1 else 0\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n image_options = dict(image_options_wid.selected_values)\n del image_options['masked_enabled']\n options.update(image_options)\n options.update(patch_options_wid.selected_values)\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render image with selected options\n save_figure_wid.renderer = render_patches(\n patches=patches[i], patch_centers=patch_centers[i],\n renderer=save_figure_wid.renderer, figure_size=new_figure_size,\n **options)\n\n # update info text widget\n update_info(patches[i], custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(ptchs, custom_info_callback=None):\n text_per_line = [\n \"> Patch-Based Image with {} patche{} and {} offset{}.\".format(\n ptchs.shape[0], 's' * (ptchs.shape[0] > 1), ptchs.shape[1],\n 's' * (ptchs.shape[1] > 1)),\n \"> Each patch has size {}H x {}W with {} channel{}.\".format(\n ptchs.shape[3], ptchs.shape[4], ptchs.shape[2],\n 's' * (ptchs.shape[2] > 1)),\n \"> min={:.3f}, max={:.3f}\".format(ptchs.min(), ptchs.max())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(ptchs):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n shape_options_wid = Shape2DOptionsWidget(\n labels=None, render_function=None)\n shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False\n shape_options_wid.add_render_function(render_function)\n patch_options_wid 
= PatchOptionsWidget(\n n_patches=patches[0].shape[0], n_offsets=patches[0].shape[1],\n render_function=render_function)\n image_options_wid = ImageOptionsWidget(\n n_channels=patches[0].shape[2], image_is_masked=False,\n render_function=None)\n image_options_wid.interpolation_checkbox.button_wid.value = False\n image_options_wid.add_render_function(render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,\n axes_x_limits=None, axes_y_limits=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_patches > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Selected object\n i = image_number_wid.selected_values\n\n # Update patch options\n patch_options_wid.set_widget_state(\n n_patches=patches[i].shape[0], n_offsets=patches[i].shape[1],\n allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=patches[i].shape[2], image_is_masked=False,\n allow_callback=True)\n\n # Image selection slider\n index = {'min': 0, 'max': n_patches-1, 'step': 1, 'index': 0}\n image_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Image', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, image_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, patch_options_wid, image_options_wid,\n shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Patches', 'Image', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_patches > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef plot_graph(x_axis, y_axis, legend_entries=None, figure_size=(9, 5)):\n r\"\"\"\n Widget that allows plotting various curves in a graph.\n\n The widget has options tabs regarding the graph and the renderer (lines,\n markers, legend, figure, axes, grid) and saving the figure to file.\n\n Parameters\n ----------\n x_axis : `list` of `float`\n The values of the horizontal axis. Note that these values are common for\n all the curves.\n y_axis : `list` of `lists` of `float`\n A `list` that stores a `list` of values to be plotted for each curve.\n legend_entries : `list` or `str` or ``None``, optional\n The `list` of names that will appear on the legend for each curve. 
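For example, ``['training error', 'validation error']`` for two curves (illustrative names only). 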
If\n ``None``, then the names format is ``curve {}.format(i)``.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n from menpo.visualize import plot_curve\n print('Initializing...')\n\n # Get number of curves to be plotted\n n_curves = len(y_axis)\n\n # Define the styling options\n main_style = 'danger'\n\n # Parse options\n if legend_entries is None:\n legend_entries = [\"curve {}\".format(i) for i in range(n_curves)]\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # plot with selected options\n opts = plot_wid.selected_values.copy()\n new_figure_size = (\n plot_wid.selected_values['zoom'][0] * figure_size[0],\n plot_wid.selected_values['zoom'][1] * figure_size[1])\n del opts['zoom']\n save_figure_wid.renderer = plot_curve(\n x_axis=x_axis, y_axis=y_axis, figure_size=new_figure_size,\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **opts)\n\n # show plot\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n plot_wid = PlotMatplotlibOptionsWidget(\n legend_entries=legend_entries, render_function=render_function)\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n logo = LogoWidget(style=main_style)\n logo.layout.margin = '0px 10px 0px 0px'\n tmp_children = list(plot_wid.tab_box.children)\n tmp_children.append(save_figure_wid)\n plot_wid.tab_box.children = tmp_children\n plot_wid.tab_box.set_title(0, 'Labels')\n plot_wid.tab_box.set_title(1, 'Style')\n plot_wid.tab_box.set_title(2, 'Legend')\n plot_wid.tab_box.set_title(3, 'Axes')\n plot_wid.tab_box.set_title(4, 'Zoom')\n plot_wid.tab_box.set_title(5, 'Grid')\n plot_wid.tab_box.set_title(6, 'Export')\n\n # Display final widget\n wid = ipywidgets.HBox([logo, plot_wid])\n wid.box_style = main_style\n wid.layout.border = '2px solid' + map_styles_to_hex_colours(main_style)\n plot_wid.container.border = '0px'\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef save_matplotlib_figure(renderer):\n r\"\"\"\n Widget that allows to save a figure, which was generated with Matplotlib,\n to file.\n\n Parameters\n ----------\n renderer : `menpo.visualize.viewmatplotlib.MatplotlibRenderer`\n The Matplotlib renderer object.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n # Create sub-widgets\n logo_wid = LogoWidget()\n logo_wid.layout.margin = '0px 10px 0px 0px'\n save_figure_wid = SaveMatplotlibFigureOptionsWidget(renderer,\n style='warning')\n wid = ipywidgets.HBox([logo_wid, save_figure_wid])\n\n # Display widget\n ipydisplay.display(wid)\n\n\ndef save_mayavi_figure(renderer):\n r\"\"\"\n Widget that allows to save a figure, which was generated with Mayavi,\n to file.\n\n Parameters\n ----------\n renderer : `menpo3d.visualize.viewmayavi.MayaviRenderer`\n The Mayavi renderer object.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n # Create sub-widgets\n logo_wid = LogoWidget()\n logo_wid.layout.margin = '0px 10px 0px 0px'\n save_figure_wid = 
SaveMayaviFigureOptionsWidget(renderer,\n style='warning')\n wid = ipywidgets.HBox([logo_wid, save_figure_wid])\n\n # Display widget\n ipydisplay.display(wid)\n\n\ndef visualize_shape_model_2d(shape_model, n_parameters=5, mode='multiple',\n parameters_bounds=(-3.0, 3.0), figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical 2D shape model.\n\n Parameters\n ----------\n shape_model : `list` of `menpo.shape.PCAModel` or `subclass`\n The multi-scale shape model to be visualized. Note that each level can\n have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n from menpo.visualize.viewmatplotlib import (_set_axes_options,\n _parse_axes_limits)\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that shape_model is a list even with one member\n if not isinstance(shape_model, list):\n shape_model = [shape_model]\n\n # Get the number of levels (i.e. 
number of shape models)\n n_levels = len(shape_model)\n\n # Define the styling options\n main_style = 'warning'\n\n # Get the maximum number of components per level\n max_n_params = [sp.n_active_components for sp in shape_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Compute weights\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n shape_model[level].eigenvalues[:len(parameters)] ** 0.5)\n\n # Get the mean\n mean = shape_model[level].mean()\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options['image_view'] = shape_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(mean, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n idx = mean.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_face_colour.append(options['marker_face_colour'][idx])\n marker_edge_colour.append(options['marker_edge_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render with selected options\n if mode_wid.value == 1:\n # Deformation mode\n # Compute instance\n instance = shape_model[level].instance(weights)\n\n # Render mean shape\n if mean_wid.selected_values:\n save_figure_wid.renderer = mean.view(\n figure_id=save_figure_wid.renderer.figure_id,\n new_figure=False, figure_size=None,\n image_view=options['image_view'],\n render_lines=options['render_lines'],\n line_colour='yellow', line_style=options['line_style'],\n line_width=options['line_width'],\n render_markers=options['render_markers'],\n marker_style=options['marker_style'],\n marker_size=options['marker_size'],\n marker_face_colour='yellow', marker_edge_colour='yellow',\n marker_edge_width=options['marker_edge_width'])\n\n # Render instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Get instance range\n instance_range = instance.range()\n else:\n # 
Vectors mode\n # Compute instance\n instance_lower = shape_model[level].instance([-p for p in weights])\n instance_upper = shape_model[level].instance(weights)\n\n # Render mean shape\n save_figure_wid.renderer = mean.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Render vectors\n ax = plt.gca()\n x_min = np.Inf\n y_min = np.Inf\n x_max = -np.Inf\n y_max = -np.Inf\n for p in range(mean.n_points):\n xm = mean.points[p, 0]\n ym = mean.points[p, 1]\n xl = instance_lower.points[p, 0]\n yl = instance_lower.points[p, 1]\n xu = instance_upper.points[p, 0]\n yu = instance_upper.points[p, 1]\n if options['image_view']:\n # image mode\n lines = [[(ym, xm), (yl, xl)], [(ym, xm), (yu, xu)]]\n else:\n # point cloud mode\n lines = [[(xm, ym), (xl, yl)], [(xm, ym), (xu, yu)]]\n lc = mc.LineCollection(lines, colors=('g', 'b'),\n linestyles='solid', linewidths=2)\n # update min, max\n y_min = np.min([y_min, xl, xu])\n y_max = np.max([y_max, xl, xu])\n x_min = np.min([x_min, yl, yu])\n x_max = np.max([x_max, yl, yu])\n\n # add collection\n ax.add_collection(lc)\n\n # parse axes limits\n axes_x_limits, axes_y_limits = _parse_axes_limits(\n x_min, x_max, y_min, y_max, options['axes_x_limits'],\n options['axes_y_limits'])\n _set_axes_options(\n ax, render_axes=options['render_axes'],\n inverted_y_axis=options['image_view'],\n axes_font_name=options['axes_font_name'],\n axes_font_size=options['axes_font_size'],\n axes_font_style=options['axes_font_style'],\n axes_font_weight=options['axes_font_weight'],\n axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,\n axes_x_ticks=options['axes_x_ticks'],\n axes_y_ticks=options['axes_y_ticks'])\n\n # Get instance range\n instance_range = mean.range()\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Update info\n update_info(level, instance_range)\n\n # Define function that updates the info text\n def update_info(level, instance_range):\n text_per_line = [\n \"> Level {} out of {}\".format(level + 1, n_levels),\n \"> {} components in total\".format(shape_model[level].n_components),\n \"> {} active components\".format(\n shape_model[level].n_active_components),\n \"> {:.1f}% variance kept\".format(\n shape_model[level].variance_ratio() * 100),\n \"> Instance range: {:.1f} x {:.1f}\".format(instance_range[0],\n instance_range[1]),\n \"> {} landmark points, {} features\".format(\n shape_model[level].mean().n_points,\n shape_model[level].n_features)]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n plt.subplot(121)\n save_figure_wid.renderer = shape_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n shape_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n mode_dict = OrderedDict()\n mode_dict['Deformation'] = 1\n mode_dict['Vectors'] = 2\n mode_wid = ipywidgets.RadioButtons(\n options=mode_dict, 
description='Mode', value=1,\n layout=ipywidgets.Layout(width='6cm'))\n mode_wid.observe(render_function, names='value', type='change')\n mean_wid = SwitchWidget(\n selected_value=False, description='Render mean shape',\n description_location='right', switch_type='checkbox')\n mean_wid.observe(render_function, names='selected_values', type='change')\n\n # Function that controls mean shape checkbox visibility\n def mean_visible(change):\n if change['new'] == 1:\n mean_wid.button_wid.disabled = False\n else:\n mean_wid.button_wid.disabled = True\n mean_wid.set_widget_state(False, allow_callback=False)\n mode_wid.observe(mean_visible, names='value', type='change')\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n labels = None\n if hasattr(shape_model[0].mean(), 'labels'):\n labels = shape_model[0].mean().labels\n shape_options_wid = Shape2DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=None, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n model_parameters_wid.set_widget_state(\n n_parameters=n_parameters[change['new']],\n params_str='Parameter ', allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n radio_children = [level_wid, mode_wid, mean_wid]\n else:\n radio_children = [mode_wid, mean_wid]\n radio_wids = ipywidgets.VBox(radio_children)\n tmp_wid = ipywidgets.HBox([radio_wids, model_parameters_wid])\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_shape_model_3d(shape_model, n_parameters=5, mode='multiple',\n parameters_bounds=(-15.0, 15.0)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical 3D shape model.\n\n Parameters\n ----------\n shape_model : `list` of `menpo.shape.PCAModel` or `subclass`\n 
The multi-scale shape model to be visualized. Note that each level can\n have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that shape_model is a list even with one member\n if not isinstance(shape_model, list):\n shape_model = [shape_model]\n\n # Get the number of levels (i.e. number of shape models)\n n_levels = len(shape_model)\n\n # Check if the model is TriMesh or any other 3D shape class\n is_trimesh = isinstance(shape_model[0].template_instance, TriMesh)\n\n # Define the styling options\n main_style = 'warning'\n\n # Get the maximum number of components per level\n max_n_params = [sp.n_active_components for sp in shape_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n save_figure_wid.renderer.clear_figure()\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Compute weights\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n shape_model[level].eigenvalues[:len(parameters)] ** 0.5)\n\n # Compute instance\n instance = shape_model[level].instance(weights)\n\n # Create options dictionary\n options = dict()\n if is_trimesh:\n options.update(shape_options_wid.selected_values)\n else:\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n # Correct options based on the type of the shape\n if hasattr(instance, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = instance.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Update info\n update_info(level, instance.range())\n\n # Render 
instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(level, instance_range):\n text_per_line = [\n \"> Level {} out of {}\".format(level + 1, n_levels),\n \"> {} components in total\".format(shape_model[level].n_components),\n \"> {} active components\".format(\n shape_model[level].n_active_components),\n \"> {:.1f}% variance kept\".format(\n shape_model[level].variance_ratio() * 100),\n \"> Instance range: {:.1f} x {:.1f}\".format(instance_range[0],\n instance_range[1]),\n \"> {} points\".format(\n shape_model[level].mean().n_points)]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n with out:\n ipydisplay.clear_output(wait=True)\n plt.subplot(121)\n shape_model[level].plot_eigenvalues_ratio()\n plt.subplot(122)\n shape_model[level].plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n if is_trimesh:\n shape_options_wid = Mesh3DOptionsWidget(textured=False,\n render_function=render_function)\n else:\n labels = None\n if hasattr(shape_model[0].mean(), 'labels'):\n labels = shape_model[0].mean().labels\n shape_options_wid = Shape3DOptionsWidget(labels=labels,\n render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['numbering_mayavi'], labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n model_parameters_wid.set_widget_state(\n n_parameters=n_parameters[change['new']],\n params_str='Parameter ', allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_wid = ipywidgets.HBox([level_wid, model_parameters_wid])\n else:\n tmp_wid = ipywidgets.HBox(children=[model_parameters_wid])\n if is_trimesh:\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, info_wid, save_figure_wid])\n tab_titles = ['Model', 'Mesh', 'Info', 'Export']\n else:\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = 
LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n with out:\n print_dynamic('')\n\n\ndef visualize_appearance_model(appearance_model, n_parameters=5,\n mode='multiple', parameters_bounds=(-3.0, 3.0),\n figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical appearance model.\n\n Parameters\n ----------\n appearance_model : `list` of `menpo.model.PCAModel` or subclass\n The multi-scale appearance model to be visualized. Note that each level\n can have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that appearance_model is a list even with one member\n if not isinstance(appearance_model, list):\n appearance_model = [appearance_model]\n\n # Get the number of levels (i.e. 
number of appearance models)\n n_levels = len(appearance_model)\n\n # Define the styling options\n main_style = 'success'\n\n # Get the maximum number of components per level\n max_n_params = [ap.n_active_components for ap in appearance_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Compute weights and instance\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)\n instance = appearance_model[level].instance(weights)\n image_is_masked = isinstance(instance, MaskedImage)\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n options.update(renderer_options_wid.selected_values['legend'])\n options.update(image_options_wid.selected_values)\n options.update(landmark_options_wid.selected_values['landmarks'])\n\n # Correct options based on the type of the shape\n if (instance.has_landmarks and\n hasattr(instance.landmarks[g], 'labels')):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = instance.landmarks[g].labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = render_image(\n image=instance, renderer=save_figure_wid.renderer,\n image_is_masked=image_is_masked, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(instance, level, g)\n\n # Define function that updates the info text\n def update_info(image, level, group):\n lvl_app_mod = appearance_model[level]\n lp = 0 if group is None else image.landmarks[group].n_points\n text_per_line = [\n \"> Level: {} out of {}.\".format(level + 1, n_levels),\n \"> {} components in total.\".format(lvl_app_mod.n_components),\n \"> {} active components.\".format(lvl_app_mod.n_active_components),\n \"> {:.1f}% variance kept.\".format(\n lvl_app_mod.variance_ratio() * 100),\n \"> Reference shape of size {} with {} channel{}.\".format(\n image._str_shape(),\n image.n_channels, 's' * 
(image.n_channels > 1)),\n \"> {} features.\".format(lvl_app_mod.n_features),\n \"> {} landmark points.\".format(lp),\n \"> Instance: min={:.3f}, max={:.3f}\".format(image.pixels.min(),\n image.pixels.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n plt.subplot(121)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n groups_keys, labels_keys = extract_groups_labels_from_image(\n appearance_model[0].mean())\n image_options_wid = ImageOptionsWidget(\n n_channels=appearance_model[0].mean().n_channels,\n image_is_masked=isinstance(appearance_model[0].mean(),\n MaskedImage),\n render_function=render_function)\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n axes_x_limits=None, axes_y_limits=None, labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n tmp_children = [model_parameters_wid]\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n value = change['new']\n # Update model parameters widget\n model_parameters_wid.set_widget_state(\n n_parameters[value], params_str='Parameter ',\n allow_callback=False)\n\n # Update landmarks options\n g_keys, l_keys = extract_groups_labels_from_image(\n appearance_model[value].mean())\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=appearance_model[value].mean().n_channels,\n image_is_masked=isinstance(\n appearance_model[value].mean(), MaskedImage),\n allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_children.insert(0, 
level_wid)\n tmp_wid = ipywidgets.HBox(tmp_children)\n options_box = ipywidgets.Tab(\n children=[tmp_wid, image_options_wid, landmark_options_wid,\n renderer_options_wid, info_wid, save_figure_wid])\n tab_titles = ['Model', 'Image', 'Landmarks', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_patch_appearance_model(appearance_model, centers,\n n_parameters=5, mode='multiple',\n parameters_bounds=(-3.0, 3.0),\n figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical patch-based appearance model.\n\n Parameters\n ----------\n appearance_model : `list` of `menpo.model.PCAModel` or subclass\n The multi-scale patch-based appearance model to be visualized. Note that\n each level can have different number of components.\n centers : `list` of `menpo.shape.PointCloud` or subclass\n The centers to set the patches around. If the `list` has only one\n `menpo.shape.PointCloud` then this will be used for all appearance model\n levels. Otherwise, it needs to have the same length as\n `appearance_model`.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that appearance_model is a list even with one member\n if not isinstance(appearance_model, list):\n appearance_model = [appearance_model]\n\n # Get the number of levels (i.e. 
number of appearance models)\n n_levels = len(appearance_model)\n\n # Make sure that centers is a list even with one pointcloud\n if not isinstance(centers, list):\n centers = [centers] * n_levels\n elif isinstance(centers, list) and len(centers) == 1:\n centers *= n_levels\n\n # Define the styling options\n main_style = 'success'\n\n # Get the maximum number of components per level\n max_n_params = [ap.n_active_components for ap in appearance_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Compute weights and instance\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)\n instance = appearance_model[level].instance(weights)\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n image_options = dict(image_options_wid.selected_values)\n del image_options['masked_enabled']\n options.update(image_options)\n options.update(patch_options_wid.selected_values)\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render image with selected options\n save_figure_wid.renderer = render_patches(\n patches=instance.pixels, patch_centers=centers[level],\n renderer=save_figure_wid.renderer, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(instance, level)\n\n # Define function that updates the info text\n def update_info(image, level):\n lvl_app_mod = appearance_model[level]\n text_per_line = [\n \"> Level: {} out of {}.\".format(level + 1, n_levels),\n \"> {} components in total.\".format(lvl_app_mod.n_components),\n \"> {} active components.\".format(lvl_app_mod.n_active_components),\n \"> {:.1f}% variance kept.\".format(\n lvl_app_mod.variance_ratio() * 100),\n \"> Each patch has size {}H x {}W with {} channel{}.\".format(\n image.pixels.shape[3], image.pixels.shape[4],\n image.pixels.shape[2], 's' * (image.pixels.shape[2] > 1)),\n \"> {} features.\".format(lvl_app_mod.n_features),\n \"> {} landmark points.\".format(image.pixels.shape[0]),\n \"> Instance: min={:.3f}, max={:.3f}\".format(image.pixels.min(),\n image.pixels.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n 
plt.subplot(121)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n shape_options_wid = Shape2DOptionsWidget(\n labels=None, render_function=None)\n shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False\n shape_options_wid.add_render_function(render_function)\n patch_options_wid = PatchOptionsWidget(\n n_patches=appearance_model[0].mean().pixels.shape[0],\n n_offsets=appearance_model[0].mean().pixels.shape[1],\n render_function=render_function)\n image_options_wid = ImageOptionsWidget(\n n_channels=appearance_model[0].mean().pixels.shape[2],\n image_is_masked=isinstance(appearance_model[0].mean(), MaskedImage),\n render_function=None)\n image_options_wid.interpolation_checkbox.button_wid.value = False\n image_options_wid.add_render_function(render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,\n axes_x_limits=None, axes_y_limits=None, render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n tmp_children = [model_parameters_wid]\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n value = change['new']\n # Update model parameters widget\n model_parameters_wid.set_widget_state(\n n_parameters[value], params_str='Parameter ',\n allow_callback=False)\n\n # Update patch options\n patch_options_wid.set_widget_state(\n n_patches=appearance_model[value].mean().pixels.shape[0],\n n_offsets=appearance_model[value].mean().pixels.shape[1],\n allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=appearance_model[value].mean().pixels.shape[2],\n image_is_masked=isinstance(appearance_model[value].mean(),\n MaskedImage),\n allow_callback=True)\n\n # Define pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_children.insert(0, level_wid)\n tmp_wid = ipywidgets.HBox(tmp_children)\n options_box = ipywidgets.Tab(\n children=[tmp_wid, patch_options_wid, image_options_wid,\n shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Patches', 'Channels', 'Shape', 'Renderer', 'Info',\n 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = 
LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_morphable_model(mm, n_shape_parameters=5, n_texture_parameters=5,\n mode='multiple', parameters_bounds=(-15.0, 15.0)):\n r\"\"\"\n Widget that allows the dynamic visualization of a 3D Morphable Model.\n\n Parameters\n ----------\n mm : `menpo3d.morhpablemodel.ColouredMorphableModel` or `subclass`\n The multi-scale 3D Morphable Model to be visualized.\n n_shape_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the shape parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n n_texture_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the tecture\n parameters sliders. If `int`, then the number of sliders per level is\n the minimum between `n_parameters` and the number of active components\n per level. If `list` of `int`, then a number of sliders is defined per\n level. If ``None``, all the active components per level will have a\n slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. 
If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print_dynamic('Initializing...')\n\n # Define the styling options\n main_style = 'info'\n\n # Check the given number of parameters\n n_shape_parameters = check_n_parameters(\n n_shape_parameters, 1, [mm.shape_model.n_active_components])\n n_texture_parameters = check_n_parameters(\n n_texture_parameters, 1, [mm.texture_model.n_active_components])\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # Compute weights\n shape_weights = shape_model_parameters_wid.selected_values\n shape_weights = (\n shape_weights *\n mm.shape_model.eigenvalues[:len(shape_weights)] ** 0.5)\n texture_weights = texture_model_parameters_wid.selected_values\n texture_weights = (\n texture_weights *\n mm.texture_model.eigenvalues[:len(texture_weights)] ** 0.5)\n instance = mm.instance(shape_weights=shape_weights,\n texture_weights=texture_weights)\n # TODO: Is this really needed?\n instance = instance.clip_texture()\n\n # Update info\n update_info(mm, instance)\n\n # Render instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **mesh_options_wid.selected_values)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(mm, instance):\n text_per_line = [\n \"> {} vertices, {} triangles\".format(mm.n_vertices,\n mm.n_triangles),\n \"> {} shape components ({:.2f}% of variance)\".format(\n mm.shape_model.n_components,\n mm.shape_model.variance_ratio() * 100),\n \"> {} texture channels\".format(mm.n_channels),\n \"> {} texture components ({:.2f}% of variance)\".format(\n mm.texture_model.n_components,\n mm.texture_model.variance_ratio() * 100),\n \"> Instance: min={:.3f} , max={:.3f}\".format(\n instance.colours.min(), instance.colours.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot shape variance function\n def plot_shape_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Render\n plt.subplot(121)\n mm.shape_model.plot_eigenvalues_ratio()\n plt.subplot(122)\n mm.shape_model.plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Plot texture variance function\n def plot_texture_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Render\n plt.subplot(121)\n mm.texture_model.plot_eigenvalues_ratio()\n plt.subplot(122)\n mm.texture_model.plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Create widgets\n shape_model_parameters_wid = LinearModelParametersWidget(\n n_shape_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_shape_variance,\n animation_step=0.5, interval=0., loop_enabled=True)\n texture_model_parameters_wid = LinearModelParametersWidget(\n n_texture_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, 
params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_texture_variance,\n animation_step=0.5, interval=0., loop_enabled=True)\n mesh_options_wid = Mesh3DOptionsWidget(textured=True,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n model_parameters_wid = ipywidgets.HBox(\n [ipywidgets.Tab([shape_model_parameters_wid,\n texture_model_parameters_wid])])\n model_parameters_wid.children[0].set_title(0, 'Shape')\n model_parameters_wid.children[0].set_title(1, 'Texture')\n options_box = ipywidgets.Tab([model_parameters_wid, mesh_options_wid,\n info_wid, save_figure_wid])\n tab_titles = ['Model', 'Mesh', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef webcam_widget(canvas_width=640, hd=True, n_preview_windows=5):\n r\"\"\"\n Webcam widget for taking snapshots. The snapshots are dynamically previewed\n in a FIFO stack of thumbnails.\n\n Parameters\n ----------\n canvas_width : `int`, optional\n The initial width of the rendered canvas. Note that this doesn't actually\n change the webcam resolution. It simply rescales the rendered image, as\n well as the size of the returned screenshots.\n hd : `bool`, optional\n If ``True``, then the webcam will be set to high definition (HD), i.e.\n 720 x 1280. Otherwise the default resolution will be used.\n n_preview_windows : `int`, optional\n The number of preview thumbnails that will be used as a FIFO stack to\n show the captured screenshots. It must be at least 4.\n\n Returns\n -------\n snapshots : `list` of `menpo.image.Image`\n The list of captured images.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n\n # Set update function\n images = []\n\n def update(_):\n images.append(wid.selected_values[-1])\n\n # Create widgets\n wid = CameraSnapshotWidget(\n canvas_width=canvas_width, hd=hd, n_preview_windows=n_preview_windows,\n preview_windows_margin=3, style='danger', preview_style='warning',\n render_function=update)\n wid.container.layout.border = (\n '2px solid' + map_styles_to_hex_colours('danger'))\n\n # Display widget\n ipydisplay.display(wid)\n\n # Return\n return images\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.min",
"matplotlib.collections.LineCollection",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
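The widget code in the record above draws its "Vectors" mode by building one line segment per landmark between the mean shape and a perturbed instance, handing them to matplotlib's LineCollection, and then fitting the axes to the covered range. A minimal, self-contained sketch of just that drawing step (using made-up 2D points instead of menpo shape objects; not part of the record itself):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import collections as mc

    rng = np.random.default_rng(0)
    mean_pts = rng.uniform(0.0, 1.0, size=(20, 2))              # stand-in for shape_model.mean().points
    inst_pts = mean_pts + rng.normal(0.0, 0.05, size=(20, 2))   # stand-in for a model instance

    # one segment per landmark, from the mean position to the instance position
    segments = [[(xm, ym), (xi, yi)]
                for (xm, ym), (xi, yi) in zip(mean_pts, inst_pts)]
    lc = mc.LineCollection(segments, colors='g', linestyles='solid', linewidths=2)

    ax = plt.gca()
    ax.scatter(mean_pts[:, 0], mean_pts[:, 1], c='k', s=10)
    ax.add_collection(lc)
    ax.set_xlim(np.min(inst_pts[:, 0]) - 0.1, np.max(inst_pts[:, 0]) + 0.1)
    ax.set_ylim(np.min(inst_pts[:, 1]) - 0.1, np.max(inst_pts[:, 1]) + 0.1)
    plt.show()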
sumanthd17/mt5 | [
"c99b4e3ad1c69908c852c730a1323ccb52d48f58"
] | [
"multilingual_t5/baseline_mr/baseline_mr.py"
] | [
"\"\"\"baseline_mr dataset.\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\n\n# TODO(baseline_mr): Markdown description that will appear on the catalog page.\n_DESCRIPTION = \"\"\"\nDescription is **formatted** as markdown.\n\nIt should also contain any processing which has been applied (if any),\n(e.g. corrupted example skipped, images cropped,...):\n\"\"\"\n\n# TODO(baseline_mr): BibTeX citation\n_CITATION = \"\"\"\n\"\"\"\n\n\nclass BaselineMr(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for baseline_mr dataset.\"\"\"\n\n VERSION = tfds.core.Version('1.0.0')\n RELEASE_NOTES = {\n '1.0.0': 'Initial release.',\n }\n\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n # TODO(baseline_mr): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'source': tfds.features.Text(),\n 'target': tfds.features.Text(),\n }),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n # TODO(baseline_mr): Downloads the data and defines the splits\n path = dl_manager.download_and_extract('https://storage.googleapis.com/ai4b-anuvaad-nmt/baselines/mT5/baseline_mr/strict-en-mr.zip')\n\n # TODO(baseline_mr): Returns the Dict[split names, Iterator[Key, Example]]\n return {\n 'train': self._generate_examples(source=path/'en-mr/train/train.mr', target=path/'en-mr/train/train.en'),\n 'validation': self._generate_examples(source=path/'en-mr/dev/dev.mr', target=path/'en-mr/dev/dev.en')\n }\n\n def _generate_examples(self, source, target):\n \"\"\"Yields examples.\"\"\"\n # TODO(baseline_mr): Yields (key, example) tuples from the dataset\n src = tf.io.gfile.GFile(source, 'r').readlines()\n tgt = tf.io.gfile.GFile(target, 'r').readlines()\n for idx, row in enumerate(zip(src, tgt)):\n yield idx, {\n 'source': row[0],\n 'target': row[1],\n }\n"
] | [
[
"tensorflow.io.gfile.GFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
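Once a GeneratorBasedBuilder like the one above has been registered (by importing its module) and built, it is normally consumed through tfds.load. A rough usage sketch; the registering import is a placeholder because the real package layout is not shown in this record:

    import tensorflow_datasets as tfds
    # import baseline_mr  # hypothetical registering import; adjust to your package layout

    ds = tfds.load('baseline_mr', split='train', shuffle_files=True)
    for example in ds.take(2):
        # each example is a dict of tensors with the 'source' and 'target' text features
        print(example['source'].numpy().decode('utf-8'))
        print(example['target'].numpy().decode('utf-8'))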
dixiak/gnes | [
"12513d29157a06bd22923717fd0c19a856f20193"
] | [
"gnes/preprocessor/video/shotdetect.py"
] | [
"# Tencent is pleased to support the open source community by making GNES available.\n#\n# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nimport numpy as np\n\nfrom ..base import BaseVideoPreprocessor\nfrom ..helper import compute_descriptor, compare_descriptor, detect_peak_boundary, compare_ecr\nfrom ..io_utils import video as video_util\nfrom ...proto import gnes_pb2, array2blob\n\n\nclass ShotDetectPreprocessor(BaseVideoPreprocessor):\n store_args_kwargs = True\n\n def __init__(self,\n frame_size: str = '192:168',\n descriptor: str = 'block_hsv_histogram',\n distance_metric: str = 'bhattacharya',\n detect_method: str = 'threshold',\n frame_rate: int = 10,\n frame_num: int = -1,\n *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.frame_size = frame_size\n self.descriptor = descriptor\n self.distance_metric = distance_metric\n self.detect_method = detect_method\n self.frame_rate = frame_rate\n self.frame_num = frame_num\n self._detector_kwargs = kwargs\n\n def detect_shots(self, frames: 'np.ndarray') -> List[List['np.ndarray']]:\n descriptors = []\n for frame in frames:\n descriptor = compute_descriptor(\n frame, method=self.descriptor, **self._detector_kwargs)\n descriptors.append(descriptor)\n\n # compute distances between frames\n if self.distance_metric == 'edge_change_ration':\n dists = compare_ecr(descriptors)\n else:\n dists = [\n compare_descriptor(pair[0], pair[1], self.distance_metric)\n for pair in zip(descriptors[:-1], descriptors[1:])\n ]\n\n shot_bounds = detect_peak_boundary(dists, self.detect_method)\n\n shots = []\n for ci in range(0, len(shot_bounds) - 1):\n shots.append(frames[shot_bounds[ci]:shot_bounds[ci + 1]])\n\n return shots\n\n def apply(self, doc: 'gnes_pb2.Document') -> None:\n super().apply(doc)\n\n if doc.raw_bytes:\n all_frames = video_util.capture_frames(\n input_data=doc.raw_bytes,\n scale=self.frame_size,\n fps=self.frame_rate,\n vframes=self.frame_num)\n num_frames = len(all_frames)\n assert num_frames > 0\n shots = self.detect_shots(all_frames)\n\n for ci, frames in enumerate(shots):\n c = doc.chunks.add()\n c.doc_id = doc.doc_id\n # chunk_data = np.concatenate(frames, axis=0)\n chunk_data = np.array(frames)\n c.blob.CopyFrom(array2blob(chunk_data))\n c.offset = ci\n c.weight = len(frames) / num_frames\n else:\n self.logger.error('bad document: \"raw_bytes\" is empty!')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
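The preprocessor above scores consecutive frames with a descriptor distance and cuts the frame sequence wherever a boundary is detected. The gnes helpers (compute_descriptor, compare_descriptor, detect_peak_boundary) are not reproduced here, but the core idea can be sketched with plain numpy: a per-frame histogram stands in for the descriptor, a Bhattacharyya-style distance for the comparison, and a fixed threshold for the boundary detector.

    import numpy as np

    def split_into_shots(frames, threshold=0.2):
        # frames: (n_frames, H, W, C) uint8 array
        hists = []
        for f in frames:
            h, _ = np.histogram(f, bins=32, range=(0, 255))
            hists.append(h / h.sum())
        # distance between consecutive frame histograms (1 - Bhattacharyya coefficient)
        dists = [1.0 - np.sum(np.sqrt(h1 * h2)) for h1, h2 in zip(hists[:-1], hists[1:])]
        bounds = [0] + [i + 1 for i, d in enumerate(dists) if d > threshold] + [len(frames)]
        return [frames[b0:b1] for b0, b1 in zip(bounds[:-1], bounds[1:])]

    dummy = np.random.randint(0, 255, size=(30, 64, 64, 3), dtype=np.uint8)
    print([len(s) for s in split_into_shots(dummy)])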
chokyzhou/gym-flappy-bird | [
"ffe1089501f3e2e113a8868cd27480653dbe0ef7"
] | [
"src/flappy_bird_gym/envs/flappy_bird_env_simple.py"
] | [
"#\n# Copyright (c) 2020 Gabriel Nogueira (Talendar)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\n\"\"\" Implementation of a Flappy Bird OpenAI Gym environment that yields simple\nnumerical information about the game's state as observations.\n\"\"\"\n\nfrom typing import Dict, Tuple, Optional, Union\n\nimport gym\nimport numpy as np\nimport pygame\n\nfrom flappy_bird_gym.envs.game_logic import FlappyBirdLogic\nfrom flappy_bird_gym.envs.game_logic import PIPE_WIDTH, PIPE_HEIGHT\nfrom flappy_bird_gym.envs.game_logic import PLAYER_WIDTH, PLAYER_HEIGHT\nfrom flappy_bird_gym.envs.renderer import FlappyBirdRenderer\n\n\nclass FlappyBirdEnvSimple(gym.Env):\n \"\"\" Flappy Bird Gym environment that yields simple observations.\n\n The observations yielded by this environment are simple numerical\n information about the game's state. Specifically, the observations are:\n\n * Horizontal distance to the next pipe;\n * Difference between the player's y position and the next hole's y\n position.\n\n The reward received by the agent in each step is equal to the score obtained\n by the agent in that step. A score point is obtained every time the bird\n passes a pipe.\n\n Args:\n screen_size (Tuple[int, int]): The screen's width and height.\n normalize_obs (bool): If `True`, the observations will be normalized\n before being returned.\n pipe_gap (int): Space between a lower and an upper pipe.\n bird_color (str): Color of the flappy bird. The currently available\n colors are \"yellow\", \"blue\" and \"red\".\n pipe_color (str): Color of the pipes. The currently available colors are\n \"green\" and \"red\".\n background (Optional[str]): Type of background image. The currently\n available types are \"day\" and \"night\". 
If `None`, no background will\n be drawn.\n \"\"\"\n\n metadata = {'render.modes': ['human']}\n\n def __init__(self,\n screen_size: Tuple[int, int] = (288, 512),\n normalize_obs: bool = True,\n pipe_gap: int = 100,\n bird_color: str = \"yellow\",\n pipe_color: str = \"green\",\n background: Optional[str] = \"day\") -> None:\n self.action_space = gym.spaces.Discrete(2)\n self.observation_space = gym.spaces.Box(-np.inf, np.inf,\n shape=(3,),\n dtype=np.float32)\n self._screen_size = screen_size\n self._normalize_obs = normalize_obs\n self._pipe_gap = pipe_gap\n\n self._game = None\n self._renderer = None\n\n self._bird_color = bird_color\n self._pipe_color = pipe_color\n self._bg_type = background\n\n def _get_observation(self):\n up_pipe = low_pipe = None\n h_dist = 0\n for up_pipe, low_pipe in zip(self._game.upper_pipes,\n self._game.lower_pipes):\n h_dist = (low_pipe[\"x\"] + PIPE_WIDTH / 2\n - (self._game.player_x - PLAYER_WIDTH / 2))\n h_dist += 3 # extra distance to compensate for the buggy hit-box\n if h_dist >= 0:\n break\n\n upper_pipe_y = up_pipe[\"y\"] + PIPE_HEIGHT\n lower_pipe_y = low_pipe[\"y\"]\n player_y = self._game.player_y\n y_vel = self._game.player_vel_y\n\n v_dist = (upper_pipe_y + lower_pipe_y) / 2 - (player_y\n + PLAYER_HEIGHT/2)\n\n if self._normalize_obs:\n h_dist /= self._screen_size[0]\n v_dist /= self._screen_size[1]\n\n return np.array([\n h_dist,\n v_dist,\n y_vel,\n ])\n\n def step(self,\n action: Union[FlappyBirdLogic.Actions, int],\n ) -> Tuple[np.ndarray, float, bool, Dict]:\n \"\"\" Given an action, updates the game state.\n\n Args:\n action (Union[FlappyBirdLogic.Actions, int]): The action taken by\n the agent. Zero (0) means \"do nothing\" and one (1) means \"flap\".\n\n Returns:\n A tuple containing, respectively:\n\n * an observation (horizontal distance to the next pipe;\n difference between the player's y position and the next hole's\n y position);\n * a reward (always 1);\n * a status report (`True` if the game is over and `False`\n otherwise);\n * an info dictionary.\n \"\"\"\n alive = self._game.update_state(action)\n obs = self._get_observation()\n\n reward = 1\n\n done = not alive\n info = {\"score\": self._game.score}\n\n return obs, reward, done, info\n\n def reset(self):\n \"\"\" Resets the environment (starts a new game). \"\"\"\n self._game = FlappyBirdLogic(screen_size=self._screen_size,\n pipe_gap_size=self._pipe_gap)\n if self._renderer is not None:\n self._renderer.game = self._game\n\n return self._get_observation()\n\n def render(self, mode='human') -> None:\n \"\"\" Renders the next frame. \"\"\"\n if self._renderer is None:\n self._renderer = FlappyBirdRenderer(screen_size=self._screen_size,\n bird_color=self._bird_color,\n pipe_color=self._pipe_color,\n background=self._bg_type)\n self._renderer.game = self._game\n self._renderer.make_display()\n\n self._renderer.draw_surface(show_score=True)\n self._renderer.update_display()\n\n def close(self):\n \"\"\" Closes the environment. \"\"\"\n if self._renderer is not None:\n pygame.display.quit()\n self._renderer = None\n super().close()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
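A rough interaction loop for the environment above, instantiating the class directly because the gym registration id is not shown in this record; actions are sampled at random purely to exercise reset/step:

    from flappy_bird_gym.envs.flappy_bird_env_simple import FlappyBirdEnvSimple

    env = FlappyBirdEnvSimple()
    obs = env.reset()
    done, total_reward = False, 0.0
    while not done:
        action = env.action_space.sample()          # 0 = do nothing, 1 = flap
        obs, reward, done, info = env.step(action)  # obs = [h_dist, v_dist, y_vel]
        total_reward += reward
    print('score:', info['score'], 'reward:', total_reward)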
mapattacker/flask-serverless | [
"9612b7cbc5157770d88f352e0676911658c4de9a"
] | [
"project/app.py"
] | [
"import pickle\nimport traceback\n\nimport numpy as np\nfrom flask import Flask, request\n\nfrom config import MODELPATH, DEBUG\n\n\napp = Flask(__name__)\nmodel = pickle.load(open(MODELPATH, 'rb'))\n\n\[email protected](\"/predict\", methods=[\"POST\"])\ndef predict():\n \"\"\"{\"input\": [5.8, 2.8, 5.1, 2.4]}\"\"\"\n try:\n content = request.json\n sample = content[\"input\"]\n\n sample = np.array(sample).reshape(1, -1)\n prediction = model.predict(sample).tolist()[0]\n\n return {\"prediction\": prediction}\n except Exception as e:\n tb = traceback.format_exc()\n return {\"errorMessages\": tb.replace(\"\\n\",\"\")}\n\n\nif __name__ == \"__main__\":\n app.run(debug=DEBUG)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
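A hypothetical client call for the /predict route defined above, assuming the app is running locally on Flask's default port (5000) and that the pickled model expects four features, as the docstring's sample payload suggests:

    import requests

    resp = requests.post(
        'http://127.0.0.1:5000/predict',
        json={'input': [5.8, 2.8, 5.1, 2.4]},  # same payload shape as the docstring example
    )
    print(resp.status_code, resp.json())       # {"prediction": ...} or {"errorMessages": "..."}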
LeonardoSaccotelli/Numerical-Calculus-Project | [
"becb480a611c9a57416127f6b0289085fe180ee4"
] | [
"5_Quadrature Formulas/Algoritmi_Quadratura.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 7 17:58:09 2020\n\n@author: Leonardo Saccotelli\n\"\"\"\n\nimport numpy as np\n\n\"\"\"\nFORMULA DEI TRAPEZI\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n\"\"\"\ndef Trapezoid(f_x, a, b):\n #Calcolo l'integrale\n T = (b-a)*(f_x(a)+f_x(b))/2\n return T\n\n\"\"\"\nFORMULA DEI TRAPEZI COMPOSTI\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n - il numero di intervallini \n\"\"\"\ndef CompositeTrapezoid(f_x, a, b, N):\n #Estrpolo N+1 intervalli equidistanti da [a,b]\n z = np.linspace(a,b,N+1)\n \n #Calcolo f_x() in ogni punto di z\n fz = f_x(z)\n \n S = 0\n #Calcolo del trapezio composto\n for i in range(1,N):\n S = S + fz[i]\n\n TC = (fz[0] + 2*S + fz[N])*(b-a)/2/N\n \n return TC\n\n\"\"\"\nFORMULA DI SIMPSON\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n\"\"\"\ndef Simpson(f_x, a, b):\n #Calcolo l'integrale\n T = ((b-a)/6) * (f_x(a) +4 * f_x((b+a)/2) + f_x(b))\n return T\n\n\"\"\"\nFORMULA DI SIMPSON COMPOSTA\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n - il numero di intervalli\n\"\"\"\ndef CompositeSimpson(f, a, b, N):\n #Genero n+1 intervallini in [a,b]\n z = np.linspace(a,b,N+1)\n #Calcolo f negli intervalli z\n fz = f(z)\n \n #Definisco le somme dispari e le somme pari\n S_d = 0\n S_p = 0\n \n #Definisco l'ampiezza dei singoli intervalli\n h = (b-a)/N\n \n #Calcolo le somme dispari\n for i in range(1,N,2):\n S_d = S_d + fz[i]\n #Calcolo le somme pari\n for i in range(2,N-1,2):\n S_p = S_p + fz[i]\n \n Tsc = (fz[0] + 4*S_d + 2*S_p + fz[N])*h/3\n \n return Tsc\n \n \n \n\n\n\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
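A quick numerical check of the composite rules above on an integral with a known value (the integral of sin(x) over [0, pi] equals 2). The import assumes the file is reachable on sys.path as Algoritmi_Quadratura, which is an assumption about the layout; N is kept even because the composite Simpson formula requires it, and Simpson should converge noticeably faster than the trapezoidal rule:

    import numpy as np
    from Algoritmi_Quadratura import CompositeTrapezoid, CompositeSimpson  # module path is an assumption

    exact = 2.0  # integral of sin(x) over [0, pi]
    for N in (4, 16, 64):
        t = CompositeTrapezoid(np.sin, 0.0, np.pi, N)
        s = CompositeSimpson(np.sin, 0.0, np.pi, N)
        print(N, abs(t - exact), abs(s - exact))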
KristinaRay/english-arabic-nmt-bot | [
"1e0baddc81b829b3ee1abe95143cdef5c1206dd2"
] | [
"data/get_dataset.py"
] | [
"import os\nimport tqdm\nimport numpy as np\nimport requests\nimport youtokentome as yttm\nfrom argparse import ArgumentParser\nfrom zipfile import ZipFile\n\nfrom config import *\nfrom data.preprocessing import *\nfrom utils import *\n\nDATA_FILE_PATH = f'{DATA_PATH}/data.zip'\nDATA_URL = 'https://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/ar-en.txt.zip'\nTRG_FILE_NAME = 'OpenSubtitles.ar-en.ar'\nSRC_FILE_NAME = 'OpenSubtitles.ar-en.en'\nTRG_SAMPLE_FILE_PATH = f'{DATA_PATH}/ar.txt'\nSRC_SAMPLE_FILE_PATH = f'{DATA_PATH}/en.txt'\nTRG_ORIG_FILE_PATH = f'{DATA_PATH}/{TRG_FILE_NAME}'\nSRC_ORIG_FILE_PATH = f'{DATA_PATH}/{SRC_FILE_NAME}'\n\ndef fetch_dataset(data_url, data_path, data_file_path):\n \n \"\"\" Download data \"\"\"\n \n if not os.path.exists(data_path):\n os.makedirs(data_path)\n print(\"Dataset not found, downloading...\")\n response = requests.get(data_url, stream=True)\n filename = data_url.split(\"/\")[-1]\n total_size_in_bytes= int(response.headers.get('content-length', 0))\n progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n\n with open(data_file_path, 'wb') as file:\n for data in response.iter_content(1024):\n progress_bar.update(len(data))\n file.write(data)\n progress_bar.close()\n \n log(\"Download complete\")\n log(\"Extracting...\")\n \n zip = ZipFile(DATA_FILE_PATH, \"r\")\n zip.extract(TRG_FILE_NAME, DATA_PATH)\n zip.extract(SRC_FILE_NAME, DATA_PATH)\n zip.close()\n log(\"Extracting complete\")\n \n num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH)) # number of lines in arabic file\n num_lines_en = sum(1 for line in open(SRC_ORIG_FILE_PATH)) # number of lines in english file\n \n assert num_lines_ar == num_lines_en, \"Lost some data\"\n assert os.path.exists(data_path)\n\n else:\n\n log('Datasets are found')\n\ndef create_sample(sample_size, max_text_len):\n \"\"\"\n Clean data sample and remove duplicates\n \"\"\"\n log('Creating txt files for both languages...')\n num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH)) \n sample_data_size = 2 * sample_size \n chosen_lines = set(np.random.choice(np.arange(num_lines_ar), size=sample_data_size, replace=False))\n en_sub = open(SRC_ORIG_FILE_PATH, \"r\") \n ar_sub = open(TRG_ORIG_FILE_PATH, \"r\") \n unique_pairs = set()\n with open(SRC_TXT_FILE_PATH, \"a+\") as en, open(TRG_TXT_FILE_PATH, \"a+\") as ar:\n for idx, (en_line, ar_line) in enumerate(zip(en_sub, ar_sub)):\n if idx in chosen_lines:\n src = clean_en_text(en_line)\n trg = clean_ar_text(ar_line)\n if 2 < len(src) <= max_text_len and 2 < len(trg) < max_text_len:\n if ((src + trg) not in unique_pairs and (len(unique_pairs) < sample_size)): \n en.write(src)\n ar.write(trg)\n unique_pairs.add((src + trg))\n elif len(unique_pairs) >= sample_size: \n break\n assert len(unique_pairs) == sample_size, \"Not enough data\"\n en_sub.close()\n ar_sub.close()\n en.close()\n ar.close()\n log(\"Done\")\n log(f'Number of unique pairs of sentences: {len(unique_pairs)}')\n \n\ndef main(): \n fetch_dataset(DATA_URL, DATA_PATH, DATA_FILE_PATH)\n parser = ArgumentParser()\n parser.add_argument(\"--sample_size\", required=True, type=int, help='Number of the sentence pairs to prepare for the training')\n parser.add_argument(\"--max_text_len\", required=True, type=int, help='Max character length of the sentences')\n args = parser.parse_args()\n \n create_sample(args.sample_size, args.max_text_len)\n \n log('Training tokenizers...')\n \n yttm.BPE.train(data=TRG_TXT_FILE_PATH, vocab_size=TRG_VOCAB_SIZE, model=TRG_TOKENIZER_PATH)\n 
yttm.BPE.train(data=SRC_TXT_FILE_PATH, vocab_size=SRC_VOCAB_SIZE, model=SRC_TOKENIZER_PATH)\n \n log(\"Done\")\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
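After the script above has trained the two BPE models, they are normally loaded back with youtokentome for encoding and decoding. A small follow-up sketch; the model path is a placeholder for SRC_TOKENIZER_PATH, which comes from the project's config module:

    import youtokentome as yttm

    bpe = yttm.BPE(model='path/to/src_tokenizer.model')  # placeholder for SRC_TOKENIZER_PATH
    sentences = ['how are you today?']
    ids = bpe.encode(sentences, output_type=yttm.OutputType.ID)
    subwords = bpe.encode(sentences, output_type=yttm.OutputType.SUBWORD)
    print(ids[0])              # list of token ids
    print(subwords[0])         # list of subword strings
    print(bpe.decode(ids)[0])  # round-trip back to text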
anmolmore/Chatbot-for-COVID-19-FAQ-using-Dialogflow | [
"f80670e9ee67e18c790da85d49e9c9617753c6f8",
"f80670e9ee67e18c790da85d49e9c9617753c6f8"
] | [
"Model_codebase_2_flask.py",
"Flask CoronaHelpDesk/api.py"
] | [
"#11915010\tRaghu Punnamraju\n#11915043\tAnmol More\n#11915001\tSriganesh Balamurugan\n#11915052\tKapil Bindal\n\nimport pandas as pd\nfrom ast import literal_eval\n\nfrom cdqa.utils.filters import filter_paragraphs\nfrom cdqa.utils.download import download_model, download_bnpp_data\nfrom cdqa.pipeline.cdqa_sklearn import QAPipeline\n\n#read the cleaned dataset and just take question and context for our model\ndf = pd.read_csv('data/dataset_collected.csv', usecols=['question', 'context'])\n\n#convert paragraphs to a list\ndf['paragraphs'] = df[df.columns[1:]].apply(\n lambda x: x.dropna().values.tolist(),\n axis=1)\n\ndf.rename(columns={\"question\": \"title\"}, inplace=True)\ndf.drop(columns='context', inplace=True)\ndf.to_csv('df_corona.csv', index=False)\n\n#use a lighter pipleline model to build pipeline on top of it\ncdqa_pipeline = QAPipeline(reader='models/distilbert_qa.joblib')\ncdqa_pipeline.fit_retriever(df=df)\n\nprint('Welcome to Corona Chatbot ! How can I help you ? ')\nprint('Press enter twice to quit')\n\nwhile True:\n\tquery = input()\n\tprediction = cdqa_pipeline.predict(query=query)\n\tprint('Query : {}\\n'.format(query))\n\tprint('Reply from Bot: {}\\n'.format(prediction[0]))",
"from flask import Flask, request, make_response, jsonify, render_template\nfrom flask_cors import CORS\n\nimport os\nfrom ast import literal_eval\nimport pandas as pd\nimport json\n\nimport dialogflow\n\nfrom cdqa.utils.filters import filter_paragraphs\nfrom cdqa.pipeline import QAPipeline\n\napp = Flask(__name__)\nCORS(app)\n\ndataset_path = 'data/df_corona.csv'\nreader_path = 'model/model.joblib'\nproject_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n\ndf = pd.read_csv(dataset_path, usecols=['context', 'question'])\ndf = df.fillna(method='ffill')\n\ndf['paragraphs'] = df[df.columns[1:]].apply(\n lambda x: x.dropna().values.tolist(),\n axis=1)\n\ndf.rename(columns={\"question\": \"title\"}, inplace=True)\ndf.drop(columns='context', inplace=True)\n\ncdqa_pipeline = QAPipeline(reader=reader_path)\ncdqa_pipeline.fit_retriever(df=df)\n\n\ndef detect_intent_texts(project_id, session_id, text, language_code):\n session_client = dialogflow.SessionsClient()\n session = session_client.session_path(project_id, session_id)\n\n if text:\n text_input = dialogflow.types.TextInput(\n text=text, language_code=language_code)\n query_input = dialogflow.types.QueryInput(text=text_input)\n response = session_client.detect_intent(\n session=session, query_input=query_input)\n print(\"...................................................\")\n print(response)\n print(\"...................................................\")\n return response.query_result.fulfillment_text\n \[email protected]('/send_message', methods=['POST'])\ndef send_message():\n message = request.form['message']\n project_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n fulfillment_text = detect_intent_texts(project_id, \"unique\", message, 'en')\n response_text = { \"message\": fulfillment_text }\n return jsonify(response_text)\n\[email protected](\"/api\", methods=[\"GET\"])\ndef api():\n\n query = request.args.get(\"query\")\n prediction = cdqa_pipeline.predict(query=query)\n\n return jsonify(\n query=query, answer=prediction[0], title=prediction[1], paragraph=prediction[2]\n )\n\[email protected]('/')\ndef my_form():\n return render_template('my-form.html')\n\[email protected]('/', methods=['POST'])\ndef my_form_post():\n text = request.form['text']\n query = text.lower()\n prediction = cdqa_pipeline.predict(query)\n\n return jsonify(\n query=query, answer=prediction[0], title=prediction[1], paragraph=prediction[2]\n )\n \[email protected]('/webhook', methods=['GET', 'POST'])\ndef webhook():\n text_message = request.get_json(force=True)\n print(text_message)\n query = text_message['queryResult']['queryText']\n print('user query', query)\n query = query.lower()\n prediction = cdqa_pipeline.predict(query=query)\n \n print('answer to query', prediction)\n response_text = {\"fulfillmentText\": prediction}\n \n return make_response(jsonify(response_text))\n \n # return jsonify(\n# query=query, answer=prediction[0], title=prediction[1], paragraph=prediction[2]\n# )\n \nif __name__ == '__main__':\n app.run()\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hellopikaqiu/AIchallenger_MachineReadingComprehension | [
"03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42",
"03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42"
] | [
"best_single_model/focal_loss.py",
"baseline/config.py"
] | [
"\"\"\"\nAI Challenger观点型问题阅读理解\n\nfocal_loss.py\n\n@author: yuhaitao\n\"\"\"\n# -*- coding:utf-8 -*-\nimport tensorflow as tf\n\n\ndef sparse_focal_loss(logits, labels, gamma=2):\n \"\"\"\n Computer focal loss for multi classification\n Args:\n labels: A int32 tensor of shape [batch_size].\n logits: A float32 tensor of shape [batch_size,num_classes].\n gamma: A scalar for focal loss gamma hyper-parameter.\n Returns:\n A tensor of the same shape as `lables`\n \"\"\"\n with tf.name_scope(\"focal_loss\"):\n y_pred = tf.nn.softmax(logits, dim=-1) # [batch_size,num_classes]\n labels = tf.one_hot(labels, depth=y_pred.shape[1])\n L = -labels * ((1 - y_pred)**gamma) * tf.log(y_pred)\n L = tf.reduce_sum(L, axis=1)\n return L\n\n'''\nif __name__ == '__main__':\n labels = tf.constant([0, 1], name=\"labels\")\n logits = tf.constant([[0.7, 0.2, 0.1], [0.6, 0.1, 0.3]], name=\"logits\")\n a = tf.reduce_mean(sparse_focal_loss(logits, tf.stop_gradient(labels)))\n with tf.Session() as sess:\n print(sess.run(a))'''\n",
"\"\"\"\nAI Challenger观点型问题阅读理解\n\nconfig.py:配置文件,程序运行入口\n\n@author: yuhaitao\n\"\"\"\n# -*- coding:utf-8 -*-\nimport os\nimport tensorflow as tf\n\nimport data_process\nfrom main import train, test, dev\nfrom file_save import *\nfrom examine_dev import examine_dev\n\nflags = tf.flags\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\ntrain_file = os.path.join(\"file\", \"ai_challenger_oqmrc_trainingset.json\")\ndev_file = os.path.join(\"file\", \"ai_challenger_oqmrc_validationset.json\")\ntest_file = os.path.join(\"file\", \"ai_challenger_oqmrc_testa.json\")\n'''\ntrain_file = os.path.join(\"file\", \"train_demo.json\")\ndev_file = os.path.join(\"file\", \"val_demo.json\")\ntest_file = os.path.join(\"file\", \"test_demo.json\")'''\n\ntarget_dir = \"data\"\nlog_dir = \"log/event\"\nsave_dir = \"log/model\"\nprediction_dir = \"log/prediction\"\ntrain_record_file = os.path.join(target_dir, \"train.tfrecords\")\ndev_record_file = os.path.join(target_dir, \"dev.tfrecords\")\ntest_record_file = os.path.join(target_dir, \"test.tfrecords\")\nid2vec_file = os.path.join(target_dir, \"id2vec.json\") # id号->向量\nword2id_file = os.path.join(target_dir, \"word2id.json\") # 词->id号\ntrain_eval = os.path.join(target_dir, \"train_eval.json\")\ndev_eval = os.path.join(target_dir, \"dev_eval.json\")\ntest_eval = os.path.join(target_dir, \"test_eval.json\")\n\nif not os.path.exists(target_dir):\n os.makedirs(target_dir)\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\nif not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir)\n\nflags.DEFINE_string(\"mode\", \"train\", \"train/debug/test\")\nflags.DEFINE_string(\"gpu\", \"0\", \"0/1\")\nflags.DEFINE_string(\"experiment\", \"lalala\", \"每次存不同模型分不同的文件夹\")\nflags.DEFINE_string(\"model_name\", \"default\", \"选取不同的模型\")\n\nflags.DEFINE_string(\"target_dir\", target_dir, \"\")\nflags.DEFINE_string(\"log_dir\", log_dir, \"\")\nflags.DEFINE_string(\"save_dir\", save_dir, \"\")\nflags.DEFINE_string(\"prediction_dir\", prediction_dir, \"\")\nflags.DEFINE_string(\"train_file\", train_file, \"\")\nflags.DEFINE_string(\"dev_file\", dev_file, \"\")\nflags.DEFINE_string(\"test_file\", test_file, \"\")\n\nflags.DEFINE_string(\"train_record_file\", train_record_file, \"\")\nflags.DEFINE_string(\"dev_record_file\", dev_record_file, \"\")\nflags.DEFINE_string(\"test_record_file\", test_record_file, \"\")\nflags.DEFINE_string(\"train_eval_file\", train_eval, \"\")\nflags.DEFINE_string(\"dev_eval_file\", dev_eval, \"\")\nflags.DEFINE_string(\"test_eval_file\", test_eval, \"\")\nflags.DEFINE_string(\"word2id_file\", word2id_file, \"\")\nflags.DEFINE_string(\"id2vec_file\", id2vec_file, \"\")\n\nflags.DEFINE_integer(\"para_limit\", 150, \"Limit length for paragraph\")\nflags.DEFINE_integer(\"ques_limit\", 30, \"Limit length for question\")\nflags.DEFINE_integer(\"min_count\", 1, \"embedding 的最小出现次数\")\nflags.DEFINE_integer(\"embedding_size\", 300, \"the dimension of vector\")\n\nflags.DEFINE_integer(\"capacity\", 15000, \"Batch size of dataset shuffle\")\nflags.DEFINE_integer(\"num_threads\", 4, \"Number of threads in input pipeline\")\n# 使用cudnn训练,提升6倍速度\nflags.DEFINE_boolean(\"use_cudnn\", True, \"Whether to use cudnn (only for GPU)\")\nflags.DEFINE_boolean(\"is_bucket\", False, \"Whether to use bucketing\")\n\nflags.DEFINE_integer(\"batch_size\", 64, \"Batch size\")\nflags.DEFINE_integer(\"num_steps\", 250000, \"Number of steps\")\nflags.DEFINE_integer(\"checkpoint\", 1000, \"checkpoint for 
evaluation\")\nflags.DEFINE_integer(\"period\", 500, \"period to save batch loss\")\nflags.DEFINE_integer(\"val_num_batches\", 150, \"Num of batches for evaluation\")\nflags.DEFINE_float(\"init_learning_rate\", 0.001,\n \"Initial learning rate for Adam\")\nflags.DEFINE_float(\"init_emb_lr\", 0., \"\")\nflags.DEFINE_float(\"keep_prob\", 0.7, \"Keep prob in rnn\")\nflags.DEFINE_float(\"grad_clip\", 5.0, \"Global Norm gradient clipping rate\")\nflags.DEFINE_integer(\"hidden\", 60, \"Hidden size\") # best:128\nflags.DEFINE_integer(\"patience\", 5, \"Patience for learning rate decay\")\nflags.DEFINE_string(\"optimizer\", \"Adam\", \"\")\nflags.DEFINE_string(\"loss_function\", \"default\", \"\")\nflags.DEFINE_boolean(\"use_dropout\", True, \"\")\n\n\ndef main(_):\n config = flags.FLAGS\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu # 选择一块gpu\n if config.mode == \"train\":\n train(config)\n elif config.mode == \"prepro\":\n data_process.prepro(config)\n elif config.mode == \"debug\":\n config.num_steps = 2\n config.val_num_batches = 1\n config.checkpoint = 1\n config.period = 1\n train(config)\n elif config.mode == \"test\":\n test(config)\n elif config.mode == \"examine\":\n examine_dev(config)\n elif config.mode == \"save_dev\":\n save_dev(config)\n elif config.mode == \"save_test\":\n save_test(config)\n else:\n print(\"Unknown mode\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.log",
"tensorflow.one_hot",
"tensorflow.name_scope"
],
[
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
muell-monster/google-research | [
"294a888bbb6678ac255c6422fd703c325cbb0772",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"294a888bbb6678ac255c6422fd703c325cbb0772",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"294a888bbb6678ac255c6422fd703c325cbb0772",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467"
] | [
"flax_models/t5x/train.py",
"tf3d/utils/instance_segmentation_utils_test.py",
"eli5_retrieval_large_lm/util_scripts/create_data_subset_realm.py",
"non_semantic_speech_benchmark/eval_embedding/keras/eval_keras.py",
"kws_streaming/models/utils.py",
"etcmodel/models/openkp/input_validate_and_dedup.py",
"yoto/optimizers/scalarization.py",
"etcmodel/models/nq/run_nq_lib.py",
"non_semantic_speech_benchmark/export_model/combine_frontend_and_savedmodel.py",
"supcon/classification_head.py",
"model_pruning/python/pruning.py",
"hipi/relabelling_replay_buffer.py",
"kws_streaming/data/input_data.py",
"etcmodel/feature_utils_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script pre-trains or fine-tunes a Transformer using the T5 data pipeline.\"\"\"\nfrom concurrent.futures import thread\nimport functools\nimport importlib\nimport os\nfrom typing import Any, Mapping, Sequence, Tuple\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\n# Set Linen to add profiling information when constructing Modules.\n# Must be set before flax imports.\n# pylint:disable=g-import-not-at-top\nos.environ['FLAX_PROFILE'] = 'true'\nfrom flax import linen as nn\nfrom flax import optim\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nimport jax\nfrom jax import lax\nfrom jax import random\nfrom jax.interpreters.sharded_jit import sharded_jit\nimport jax.numpy as jnp\nimport ml_collections\nfrom ml_collections import config_flags\nimport numpy as np\nimport t5\nfrom t5x import checkpoint_importer\nfrom t5x import input_pipeline\nfrom t5x import models\nfrom t5x import partitions\nfrom t5x import train_lib\nimport tensorflow as tf\n\n# pylint:disable=g-long-lambda\n\n\nFLAGS = flags.FLAGS\nCFG = None\nPyTreeDef = type(jax.tree_structure(None))\nTransformerConfig = models.TransformerConfig\njax.config.parse_flags_with_absl()\n\nflags.DEFINE_string(\n 'model_dir', default=None, help='Directory to store model data.')\n\nflags.DEFINE_string(\n 'data_dir', default=None, help='Tensorflow datasets directory.')\n\nconfig_flags.DEFINE_config_file(\n name='config',\n default='configs/t5_small_glue.py',\n help_string='training config file.')\n\nConfigDict = ml_collections.ConfigDict\n\n\ndef get_configs(\n config\n):\n \"\"\"Get train, eval, and predict model configs.\n\n Args:\n config: The config dict for the experiment.\n\n Returns:\n A triple (train_config, eval_config, predict_config).\n \"\"\"\n train_config = TransformerConfig(\n vocab_size=config.vocab_size,\n output_vocab_size=config.vocab_size,\n share_embeddings=config.share_embeddings,\n logits_via_embedding=config.logits_via_embedding,\n dtype=jnp.bfloat16 if config.use_bfloat16 else jnp.float32,\n emb_dim=config.emb_dim,\n num_heads=config.num_heads,\n num_layers=config.num_layers,\n qkv_dim=config.qkv_dim,\n mlp_dim=config.mlp_dim,\n mlp_activations=config.mlp_activations,\n position_embeddings='relative',\n relative_attention_num_buckets=config.relative_attention_num_buckets,\n relative_attention_max_distance=config.relative_attention_max_distance,\n max_len=max(config.max_input_length, config.max_target_length,\n config.max_eval_input_length, config.max_eval_target_length),\n dropout_rate=config.dropout_rate,\n attention_dropout_rate=config.attention_dropout_rate,\n deterministic=False,\n decode=False,\n kernel_init=nn.initializers.xavier_uniform(),\n bias_init=nn.initializers.normal(stddev=1e-6))\n eval_config = train_config.replace(deterministic=True) # pytype: disable=attribute-error\n predict_config = 
train_config.replace( # pytype: disable=attribute-error\n deterministic=True,\n decode=True,\n max_decode_len=config.max_eval_target_length)\n\n return (train_config, eval_config, predict_config)\n\n\ndef get_initial_params(rng, config,\n transformer_config,\n optimizer_def):\n \"\"\"Get the initial parameter tree.\"\"\"\n input_shape = (config.batch_size, CFG.max_input_length)\n target_shape = (config.batch_size, CFG.max_target_length)\n initial_variables = models.Transformer(transformer_config).init(\n rng, jnp.ones(input_shape, jnp.float32),\n jnp.ones(target_shape, jnp.float32))\n # apply an optimizer to the parameters\n return optimizer_def.create(initial_variables['params'])\n\n\ndef main(argv):\n global CFG\n CFG = FLAGS.config\n\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Guarantee that the JAX bfloat16 extension is used rather than TF bfloat16.\n _ = np.array(jnp.array([1.0], dtype=jnp.bfloat16))\n\n # Use hardware RNG for bernoulli randoms in dropout mask creation.\n if CFG.hardware_rng:\n models.set_hardware_bernoulli()\n\n if 'module_import' in CFG and CFG.module_import:\n for module in CFG.module_import:\n importlib.import_module(module)\n\n if 'additional_task_cache_dirs' in CFG and CFG.additional_task_cache_dirs:\n t5.data.add_global_cache_dirs(CFG.additional_task_cache_dirs)\n\n num_partitions = CFG.num_partitions\n topology = train_lib.compute_multihost_topology(num_partitions)\n batch_size = CFG.batch_size\n eval_batch_size = CFG.eval_batch_size\n per_replica_set_eval_batch_size = eval_batch_size // topology.num_replica_sets\n if batch_size % topology.num_replicas:\n raise ValueError('Batch size must be divisible by the number of replicas.')\n\n steps_per_epoch = CFG.steps_per_epoch\n logging.info('steps per epoch: %d', steps_per_epoch)\n\n broadcast = functools.partial(\n train_lib.broadcast,\n num_replicas=topology.per_replica_set_num_replicas,\n num_partitions=topology.per_host_num_partitions,\n devices=topology.this_host_device_assignment)\n\n if jax.host_id() == 0:\n tf.io.gfile.makedirs(FLAGS.model_dir)\n tf.io.gfile.copy(FLAGS['config'].config_filename,\n os.path.join(FLAGS.model_dir, 'config.py'),\n overwrite=True)\n train_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'train'))\n eval_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'eval'))\n else:\n train_summary_writer = None\n eval_summary_writer = None\n\n # Write summaries in background thread to avoid blocking on device sync\n if CFG.infeed:\n # Infeed is currently synchronous, so do it in a background thread too\n infeed_pool = thread.ThreadPoolExecutor(jax.local_device_count(), 'infeed')\n\n (train_ds, eval_ds), eval_cache = input_pipeline.get_datasets_and_cache(\n CFG, topology.num_replica_sets, topology.replica_set_id,\n topology.per_replica_set_host_id)\n\n vocab = input_pipeline.get_vocabulary(CFG.mixture_or_task_name)\n encoder = vocab.tf_tokenizer\n eos_id = vocab.tokenizer.eos_id()\n\n def decode_tokens(toks,\n eos_id = eos_id,\n max_id = 32000):\n \"\"\"Decode tokens back to unicode.\"\"\"\n del eos_id\n # TODO(levskaya): T5 doesn't seem to emit EOS tokens? 
double check this\n # is the best decoding function or just switch to using tf_decode.\n # valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)\n valid_toks = toks.astype(np.int32)\n valid_toks[valid_toks >= max_id] = 3\n return encoder.detokenize(valid_toks).numpy().decode('utf-8')\n\n logging.info('Initializing model, optimizer, and step functions.')\n\n train_config, eval_config, predict_config = get_configs(CFG)\n\n rng = random.PRNGKey(CFG.random_seed)\n rng, init_rng = random.split(rng)\n # This is used for infeed conversion from feature dict <--> tuple\n train_keys = [\n 'inputs', 'targets', 'inputs_position', 'targets_position',\n 'inputs_segmentation', 'targets_segmentation'\n ]\n device_train_input_shape = tuple([\n (batch_size // topology.num_replicas,\n CFG.max_input_length if 'inputs' in k else CFG.max_target_length)\n for k in train_keys\n ])\n\n learning_rate_fn = train_lib.create_learning_rate_scheduler(\n factors=CFG.schedule,\n base_learning_rate=CFG.learning_rate,\n warmup_steps=CFG.warmup_steps)\n\n # First, we only abstractly initialize the optimizer and model parameters,\n # since the parameters may not even fit in device memory!\n # TODO(jekbradbury): make optimizer_defs compare by value so it can be created\n # in get_initial_params without causing pytree incompatibility\n optimizer_def = optim.Adafactor(\n CFG.learning_rate, decay_rate=0.8, step_offset=CFG.step_offset)\n initialize_params_fn = functools.partial(\n get_initial_params,\n config=CFG,\n transformer_config=eval_config,\n optimizer_def=optimizer_def)\n optimizer = jax.eval_shape(initialize_params_fn, init_rng)\n # tuple-like pytree leaves for global_arg_shapes\n optimizer_shapes = jax.tree_map(lambda x: partitions.Spec(*x.shape),\n optimizer)\n\n # Build parameter partition annotations for preserving partitions from train\n # to eval.\n if num_partitions > 1:\n optimizer_partitions = optimizer.restore_state(\n partitions.set_partitions(num_partitions, optimizer.state_dict()))\n per_host_optimizer_partitions = optimizer.restore_state(\n partitions.set_partitions(topology.per_host_num_partitions,\n optimizer.state_dict()))\n\n # Restore unreplicated optimizer + model state from last checkpoint.\n # TODO(jekbradbury,levskaya): implement sharded native checkpoint/restore\n existing_checkpoint_found = False\n if CFG.restore_checkpoints:\n existing_checkpoint_found = train_lib.checkpoint_exists(FLAGS.model_dir)\n optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)\n\n # Import a pretrained-T5 checkpoint only if we didn't import a local\n # \"native\" checkpoint (e.g. 
due to resuming a pre-empted finetuning run.)\n # TODO(jekbradbury,levskaya): implement sharded T5 checkpoint/restore\n if CFG.restore_t5_checkpoint and not existing_checkpoint_found:\n optimizer = checkpoint_importer.restore_from_t5_checkpoint(\n optimizer, CFG.restore_t5_checkpoint)\n\n if CFG.restore_t5_checkpoint or existing_checkpoint_found:\n if num_partitions > 1:\n # Until checkpoint/restore is sharded, the restored checkpoint is global\n # and we need to slice each sharded parameter into the chunk containing\n # only the partitions that are present on this host.\n def per_host_chunk(x, spec):\n if spec is None or spec is x: # unsharded or not a parameter\n return x\n if spec[0] == 1:\n dim_size = x.shape[1]\n elif spec[1] == 1:\n dim_size = x.shape[0]\n else:\n raise NotImplementedError()\n chunk_size = (\n dim_size * topology.per_host_num_partitions // num_partitions)\n lower = topology.per_replica_set_host_id * chunk_size\n upper = (topology.per_replica_set_host_id + 1) * chunk_size\n if spec[0] == 1:\n return x[:, lower:upper]\n else:\n return x[lower:upper]\n\n optimizer = jax.tree_multimap(per_host_chunk, optimizer,\n optimizer_partitions)\n else:\n # If pretraining and no checkpoint imported, we jit the (sharded-) init\n # function to minimize fragmentation. We use the same pmap(sharded_jit)\n # setup as the training step/loop to initialize everything \"in-place\" and\n # avoid communication or OOM.\n if num_partitions > 1:\n initialize_params_fn = sharded_jit(\n initialize_params_fn,\n in_parts=None,\n local_in_parts=None,\n out_parts=optimizer_partitions,\n local_out_parts=per_host_optimizer_partitions,\n # devices=one_replica_device_assignment,\n )\n initialize_params_fn = jax.pmap(\n initialize_params_fn,\n 'batch',\n in_axes=0,\n axis_size=topology.num_replicas,\n devices=topology.device_assignment)\n init_rng = broadcast(init_rng)\n optimizer = initialize_params_fn(init_rng)\n # We maintain the optimizer in unbroadcasted form (i.e. with no leading\n # replica axis). 
This is equivalent to the as-yet-nonexistent pmap kwarg\n # out_axes=None.\n optimizer = train_lib.unbroadcast(optimizer)\n else:\n optimizer = jax.jit(initialize_params_fn)(init_rng)\n\n # ---------------------------------------------------------------------------\n # Compile multidevice versions of train/eval/predict step and cache init fn.\n # ---------------------------------------------------------------------------\n\n # We can use either a single train-step for a host training loop:\n\n # train_step(optimizer, batch, prev_metrics, dropout_rng, **kwargs)\n # --> new_optimizer, metrics, new_dropout_rng\n def p_train_step(optimizer, batch,\n prev_metrics,\n dropout_rng):\n return train_lib.train_step(\n optimizer,\n batch,\n prev_metrics,\n dropout_rng,\n config=train_config,\n learning_rate_fn=learning_rate_fn,\n num_microbatches=CFG.microbatches,\n label_smoothing=CFG.label_smoothing,\n z_loss=CFG.z_loss,\n use_bfloat16=CFG.use_bfloat16)\n\n if num_partitions > 1:\n p_train_step = sharded_jit(\n p_train_step,\n in_parts=(optimizer_partitions, None, None, None),\n local_in_parts=(per_host_optimizer_partitions, None, None, None),\n out_parts=(optimizer_partitions, None, None),\n local_out_parts=(per_host_optimizer_partitions, None, None))\n # TODO(levskaya): the in_axes spec below might be wrong, double-check.\n p_train_step = jax.pmap(\n p_train_step,\n axis_name='batch',\n in_axes=(None, 0, 0, 0),\n donate_argnums=(0,),\n global_arg_shapes=(optimizer_shapes, None, None, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # OR, we use an on-device loop that feeds the training step via infeed queue.\n def device_train_loop_cond(\n args\n ):\n \"\"\"Stopping criterion for on-device loop.\"\"\"\n _, _, _, _, step, epoch = args\n return step // steps_per_epoch == epoch\n\n def device_train_loop_body(\n args\n ):\n \"\"\"On-device loop body.\"\"\"\n optimizer, dropout_rngs, metrics, token, step, epoch = args\n # Ordering input data from infeed requires threading a symbolic token\n # through the computation.\n input_data, token = lax.infeed(\n token,\n shape=tuple(\n [jax.ShapedArray(s, jnp.int32) for s in device_train_input_shape]))\n # Rebuild input dict from infeed data tuple.\n batch = {k: v for k, v in zip(train_keys, input_data)}\n # Run the train_step function and return the loop state.\n optimizer, metrics, dropout_rngs = train_lib.train_step(\n optimizer,\n batch,\n metrics,\n dropout_rngs,\n train_config,\n learning_rate_fn,\n num_microbatches=CFG.microbatches,\n label_smoothing=CFG.label_smoothing,\n z_loss=CFG.z_loss)\n step += 1\n return optimizer, dropout_rngs, metrics, token, step, epoch\n\n def device_train_loop(optimizer, dropout_rngs,\n metrics, step,\n epoch):\n # Create symbolic token for threading infeed data.\n token = lax.create_token(step)\n # Run on-device loop.\n optimizer, dropout_rngs, metrics, _, step, _ = lax.while_loop(\n device_train_loop_cond, device_train_loop_body,\n (optimizer, dropout_rngs, metrics, token, step, epoch))\n return optimizer, dropout_rngs, metrics, step\n\n if num_partitions > 1:\n device_train_loop = sharded_jit(\n device_train_loop,\n in_parts=(optimizer_partitions, None, None, None, None),\n local_in_parts=(per_host_optimizer_partitions, None, None, None, None),\n out_parts=(optimizer_partitions, None, None, None),\n local_out_parts=(per_host_optimizer_partitions, None, None, None))\n p_train_epoch = jax.pmap(\n device_train_loop,\n axis_name='batch',\n in_axes=(None, 0, 
0, None, None),\n donate_argnums=(0,),\n global_arg_shapes=(optimizer_shapes, None, None, None, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # Reduction psum for metric data.\n\n def p_allreduce_metrics(x):\n return lax.psum(x, axis_name='batch')\n\n if num_partitions > 1:\n p_allreduce_metrics = sharded_jit(\n p_allreduce_metrics,\n in_parts=None,\n local_in_parts=None,\n out_parts=None,\n local_out_parts=None,\n num_partitions=num_partitions,\n local_num_partitions=topology.per_host_num_partitions)\n p_allreduce_metrics = jax.pmap(\n p_allreduce_metrics,\n axis_name='batch',\n global_arg_shapes=None,\n axis_size=topology.num_replicas,\n devices=topology.device_assignment)\n\n # Training evaluation computation.\n\n # eval_step(params, batch, config, label_smoothing=0.0) --> metrics\n def p_eval_step(params, batch):\n return train_lib.eval_step(\n params, batch, config=eval_config, label_smoothing=CFG.label_smoothing)\n\n if num_partitions > 1:\n p_eval_step = sharded_jit(\n p_eval_step,\n in_parts=(optimizer_partitions.target, None),\n local_in_parts=(per_host_optimizer_partitions.target, None),\n out_parts=None,\n local_out_parts=None)\n p_eval_step = jax.pmap(\n p_eval_step,\n axis_name='batch',\n in_axes=(None, 0),\n global_arg_shapes=(optimizer_shapes.target, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # Fast autoregressive decoding loop.\n # For inference and model evaluation.\n\n # predict_step(inputs, params,\n # eos_id, max_decode_len, config, beam_size=4) --> beam_seqs\n def p_pred_step(inputs, params):\n return train_lib.predict_step(inputs, params, eos_id,\n CFG.max_eval_target_length, predict_config,\n CFG.beam_size)\n\n if num_partitions > 1:\n p_pred_step = sharded_jit(\n p_pred_step,\n in_parts=(None, optimizer_partitions.target),\n local_in_parts=(None, per_host_optimizer_partitions.target),\n out_parts=None,\n local_out_parts=None)\n p_pred_step = jax.pmap(\n p_pred_step,\n axis_name='batch',\n in_axes=(0, None),\n global_arg_shapes=(None, optimizer_shapes.target),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # ---------------------------------------------------------------------------\n # Main Train Loop\n # ---------------------------------------------------------------------------\n\n # We init the first set of dropout PRNG keys, but update it afterwards inside\n # the main pmap'd training update for performance.\n # There should be a unique dropout key for each replica represented on this\n # host, but the key should be the same for the same replica on other hosts.\n # Again, this is what the replica set abstraction is for.\n dropout_rngs = random.split(\n random.fold_in(rng, topology.replica_set_id),\n topology.per_replica_set_num_replicas)\n # restore step from last checkpoint\n host_step = int(optimizer.state.step)\n empty_metrics = broadcast({\n 'loss': 0.0,\n 'accuracy': 0.0,\n 'learning_rate': 0.0,\n 'denominator': 0.0\n })\n if CFG.infeed:\n # TODO(jekbradbury): support something like this for the Python-loop case\n logging.info('Precompiling training loop and moving optimizer to device.')\n optimizer, _, metrics, _ = p_train_epoch(optimizer, dropout_rngs,\n empty_metrics,\n jnp.array(0, dtype=jnp.int32), 1)\n optimizer = train_lib.unbroadcast(optimizer)\n metrics['loss'].block_until_ready()\n\n logging.info('Starting training loop.')\n\n local_devices 
= jax.local_devices()\n device_step = broadcast(host_step)\n first_epoch = host_step // steps_per_epoch\n\n # Main Loop over \"epochs\".\n train_iter = train_ds.as_numpy_iterator()\n for epoch in range(first_epoch, first_epoch + CFG.num_epochs):\n metrics = empty_metrics\n\n # NOTE: 'optimizer' is unbroadcast by construction at initialization or\n # when loading a checkpoint. It is maintained in 'unbroadcast' state to\n # enable the XLA cross-replica sharding optimization. The broadcasting is\n # handled automatically by the pmap'd functions that use it.\n\n # Gather all task evaluation metrics.\n logging.info('Evaluating tasks.')\n if epoch == first_epoch + 1:\n train_lib.sync_devices()\n for task in eval_cache.tasks:\n logging.info('Evaluating task %s', task.name)\n all_predicted, all_bs = [], []\n for pred_batch in eval_cache.preprocessed_examples[task.name]:\n # Handle final odd-sized batch by padding instead of dropping it.\n input_batch, unpadded_batch_size = train_lib.pad_batch_to_size(\n pred_batch['inputs'], per_replica_set_eval_batch_size)\n all_bs.append(unpadded_batch_size)\n # Split batch dimensions for pmap.\n input_batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]),\n input_batch)\n # Run fast inference on batch.\n all_predicted.append(p_pred_step(input_batch, optimizer.target))\n\n # Pad out the number of batches so each host has the same number.\n max_host_batch_number = np.max(\n eval_cache.preprocessed_batch_sizes[task.name])\n batch_shortfall = max_host_batch_number - len(all_predicted)\n if batch_shortfall > 0:\n # TODO(levskaya): Fix for case of entirely empty all_predicted.\n # To make sure the cross-host barriers work, we run the program the same\n # number of times on all hosts. The results of this call is ignored, and\n # the predictions are populated with zeros instead.\n p_pred_step(input_batch, optimizer.target) # Dummy call.\n all_predicted.extend([jnp.zeros_like(all_predicted[0])] *\n batch_shortfall)\n all_bs.extend([0] * batch_shortfall)\n all_predicted = jnp.concatenate(all_predicted)\n all_bs = jnp.array(all_bs)\n\n # Collect all batches from across hosts and reverse sharding.\n all_predicted = train_lib.host_allgather(\n all_predicted, topology.num_replica_sets, topology.replica_set_id,\n topology.per_replica_set_host_id == 0)\n seqlength = all_predicted.shape[-1]\n total_examples = np.sum(\n train_lib.host_allgather(all_bs, topology.num_replica_sets,\n topology.replica_set_id,\n topology.per_replica_set_host_id == 0))\n del all_bs\n assert total_examples == len(eval_cache.examples[task.name]), (\n 'Total number of batches incorrect for task %s.' 
% task.name)\n # De-shard the collected predicted tokens and remove padding.\n all_predicted = np.transpose(all_predicted, (1, 2, 0, 3)).reshape(\n -1, seqlength)[:total_examples]\n\n # We now run the post-processing and metric-fns on a single host.\n if jax.host_id() == 0:\n assert eval_summary_writer\n raw_predictions = []\n for tokens in all_predicted:\n raw_predictions.append(decode_tokens(tokens))\n\n # post-process predictions for metric fns\n predictions = [\n task.postprocess_fn(p, example=ex)\n for p, ex in zip(raw_predictions, eval_cache.examples[task.name])\n ]\n\n for metric_fn in task.metric_fns:\n scores = metric_fn(eval_cache.targets[task.name], predictions)\n for metric_name, metric_value in scores.items():\n tag = f'eval/{task.name}/{metric_name}'\n eval_summary_writer.scalar(tag, metric_value, host_step)\n logging.info('EVAL %s at step %d: %.3f', tag, host_step,\n metric_value)\n eval_summary_writer.flush()\n\n # Save text samples for tensorboard.\n exemplars = ''\n for n in np.random.choice(np.arange(len(predictions)), 8):\n tgt_txt = tf.compat.as_text(\n eval_cache.examples[task.name][n]['targets_plaintext'])\n pred_txt = raw_predictions[n]\n exemplars += (f'{eval_cache.inputs[task.name][n]}\\n\\n'\n f'target: {tgt_txt}\\n\\n'\n f'prediction: {pred_txt}\\n\\n')\n eval_summary_writer.text(f'{task.name} samples', exemplars, host_step)\n eval_summary_writer.flush()\n\n # Take an Xprof trace after the first loop has compiled everything.\n if epoch == first_epoch + 1:\n train_lib.sync_devices()\n\n # For on-device loop, we launch the computation before feeding data.\n logging.info('BEGIN Train loop.')\n if CFG.infeed:\n optimizer, dropout_rngs, metrics, device_step = p_train_epoch(\n optimizer, dropout_rngs, metrics, train_lib.unbroadcast(device_step),\n epoch)\n optimizer = train_lib.unbroadcast(optimizer)\n\n # Epoch loop.\n while int(host_step // steps_per_epoch) == epoch:\n batch = next(train_iter)\n batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]), batch)\n # Feed the on-device training loop.\n if CFG.infeed:\n for i, device in enumerate(local_devices):\n # When using infeed to provide data to the computation, we're on our\n # own for feeding the right values to the right devices. 
Each device\n # should get the minibatch corresponding to its replica, a slice of\n # the larger batch corresponding to the host's replica set.\n if device.platform == 'tpu':\n device_coords = (*device.coords, device.id % 2)\n else:\n device_coords = (device.host_id, i)\n per_replica_set_device_coords = tuple(\n dc % prsm\n for dc, prsm in zip(device_coords, topology.per_replica_set_mesh))\n per_replica_set_replica_coords = tuple(\n prsdc // prm for prsdc, prm in zip(per_replica_set_device_coords,\n topology.per_replica_mesh))\n per_replica_set_replica_id = 0\n for prsm, prm, prsrc in zip(topology.per_replica_set_mesh,\n topology.per_replica_mesh,\n per_replica_set_replica_coords):\n per_replica_set_replica_id = (\n per_replica_set_replica_id * prsm // prm + prsrc)\n input_tuple = tuple(\n [batch[k][per_replica_set_replica_id] for k in train_keys])\n # Safety check: infeed does not check shape or types but requires\n # them to agree with on-device spec, otherwise the queue and program\n # stalls.\n tuple_shapes = jax.tree_map(jnp.shape, input_tuple)\n tuple_dtypes = jax.tree_map(lambda x: x.dtype, input_tuple)\n assert tuple_shapes == device_train_input_shape, (\n 'infeed shape error %s != %s' %\n (tuple_shapes, device_train_input_shape))\n assert tuple(set(tuple_dtypes)) == (jnp.int32,), \\\n ('infeed dtype error %s not all of type %s' % (\n tuple_dtypes, jnp.int32))\n infeed_pool.submit(\n functools.partial(device.transfer_to_infeed, input_tuple))\n # Host training loop.\n else:\n optimizer, metrics, dropout_rngs = p_train_step(optimizer, batch,\n metrics, dropout_rngs)\n optimizer = train_lib.unbroadcast(optimizer)\n host_step += 1\n logging.info('END Train loop.')\n\n # Maybe save a checkpoint on one host.\n if (CFG.save_checkpoints and\n epoch % CFG.checkpoint_freq == CFG.checkpoint_freq - 1 and\n jax.host_id() == 0):\n checkpoints.save_checkpoint(FLAGS.model_dir, optimizer, host_step)\n\n # Gather training metrics.\n metrics = p_allreduce_metrics(metrics)\n metrics = jax.tree_map(lambda x: jax.device_get(x[0]), metrics)\n denominator = metrics.pop('denominator')\n summary = jax.tree_map(lambda x: x / denominator, metrics) # pylint: disable=cell-var-from-loop\n logging.info('train in step: %s, %s', host_step, summary)\n if jax.host_id() == 0:\n assert train_summary_writer\n for key, val in summary.items():\n train_summary_writer.scalar(key, val, host_step)\n train_summary_writer.flush()\n\n # Gather training evaluation metrics.\n logging.info('Gathering training evaluation metrics.')\n eval_metrics = []\n eval_iter = eval_ds.as_numpy_iterator()\n for _, eval_batch in zip(range(CFG.num_eval_steps), eval_iter):\n eval_batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]),\n eval_batch)\n metrics = p_eval_step(optimizer.target, eval_batch)\n eval_metrics.append(metrics)\n # average metrics across devices\n eval_metrics = p_allreduce_metrics(eval_metrics)\n eval_metrics = common_utils.get_metrics(eval_metrics)\n # average metrics across steps\n eval_metrics = jax.tree_map(np.sum, eval_metrics)\n eval_denominator = eval_metrics.pop('denominator')\n eval_summary = jax.tree_map(\n lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop\n eval_metrics)\n logging.info('eval in step: %s, %s', host_step, eval_summary)\n if jax.host_id() == 0:\n assert eval_summary_writer\n for key, val in eval_summary.items():\n eval_summary_writer.scalar(key, val, host_step)\n eval_summary_writer.flush()\n\n # Wait until computations are done 
before exiting\n logging.info('Finished.')\n train_lib.sync_devices()\n # Shut down the infeed threadpool.\n if CFG.infeed:\n infeed_pool.shutdown()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for instance_segmentation.core.instance_segment_ops.\"\"\"\nimport tensorflow as tf\n\nfrom tf3d.utils import instance_segmentation_utils as isu\n\n\nclass InstanceSegmentUtilsTest(tf.test.TestCase):\n\n def get_instance_masks(self):\n mask0 = tf.constant([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0]],\n dtype=tf.float32)\n mask1 = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=tf.float32)\n mask2 = tf.constant([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 1, 1, 1]],\n dtype=tf.float32)\n masks = tf.stack([mask0, mask1, mask2])\n return masks\n\n def test_map_labels_to_0_to_n1(self):\n labels = tf.constant([[-1, 2, 5],\n [0, 9, 1]], dtype=tf.int32)\n labels_0_n = isu.map_labels_to_0_to_n(labels)\n expected_labels_0_n = tf.constant([[-1, 2, 3],\n [0, 4, 1]], dtype=tf.int32)\n self.assertAllEqual(labels_0_n.numpy(), expected_labels_0_n.numpy())\n\n def test_map_labels_to_0_to_n2(self):\n labels = tf.constant([[-1, 1, 2],\n [1, 1, 2]], dtype=tf.int32)\n labels_0_n = isu.map_labels_to_0_to_n(labels)\n expected_labels_0_n = tf.constant([[-1, 0, 1],\n [0, 0, 1]], dtype=tf.int32)\n self.assertAllEqual(labels_0_n.numpy(), expected_labels_0_n.numpy())\n\n def test_randomly_select_one_point_per_segment(self):\n instance_labels = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 2, 2, 2, 2, 2, 2],\n [1, 2, 2, 2, 2, 2, 2, 2],\n [0, 0, 0, 0, 2, 2, 2, 2],\n [0, 0, 0, 0, 2, 2, 2, 2]],\n dtype=tf.int32)\n instance_labels = tf.reshape(instance_labels, [-1])\n (indices,\n masks_t) = isu.randomly_select_one_point_per_segment(instance_labels)\n masks = tf.transpose(masks_t)\n masks = tf.reshape(masks, [3, 5, 8])\n expected_masks = self.get_instance_masks()\n selected_instances = tf.gather(instance_labels, indices)\n expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32)\n self.assertAllEqual(selected_instances.numpy(),\n expected_selected_instances.numpy())\n self.assertAllClose(masks.numpy(), expected_masks.numpy())\n\n def test_inputs_Distances_to_centers(self):\n inputs = tf.random.uniform(\n [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)\n centers = tf.random.uniform(\n [5, 8], minval=-10, maxval=10.0, dtype=tf.float32)\n distances1 = isu.inputs_distances_to_centers(inputs, centers)\n num_centers = tf.shape(centers)[0]\n inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),\n tf.stack([1, num_centers, 1]))\n distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2)\n self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001)\n\n def test_pairwise_iou_matrix(self):\n mask0 = tf.constant([[1, 0],\n [0, 1]], dtype=tf.float32)\n mask1 = tf.constant([[1, 
1],\n [0, 1]], dtype=tf.float32)\n mask2 = tf.constant([[1, 0],\n [1, 1]], dtype=tf.float32)\n mask3 = tf.constant([[1, 1],\n [1, 1]], dtype=tf.float32)\n mask4 = tf.constant([[0, 0],\n [0, 0]], dtype=tf.float32)\n mask5 = tf.constant([[1, 0],\n [1, 0]], dtype=tf.float32)\n masks1 = tf.stack([mask0, mask1, mask2])\n masks2 = tf.stack([mask3, mask4, mask5])\n ious = isu.get_pairwise_iou_matrix(masks1, masks2)\n expected_ious = tf.constant([[0.5, 0.0, 1.0/3.0],\n [0.75, 0.0, 0.25],\n [0.75, 0.0, 2.0/3.0]],\n dtype=tf.float32)\n self.assertAllClose(ious.numpy(), expected_ious.numpy())\n\n def test_instance_non_maximum_suppression_1d_scores(self):\n mask0 = tf.constant([[1, 0],\n [0, 1]], dtype=tf.float32)\n mask1 = tf.constant([[1, 1],\n [0, 1]], dtype=tf.float32)\n mask2 = tf.constant([[1, 0],\n [1, 1]], dtype=tf.float32)\n mask3 = tf.constant([[1, 1],\n [1, 1]], dtype=tf.float32)\n mask4 = tf.constant([[0, 0],\n [0, 0]], dtype=tf.float32)\n mask5 = tf.constant([[1, 0],\n [1, 0]], dtype=tf.float32)\n masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])\n classes = tf.constant([1, 2, 3, 1, 2, 3], dtype=tf.int32)\n scores = tf.constant([1.0, 0.9, 0.8, 0.95, 0.85, 0.6], dtype=tf.float32)\n (nms_masks1,\n nms_scores1,\n nms_classes1,\n _) = isu.instance_non_maximum_suppression_1d_scores(\n masks,\n scores,\n classes,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=True)\n nms_masks_expected1 = tf.stack([mask0, mask4])\n nms_scores_expected1 = tf.constant([1.0, 0.85], dtype=tf.float32)\n nms_classes_expected1 = tf.constant([1, 2], dtype=tf.int32)\n (nms_masks2,\n nms_scores2,\n nms_classes2,\n _) = isu.instance_non_maximum_suppression_1d_scores(\n masks,\n scores,\n classes,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=False)\n nms_masks_expected2 = tf.stack([mask0, mask1, mask4, mask2])\n nms_scores_expected2 = tf.constant([1.0, 0.9, 0.85, 0.8], dtype=tf.float32)\n nms_classes_expected2 = tf.constant([1, 2, 2, 3], dtype=tf.int32)\n self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())\n self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())\n self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())\n self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())\n self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())\n self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())\n\n def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self):\n masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)\n scores = tf.constant([], dtype=tf.float32)\n classes = tf.constant([], dtype=tf.int32)\n (nms_masks1,\n nms_scores1,\n nms_classes1,\n _) = isu.instance_non_maximum_suppression_1d_scores(\n masks,\n scores,\n classes,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=True)\n nms_masks_expected1 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)\n nms_scores_expected1 = tf.constant([], dtype=tf.float32)\n nms_classes_expected1 = tf.constant([], dtype=tf.int32)\n (nms_masks2,\n nms_scores2,\n nms_classes2,\n _) = isu.instance_non_maximum_suppression_1d_scores(\n masks,\n scores,\n classes,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=False)\n nms_masks_expected2 = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)\n nms_scores_expected2 = tf.constant([], dtype=tf.float32)\n nms_classes_expected2 = tf.constant([], dtype=tf.int32)\n self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())\n 
self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())\n self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())\n self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())\n self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())\n self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())\n\n def test_instance_non_maximum_suppression_2d_scores(self):\n mask0 = tf.constant([[1, 0],\n [0, 1]], dtype=tf.float32)\n mask1 = tf.constant([[1, 1],\n [0, 1]], dtype=tf.float32)\n mask2 = tf.constant([[1, 0],\n [1, 1]], dtype=tf.float32)\n mask3 = tf.constant([[1, 1],\n [1, 1]], dtype=tf.float32)\n mask4 = tf.constant([[0, 0],\n [0, 0]], dtype=tf.float32)\n mask5 = tf.constant([[1, 0],\n [1, 0]], dtype=tf.float32)\n masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])\n scores = tf.constant([[0.05, 1.0, 0.2],\n [0.9, 0.1, 0.3],\n [0.95, 0.92, 0.1],\n [0.1, 0.05, 0.0],\n [0.2, 0.3, 0.7],\n [0.1, 0.2, 0.8]],\n dtype=tf.float32)\n (nms_masks1,\n nms_scores1,\n nms_classes1) = isu.instance_non_maximum_suppression_2d_scores(\n masks,\n scores,\n 3,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=True)\n nms_masks_expected1 = tf.stack([mask0, mask5, mask4])\n nms_scores_expected1 = tf.constant([1.0, 0.8, 0.7], dtype=tf.float32)\n nms_classes_expected1 = tf.constant([1, 2, 2], dtype=tf.int32)\n (nms_masks2,\n nms_scores2,\n nms_classes2) = isu.instance_non_maximum_suppression_2d_scores(\n masks,\n scores,\n 3,\n min_score_thresh=0.65,\n min_iou_thresh=0.5,\n is_class_agnostic=False)\n nms_masks_expected2 = tf.stack([mask2, mask0, mask5, mask4])\n nms_scores_expected2 = tf.constant([0.95, 1.0, 0.8, 0.7], dtype=tf.float32)\n nms_classes_expected2 = tf.constant([0, 1, 2, 2], dtype=tf.int32)\n self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())\n self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())\n self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())\n self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())\n self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())\n self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())\n\n def test_points_mask_iou(self):\n masks1 = tf.constant([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0]], dtype=tf.int32)\n masks2 = tf.constant([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 0, 1, 0, 1]], dtype=tf.int32)\n iou = isu.points_mask_iou(masks1=masks1, masks2=masks2)\n expected_iou = tf.constant([[0, 0, 0],\n [0, 1, 0.6],\n [0, 0.6, 1.0],\n [0, 0.4, 0]], dtype=tf.float32)\n self.assertAllClose(iou.numpy(), expected_iou.numpy())\n\n def test_points_mask_pairwise_iou(self):\n masks1 = tf.constant([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0]], dtype=tf.int32)\n masks2 = tf.constant([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1]], dtype=tf.int32)\n pairwise_iou = isu.points_mask_pairwise_iou(masks1=masks1, masks2=masks2)\n expected_iou = tf.constant([0, 1, 0.4, 0.2], dtype=tf.float32)\n self.assertAllClose(pairwise_iou.numpy(), expected_iou.numpy())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Makes a subset of the REALM data to quicken the development of main.py.\n\nExample of use:\npython create_data_subset_realm.py \\\n--source_text_path=/usr/local/google/home/julesgm/ram_drive/blocks.tfr \\\n--source_embeddings_prefix=/usr/local/google/home/julesgm/ram_drive\\\n/cc_news_pretrained/embedder/encoded/encoded.ckpt \\\n--subset_text_path=/usr/local/google/home/julesgm/subset/subset_text.tfr \\\n--subset_embeddings_ds_path=/usr/local/google/home/julesgm/subset/encoded.ckpt \\\n--source_total=13353718 \\\n--subset_total=5000 \\\n--logger_levels=__main__:DEBUG,retrieval_while_decoding.utils:DEBUG\n\"\"\"\n\nimport logging\nimport operator\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging as absl_logging\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\nimport utils\n\n\nLOGGER = logging.getLogger(__name__)\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"source_text_path\", None,\n \"Path to the TFRecord file with text.\")\nflags.DEFINE_string(\"source_embeddings_prefix\", None,\n \"Path to the TFRecord file with embeddings_ds.\")\nflags.DEFINE_string(\"subset_text_path\", None,\n \"Path to the TFRecord file with text.\")\nflags.DEFINE_string(\"subset_embeddings_ds_path\", None,\n \"Path to the TFRecord file with embeddings_ds.\")\nflags.DEFINE_integer(\"source_total\", None,\n \"Number of points in the original records\")\nflags.DEFINE_integer(\"subset_total\", None,\n \"Number of points desired for in the subset records.\")\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n\n absl_logging.use_python_logging()\n utils.log_module_args(LOGGER, argv[0])\n\n # Some checks for the flags\n utils.check_exists(FLAGS.source_text_path)\n utils.check_exists(os.path.dirname(FLAGS.subset_text_path))\n utils.check_exists(os.path.dirname(FLAGS.subset_embeddings_ds_path))\n utils.check_operator(operator.lt, FLAGS.subset_total, FLAGS.source_total)\n\n utils.check_glob_prefix(FLAGS.source_embeddings_prefix)\n\n # Select a random subset\n with utils.log_duration(LOGGER, \"main\", \"preparing indices\"):\n indices = np.random.choice(FLAGS.source_total, FLAGS.subset_total,\n replace=False)\n indices.sort()\n\n # Process the textual data\n # Much (5 min vs 2 h) faster than iterating through the records and writing\n # only those we want. 
An hypothesis for this is that\n # get_single_element would allow to get elements without parsing all of the\n # elements along the way, like simply iterating through the records would.\n # Or did they get constant time indexing in TFRecords?\n # Inspired by the ORQA codebase:\n # https://github.com/google-research/language/blob/master/language/orqa/models/orqa_model.py#L147\n with utils.log_duration(LOGGER, \"main\", \"preparing data\"):\n text_ds = tf.data.TFRecordDataset(FLAGS.source_text_path,\n buffer_size=512 * 1024 * 1024,\n num_parallel_reads=os.cpu_count())\n text_ds = text_ds.batch(FLAGS.source_total)\n text_ds = tf.data.experimental.get_single_element(text_ds)\n subset = tf.gather(text_ds, tf.constant(indices))\n\n with utils.log_duration(LOGGER, \"main\", \"writing text data\"):\n with tf.io.TFRecordWriter(FLAGS.subset_text_path) as text_writer:\n for text in tqdm.tqdm(subset, total=FLAGS.subset_total):\n text = text.numpy()\n # REALM's data uses no packaging of the data into features, etc.\n text_writer.write(text)\n\n with utils.log_duration(LOGGER, \"main\", \"All of the embedding task\"):\n # Process the embeddings data\n with tf.device(\"/cpu:0\"):\n with utils.log_duration(LOGGER, \"main\", \"Loading the checkpoint\"):\n embs = tf.train.load_checkpoint(FLAGS.source_embeddings_prefix\n ).get_tensor(\"block_emb\")\n utils.check_equal(embs.shape[0], FLAGS.source_total)\n\n with utils.log_duration(LOGGER, \"main\", \"taking a subset of the indices\"):\n subset = embs[indices]\n\n tf_db = tf.Variable(subset, shape=subset.shape)\n ckpt = tf.train.Checkpoint(block_emb=tf_db)\n\n with utils.log_duration(LOGGER, \"main\", \"Saving the checkpoint\"):\n ckpt.save(FLAGS.subset_embeddings_ds_path)\n\n LOGGER.debug(\"Done\")\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Eval a Keras model on embeddings.\"\"\"\n\nimport time\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow as tf\n\nfrom non_semantic_speech_benchmark.eval_embedding import metrics\nfrom non_semantic_speech_benchmark.eval_embedding.keras import get_data\nfrom non_semantic_speech_benchmark.eval_embedding.keras import models\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('file_pattern', None, 'Dataset location.')\nflags.DEFINE_string('embedding_name', None, 'Embedding name.')\nflags.DEFINE_alias('en', 'embedding_name')\nflags.DEFINE_string('embedding_dimension', None, 'Embedding dimension.')\nflags.DEFINE_alias('ed', 'embedding_dimension')\nflags.DEFINE_string('label_name', None, 'Name of label to use.')\nflags.DEFINE_list('label_list', None, 'List of possible label values.')\nflags.DEFINE_list('bucket_boundaries', ['99999'],\n 'bucket_boundaries for data. Default is all one bucket.')\n\nflags.DEFINE_integer('batch_size', None, 'The number of images in each batch.')\nflags.DEFINE_integer('tbs', None, 'not used')\n\nflags.DEFINE_integer('num_clusters', None, 'num_clusters')\nflags.DEFINE_alias('nc', 'num_clusters')\nflags.DEFINE_float('alpha_init', None, 'Initial autopool alpha.')\nflags.DEFINE_alias('ai', 'alpha_init')\nflags.DEFINE_boolean('use_batch_normalization', None,\n 'Whether to use batch normalization.')\nflags.DEFINE_alias('ubn', 'use_batch_normalization')\nflags.DEFINE_float('lr', None, 'not used')\n\nflags.DEFINE_string('logdir', None,\n 'Directory where the model was written to.')\n\nflags.DEFINE_string('eval_dir', None,\n 'Directory where the results are saved to.')\nflags.DEFINE_integer('take_fixed_data', None,\n 'If not `None`, take a fixed number of data elements.')\nflags.DEFINE_integer('timeout', 7200, 'Wait-for-checkpoint timeout.')\nflags.DEFINE_boolean('calculate_equal_error_rate', False,\n 'Whether to calculate the Equal Error Rate. 
Only '\n 'applicable for binary classification problems.')\n\n\ndef eval_and_report():\n \"\"\"Eval on voxceleb.\"\"\"\n logging.info('embedding_name: %s', FLAGS.embedding_name)\n logging.info('Logdir: %s', FLAGS.logdir)\n logging.info('Batch size: %s', FLAGS.batch_size)\n\n writer = tf.summary.create_file_writer(FLAGS.eval_dir)\n num_classes = len(FLAGS.label_list)\n model = models.get_keras_model(\n num_classes, FLAGS.use_batch_normalization,\n num_clusters=FLAGS.num_clusters, alpha_init=FLAGS.alpha_init)\n checkpoint = tf.train.Checkpoint(model=model)\n\n for ckpt in tf.train.checkpoints_iterator(\n FLAGS.logdir, timeout=FLAGS.timeout):\n assert 'ckpt-' in ckpt, ckpt\n step = ckpt.split('ckpt-')[-1]\n logging.info('Starting to evaluate step: %s.', step)\n\n checkpoint.restore(ckpt)\n\n logging.info('Loaded weights for eval step: %s.', step)\n\n reader = tf.data.TFRecordDataset\n ds = get_data.get_data(\n file_pattern=FLAGS.file_pattern,\n reader=reader,\n embedding_name=FLAGS.embedding_name,\n embedding_dim=FLAGS.embedding_dimension,\n label_name=FLAGS.label_name,\n label_list=FLAGS.label_list,\n bucket_boundaries=FLAGS.bucket_boundaries,\n bucket_batch_sizes=[FLAGS.batch_size] * (len(FLAGS.bucket_boundaries) + 1), # pylint:disable=line-too-long\n loop_forever=False,\n shuffle=False)\n logging.info('Got dataset for eval step: %s.', step)\n if FLAGS.take_fixed_data:\n ds = ds.take(FLAGS.take_fixed_data)\n\n acc_m = tf.keras.metrics.Accuracy()\n xent_m = tf.keras.metrics.CategoricalCrossentropy(from_logits=True)\n\n logging.info('Starting the ds loop...')\n count, ex_count = 0, 0\n all_logits, all_real = [], []\n s = time.time()\n for emb, y_onehot in ds:\n emb.shape.assert_has_rank(3)\n assert emb.shape[2] == FLAGS.embedding_dimension\n y_onehot.shape.assert_has_rank(2)\n assert y_onehot.shape[1] == len(FLAGS.label_list)\n\n logits = model(emb, training=False)\n all_logits.extend(logits.numpy()[:, 1])\n all_real.extend(y_onehot.numpy()[:, 1])\n acc_m.update_state(y_true=tf.argmax(y_onehot, 1),\n y_pred=tf.argmax(logits, 1))\n xent_m.update_state(y_true=y_onehot, y_pred=logits)\n ex_count += logits.shape[0]\n count += 1\n logging.info('Saw %i examples after %i iterations as %.2f secs...',\n ex_count, count,\n time.time() - s)\n if FLAGS.calculate_equal_error_rate:\n eer_score = metrics.calculate_eer(all_real, all_logits)\n auc_score = metrics.calculate_auc(all_real, all_logits)\n dprime_score = metrics.dprime_from_auc(auc_score)\n with writer.as_default():\n tf.summary.scalar('accuracy', acc_m.result().numpy(), step=int(step))\n tf.summary.scalar('xent_loss', xent_m.result().numpy(), step=int(step))\n tf.summary.scalar('auc', auc_score, step=int(step))\n tf.summary.scalar('dprime', dprime_score, step=int(step))\n if FLAGS.calculate_equal_error_rate:\n tf.summary.scalar('eer', eer_score, step=int(step))\n logging.info('Done with eval step: %s in %.2f secs.', step, time.time() - s)\n\n\ndef main(unused_argv):\n assert FLAGS.file_pattern\n assert FLAGS.embedding_name\n assert FLAGS.embedding_dimension\n assert FLAGS.label_name\n assert FLAGS.label_list\n assert FLAGS.bucket_boundaries\n assert FLAGS.batch_size\n assert FLAGS.logdir\n\n eval_and_report()\n\n\nif __name__ == '__main__':\n tf.compat.v2.enable_v2_behavior()\n assert tf.executing_eagerly()\n app.run(main)\n",
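The eval script above polls a training directory for new checkpoints, restores each one into the Keras model, streams the dataset through accuracy/cross-entropy metrics, and logs scalars. A stripped-down sketch of that loop with a toy model and dataset standing in for get_data/models (paths, the ckpt-N step parsing, and the data are placeholders, not the benchmark's API):

import tensorflow as tf

logdir = "/tmp/logdir"   # where training writes ckpt-* files (placeholder)
eval_dir = "/tmp/eval"   # where summaries go (placeholder)

model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
checkpoint = tf.train.Checkpoint(model=model)
writer = tf.summary.create_file_writer(eval_dir)

# Toy eval data: (embedding, one-hot label) pairs.
ds = tf.data.Dataset.from_tensor_slices(
    (tf.random.normal([32, 8]), tf.one_hot(tf.zeros([32], tf.int32), 2))).batch(8)

# Blocks until a new checkpoint appears (or the timeout expires), then evaluates it.
for ckpt in tf.train.checkpoints_iterator(logdir, timeout=60):
    step = int(ckpt.split("ckpt-")[-1])     # assumes prefixes of the form .../ckpt-N
    checkpoint.restore(ckpt).expect_partial()

    acc = tf.keras.metrics.Accuracy()
    xent = tf.keras.metrics.CategoricalCrossentropy(from_logits=True)
    for emb, y_onehot in ds:
        logits = model(emb, training=False)
        acc.update_state(y_true=tf.argmax(y_onehot, 1), y_pred=tf.argmax(logits, 1))
        xent.update_state(y_true=y_onehot, y_pred=logits)

    with writer.as_default():
        tf.summary.scalar("accuracy", acc.result(), step=step)
        tf.summary.scalar("xent_loss", xent.result(), step=step)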
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utility functions for operations on Model.\"\"\"\n\nimport ast\nimport os.path\nfrom typing import Sequence\n\nfrom kws_streaming.layers import modes\nfrom kws_streaming.layers.compat import tf\nfrom kws_streaming.layers.compat import tf1\nfrom kws_streaming.models import model_flags\nfrom kws_streaming.models import model_params\nfrom kws_streaming.models import models as kws_models\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras.engine import functional\n# pylint: enable=g-direct-tensorflow-import\n\n\ndef conv2d_bn(x,\n filters,\n kernel_size,\n padding='same',\n strides=(1, 1),\n activation='relu',\n use_bias=False,\n scale=False):\n \"\"\"Utility function to apply conv + BN.\n\n Arguments:\n x: input tensor.\n filters: filters in `Conv2D`.\n kernel_size: size of convolution kernel.\n padding: padding mode in `Conv2D`.\n strides: strides in `Conv2D`.\n activation: activation function applied in the end.\n use_bias: use bias for convolution.\n scale: scale batch normalization.\n\n Returns:\n Output tensor after applying `Conv2D` and `BatchNormalization`.\n \"\"\"\n\n x = tf.keras.layers.Conv2D(\n filters, kernel_size,\n strides=strides,\n padding=padding,\n use_bias=use_bias)(x)\n x = tf.keras.layers.BatchNormalization(scale=scale)(x)\n x = tf.keras.layers.Activation(activation)(x)\n return x\n\n\ndef save_model_summary(model, path, file_name='model_summary.txt'):\n \"\"\"Saves model topology/summary in text format.\n\n Args:\n model: Keras model\n path: path where to store model summary\n file_name: model summary file name\n \"\"\"\n with open(os.path.join(path, file_name), 'wt') as fd:\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x)) # pylint: disable=unnecessary-lambda\n model_summary = '\\n'.join(stringlist)\n fd.write(model_summary)\n\n\ndef _set_mode(model, mode):\n \"\"\"Set model's inference type and disable training.\"\"\"\n for i in range(len(model.layers)):\n config = model.layers[i].get_config()\n # for every layer set mode, if it has it\n if 'mode' in config:\n model.layers[i].mode = mode\n # with any mode of inference - training is False\n if 'training' in config:\n model.layers[i].training = False\n if mode == modes.Modes.NON_STREAM_INFERENCE:\n if 'unroll' in config:\n model.layers[i].unroll = True\n return model\n\n\ndef _get_input_output_states(model):\n \"\"\"Get input/output states of model with external states.\"\"\"\n input_states = []\n output_states = []\n for i in range(len(model.layers)):\n config = model.layers[i].get_config()\n # input output states exist only in layers with property 'mode'\n if 'mode' in config:\n input_states.append(model.layers[i].get_input_state())\n output_states.append(model.layers[i].get_output_state())\n return input_states, output_states\n\n\ndef _clone_model(model, input_tensors):\n \"\"\"Clone 
model with configs, except of weights.\"\"\"\n new_input_layers = {} # Cache for created layers.\n # pylint: disable=protected-access\n if input_tensors is not None:\n # Make sure that all input tensors come from a Keras layer.\n input_tensors = tf.nest.flatten(input_tensors)\n for i, input_tensor in enumerate(input_tensors):\n if not tf.keras.backend.is_keras_tensor(input_tensor):\n raise ValueError('Expected keras tensor but get', input_tensor)\n original_input_layer = model._input_layers[i]\n newly_created_input_layer = input_tensor._keras_history.layer\n new_input_layers[original_input_layer] = newly_created_input_layer\n\n model_config, created_layers = models._clone_layers_and_model_config(\n model, new_input_layers, models._clone_layer)\n # pylint: enable=protected-access\n\n # Reconstruct model from the config, using the cloned layers.\n input_tensors, output_tensors, created_layers = (\n functional.reconstruct_from_config(\n model_config, created_layers=created_layers))\n\n new_model = tf.keras.Model(input_tensors, output_tensors, name=model.name)\n return new_model\n\n\ndef _copy_weights(new_model, model):\n \"\"\"Copy weights of trained model to an inference one.\"\"\"\n\n def _same_weights(weight, new_weight):\n # Check that weights are the same\n # Note that states should be marked as non trainable\n return (weight.trainable == new_weight.trainable and\n weight.shape == new_weight.shape and\n weight.name[weight.name.rfind('/'):None] ==\n new_weight.name[new_weight.name.rfind('/'):None])\n\n if len(new_model.layers) != len(model.layers):\n raise ValueError(\n 'number of layers in new_model: %d != to layers number in model: %d ' %\n (len(new_model.layers), len(model.layers)))\n\n for i in range(len(model.layers)):\n layer = model.layers[i]\n new_layer = new_model.layers[i]\n\n # if number of weights in the layers are the same\n # then we can set weights directly\n if len(layer.get_weights()) == len(new_layer.get_weights()):\n new_layer.set_weights(layer.get_weights())\n elif layer.weights:\n k = 0 # index pointing to weights in the copied model\n new_weights = []\n # iterate over weights in the new_model\n # and prepare a new_weights list which will\n # contain weights from model and weight states from new model\n for k_new in range(len(new_layer.get_weights())):\n new_weight = new_layer.weights[k_new]\n new_weight_values = new_layer.get_weights()[k_new]\n same_weights = True\n\n # if there are weights which are not copied yet\n if k < len(layer.get_weights()):\n weight = layer.weights[k]\n weight_values = layer.get_weights()[k]\n if (weight.shape != weight_values.shape or\n new_weight.shape != new_weight_values.shape):\n raise ValueError('weights are not listed in order')\n\n # if there are weights available for copying and they are the same\n if _same_weights(weight, new_weight):\n new_weights.append(weight_values)\n k = k + 1 # go to next weight in model\n else:\n same_weights = False # weights are different\n else:\n same_weights = False # all weights are copied, remaining is different\n\n if not same_weights:\n # weight with index k_new is missing in model,\n # so we will keep iterating over k_new until find similar weights\n new_weights.append(new_weight_values)\n\n # check that all weights from model are copied to a new_model\n if k != len(layer.get_weights()):\n raise ValueError(\n 'trained model has: %d weights, but only %d were copied' %\n (len(layer.get_weights()), k))\n\n # now they should have the same number of weights with matched sizes\n # so we can set weights 
directly\n new_layer.set_weights(new_weights)\n return new_model\n\n\ndef _flatten_nested_sequence(sequence):\n \"\"\"Returns a flattened list of sequence's elements.\"\"\"\n if not isinstance(sequence, Sequence):\n return [sequence]\n result = []\n for value in sequence:\n result.extend(_flatten_nested_sequence(value))\n return result\n\n\ndef _get_state_shapes(model_states):\n \"\"\"Converts a nested list of states in to a flat list of their shapes.\"\"\"\n return [state.shape for state in _flatten_nested_sequence(model_states)]\n\n\ndef convert_to_inference_model(model, input_tensors, mode):\n \"\"\"Convert functional `Model` instance to a streaming inference.\n\n It will create a new model with new inputs: input_tensors.\n All weights will be copied. Internal states for streaming mode will be created\n Only functional Keras model is supported!\n\n Args:\n model: Instance of `Model`.\n input_tensors: list of input tensors to build the model upon.\n mode: is defined by modes.Modes\n\n Returns:\n An instance of streaming inference `Model` reproducing the behavior\n of the original model, on top of new inputs tensors,\n using copied weights.\n\n Raises:\n ValueError: in case of invalid `model` argument value or input_tensors\n \"\"\"\n\n # scope is introduced for simplifiyng access to weights by names\n scope_name = 'streaming'\n with tf.name_scope(scope_name):\n if not isinstance(model, tf.keras.Model):\n raise ValueError(\n 'Expected `model` argument to be a `Model` instance, got ', model)\n if isinstance(model, tf.keras.Sequential):\n raise ValueError(\n 'Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'got a `Sequential` instance instead:', model)\n # pylint: disable=protected-access\n if not model._is_graph_network:\n raise ValueError('Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'but got a subclass model instead.')\n # pylint: enable=protected-access\n model = _set_mode(model, mode)\n new_model = _clone_model(model, input_tensors)\n\n if mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:\n return _copy_weights(new_model, model)\n elif mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:\n input_states, output_states = _get_input_output_states(new_model)\n all_inputs = new_model.inputs + input_states\n all_outputs = new_model.outputs + output_states\n new_streaming_model = tf.keras.Model(all_inputs, all_outputs)\n new_streaming_model.input_shapes = _get_state_shapes(all_inputs)\n new_streaming_model.output_shapes = _get_state_shapes(all_outputs)\n\n # inference streaming model with external states\n # has the same number of weights with\n # non streaming model so we can use set_weights directly\n new_streaming_model.set_weights(model.get_weights())\n return new_streaming_model\n elif mode == modes.Modes.NON_STREAM_INFERENCE:\n new_model.set_weights(model.get_weights())\n return new_model\n else:\n raise ValueError('non supported mode ', mode)\n\n\ndef to_streaming_inference(model_non_stream, flags, mode):\n \"\"\"Convert non streaming trained model to inference modes.\n\n Args:\n model_non_stream: trained Keras model non streamable\n flags: settings with global data and model properties\n mode: it supports Non streaming inference, Streaming inference with internal\n states, Streaming inference with external states\n\n Returns:\n Keras inference model of inference_type\n \"\"\"\n tf.keras.backend.set_learning_phase(0)\n input_data_shape = modes.get_input_data_shape(flags, mode)\n input_tensors = [\n tf.keras.layers.Input(\n 
shape=input_data_shape, batch_size=1, name='input_audio')\n ]\n model_inference = convert_to_inference_model(model_non_stream, input_tensors,\n mode)\n return model_inference\n\n\ndef model_to_tflite(sess,\n model_non_stream,\n flags,\n mode=modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE,\n save_model_path=None,\n optimizations=None):\n \"\"\"Convert non streaming model to tflite inference model.\n\n In this case inference graph will be stateless.\n But model can be streaming stateful with external state or\n non streaming statless (depending on input arg mode)\n\n Args:\n sess: tf session\n model_non_stream: Keras non streamable model\n flags: settings with global data and model properties\n mode: inference mode it can be streaming with external state or non\n streaming\n save_model_path: path to save intermediate model summary\n optimizations: list of optimization options\n\n Returns:\n tflite model\n \"\"\"\n if mode not in (modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE,\n modes.Modes.NON_STREAM_INFERENCE):\n raise ValueError('mode %s is not supported ' % mode)\n # convert non streaming Keras model to\n # Keras inference model (non streaming or streaming)\n model_stateless_stream = to_streaming_inference(model_non_stream, flags, mode)\n\n if save_model_path:\n save_model_summary(model_stateless_stream, save_model_path)\n\n # convert Keras inference model to tflite inference model\n converter = tf1.lite.TFLiteConverter.from_session(\n sess, model_stateless_stream.inputs, model_stateless_stream.outputs)\n converter.inference_type = tf1.lite.constants.FLOAT\n\n # this will enable audio_spectrogram and mfcc in TFLite\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]\n converter.allow_custom_ops = True\n if optimizations:\n converter.optimizations = optimizations\n tflite_model = converter.convert()\n return tflite_model\n\n\n# in below code .from_tensor() instead of tf.TensorSpec is adding TensorSpec\n# which is not recognized here, so making TensorSpec visible too\nTensorSpec = tf.TensorSpec\n\n\ndef model_to_saved(model_non_stream,\n flags,\n save_model_path,\n mode=modes.Modes.STREAM_INTERNAL_STATE_INFERENCE):\n \"\"\"Convert Keras model to SavedModel.\n\n Depending on mode:\n 1 Converted inference graph and model will be streaming statefull.\n 2 Converted inference graph and model will be non streaming stateless.\n\n Args:\n model_non_stream: Keras non streamable model\n flags: settings with global data and model properties\n save_model_path: path where saved model representation with be stored\n mode: inference mode it can be streaming with external state or non\n streaming\n \"\"\"\n\n if mode not in (modes.Modes.STREAM_INTERNAL_STATE_INFERENCE,\n modes.Modes.NON_STREAM_INFERENCE):\n raise ValueError('mode %s is not supported ' % mode)\n\n if mode == modes.Modes.NON_STREAM_INFERENCE:\n model = model_non_stream\n else:\n # convert non streaming Keras model to Keras streaming model, internal state\n model = to_streaming_inference(model_non_stream, flags, mode)\n\n save_model_summary(model, save_model_path)\n model.save(save_model_path, include_optimizer=False, save_format='tf')\n\n\ndef parse(text):\n \"\"\"Parse model parameters.\n\n Args:\n text: string with layer parameters: '128,128' or \"'relu','relu'\".\n\n Returns:\n list of parsed parameters\n \"\"\"\n if not text:\n return []\n res = ast.literal_eval(text)\n if isinstance(res, tuple):\n return res\n else:\n return [res]\n\n\ndef next_power_of_two(x):\n \"\"\"Calculates the smallest enclosing power of two for 
an input.\n\n Args:\n x: Positive float or integer number.\n\n Returns:\n Next largest power of two integer.\n \"\"\"\n return 1 if x == 0 else 2**(int(x) - 1).bit_length()\n\n\ndef get_model_with_default_params(model_name, mode=None):\n \"\"\"Creates a model with the params specified in HOTWORD_MODEL_PARAMS.\"\"\"\n if model_name not in model_params.HOTWORD_MODEL_PARAMS:\n raise KeyError(\n \"Expected 'model_name' to be one of \"\n f\"{model_params.HOTWORD_MODEL_PARAMS.keys} but got '{model_name}'.\")\n params = model_params.HOTWORD_MODEL_PARAMS[model_name]\n params = model_flags.update_flags(params)\n model = kws_models.MODELS[params.model_name](params)\n if mode is not None:\n model = to_streaming_inference(model, flags=params, mode=mode)\n return model\n\n\ndef traverse_graph(prev_layer, layers):\n \"\"\"Traverse keras sequential graph.\"\"\"\n for layer in layers:\n if isinstance(layer, (tf.keras.Sequential, tf.keras.Model)):\n prev_layer = traverse_graph(prev_layer, layer.layers)\n else:\n prev_layer = layer(prev_layer)\n return prev_layer\n\n\ndef sequential_to_functional(model):\n \"\"\"Converts keras sequential model to functional one.\"\"\"\n input_layer = tf.keras.Input(\n batch_input_shape=model.layers[0].input_shape[0])\n prev_layer = input_layer\n prev_layer = traverse_graph(prev_layer, model.layers[1:])\n func_model = tf.keras.Model([input_layer], [prev_layer])\n return func_model\n",
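A toy illustration of the sequential_to_functional idea above: re-apply the trained Sequential layers to a fresh Keras Input so the result is a functional Model that shares the same weights, which can then go through the usual conversion paths. The model itself is invented for the example:

import numpy as np
import tensorflow as tf

seq = tf.keras.Sequential([
    tf.keras.Input(shape=(16,)),
    tf.keras.layers.Dense(8, activation="relu"),
    tf.keras.layers.Dense(2),
])

# Rebuild the graph on a new input tensor; the layer objects (and weights) are reused.
inputs = tf.keras.Input(shape=(16,))
outputs = inputs
for layer in seq.layers:
    outputs = layer(outputs)
func = tf.keras.Model(inputs, outputs)

# Both models produce identical outputs because they share weights.
x = np.random.rand(4, 16).astype(np.float32)
np.testing.assert_allclose(seq(x).numpy(), func(x).numpy(), rtol=1e-6)

# The functional model can then be handed to the standard TF2 TFLite converter.
tflite_model = tf.lite.TFLiteConverter.from_keras_model(func).convert()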
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Script to validate and dedup MS Marco OpenKP dataset.\n\nSome urls appear multiple times in the MS Marco OpenKP dataset: 6x in the dev\nand eval sets; ~50 urls appear ~20x in the train set. This script:\n1. creates new json files with one example per url,\n2. drops KeyPhrases that do not occur in text (no attempt to fix punctuation),\n3. keeps at most 3 KeyPhrases.\n\nOpenKPDev.jsonl: keeps 6610 examples out of 6616.\nOpenKPTrain.jsonl: keeps 133724 examples out of 134894.\nOpenKPEvalPublic.jsonl: keeps 6613 examples out of 6614.\n\"\"\"\n\nimport json\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow.compat.v1 as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'input_file', None,\n 'Jsonl file with the MS Marco OpenKP dataset (train or dev).')\n\nflags.DEFINE_string('output_file', None, 'Output jsonl file.')\n\nflags.DEFINE_boolean(\n 'is_eval', False,\n 'Set True for eval files, and leave False for train files with KeyPhrases '\n 'defined.')\n\n\ndef is_word_start(text, i):\n if i == 0:\n return True\n elif i > 0 and text[i - 1] == ' ':\n return True\n else:\n return False\n\n\ndef is_word_stop(text, i):\n if i == len(text):\n return True\n elif i > 0 and text[i] == ' ':\n return True\n else:\n return False\n\n\ndef is_keyphrase_whole_words(text, keyphrase_list):\n \"\"\"Check if keyphrase is contained in text ans formed by whole words.\"\"\"\n keyphrase = ' '.join(\n [item.strip() for item in keyphrase_list if item.strip()])\n keyphrase_lower = keyphrase.lower()\n n = len(keyphrase)\n # The keyphrase has to be a full word match.\n j = text.find(keyphrase_lower)\n while j >= 0:\n if is_word_start(text, j) and is_word_stop(text, j + n):\n return True, [keyphrase]\n j = text.find(keyphrase_lower, j + 1)\n return False, [keyphrase]\n\n\ndef main(unused_argv):\n lines = tf.gfile.Open(FLAGS.input_file).readlines()\n json_lines = [json.loads(line) for line in lines]\n\n with tf.gfile.Open(FLAGS.output_file, 'w') as fout:\n # Go through all the data line by line and check types of the data.\n counter_url = 0\n for line in json_lines:\n counter_url += 1\n url = line['url']\n assert isinstance(url, str)\n\n text = line['text']\n assert isinstance(text, str)\n text_split = text.split(' ')\n\n if not FLAGS.is_eval:\n keyphrases = line['KeyPhrases']\n assert isinstance(keyphrases, list)\n\n vdom_str = line['VDOM']\n assert isinstance(vdom_str, str)\n\n vdom = json.loads(vdom_str)\n assert isinstance(vdom, list)\n\n # Check that the vdom text actually occurs in the provided document text.\n # And that the VDOMS are contiguous and cover the whole document text.\n url_id = -1\n assert vdom\n assert vdom[0]['start_idx'] == 0 # First VDOM starts at position 0.\n assert vdom[-1]['end_idx'] == len(text_split) # Last VDOM ends at end.\n for i in range(len(vdom)):\n el = vdom[i]\n el_id = el['Id'] # Seems to be example id, not element id.\n el_text = el['text']\n 
start_idx = el['start_idx']\n end_idx = el['end_idx']\n if i == 0:\n url_id = el_id\n else:\n assert el_id == url_id\n # Check that the VDOM text matches the correct position in the document\n # text.\n assert el_text == ' '.join(text_split[start_idx:end_idx])\n # Check that the VDOM elements are consecutively covering the document\n if i > 0:\n assert start_idx == vdom[i - 1]['end_idx']\n print('Processed %d json lines.' % counter_url)\n\n if FLAGS.is_eval:\n counter_url = 0\n url_set = set()\n for line in json_lines:\n url = line['url']\n if url in url_set:\n print('Duplicate url:', url)\n # There is only one duplicate in the eval set, and we keep its first\n # occurrence.\n continue\n url_set.add(url)\n counter_url += 1\n fout.write(json.dumps(line))\n fout.write('\\n')\n print('Processed %d urls.' % counter_url)\n else: # Train or dev file.\n # Count valid KeyPhrases and pick the examples with most valid KeyPhrases.\n counter_url = 0\n url2valid_line = {} # Store tuple: url -> (valid_keyphrases, line number)\n num_valid_keyphrase = []\n for iline, line in enumerate(json_lines):\n counter_url += 1\n url = line['url']\n text = line['text'].lower()\n keyphrases = line['KeyPhrases']\n\n valid_keyphrases = []\n for keyphrase_list in keyphrases:\n is_valid, keyphrase = is_keyphrase_whole_words(text, keyphrase_list)\n if is_valid:\n valid_keyphrases.append(keyphrase)\n # else:\n # print('invalid:', keyphrase, url)\n assert valid_keyphrases\n num_valid_keyphrase.append(len(valid_keyphrases))\n # Keep at most 3 keyphrases (the dev set has up to 5 for some examples).\n line['KeyPhrases'] = valid_keyphrases[:3]\n\n if url not in url2valid_line:\n url2valid_line[url] = []\n # Sorting this will take the example with most keyphrases, and lowest\n # line number.\n url2valid_line[url].append((-len(valid_keyphrases), iline))\n print('Processed %d json lines.' % counter_url)\n print('set(num_valid_keyphrase):', set(num_valid_keyphrase))\n\n counter_url = 0\n for url in url2valid_line:\n counter_url += 1\n pairs = url2valid_line[url]\n pairs.sort()\n if pairs[0][0] == 0:\n print('zero valid keyphrases:', url)\n else:\n if len(pairs) > 1:\n print('dropping some examples for:', url)\n # This is the line of the selected example.\n iline = pairs[0][1]\n # Dump the example\n fout.write(json.dumps(json_lines[iline]))\n fout.write('\\n')\n print('Processed %d urls.' % counter_url)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('input_file')\n flags.mark_flag_as_required('output_file')\n app.run(main)\n",
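The dedup script's core check is the whole-word substring match in is_keyphrase_whole_words: a keyphrase only counts if it occurs in the document text aligned on word boundaries. A self-contained version of that logic, simplified to a single string argument:

def _is_word_start(text, i):
    return i == 0 or text[i - 1] == " "

def _is_word_stop(text, i):
    return i == len(text) or text[i] == " "

def contains_whole_word_phrase(text, phrase):
    """True if `phrase` occurs in `text` aligned on word boundaries."""
    text, phrase = text.lower(), phrase.lower()
    j = text.find(phrase)
    while j >= 0:
        if _is_word_start(text, j) and _is_word_stop(text, j + len(phrase)):
            return True
        j = text.find(phrase, j + 1)
    return False

assert contains_whole_word_phrase("open keyphrase extraction", "keyphrase extraction")
assert not contains_whole_word_phrase("open keyphrase extraction", "phrase extraction")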
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Optimizers based on scalarization.\n\nOne of the simplest approaches to optimizing multi-loss problems is to scalarize\nto a real objective by combining the individual losses. Depending on how the\nscalarization is performed, different optimization algorithms arise.\n\"\"\"\n\nimport gin\nimport tensorflow.compat.v1 as tf\n\n\nfrom yoto.optimizers import base as optimizers_base\nfrom yoto.optimizers import distributions\n\n\[email protected](\"LinearlyScalarizedOptimizer\")\nclass LinearlyScalarizedOptimizer(optimizers_base.MultiLossOptimizer):\n r\"\"\"An optimizer that linearly scalarizes the losss.\n\n Namely, if the losses are loss_1, ..., loss_n, then it minimizes\n \\sum_i loss_i * weight_i,\n for fixed weights. The weights can be either randomly drawn from one of the\n supported distributions, or fixed.\n \"\"\"\n\n def __init__(self, problem, weights,\n batch_size=None, seed=17):\n \"\"\"Initializes the optimizer.\n\n Args:\n problem: An instance of `problems.Problem`.\n weights: Either `distributions.DistributionSpec` class or a\n dictionary mapping the loss names to their corresponding\n weights.\n batch_size: Passed to the initializer of `MultiLossOptimizer`.\n seed: random seed to be used for sampling the weights.\n \"\"\"\n super(LinearlyScalarizedOptimizer, self).__init__(\n problem, batch_size=batch_size)\n sampled_weights = distributions.get_samples_as_dicts(\n weights, names=self._losses_names, seed=seed)[0]\n self._check_weights_dict(sampled_weights)\n self._weights = sampled_weights\n\n def compute_train_loss_and_update_op(self, inputs, base_optimizer):\n losses, metrics = self._problem.losses_and_metrics(inputs, training=True)\n del metrics\n linearized_loss = 0.\n for loss_name, loss_value in losses.items():\n linearized_loss += tf.reduce_mean(loss_value * self._weights[loss_name])\n train_op = base_optimizer.minimize(\n linearized_loss, global_step=tf.train.get_or_create_global_step())\n self.normal_vars = tf.trainable_variables()\n return linearized_loss, train_op\n\n def compute_eval_loss(self, inputs):\n losses, metrics = self._problem.losses_and_metrics(inputs, training=False)\n del metrics\n linearized_loss = 0.\n for loss_name, loss_value in losses.items():\n linearized_loss += tf.reduce_mean(loss_value * self._weights[loss_name])\n return linearized_loss\n",
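Linear scalarization, as used by LinearlyScalarizedOptimizer above, reduces the multi-loss problem to minimizing sum_i weight_i * loss_i for fixed weights. A minimal TF2 sketch of one such update step with two made-up loss terms and weights (illustrative only, not the yoto/TF1 estimator API):

import tensorflow as tf

w = tf.Variable([1.0, -1.0])             # toy model parameters
weights = {"l2": 0.7, "sparsity": 0.3}   # fixed scalarization weights (made up)
opt = tf.keras.optimizers.SGD(0.1)

def losses():
    # Two individual losses, keyed by name like the optimizer's loss dict.
    return {"l2": tf.reduce_sum(w ** 2), "sparsity": tf.reduce_sum(tf.abs(w))}

with tf.GradientTape() as tape:
    scalarized = tf.add_n(
        [weights[name] * value for name, value in losses().items()])
grads = tape.gradient(scalarized, [w])
opt.apply_gradients(zip(grads, [w]))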
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ETC model for NQ.\"\"\"\n\nimport collections\nimport functools\nfrom typing import Mapping, Text\n\nimport attr\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom etcmodel import tensor_utils\nfrom etcmodel.models import input_utils\nfrom etcmodel.models import modeling\nfrom etcmodel.models import optimization\n\n\ndef input_fn_builder(input_file, flags, etc_model_config, is_training,\n drop_remainder, num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n name_to_features = {\n \"unique_ids\":\n tf.FixedLenFeature([], tf.int64),\n \"token_ids\":\n tf.FixedLenFeature([flags.max_seq_length], tf.int64),\n \"token_pos\":\n tf.FixedLenFeature([flags.max_seq_length], tf.int64),\n \"candidate_ids\":\n tf.FixedLenFeature([flags.max_seq_length], tf.int64),\n \"sentence_ids\":\n tf.FixedLenFeature([flags.max_seq_length], tf.int64),\n \"long_breakpoints\":\n tf.FixedLenFeature([flags.max_seq_length], tf.int64),\n \"global_token_ids\":\n tf.FixedLenFeature([flags.max_global_seq_length], tf.int64),\n \"global_breakpoints\":\n tf.FixedLenFeature([flags.max_global_seq_length], tf.int64),\n \"sa_start\":\n tf.FixedLenFeature([], tf.int64),\n \"sa_end\":\n tf.FixedLenFeature([], tf.int64),\n \"la_start\":\n tf.FixedLenFeature([], tf.int64),\n \"la_end\":\n tf.FixedLenFeature([], tf.int64),\n \"answer_type\":\n tf.FixedLenFeature([], tf.int64)\n }\n\n def decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n for name in list(example.keys()):\n if name != \"unique_ids\":\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n d = tf.data.Dataset.list_files(input_file, shuffle=is_training)\n d = d.apply(\n tf.data.experimental.parallel_interleave(\n functools.partial(tf.data.TFRecordDataset, compression_type=\"GZIP\"),\n cycle_length=num_cpu_threads,\n sloppy=False))\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n d = d.prefetch(tf.data.experimental.AUTOTUNE)\n d = d.map(\n functools.partial(input_utils.add_side_input_features,\n etc_model_config), tf.data.experimental.AUTOTUNE)\n return d.prefetch(tf.data.experimental.AUTOTUNE)\n\n return input_fn\n\n\ndef build_model(etc_model_config, features, is_training, flags):\n \"\"\"Build an ETC model.\"\"\"\n token_ids = features[\"token_ids\"]\n global_token_ids = features[\"global_token_ids\"]\n\n model = modeling.EtcModel(\n config=etc_model_config,\n is_training=is_training,\n use_one_hot_relative_embeddings=flags.use_tpu)\n\n model_inputs = 
dict(token_ids=token_ids, global_token_ids=global_token_ids)\n for field in attr.fields(input_utils.GlobalLocalTransformerSideInputs):\n if field.name in features:\n model_inputs[field.name] = features[field.name]\n\n # Get the logits for the start and end predictions.\n l_final_hidden, _ = model(**model_inputs)\n\n l_final_hidden_shape = tensor_utils.get_shape_list(\n l_final_hidden, expected_rank=3)\n\n batch_size = l_final_hidden_shape[0]\n l_seq_length = l_final_hidden_shape[1]\n hidden_size = l_final_hidden_shape[2]\n\n num_answer_types = 5 # NULL, YES, NO, LONG, SHORT\n\n # We add a dense layer to the long output:\n l_output_weights = tf.get_variable(\n \"cls/nq/long_output_weights\", [4, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n l_output_bias = tf.get_variable(\n \"cls/nq/long_output_bias\", [4], initializer=tf.zeros_initializer())\n l_final_hidden_matrix = tf.reshape(l_final_hidden,\n [batch_size * l_seq_length,\n hidden_size])\n l_logits = tf.matmul(l_final_hidden_matrix, l_output_weights,\n transpose_b=True)\n l_logits = tf.nn.bias_add(l_logits, l_output_bias)\n l_logits = tf.reshape(l_logits, [batch_size, l_seq_length, 4])\n\n if flags.mask_long_output:\n # Mask out invalid SA/LA start/end positions:\n # 1) find the SEP and CLS tokens:\n long_sep = tf.cast(tf.equal(token_ids, flags.sep_tok_id), tf.int32)\n long_not_sep = 1 - long_sep\n long_cls = tf.cast(tf.equal(token_ids, flags.cls_tok_id), tf.int32)\n\n # 2) accum sum the SEPs, and the only possible answers are those with sum\n # equal to 1 (except SEPs) and the CLS position\n l_mask = tf.cast(tf.equal(tf.cumsum(long_sep, axis=-1), 1), tf.int32)\n l_mask = 1 - ((l_mask * long_not_sep) + long_cls)\n\n # 3) apply the mask to the logits\n l_mask = tf.expand_dims(tf.cast(l_mask, tf.float32) * -10E8, 2)\n l_logits = tf.math.add(l_logits, l_mask)\n\n # Get the logits for the answer type prediction.\n answer_type_output_layer = l_final_hidden[:, 0, :]\n answer_type_hidden_size = answer_type_output_layer.shape[-1].value\n\n answer_type_output_weights = tf.get_variable(\n \"answer_type_output_weights\", [num_answer_types,\n answer_type_hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n answer_type_output_bias = tf.get_variable(\n \"answer_type_output_bias\", [num_answer_types],\n initializer=tf.zeros_initializer())\n\n answer_type_logits = tf.matmul(\n answer_type_output_layer, answer_type_output_weights, transpose_b=True)\n answer_type_logits = tf.nn.bias_add(answer_type_logits,\n answer_type_output_bias)\n\n extra_model_losses = model.losses\n\n l_logits = tf.transpose(l_logits, [2, 0, 1])\n l_unstacked_logits = tf.unstack(l_logits, axis=0)\n return ([l_unstacked_logits[i] for i in range(4)], answer_type_logits,\n extra_model_losses)\n\n\ndef model_fn_builder(etc_model_config, num_train_steps, num_warmup_steps,\n flags):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n position_logits, answer_type_logits, extra_model_losses = build_model(\n etc_model_config, features, is_training, flags)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if flags.init_checkpoint:\n 
(assignment_map, initialized_variable_names\n ) = input_utils.get_assignment_map_from_checkpoint(\n tvars, flags.init_checkpoint)\n if flags.use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(flags.init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(flags.init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n else:\n init_string = \", *RANDOM_INIT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Computes the loss for positions.\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # Computes the loss for labels.\n def compute_label_loss(logits, labels):\n one_hot_labels = tf.one_hot(labels, depth=5, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_labels * log_probs, axis=-1))\n return loss\n\n labels = [\n features[\"sa_start\"], features[\"sa_end\"], features[\"la_start\"],\n features[\"la_end\"]]\n loss = (sum(\n compute_loss(position_logits[idx], label, flags.max_seq_length)\n for idx, label in enumerate(labels)) + compute_label_loss(\n answer_type_logits, features[\"answer_type\"])) / 5.0\n\n if extra_model_losses:\n loss += tf.math.add_n(extra_model_losses)\n\n train_op = optimization.create_optimizer(\n loss, flags.learning_rate, num_train_steps, num_warmup_steps,\n flags.use_tpu, flags.optimizer, flags.poly_power,\n flags.start_warmup_step, flags.learning_rate_schedule)\n\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op, scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": tf.identity(features[\"unique_ids\"]),\n \"token_ids\": tf.identity(features[\"token_ids\"]),\n \"token_pos\": tf.identity(features[\"token_pos\"]),\n \"candidate_ids\": tf.identity(features[\"candidate_ids\"]),\n \"answer_type_logits\": answer_type_logits,\n }\n output_names = [\"sa_start\", \"sa_end\", \"la_start\", \"la_end\"]\n\n for idx, output_name in enumerate(output_names):\n predictions[output_name] = tf.identity(features[output_name])\n if output_name == \"la_global\":\n # propagate the ground truth:\n predictions[\"la_start\"] = tf.identity(features[\"la_start\"])\n predictions[\"la_end\"] = tf.identity(features[\"la_end\"])\n values, indices = tf.compat.v1.math.top_k(position_logits[idx], k=50)\n predictions[output_name + \"_pred\"] = indices\n predictions[output_name + \"_logit\"] = values\n predictions[output_name + \"_logit0\"] = position_logits[idx][:, 0]\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and PREDICT modes are supported: %s\" %\n (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef process_prediction(prediction: Mapping[Text, np.ndarray], writer\n ) -> None:\n \"\"\"Processes a single TF `Estimator.predict` prediction.\n\n Args:\n prediction: Prediction from `Estimator.predict` for a single example.\n writer: An open 
`tf.python_io.TFRecordWriter` to write to.\n \"\"\"\n\n def create_float_feature(values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n\n # Scalar int64 features:\n features[\"unique_ids\"] = create_int_feature([prediction[\"unique_ids\"]])\n\n # Vector int64 features:\n # for name in [\"token_ids\", \"token_pos\", \"candidate_ids\"]:\n # features[name] = create_int_feature(result[name])\n token_pos = prediction[\"token_pos\"]\n\n # Span outputs:\n output_names = [\"sa_start\", \"sa_end\", \"la_start\", \"la_end\"]\n for name in output_names:\n # Scalar int64 output.\n features[name] = create_int_feature([prediction[name]])\n\n # ground truth:\n features[name + \"_mapped\"] = create_int_feature(\n [token_pos[prediction[name]]])\n\n # Vector int64 output.\n output_name = name + \"_pred\"\n features[output_name] = create_int_feature(prediction[output_name])\n features[output_name + \"_mapped\"] = create_int_feature(\n [token_pos[t] for t in prediction[output_name]])\n\n # Vector float output.\n output_name = name + \"_logit\"\n features[output_name] = create_float_feature(prediction[output_name])\n\n # Scalar float output.\n output_name = name + \"_logit0\"\n features[output_name] = create_float_feature([prediction[output_name]])\n\n features[\"answer_type_logits\"] = create_float_feature(\n prediction[\"answer_type_logits\"])\n\n writer.write(\n tf.train.Example(features=tf.train.Features(\n feature=features)).SerializeToString())\n",
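The NQ training loss above averages four one-hot span losses (short/long answer start and end positions) with an answer-type loss. Each term is a standard negative log-likelihood over positions or classes; a standalone sketch with random logits, mirroring compute_loss/compute_label_loss:

import tensorflow as tf

def position_loss(logits, positions, depth):
    """-log softmax probability of the gold position, averaged over the batch."""
    one_hot = tf.one_hot(positions, depth=depth, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    return -tf.reduce_mean(tf.reduce_sum(one_hot * log_probs, axis=-1))

batch, seq_len, num_answer_types = 2, 10, 5
span_logits = [tf.random.normal([batch, seq_len]) for _ in range(4)]  # sa/la start/end
type_logits = tf.random.normal([batch, num_answer_types])
span_labels = [tf.constant([1, 3]) for _ in range(4)]
type_labels = tf.constant([0, 4])

# Five equally weighted terms, as in the model_fn above.
loss = (sum(position_loss(l, p, seq_len)
            for l, p in zip(span_logits, span_labels))
        + position_loss(type_logits, type_labels, num_answer_types)) / 5.0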
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# pylint:disable=line-too-long\nr\"\"\"Exports a graph as a saved model.\n\n\"\"\"\n# pylint:enable=line-too-long\n\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nfrom non_semantic_speech_benchmark.export_model import tf_frontend\n\n\nflags.DEFINE_string('export_dir', None, 'Location and name of SavedModel.')\nflags.DEFINE_string('trill_model_location', None,\n 'Location of TRILL SavedModel, with no frontend.')\nflags.DEFINE_string('trill_distilled_model_location', None,\n 'Location of TRILL-distilled SavedModel, with no frontend.')\n\n\nFLAGS = flags.FLAGS\n\n\[email protected]\ndef _sample_to_features(x, export_tflite=False):\n return tf_frontend.compute_frontend_features(\n x, 16000, overlap_seconds=79, tflite=export_tflite)\n\n\nclass TRILLModule(tf.train.Checkpoint):\n \"\"\"TRILL module for TF 1 and 2.\n\n \"\"\"\n\n def __init__(self, savedmodel_dir, distilled_output_keys, tflite):\n super(TRILLModule, self).__init__()\n self.trill_module = hub.load(savedmodel_dir)\n assert len(self.trill_module.signatures.keys()) == 1\n self.sig_key = list(self.trill_module.signatures.keys())[0]\n self.variables = self.trill_module.variables\n self.trainable_variables = self.trill_module.variables\n self.tflite = tflite\n\n self.distilled_output_keys = distilled_output_keys\n\n @tf.function\n def __call__(self, samples, sample_rate):\n \"\"\"Runs model.\n\n Args:\n samples: A 1-D or 2-D array. If integers, is automatically cast as a\n float.\n sample_rate: Sample rate. Must be 16 kHz.\n\n Returns:\n A dictionary of embeddings.\n \"\"\"\n tf.debugging.assert_equal(\n sample_rate, 16000, message='Sample rate must be 16kHz. '\n 'Instead, was %s' % sample_rate)\n if samples.shape.ndims > 2:\n raise ValueError('Samples must be 1 or 2 dimensional. 
Instead, found %s' %\n samples.shape.ndims)\n has_batchdim = samples.shape.ndims == 2\n\n # Compute frontend features.\n assert isinstance(samples, tf.Tensor)\n if has_batchdim:\n if self.tflite:\n features = tf.map_fn(\n _sample_to_features, (samples, True), dtype=tf.float64)\n else:\n features = tf.map_fn(_sample_to_features, samples, dtype=tf.float64)\n assert features.shape.rank == 4\n f_shape = tf.shape(features)\n else:\n features = _sample_to_features(samples, self.tflite)\n assert features.shape.rank == 3\n f_shape = tf.shape(features)\n\n # Cast features to tf.float32, if necessary.\n if features.dtype == tf.float64:\n features = tf.cast(features, tf.float32)\n\n # Reshape batch dimension, if necessary, and run inference.\n def _maybe_unbatch(f):\n if has_batchdim:\n return tf.reshape(f, [f_shape[0] * f_shape[1], f_shape[2], f_shape[3]])\n else:\n return f\n def _maybe_batch(n):\n assert n.shape.rank == 2\n if has_batchdim:\n feat_dim = n.shape[-1]\n out = tf.reshape(n, [f_shape[0], -1, feat_dim])\n out.set_shape([None, None, feat_dim])\n return out\n else:\n return n\n net_endpoints = self.trill_module.signatures[self.sig_key](\n _maybe_unbatch(features))\n if self.distilled_output_keys:\n emb = net_endpoints['tower0/network/layer26/embedding']\n out_dict = dict(embedding=_maybe_batch(emb))\n else:\n layer19 = tf.keras.backend.batch_flatten(\n net_endpoints['tower0/network/layer19/chain1/layer0/conv/BiasAdd'])\n layer19.set_shape([None, 12288])\n layer19 = _maybe_batch(layer19)\n out_dict = dict(\n layer19=layer19,\n embedding=_maybe_batch(net_endpoints['normalizing']))\n\n return out_dict\n\n\ndef make_and_export_trill(savedmodel_dir,\n distilled_output_keys,\n allow_batch_dimension,\n fixed_length_input,\n tflite_only):\n \"\"\"Make and export TRILL or TRILL distilled.\n\n Args:\n savedmodel_dir: Directory with frontend-less SavedModel.\n distilled_output_keys: Boolean. 
Whether exporting the distilled model.\n allow_batch_dimension: Whether to allow batch dimensions.\n fixed_length_input: Length of input, or `None` for variable length.\n tflite_only: Whether to export models suitable for mobile inference with\n TensorFlow Lite.\n\n Returns:\n (signatures, module)\n \"\"\"\n trill_mod = TRILLModule(\n savedmodel_dir, distilled_output_keys, tflite_only)\n\n signature = None\n if tflite_only:\n # For TFLite inference, we only generate float32 models with fixed input\n # and no batch dim.\n signature = trill_mod.__call__.get_concrete_function(\n tf.TensorSpec([fixed_length_input], tf.float32), tf.constant(16000))\n signatures = {'inference': signature}\n else:\n for dtype in (tf.int16, tf.float32, tf.float64):\n signature = trill_mod.__call__.get_concrete_function(\n tf.TensorSpec([fixed_length_input], dtype), tf.constant(16000))\n if allow_batch_dimension:\n trill_mod.__call__.get_concrete_function(\n tf.TensorSpec([None, fixed_length_input], dtype),\n tf.constant(16000))\n signatures = None\n\n return signatures, trill_mod\n\n\ndef convert_tflite_file(model_dir):\n \"\"\"Make and export TRILL and TRILL distilled.\"\"\"\n converter = tf.lite.TFLiteConverter.from_saved_model(\n saved_model_dir=model_dir, signature_keys=['inference'])\n\n # TODO(srjoglekar): Explore quantization later.\n converter.optimizations = []\n converter.post_training_quantize = False\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet\n .TFLITE_BUILTINS, # enable TensorFlow Lite builtin ops only.\n ]\n\n output_data = converter.convert()\n output_path = os.path.join(model_dir, 'model.tflite')\n if not tf.io.gfile.exists(model_dir):\n tf.io.gfile.makedirs(model_dir)\n with tf.io.gfile.GFile(output_path, 'wb') as f:\n f.write(output_data)\n return output_path\n\n\ndef construct_savedmodel_dir(export_dir, distilled_model, tflite,\n allow_batch_dimension, fixed_length_input):\n name = 'trill-distilled' if distilled_model else 'trill'\n suffix = '_tflite' if tflite else ''\n bd_str = 'wbatchdim' if allow_batch_dimension else 'nobatchdim'\n fl_str = f'fixedlen{fixed_length_input}' if fixed_length_input else 'nofixedlen'\n return os.path.join(export_dir, f'{name}{suffix}_{bd_str}_{fl_str}')\n\n\ndef test_module(out_dir,\n allow_batch_dimension=True,\n fixed_length_input=None,\n tflite_only=False):\n \"\"\"Test that the exported doesn't crash.\"\"\"\n if out_dir.endswith('tflite'):\n # TODO(joelshor, srjoglekar): Load TFLite model here.\n return\n else:\n model = hub.load(out_dir)\n sr = tf.constant(16000)\n input_len = fixed_length_input or 320000\n logging.info('Input length: %s', input_len)\n\n proper_shape = tf.random.uniform([input_len], -1.0, 1.0, tf.float32)\n model(proper_shape, sr)\n if allow_batch_dimension:\n proper_shape = tf.random.uniform([5, input_len], -1.0, 1.0, tf.float32)\n model(proper_shape, sr)\n\n if not tflite_only:\n # TfLite does not support these types, and uses fixed sizes.\n proper_shape = tf.random.uniform([input_len], -1.0, 1.0, tf.float64)\n model(proper_shape, sr)\n if allow_batch_dimension:\n proper_shape = tf.random.uniform([5, input_len], -1.0, 1.0, tf.float64)\n model(proper_shape, sr)\n\n proper_shape = np.random.randint(\n 0, high=10000, size=(input_len), dtype=np.int16)\n model(proper_shape, sr)\n if allow_batch_dimension:\n proper_shape = np.random.randint(\n 0, high=10000, size=(5, input_len), dtype=np.int16)\n model(proper_shape, sr)\n\n if fixed_length_input is None:\n short_shape = np.random.randint(\n 0, high=10000, size=(5000), 
dtype=np.int16)\n model(short_shape, sr)\n if allow_batch_dimension:\n short_shape = np.random.randint(\n 0, high=10000, size=(5, 5000), dtype=np.int16)\n model(short_shape, sr)\n\n try:\n model(short_shape, tf.constant(8000))\n assert False\n except tf.errors.InvalidArgumentError:\n pass\n\n # Check variables.\n assert model.variables\n assert model.trainable_variables\n assert not model._is_hub_module_v1 # pylint:disable=protected-access\n\n\ndef main(unused_argv):\n\n # pylint: disable=bad-whitespace\n t_loc = FLAGS.trill_model_location\n d_loc = FLAGS.trill_distilled_model_location\n model_params = [\n # loc, distilled batch dim, input len, tflite\n (t_loc, False, True, None, False),\n (t_loc, False, False, 16000, False),\n (d_loc, True, True, None, False),\n (d_loc, True, False, 16000, False),\n (d_loc, True, True, None, True),\n (d_loc, True, False, None, True),\n (d_loc, True, False, 16000, True),\n ]\n # pylint: enable=bad-whitespace\n\n for (model_location, distilled_output_keys, allow_batch_dimension,\n fixed_length_input, export_tflite) in model_params:\n signatures, saved_mod = make_and_export_trill(\n model_location,\n distilled_output_keys,\n allow_batch_dimension,\n fixed_length_input,\n export_tflite)\n if not export_tflite:\n assert signatures is None, signatures\n out_dir = construct_savedmodel_dir(\n FLAGS.export_dir, distilled_output_keys, export_tflite,\n allow_batch_dimension, fixed_length_input)\n tf.saved_model.save(saved_mod, out_dir, signatures)\n if export_tflite:\n tflite_out_file = convert_tflite_file(out_dir)\n test_module(\n tflite_out_file,\n allow_batch_dimension,\n fixed_length_input,\n tflite_only=True)\n test_module(\n out_dir,\n allow_batch_dimension,\n fixed_length_input,\n tflite_only=export_tflite)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('export_dir')\n tf.compat.v2.enable_v2_behavior()\n assert tf.executing_eagerly()\n app.run(main)\n",
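The export flow above wraps a frontend-less model in a tf.train.Checkpoint subclass with a tf.function __call__, exports concrete-function signatures to a SavedModel, and optionally converts the result to TFLite. A tiny stand-in module showing the same mechanics; the embedder, paths, and shapes below are invented for the example:

import tensorflow as tf

class TinyEmbedder(tf.train.Checkpoint):
    """Hypothetical stand-in for the TRILLModule wrapper."""

    def __init__(self, input_len=16000, emb_dim=8):
        super().__init__()
        self.w = tf.Variable(tf.random.normal([input_len, emb_dim]), name="w")

    @tf.function
    def __call__(self, samples):
        # samples: [input_len] float32 waveform; returns a dict of embeddings.
        emb = tf.linalg.matvec(self.w, samples, transpose_a=True)
        return {"embedding": emb}

mod = TinyEmbedder()
# Fix the input signature, then export it under an explicit signature key.
sig = mod.__call__.get_concrete_function(tf.TensorSpec([16000], tf.float32))
tf.saved_model.save(mod, "/tmp/tiny_embedder", signatures={"inference": sig})

# Convert the exported signature to a TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_saved_model(
    "/tmp/tiny_embedder", signature_keys=["inference"])
tflite_bytes = converter.convert()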
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Implementation for Contrastive classification head.\"\"\"\n\nimport tensorflow.compat.v1 as tf\n\n\nclass ClassificationHead(tf.layers.Layer):\n \"\"\"A classification head.\n\n Attributes:\n num_classes: The number of classes to classify into.\n kernel_initializer: An initializer to use for the weights.\n name: Name for this object.\n \"\"\"\n\n def __init__(self,\n num_classes,\n kernel_initializer=tf.initializers.glorot_uniform(),\n name='ClassificationHead',\n **kwargs):\n super(ClassificationHead, self).__init__(name=name, **kwargs)\n\n self.dense_layer = tf.layers.Dense(\n num_classes,\n activation=None,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=None)\n\n def call(self, inputs, training=None):\n del training # unused.\n\n if inputs.shape.rank != 2:\n raise ValueError(\n f'Input shape {inputs.shape} is expected to have rank 2, but does '\n 'not.')\n\n return self.dense_layer(inputs)\n",
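The head above is simply a dense projection from a rank-2 batch of embeddings to class logits. For reference, a TF2 Keras-layer equivalent and its intended call shape (a rewrite for illustration, not the tf.compat.v1 class above):

import tensorflow as tf

class ClassificationHeadSketch(tf.keras.layers.Layer):
    """Dense projection from [batch, embedding_dim] to [batch, num_classes]."""

    def __init__(self, num_classes, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(num_classes, activation=None)

    def call(self, inputs):
        if inputs.shape.rank != 2:
            raise ValueError(f"Expected rank-2 input, got shape {inputs.shape}.")
        return self.dense(inputs)

embeddings = tf.random.normal([4, 128])                        # [batch, embedding_dim]
logits = ClassificationHeadSketch(num_classes=5)(embeddings)   # -> [4, 5]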
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helper functions to add support for magnitude-based model pruning.\n\n # Adds variables and ops to the graph to enable\n # elementwise masking of weights\n apply_mask(weights)\n\n # Returns a list containing the sparsity of each of the weight tensors\n get_weight_sparsity()\n\n # Returns a list of all the masked weight tensorflow variables\n get_masked_weights()\n\n # Returns a list of all the mask tensorflow variables\n get_masks()\n\n # Returns a list of all the thresholds\n get_thresholds()\n\n # Returns a list of all the weight tensors that have been masked\n get_weights()\n\n The Pruning class uses a tf.hparams object to set up the\n parameters for a model pruning. 
Here's a typical usage:\n\n # Parse pruning hyperparameters\n pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)\n\n # Create a pruning object using the pruning_hparams\n p = pruning.Pruning(pruning_hparams)\n\n # Add mask update ops to the graph\n mask_update_op = p.conditional_mask_update_op()\n\n # Add the summaries\n p.add_pruning_summaries()\n\n # Run the op\n session.run(mask_update_op)\n\n # An object of the pruning also accepts externally defined sparsity:\n sparsity = tf.Variable(0.5, name = \"ConstantSparsity\")\n p = pruning.Pruning(pruning_hparams, sparsity=sparsity)\n\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow.compat.v1 as tf\n\nfrom model_pruning.python import pruning_utils\nfrom tensorflow.contrib import training as contrib_training\nfrom tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import\n\nMASK_COLLECTION = 'masks'\nTHRESHOLD_COLLECTION = 'thresholds'\nMASKED_WEIGHT_COLLECTION = 'masked_weights'\nWEIGHT_COLLECTION = 'kernel'\n# The 'weights' part of the name is needed for the quantization library\n# to recognize that the kernel should be quantized.\nMASKED_WEIGHT_NAME = 'weights/masked_weight'\nWEIGHT_GRADIENT_COLLECTION = 'gradient_weights'\nOLD_WEIGHT_COLLECTION = 'old_weights'\nOLD_OLD_WEIGHT_COLLECTION = 'old_old_weights'\n\n\ndef apply_mask(x, scope='', prune_option='weight'):\n \"\"\"Apply mask to a given weight tensor.\n\n Args:\n x: Input weight tensor\n scope: The current variable scope. Defaults to \"\".\n prune_option: pruning option. Defaults to 'weight'. option =\n 'first_order_gradient' means using |weight| * |first order gradient| for\n pruning. option = 'second_order_gradient' means using |weight| * |second\n order gradient| for pruning.\n\n Returns:\n Tensor representing masked_weights\n \"\"\"\n\n mask = pruning_utils.weight_mask_variable(x, scope)\n threshold = pruning_utils.weight_threshold_variable(x, scope)\n # Add masked_weights in the weights namescope so as to make it easier\n # for the quantization library to add quant ops.\n masked_weights = tf.multiply(mask, x, MASKED_WEIGHT_NAME)\n\n if prune_option in ('first_order_gradient', 'second_order_gradient'):\n # absolute value of gradients for gradient based pruning\n gradient = pruning_utils.weight_gradient_variable(x, scope)\n old_weight = pruning_utils.old_weight_variable(x, scope)\n old_old_weight = pruning_utils.old_old_weight_variable(x, scope)\n\n # Make sure the mask for a given variable are not added multiple times to the\n # collection. This is particularly important when applying mask to RNN's\n # weight variables\n if mask not in tf.get_collection_ref(MASK_COLLECTION):\n tf.add_to_collection(THRESHOLD_COLLECTION, threshold)\n tf.add_to_collection(MASK_COLLECTION, mask)\n tf.add_to_collection(MASKED_WEIGHT_COLLECTION, masked_weights)\n tf.add_to_collection(WEIGHT_COLLECTION, x)\n if prune_option in ('first_order_gradient', 'second_order_gradient'):\n tf.add_to_collection(WEIGHT_GRADIENT_COLLECTION, gradient)\n tf.add_to_collection(OLD_WEIGHT_COLLECTION, old_weight)\n tf.add_to_collection(OLD_OLD_WEIGHT_COLLECTION, old_old_weight)\n return masked_weights\n\n\ndef apply_mask_and_return(x, scope='', prune_option='weight'):\n \"\"\"Apply mask to a given weight tensor.\n\n Args:\n x: Input weight tensor\n scope: The current variable scope. Defaults to \"\".\n prune_option: pruning option. 
Defaults to 'weight'. option =\n 'first_order_gradient' means using |weight| * |first order gradient| for\n pruning. option = 'second_order_gradient' means using |weight| * |second\n order gradient| for pruning.\n\n Returns:\n masked_weights: a TensorFlow tensor representing masked weights.\n mask: a TensorFlow tensor representing the pruning mask.\n \"\"\"\n\n mask = pruning_utils.weight_mask_variable(x, scope)\n threshold = pruning_utils.weight_threshold_variable(x, scope)\n # Add masked_weights in the weights namescope so as to make it easier\n # for the quantization library to add quant ops.\n masked_weights = tf.multiply(mask, x, MASKED_WEIGHT_NAME)\n\n if prune_option in ('first_order_gradient', 'second_order_gradient'):\n # absolute value of gradients for gradient based pruning\n gradient = pruning_utils.weight_gradient_variable(x, scope)\n old_weight = pruning_utils.old_weight_variable(x, scope)\n old_old_weight = pruning_utils.old_old_weight_variable(x, scope)\n\n # Make sure the mask for a given variable are not added multiple times to the\n # collection. This is particularly important when applying mask to RNN's\n # weight variables\n if mask not in tf.get_collection_ref(MASK_COLLECTION):\n tf.add_to_collection(THRESHOLD_COLLECTION, threshold)\n tf.add_to_collection(MASK_COLLECTION, mask)\n tf.add_to_collection(MASKED_WEIGHT_COLLECTION, masked_weights)\n tf.add_to_collection(WEIGHT_COLLECTION, x)\n if prune_option in ('first_order_gradient', 'second_order_gradient'):\n tf.add_to_collection(WEIGHT_GRADIENT_COLLECTION, gradient)\n tf.add_to_collection(OLD_WEIGHT_COLLECTION, old_weight)\n tf.add_to_collection(OLD_OLD_WEIGHT_COLLECTION, old_old_weight)\n return [masked_weights, mask]\n\n\ndef get_masked_weights():\n return tf.get_collection(MASKED_WEIGHT_COLLECTION)\n\n\ndef get_masks():\n return tf.get_collection(MASK_COLLECTION)\n\n\ndef get_thresholds():\n return tf.get_collection(THRESHOLD_COLLECTION)\n\n\ndef get_weights():\n return tf.get_collection(WEIGHT_COLLECTION)\n\n\ndef get_gradients():\n return tf.get_collection(WEIGHT_GRADIENT_COLLECTION)\n\n\ndef get_old_weights():\n return tf.get_collection(OLD_WEIGHT_COLLECTION)\n\n\ndef get_old_old_weights():\n return tf.get_collection(OLD_OLD_WEIGHT_COLLECTION)\n\n\ndef get_weight_sparsity():\n \"\"\"Get sparsity of the weights.\n\n Args: None\n\n Returns:\n A list containing the sparsity of each of the weight tensors\n \"\"\"\n masks = get_masks()\n return [tf.nn.zero_fraction(mask) for mask in masks]\n\n\ndef get_pruning_hparams():\n \"\"\"Get a tf.HParams object with the default values for the hyperparameters.\n\n name: string\n name of the pruning specification. Used for adding summaries and ops under\n a common tensorflow name_scope\n begin_pruning_step: integer\n the global step at which to begin pruning\n end_pruning_step: integer\n the global step at which to terminate pruning. Defaults to -1 implying\n that pruning continues till the training stops\n weight_sparsity_map: list of strings\n comma separed list of {weight_variable_name:target sparsity} or\n {regex:target sparsity} pairs.\n For layers/weights not in this list, sparsity as specified by the\n target_sparsity hyperparameter is used.\n Eg. [conv1:0.9,conv2/kernel:0.8]\n block_dims_map: list of strings\n comma separated list of {weight variable name:block_height x block_width}\n or {regex:block_height x block_width} pairs. For layers/weights not in\n this list, block dims are specified by the block_height, block_width\n hyperparameters are used Eg. 
[dense1:4x4,dense2:1x16,dense3:1x1]\n threshold_decay: float\n the decay factor to use for exponential decay of the thresholds\n pruning_frequency: integer\n How often should the masks be updated? (in # of global_steps)\n nbins: integer\n number of bins to use for histogram computation\n block_height: integer\n number of rows in a block (defaults to 1), can be -1 in which\n case it is set to the size of the corresponding weight tensor.\n block_width: integer\n number of cols in a block (defaults to 1), can be -1 in which\n case it is set to the size of the corresponding weight tensor.\n block_pooling_function: string\n Whether to perform average (AVG) or max (MAX) pooling in the block\n (default: AVG)\n initial_sparsity: float\n initial sparsity value\n target_sparsity: float\n target sparsity value\n sparsity_function_begin_step: integer\n the global step at this which the gradual sparsity function begins to\n take effect\n sparsity_function_end_step: integer\n the global step used as the end point for the gradual sparsity function\n sparsity_function_exponent: float\n exponent = 1 is linearly varying sparsity between initial and final.\n exponent > 1 varies more slowly towards the end than the beginning\n use_tpu: False\n Indicates whether to use TPU\n gradient_decay_rate: float\n when prune_option is gradient based pruning, decay factor for gradient\n decay\n prune_option: string\n option = 'weight' means using |weight| for pruning.\n option = 'first_order_gradient' means using |weight| * |first order\n gradient| for pruning.\n option = 'second_order_gradient' means using |weight| * |second order\n gradient| for pruning.\n second order gradient is approximated by |weight + old_old_weight -\n 2*old_weight|.\n option = 'compression' means using compression.\n alpha_decrement_value: only effective when prune_option is 'compression',\n see graph_compression/compression_lib/compression_op.py. The following\n arguments are all only effective when prune_option == 'compression', see\n graph_compression/compression_lib/compression_op.py for details.\n begin_compression_step: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n end_compresson_step: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n compression_frequency: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n compression_option: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n rank: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n update_option: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n run_update_interval_check: only effective when prune_option is 'compression'\n see graph_compression/compression_op.py.\n pruning_fraction: only effective when prune_option is 'compression',\n see graph_compression/compression_op.py.\n use_collection: only effective when prune_option is 'compression',\n update_ops are retrieved from UPDATE_OP_COLLECTION if True,\n otherwise update_ops are obtained from\n matrix_compression_obj.all_update_op() directly. 
Default is\n True.\n\n\n We use the following sparsity function:\n\n num_steps = (sparsity_function_end_step -\n sparsity_function_begin_step)/pruning_frequency\n sparsity(step) = (initial_sparsity - target_sparsity)*\n [1-step/(num_steps -1)]**exponent + target_sparsity\n\n Args: None\n\n Returns:\n tf.HParams object initialized to default values\n\n \"\"\"\n return contrib_training.HParams(\n name='model_pruning',\n begin_pruning_step=0,\n end_pruning_step=-1,\n weight_sparsity_map=[''],\n block_dims_map=[''],\n threshold_decay=0.0,\n pruning_frequency=10,\n nbins=256,\n block_height=1,\n block_width=1,\n block_pooling_function='AVG',\n initial_sparsity=0.0,\n target_sparsity=0.5,\n sparsity_function_begin_step=0,\n sparsity_function_end_step=100,\n sparsity_function_exponent=3.0,\n use_tpu=False,\n gradient_decay_rate=0.99,\n prune_option='weight',\n alpha_decrement_value=0.01,\n begin_compression_step=0,\n end_compression_step=-1,\n compression_frequency=10,\n compression_option=0,\n rank=7,\n block_size=1,\n update_option=0,\n run_update_interval_check=1,\n pruning_fraction=0.4,\n use_collection=True,\n input_block_size=1)\n\n\nclass Pruning(object):\n\n def __init__(self, spec=None, global_step=None, sparsity=None):\n \"\"\"Set up the specification for model pruning.\n\n If a spec is provided, the sparsity is set up based on the sparsity_function\n in the spec. The effect of sparsity_function is overridden if the sparsity\n variable is passed to the constructor. This enables setting up arbitrary\n sparsity profiles externally and passing it to this pruning functions.\n\n Args:\n spec: Pruning spec, a tf.HParams object\n global_step: A tensorflow variable that is used while setting up the\n sparsity function\n sparsity: A tensorflow scalar variable storing the sparsity\n \"\"\"\n\n # Pruning specification\n self._spec = spec if spec else get_pruning_hparams()\n tf.logging.vlog(0, 'Pruning spec...')\n self.print_hparams()\n\n self.matrix_compression_spec = self._spec\n\n # Sanity check for pruning hparams\n self._validate_spec()\n\n # A tensorflow variable that tracks the sparsity function.\n # If not provided as input, the graph must already contain the global_step\n # variable before calling this constructor.\n self._global_step = self._setup_global_step(global_step)\n\n # Stores the tensorflow sparsity variable.\n # Built using self._setup_sparsity() or provided externally\n self._sparsity = (\n sparsity if sparsity is not None else self._setup_sparsity())\n\n # List of tensorflow assignments ops for new masks and thresholds\n self._assign_ops = []\n\n self._assign_gradient_ops = []\n\n self._assign_old_weight_ops = []\n\n self._assign_old_old_weight_ops = []\n\n # Tensorflow variable keeping track of the last global step when the masks\n # and gradients were updated\n self._last_update_step = self._setup_last_update_step()\n self._last_gradient_update_step = self._setup_last_gradient_update_step()\n\n # Block dimensions\n self._block_dims = [self._spec.block_height, self._spec.block_width]\n\n # Block pooling function\n self._block_pooling_function = self._spec.block_pooling_function\n\n # Mapping of layer/weight names and block dims\n self._block_dims_map = self._get_block_dims_map()\n\n # Mapping of weight names and target sparsity\n self._weight_sparsity_map = self._get_weight_sparsity_map()\n\n def _validate_spec(self):\n spec = self._spec\n if spec.begin_pruning_step < 0:\n raise ValueError('Illegal value for begin_pruning_step')\n\n if spec.begin_pruning_step >= 
spec.end_pruning_step:\n if spec.end_pruning_step != -1:\n raise ValueError(\n 'Pruning must begin before it can end. begin_step=%d, end_step=%d.'\n 'Set end_pruning_step to -1 if pruning is required till training'\n 'stops' % (spec.begin_pruning_step, spec.end_pruning_step))\n\n if spec.sparsity_function_begin_step < 0:\n raise ValueError('Illegal value for sparsity_function_begin_step')\n\n if spec.sparsity_function_begin_step >= spec.sparsity_function_end_step:\n raise ValueError('Sparsity function requires begin_step < end_step')\n\n if not 0.0 <= spec.threshold_decay < 1.0:\n raise ValueError('threshold_decay must be in range [0,1)')\n\n if not 0.0 <= spec.initial_sparsity < 1.0:\n raise ValueError('initial_sparsity must be in range [0,1)')\n\n if not 0.0 <= spec.target_sparsity < 1.0:\n raise ValueError('target_sparsity must be in range [0,1)')\n\n if spec.prune_option not in ('weight', 'first_order_gradient',\n 'second_order_gradient'):\n raise ValueError('prune option specified is not supported')\n\n def _setup_global_step(self, global_step):\n graph_global_step = global_step\n if graph_global_step is None:\n graph_global_step = tf.train.get_global_step()\n if not graph_global_step:\n raise ValueError(\n 'Could not get the global step. Either pass it explicitly, or '\n 'ensure that the library is called within a TF graph.')\n\n return tf.cast(graph_global_step, tf.int32)\n\n def _setup_sparsity(self):\n begin_step = self._spec.sparsity_function_begin_step\n end_step = self._spec.sparsity_function_end_step\n initial_sparsity = self._spec.initial_sparsity\n target_sparsity = self._spec.target_sparsity\n exponent = self._spec.sparsity_function_exponent\n\n with tf.name_scope(self._spec.name):\n p = tf.minimum(\n 1.0,\n tf.maximum(\n 0.0,\n tf.div(\n tf.cast(self._global_step - begin_step, tf.float32),\n end_step - begin_step)))\n sparsity = tf.add(\n tf.multiply(initial_sparsity - target_sparsity,\n tf.pow(1 - p, exponent)),\n target_sparsity,\n name='sparsity')\n\n return sparsity\n\n def _setup_last_update_step(self):\n with tf.variable_scope(self._spec.name, use_resource=True) as scope:\n try:\n last_update_step = tf.get_variable(\n 'last_mask_update_step', [],\n initializer=tf.zeros_initializer(),\n trainable=False,\n dtype=tf.int32)\n except ValueError:\n scope.reuse_variables()\n last_update_step = tf.get_variable(\n 'last_mask_update_step', dtype=tf.int32)\n return last_update_step\n\n def _get_block_dims_map(self):\n \"\"\"Returns the map of layer name: block dims.\"\"\"\n block_dims_map = {}\n val_list = self._spec.block_dims_map\n filtered_val_list = [l for l in val_list if l]\n for val in filtered_val_list:\n weight_name, block_dims_str = val.split(':')\n block_dims_str = block_dims_str.split('x')\n if len(block_dims_str) != 2:\n raise ValueError('Expected 2 values for block dim for %s, got %s' %\n (weight_name, block_dims_str))\n block_dims = [int(block_dims_str[0]), int(block_dims_str[1])]\n block_dims_map[re.compile(weight_name)] = block_dims\n\n return block_dims_map\n\n def _get_block_dims(self, weight_name):\n \"\"\"Returns the block dims for the given layer/weight name.\"\"\"\n block_dims_list = [\n block_dims for regexp, block_dims in self._block_dims_map.items()\n if regexp.search(weight_name)\n ]\n if not block_dims_list:\n return self._block_dims\n\n if len(block_dims_list) > 1:\n raise ValueError('Multiple matches in block_dims_map for weight %s' %\n weight_name)\n\n return block_dims_list[0]\n\n def _setup_last_gradient_update_step(self):\n with 
tf.variable_scope(self._spec.name, use_resource=True) as scope:\n try:\n last_gradient_update_step = tf.get_variable(\n 'last_gradient_update_step', [],\n initializer=tf.zeros_initializer(),\n trainable=False,\n dtype=tf.int32)\n except ValueError:\n scope.reuse_variables()\n last_gradient_update_step = tf.get_variable(\n 'last_gradient_update_step', dtype=tf.int32)\n return last_gradient_update_step\n\n def _get_weight_sparsity_map(self):\n \"\"\"Returns the map of weight_name:sparsity parsed from the hparams.\"\"\"\n weight_sparsity_map = {}\n val_list = self._spec.weight_sparsity_map\n filtered_val_list = [l for l in val_list if l]\n for val in filtered_val_list:\n weight_name, sparsity = val.split(':')\n if float(sparsity) >= 1.0:\n raise ValueError('Weight sparsity can not exceed 1.0')\n weight_sparsity_map[re.compile(weight_name)] = float(sparsity)\n\n return weight_sparsity_map\n\n def _get_sparsity(self, weight_name):\n \"\"\"Returns target sparsity for the given layer/weight name.\"\"\"\n target_sparsity = [\n sparsity for regexp, sparsity in self._weight_sparsity_map.items()\n if regexp.search(weight_name)\n ]\n if not target_sparsity:\n return self._sparsity\n\n if len(target_sparsity) > 1:\n raise ValueError('Multiple matches in weight_sparsity_map for weight %s' %\n weight_name)\n # TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize\n # to handle other cases as well.\n return tf.multiply(self._sparsity,\n tf.div(target_sparsity[0], self._spec.target_sparsity))\n\n def _update_mask(self, weights, threshold, gradients): # pylint: disable=unused-argument\n \"\"\"Updates the mask for a given weight tensor.\n\n This functions first computes the cdf of the weight tensor, and estimates\n the threshold value such that 'desired_sparsity' fraction of weights\n have magnitude less than the threshold.\n\n Args:\n weights: The weight tensor that needs to be masked.\n threshold: The current threshold value. The function will compute a new\n threshold and return the exponential moving average using the current\n value of threshold\n gradients: The gradient tensor that is used for salience calculation.\n\n Returns:\n new_threshold: The new value of the threshold based on weights, and\n sparsity at the current global_step\n new_mask: A numpy array of the same size and shape as weights containing\n 0 or 1 to indicate which of the values in weights falls below\n the threshold\n\n Raises:\n ValueError: if sparsity is not defined\n \"\"\"\n if self._sparsity is None:\n raise ValueError('Sparsity variable undefined')\n\n sparsity = self._get_sparsity(weights.op.name)\n with tf.name_scope(weights.op.name + '_pruning_ops'):\n tf.logging.info('Applying option %s pruning', self._spec.prune_option)\n if self._spec.prune_option == 'weight':\n abs_weights = tf.abs(weights)\n elif self._spec.prune_option in ('first_order_gradient',\n 'second_order_gradient'):\n if gradients is None:\n raise ValueError('gradient tensor cannot be None.')\n # gradient variable stores absolute value already\n abs_weights = tf.multiply(tf.abs(weights), gradients)\n else:\n raise ValueError('undefined option')\n\n k = tf.cast(\n tf.round(tf.cast(tf.size(abs_weights), tf.float32) * (1 - sparsity)),\n tf.int32)\n\n # Generate a random shuffling of the weights s.t. 
the tie-breaker on\n # weight magnitude is random uniform.\n shuffling = tf.random_shuffle(\n tf.range(tf.size(abs_weights)))\n shuffling = tf.reshape(shuffling, [-1, 1])\n\n # Flatten the weights and scatter the values randomly.\n abs_weights = tf.reshape(abs_weights, [-1])\n abs_weights = tf.scatter_nd(\n shuffling,\n abs_weights,\n tf.shape(abs_weights))\n\n # Sort the entire array\n _, indices = tf.nn.top_k(abs_weights, k=tf.size(abs_weights))\n\n # `k` is how many non-zero weights we're going to have. Create a new\n # mask where the first `k` elements are set to one and all others are\n # set to zero.\n mask_staging = tf.range(tf.size(abs_weights))\n mask_staging = tf.cast(\n tf.less(mask_staging, k),\n tf.float32)\n\n # Scatter the mask back into the proper positions for the weight matrix.\n indices = tf.reshape(indices, [-1, 1])\n new_mask = tf.scatter_nd(\n indices,\n mask_staging,\n tf.shape(mask_staging))\n\n # Un-shuffle the newly created mask.\n new_mask = tf.reshape(\n tf.gather_nd(\n new_mask,\n shuffling),\n tf.shape(weights))\n return tf.constant(0, tf.float32), new_mask\n\n def _maybe_update_block_mask(self, weights, threshold, gradients=None):\n \"\"\"Performs block-granular masking of the weights.\n\n Block pruning occurs only if the block_height or block_width is > 1 and\n if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise\n pruning occurs.\n Args:\n weights: The weight tensor that needs to be masked.\n threshold: The current threshold value. The function will compute a new\n threshold and return the exponential moving average using the current\n value of threshold\n gradients: The gradient tensor that used for salience calculation.\n\n Returns:\n new_threshold: The new value of the threshold based on weights, and\n sparsity at the current global_step\n new_mask: A numpy array of the same size and shape as weights containing\n 0 or 1 to indicate which of the values in weights falls below\n the threshold\n\n Raises:\n ValueError: if block pooling function is not AVG or MAX\n \"\"\"\n\n block_dims = self._get_block_dims(weights.op.name)\n squeezed_weights = tf.squeeze(weights)\n if squeezed_weights.get_shape().ndims != 2 or block_dims == [1, 1]:\n return self._update_mask(weights, threshold, gradients)\n\n if (self._spec.prune_option in ('first_order_gradient',\n 'second_order_gradient') and\n gradients is None):\n raise ValueError(\n 'Gradient based pruning implementation for block sparsity is not supported.'\n )\n\n for i in range(2):\n if block_dims[i] == -1:\n block_dims[i] = squeezed_weights.get_shape()[i]\n\n if self._block_pooling_function not in ['AVG', 'MAX']:\n raise ValueError('Unknown pooling function for block sparsity: %s' %\n self._block_pooling_function)\n\n with tf.name_scope(weights.op.name + '_pruning_ops'):\n abs_weights = tf.abs(squeezed_weights)\n if gradients is not None:\n abs_gradients = tf.abs(tf.squeeze(gradients))\n\n pool_window = block_dims\n pool_fn = pruning_utils.factorized_pool\n squeeze_axis = None\n if not self._spec.use_tpu:\n pool_fn = tf.nn.pool\n abs_weights = tf.reshape(\n abs_weights,\n [1, abs_weights.get_shape()[0],\n abs_weights.get_shape()[1], 1])\n if gradients is not None:\n # Reshape gradients to be a rank 4 tensor of shape [1, .., .., 1].\n abs_gradients = tf.reshape(\n abs_gradients,\n [1, gradients.get_shape()[0], gradients.get_shape()[1], 1])\n squeeze_axis = [0, 3]\n\n pooled_weights = pool_fn(\n abs_weights,\n window_shape=pool_window,\n pooling_type=self._block_pooling_function,\n 
strides=pool_window,\n padding='SAME',\n name=weights.op.name + '_pooled')\n\n if gradients is not None:\n pooled_gradients = pool_fn(\n abs_gradients,\n window_shape=pool_window,\n pooling_type=self._block_pooling_function,\n strides=pool_window,\n padding='SAME',\n name=gradients.op.name + '_pooled')\n else:\n pooled_gradients = None\n\n if pooled_weights.get_shape().ndims != 2:\n pooled_weights = tf.squeeze(pooled_weights, axis=squeeze_axis)\n\n if gradients is not None and pooled_gradients.get_shape().ndims != 2:\n pooled_gradients = tf.squeeze(pooled_gradients, axis=squeeze_axis)\n\n smoothed_threshold, new_mask = self._update_mask(pooled_weights,\n threshold,\n pooled_gradients)\n\n updated_mask = pruning_utils.expand_tensor(new_mask, block_dims)\n sliced_mask = tf.slice(\n updated_mask, [0, 0],\n [squeezed_weights.get_shape()[0],\n squeezed_weights.get_shape()[1]])\n\n return smoothed_threshold, tf.reshape(sliced_mask, tf.shape(weights))\n\n def _get_assign_old_weight_ops(self):\n if self._assign_old_weight_ops:\n raise ValueError(\n 'Assign op list not empty. _get_old_weight_assign_ops() called twice?'\n )\n\n weights = get_weights()\n old_weights = get_old_weights()\n\n if len(weights) != len(old_weights):\n raise ValueError(\n 'Number of weights %s and number of old_weights %s mismatch' %\n (len(weights), len(old_weights)))\n\n for index, weight in enumerate(weights):\n old_weight = old_weights[index]\n\n self._assign_old_weight_ops.append(\n pruning_utils.variable_assign(old_weight, weight))\n\n def _get_assign_old_old_weight_ops(self):\n if self._assign_old_old_weight_ops:\n raise ValueError(\n 'Assign op list not empty. _get_old_old_weight_assign_ops() called twice?'\n )\n\n old_old_weights = get_old_old_weights()\n old_weights = get_old_weights()\n\n if len(old_old_weights) != len(old_weights):\n raise ValueError(\n 'Number of old_old_weights %s and number of old_weights %s mismatch' %\n (len(old_old_weights), len(old_weights)))\n\n for index, old_old_weight in enumerate(old_old_weights):\n old_weight = old_weights[index]\n\n self._assign_old_old_weight_ops.append(\n pruning_utils.variable_assign(old_old_weight, old_weight))\n\n def _get_assign_gradient_ops(self):\n # Make sure the assignment ops have not already been added to the list\n if self._assign_gradient_ops:\n raise ValueError(\n 'Assign op list not empty. 
_get_mask_assign_ops() called twice?')\n\n weights = get_weights()\n old_weights = get_old_weights()\n old_old_weights = get_old_old_weights()\n gradients = get_gradients()\n\n if len(weights) != len(old_weights):\n raise ValueError(\n 'Number of weights %s and number of old_weights %s mismatch' %\n (len(weights), len(old_weights)))\n\n if len(weights) != len(gradients):\n raise ValueError(\n 'Number of weights %s and number of gradients %s mismatch' %\n (len(weights), len(gradients)))\n\n for index, _ in enumerate(weights):\n weight = weights[index]\n old_weight = old_weights[index]\n old_old_weight = old_old_weights[index]\n gradient = gradients[index]\n\n if weight.shape.as_list() != old_weight.shape.as_list():\n raise ValueError('weight tensor has different shape from old_weight')\n\n if weight.shape.as_list() != gradient.shape.as_list():\n raise ValueError('weight tensor has different shape from gradient')\n\n if weight.shape.as_list() != old_old_weight.shape.as_list():\n raise ValueError('weight tensor has different shape from old_weight')\n\n is_partitioned = isinstance(weight, variables.PartitionedVariable)\n if is_partitioned:\n weight = weight.as_tensor()\n old_weight = old_weight.as_tensor()\n old_old_weight = old_old_weight.as_tensor()\n\n decay = self._spec.gradient_decay_rate\n if self._spec.prune_option == 'first_order_gradient':\n tf.logging.info('Applying first order gradient pruning')\n normalized_weight_delta = tf.nn.l2_normalize(\n tf.abs(weight - old_weight))\n elif self._spec.prune_option == 'second_order_gradient':\n tf.logging.info('Applying second order gradient pruning')\n normalized_weight_delta = tf.nn.l2_normalize(\n tf.abs(weight + old_old_weight - 2 * old_weight))\n else:\n raise ValueError('Unknown prune option. Should not execute this code.')\n new_gradient = decay * gradient + (1 - decay) * normalized_weight_delta\n\n self._assign_gradient_ops.append(\n pruning_utils.variable_assign(gradient, new_gradient))\n\n def _get_mask_assign_ops(self):\n # Make sure the assignment ops have not already been added to the list\n if self._assign_ops:\n raise ValueError(\n 'Assign op list not empty. 
_get_mask_assign_ops() called twice?')\n\n masks = get_masks()\n weights = get_weights()\n thresholds = get_thresholds()\n gradients = get_gradients()\n\n if len(masks) != len(thresholds):\n raise ValueError(\n 'Number of masks %s and number of thresholds %s mismatch' %\n (len(masks), len(thresholds)))\n\n for index, mask in enumerate(masks):\n threshold = thresholds[index]\n weight = weights[index]\n if self._spec.prune_option in ('first_order_gradient',\n 'second_order_gradient'):\n gradient = gradients[index]\n else:\n gradient = None\n\n is_partitioned = isinstance(weight, variables.PartitionedVariable)\n if is_partitioned:\n weight = weight.as_tensor()\n\n new_threshold, new_mask = self._maybe_update_block_mask(\n weight, threshold, gradient)\n self._assign_ops.append(\n pruning_utils.variable_assign(threshold, new_threshold))\n\n self._assign_ops.append(\n pruning_utils.partitioned_variable_assign(mask, new_mask)\n if is_partitioned else pruning_utils.variable_assign(mask, new_mask))\n\n def old_weight_update_op(self):\n with tf.name_scope(self._spec.name):\n if self._spec.prune_option not in ('first_order_gradient',\n 'second_order_gradient'):\n return tf.no_op('gradient_update_no_op')\n if not self._assign_old_weight_ops:\n self._get_assign_old_weight_ops()\n with tf.control_dependencies(self._assign_old_weight_ops):\n tf.logging.info('Updating old weights.')\n return tf.no_op('old_weight_update')\n\n def old_old_weight_update_op(self):\n with tf.name_scope(self._spec.name):\n if self._spec.prune_option != 'second_order_gradient':\n return tf.no_op('gradient_update_no_op')\n if not self._assign_old_old_weight_ops:\n self._get_assign_old_old_weight_ops()\n with tf.control_dependencies(self._assign_old_old_weight_ops):\n tf.logging.info('Updating old old weights.')\n return tf.no_op('old_old_weight_update')\n\n def gradient_update_op(self):\n with tf.name_scope(self._spec.name):\n if self._spec.prune_option not in ('first_order_gradient',\n 'second_order_gradient'):\n return tf.no_op('gradient_update_no_op')\n if not self._assign_gradient_ops:\n self._get_assign_gradient_ops()\n with tf.control_dependencies([\n tf.assign(\n self._last_gradient_update_step,\n self._global_step,\n name='last_gradient_update_step_assign')\n ]):\n with tf.control_dependencies(self._assign_gradient_ops):\n tf.logging.info('Updating gradients.')\n return tf.no_op('gradient_update')\n\n def conditional_gradient_update_op(self):\n\n def maybe_update_gradients():\n with tf.name_scope(self._spec.name):\n is_step_within_pruning_range = tf.logical_and(\n tf.greater_equal(self._global_step, self._spec.begin_pruning_step),\n # If end_pruning_step is negative, keep pruning forever!\n tf.logical_or(\n tf.less_equal(self._global_step, self._spec.end_pruning_step),\n tf.less(self._spec.end_pruning_step, 0)))\n return is_step_within_pruning_range\n\n def gradient_update_op():\n return self.gradient_update_op()\n\n def no_update_op():\n return tf.no_op()\n\n return tf.cond(maybe_update_gradients(), gradient_update_op, no_update_op)\n\n def mask_update_op(self):\n with tf.name_scope(self._spec.name):\n if not self._assign_ops:\n self._get_mask_assign_ops()\n\n grad_update_ops = self.gradient_update_op()\n old_weight_update_ops = self.old_weight_update_op()\n old_old_weight_update_ops = self.old_old_weight_update_op()\n\n with tf.control_dependencies([\n tf.assign(\n self._last_update_step,\n self._global_step,\n name='last_mask_update_step_assign')\n ]):\n with tf.control_dependencies([grad_update_ops]):\n with 
tf.control_dependencies([old_old_weight_update_ops]):\n with tf.control_dependencies([old_weight_update_ops]):\n with tf.control_dependencies(self._assign_ops):\n tf.logging.info('Updating masks.')\n return tf.no_op('mask_update')\n\n def conditional_mask_update_op(self):\n\n def maybe_update_masks():\n with tf.name_scope(self._spec.name):\n is_step_within_pruning_range = tf.logical_and(\n tf.greater_equal(self._global_step, self._spec.begin_pruning_step),\n # If end_pruning_step is negative, keep pruning forever!\n tf.logical_or(\n tf.less_equal(self._global_step, self._spec.end_pruning_step),\n tf.less(self._spec.end_pruning_step, 0)))\n is_pruning_step = tf.less_equal(\n tf.add(self._last_update_step, self._spec.pruning_frequency),\n self._global_step)\n return tf.logical_and(is_step_within_pruning_range, is_pruning_step)\n\n def mask_update_op():\n return self.mask_update_op()\n\n def no_update_op():\n return tf.no_op()\n\n return tf.cond(maybe_update_masks(), mask_update_op, no_update_op)\n\n def add_pruning_summaries(self):\n \"\"\"Adds summaries of weight sparsities and thresholds.\"\"\"\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n tf.summary.scalar('last_gradient_update_step',\n self._last_gradient_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n gradients = get_gradients()\n\n for mask, threshold, gradient in zip(masks, thresholds, gradients):\n tf.summary.scalar(mask.op.name + '/sparsity', tf.nn.zero_fraction(mask))\n tf.summary.scalar(threshold.op.name + '/threshold', threshold)\n tf.summary.scalar(gradient.op.name + '/gradient', tf.norm(gradient))\n tf.summary.scalar(gradient.op.name + '/gradient-sparsity',\n tf.nn.zero_fraction(gradient))\n tf.summary.histogram(gradient.op.name + '/abs.gradient', gradient)\n\n def apply_mask(self, x, scope=''):\n return apply_mask(x, scope, self._spec.prune_option)\n\n def print_hparams(self):\n tf.logging.vlog(0, self._spec.to_json())\n\n def get_spec(self):\n \"\"\"Get the spec / hparams used to create the ApplyCompression object.\"\"\"\n return self._spec\n",
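The pruning code above ramps sparsity using the polynomial schedule quoted in the get_pruning_hparams docstring, sparsity(step) = (initial_sparsity - target_sparsity) * (1 - p)**exponent + target_sparsity, where p is the clipped fraction of the ramp completed between sparsity_function_begin_step and sparsity_function_end_step. Below is a minimal, framework-free sketch of that schedule in plain Python; the function name polynomial_sparsity is ours (not part of model_pruning) and simply mirrors the math in Pruning._setup_sparsity, using the default hparam values as defaults.

# Illustrative sketch only: mirrors the gradual sparsity schedule described
# in get_pruning_hparams() / Pruning._setup_sparsity(); not the library API.
def polynomial_sparsity(step,
                        begin_step=0,
                        end_step=100,
                        initial_sparsity=0.0,
                        target_sparsity=0.5,
                        exponent=3.0):
    """Sparsity value at a given global step, ramping from initial to target."""
    # Fraction of the sparsity ramp completed, clipped to [0, 1].
    p = min(1.0, max(0.0, (step - begin_step) / float(end_step - begin_step)))
    # (initial - target) * (1 - p)**exponent + target
    return (initial_sparsity - target_sparsity) * (1.0 - p) ** exponent + target_sparsity

# At begin_step the sparsity equals initial_sparsity; by end_step it has
# reached target_sparsity, moving faster at the start when exponent > 1.
assert abs(polynomial_sparsity(0) - 0.0) < 1e-6
assert abs(polynomial_sparsity(100) - 0.5) < 1e-6

With exponent = 1 the ramp is linear; larger exponents front-load the sparsification, which is why the docstring notes that exponent > 1 "varies more slowly towards the end than the beginning".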
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Replay buffer that performs relabeling.\"\"\"\n\nimport gin\nimport numpy as np\nimport tensorflow as tf\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\nfrom tf_agents.utils import common\n\n\[email protected]\nclass RelabellingReplayBuffer(tf_uniform_replay_buffer.TFUniformReplayBuffer):\n \"\"\"A replay buffer that relabels experience.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the replay buffer.\n\n Args:\n *args: Arguments.\n **kwargs: Keyword arguments.\n\n Additional arguments:\n task_distribution: an instance of multitask.TaskDistribution.\n sample_batch_size: (int) the batch size.\n num_parallel_calls: (int) number of parallel calls for sampling.\n num_future_states: (int) number of future states to consider for\n future state relabeling.\n actor: the actor network.\n critic: the critic network.\n gamma: (float) the discount factor.\n relabel_type: (str) indicator of the relabeling strategy.\n candidate_task_type: (str) within each back, should we use the states,\n next_states, or originally commanded tasks as possible tasks when\n relabeling.\n relabel_prob: (float) fraction of experience to relabel when sampling.\n keep_current_goal: (bool) for ``last'' and ``final'' relabeling,\n should we add both the originally commanded task and the relabeled\n task when inserting new experience into the replay buffer.\n normalize_cols: (bool) Normalizing the columns has the effect of\n including the partition function.\n \"\"\"\n self._task_distribution = kwargs.pop(\"task_distribution\")\n self._sample_batch_size = kwargs.pop(\"sample_batch_size\")\n self._num_parallel_calls = kwargs.pop(\"num_parallel_calls\")\n self._num_future_states = kwargs.pop(\"num_future_states\", 4)\n self._actor = kwargs.pop(\"actor\")\n self._critic = kwargs.pop(\"critic\")\n self._gamma = kwargs.pop(\"gamma\")\n self._relabel_type = kwargs.pop(\"relabel_type\", None)\n assert self._relabel_type in [None, \"last\", \"future\", \"soft\", \"random\"]\n self._candidate_task_type = kwargs.pop(\"candidate_task_type\", \"states\")\n assert self._candidate_task_type in [\"states\", \"next_states\", \"tasks\"]\n self._relabel_prob = kwargs.pop(\"relabel_prob\", 1.0)\n self._keep_current_goal = kwargs.pop(\"keep_current_goal\", False)\n\n self._normalize_cols = kwargs.pop(\"normalize_cols\", True)\n\n self._iterator = None\n super(RelabellingReplayBuffer, self).__init__(*args, **kwargs)\n\n def get_batch(self):\n if self._iterator is None:\n dataset = self.as_dataset(\n sample_batch_size=self._sample_batch_size,\n num_parallel_calls=self._num_parallel_calls,\n num_steps=2,\n ).prefetch(3)\n self._iterator = iter(dataset)\n experience, unused_info = next(self._iterator)\n if self._relabel_type in [\"soft\", \"random\"]:\n experience = self._soft_relabel(experience)\n elif self._relabel_type in [\"last\", \"future\"]:\n # Reassign the next_states to have the same goal 
as the current states\n _, tasks = self._task_distribution.split(experience.observation[:, 0])\n next_states, _ = self._task_distribution.split(experience.observation[:,\n 1])\n next_states_and_tasks = self._task_distribution.combine(\n next_states, tasks)\n new_observation = tf.concat(\n [\n experience.observation[:, 0][:, None], next_states_and_tasks[:,\n None]\n ],\n axis=1,\n )\n assert new_observation.shape == experience.observation.shape\n experience = experience.replace(observation=new_observation)\n if self._relabel_type is not None:\n # Recompute rewards and done flags\n states, tasks = self._task_distribution.split(experience.observation[:,\n 0])\n next_states, next_tasks = self._task_distribution.split(\n experience.observation[:, 1])\n rewards, dones = self._task_distribution.evaluate(states,\n experience.action[:, 0],\n tasks)\n # Strictly speaking, we don't need to relabel the next rewards and next\n # dones because they end up being thrown away. Only the current rewards\n # and dones end up being important.\n next_rewards, next_dones = self._task_distribution.evaluate(\n next_states, experience.action[:, 1], next_tasks)\n\n new_rewards = tf.concat([rewards[:, None], next_rewards[:, None]], axis=1)\n new_dones = tf.concat([dones[:, None], next_dones[:, None]], axis=1)\n # 0 if episode is done, 1 if episode is continuing\n new_discount = 1.0 - tf.cast(new_dones, tf.float32)\n assert new_rewards.shape == experience.reward.shape\n assert new_discount.shape == experience.discount.shape\n experience = experience.replace(reward=new_rewards, discount=new_discount)\n return experience\n\n def _soft_relabel(self, experience):\n \"\"\"Reassigns tasks to each state and next state.\n\n Does not recompute the rewards or done flags.\n\n Args:\n experience: The experience that we want to relabel with inverse RL.\n Returns:\n relabeled_experience: The relabeled experience.\n \"\"\"\n raise NotImplementedError\n\n def _add_batch(self, items):\n \"\"\"Adds a trajectory to the replay buffer.\"\"\"\n assert items[0].is_first()\n for item in items:\n # The items are batched already, so we remove the first dimension.\n assert item.observation.shape[1:] == self.data_spec.observation.shape\n super(RelabellingReplayBuffer, self)._add_batch(item)\n\n\nclass GoalRelabellingReplayBuffer(RelabellingReplayBuffer):\n \"\"\"Implements a replay buffer for relabeling goals.\"\"\"\n\n def _add_batch(self, items):\n \"\"\"Adds a trajectory to the replay buffer.\"\"\"\n batch_size = len(items)\n if self._relabel_type in [\"future\", \"last\"]:\n relabelled_items = []\n for i in range(batch_size):\n if self._relabel_type == \"future\":\n relabel_indices = np.random.randint(\n i, batch_size, size=self._num_future_states)\n else:\n relabel_indices = [batch_size - 1]\n if self._keep_current_goal:\n relabelled_items.append(items[i])\n for j in relabel_indices:\n state, _ = self._task_distribution.split(items[i].observation)\n next_state, _ = self._task_distribution.split(items[j].observation)\n task = self._task_distribution.state_to_task(next_state)\n state_and_task = self._task_distribution.combine(state, task)\n new_item = items[i].replace(observation=state_and_task)\n relabelled_items.append(new_item)\n items = relabelled_items\n super(GoalRelabellingReplayBuffer, self)._add_batch(items)\n\n @tf.function\n def _soft_relabel(self, experience):\n # experience.observation.shape = [B x T=2 x obs_dim+state_dim]\n states, orig_tasks = self._task_distribution.split(\n experience.observation[:, 0])\n if 
self._task_distribution.tasks is None:\n tasks = orig_tasks\n else:\n tasks = tf.constant(self._task_distribution.tasks, dtype=tf.float32)\n next_states, _ = self._task_distribution.split(experience.observation[:, 1])\n if self._candidate_task_type == \"states\":\n candidate_tasks = self._task_distribution.state_to_task(states)\n elif self._candidate_task_type == \"next_states\":\n candidate_tasks = self._task_distribution.state_to_task(next_states)\n else:\n assert self._candidate_task_type == \"tasks\"\n candidate_tasks = tasks\n\n actions = experience.action[:, 0]\n num_tasks = tasks.shape[0]\n batch_size = states.shape[0]\n task_dim = tasks.shape[1]\n obs_dim = states.shape[1]\n action_dim = actions.shape[1]\n action_spec = self._actor.output_tensor_spec\n\n states_tiled = tf.tile(states[:, None], [1, num_tasks, 1]) # B x B x D\n states_tiled = tf.reshape(states_tiled,\n [batch_size * num_tasks, obs_dim]) # B*B x D\n actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1]) # B x B x D\n actions_tiled = tf.reshape(actions_tiled,\n [batch_size * num_tasks, action_dim]) # B*B x D\n tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1]) # B x B x D\n tasks_tiled = tf.reshape(tasks_tiled,\n [batch_size * num_tasks, task_dim]) # B*B x D\n\n next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])\n next_states_tiled = tf.reshape(next_states_tiled,\n [batch_size * num_tasks, obs_dim]) # B*B x D\n next_relabelled_obs = self._task_distribution.combine(\n next_states_tiled, tasks_tiled)\n\n sampled_actions_tiled = self._actor(\n next_relabelled_obs, step_type=(), network_state=())[0].sample()\n critic_input = (next_relabelled_obs, sampled_actions_tiled)\n q_vals, _ = self._critic(critic_input, training=False)\n q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks))\n\n rewards, dones = self._task_distribution.evaluate(states_tiled,\n actions_tiled,\n tasks_tiled)\n dones = tf.cast(dones, tf.float32)\n rewards_vec = tf.reshape(rewards, (batch_size, num_tasks))\n dones_vec = tf.reshape(dones, (batch_size, num_tasks))\n\n relabelled_obs = self._task_distribution.combine(states_tiled, tasks_tiled)\n action_distribution = self._actor(\n relabelled_obs, step_type=(), network_state=())[0]\n log_pi = common.log_probability(action_distribution, actions_tiled,\n action_spec)\n log_pi_vec = tf.reshape(log_pi, (batch_size, num_tasks))\n\n logits_vec = (\n rewards_vec - log_pi_vec + self._gamma * (1.0 - dones_vec) * q_vals_vec)\n if self._relabel_type == \"random\":\n logits_vec = tf.ones_like(logits_vec) # Hack to make sampling random\n\n ## End new version\n if self._normalize_cols:\n logits_vec = logits_vec - tf.math.reduce_logsumexp(\n logits_vec, axis=0)[None]\n relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)\n\n ### Metrics\n global_step = tf.compat.v1.train.get_or_create_global_step()\n orig_indices = tf.range(\n self._sample_batch_size, dtype=relabel_indices.dtype)\n with tf.name_scope(\"relabelling\"):\n # How often are the originally commanded goals most optimal?\n opt_indices = tf.argmax(logits_vec, axis=1)\n orig_is_opt = opt_indices == orig_indices\n orig_opt_frac = tf.reduce_mean(tf.cast(orig_is_opt, tf.float32))\n tf.compat.v2.summary.scalar(\n name=\"orig_task_optimal\", data=orig_opt_frac, step=global_step)\n\n # How often is the relabelled goal optimal?\n # The relabel_indices are [B, 1], so we need to remove the extra dim.\n relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices\n relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, 
tf.float32))\n tf.compat.v2.summary.scalar(\n name=\"relabel_task_optimal\", data=relabel_opt_frac, step=global_step)\n\n # What are the average Q values of the original tasks?\n if batch_size == num_tasks:\n indices = tf.transpose(tf.stack([orig_indices, orig_indices], axis=0))\n orig_q_vals = tf.gather_nd(logits_vec, indices)\n tf.compat.v2.summary.scalar(\n name=\"orig_q_vals\",\n data=tf.reduce_mean(orig_q_vals),\n step=global_step,\n )\n\n # What are the average Q values of the relabelled tasks?\n indices = tf.transpose(\n tf.stack([orig_indices, tf.squeeze(relabel_indices)], axis=0))\n relabel_q_vals = tf.gather_nd(logits_vec, indices)\n tf.compat.v2.summary.scalar(\n name=\"relabel_q_vals\",\n data=tf.reduce_mean(relabel_q_vals),\n step=global_step,\n )\n\n max_q = tf.reduce_max(logits_vec, axis=1)\n tf.compat.v2.summary.scalar(\n name=\"max_q\", data=tf.reduce_mean(max_q), step=global_step)\n\n ### End metrics\n\n # For both state-centric and goal-centric relabelling, the implementation of\n # mixing is the same: we randomly replace some of the indices with the\n # diagonal.\n relabelled_tasks = tf.gather(candidate_tasks, tf.squeeze(relabel_indices))\n\n if self._relabel_prob == 0:\n relabelled_tasks = orig_tasks\n elif 0 < self._relabel_prob < 1:\n logits = tf.log([1.0 - self._relabel_prob, self._relabel_prob])\n mask = tf.squeeze(\n tf.random.categorical(\n logits[None], num_samples=self._sample_batch_size))\n mask = tf.cast(mask, tf.float32)[:, None]\n relabelled_tasks = mask * orig_tasks + (1 - mask) * relabelled_tasks\n\n states_and_tasks = self._task_distribution.combine(states, relabelled_tasks)\n next_states_and_tasks = self._task_distribution.combine(\n next_states, relabelled_tasks)\n new_observation = tf.concat(\n [states_and_tasks[:, None], next_states_and_tasks[:, None]], axis=1)\n assert new_observation.shape == experience.observation.shape\n experience = experience.replace(observation=new_observation)\n return experience\n",
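The _soft_relabel method above scores every candidate task for every transition with logits = reward - log_pi + gamma * (1 - done) * Q, optionally subtracts each column's log-partition function (the normalize_cols option), and then draws one task index per transition with tf.random.categorical. The following is a standalone NumPy sketch of just that sampling step, under the assumption of a synthetic [batch, num_tasks] logits matrix; the helper name sample_relabel_indices is ours and this is not the tf_agents implementation.

# Standalone sketch of the soft-relabeling sampling step described in
# GoalRelabellingReplayBuffer._soft_relabel. The logits here are synthetic;
# in the real code they combine rewards, log-probabilities and Q-values.
import numpy as np

def sample_relabel_indices(logits_vec, normalize_cols=True, rng=None):
    """Sample one candidate-task index per row of a [batch, num_tasks] logits matrix."""
    rng = np.random.default_rng() if rng is None else rng
    if normalize_cols:
        # Subtract the per-column logsumexp (a stable log-partition function),
        # mirroring the normalize_cols branch above.
        col_max = logits_vec.max(axis=0)
        col_lse = np.log(np.sum(np.exp(logits_vec - col_max), axis=0)) + col_max
        logits_vec = logits_vec - col_lse[None, :]
    # Per-row categorical sampling, as tf.random.categorical does.
    probs = np.exp(logits_vec - logits_vec.max(axis=1, keepdims=True))
    probs /= probs.sum(axis=1, keepdims=True)
    return np.array([rng.choice(probs.shape[1], p=p) for p in probs])

# Example with a synthetic 4-transition x 4-candidate-task logits matrix.
relabel_indices = sample_relabel_indices(np.random.randn(4, 4))

Per-row constants cancel in the softmax, but the per-column normalization does change which tasks are preferred across the batch, which is the sense in which the original docstring says normalizing the columns "has the effect of including the partition function".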
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data reader, based on tensorflow/examples/speech_commands.\"\"\"\n\nimport hashlib\nimport math\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\nfrom absl import logging\nimport numpy as np\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\nfrom kws_streaming.layers import modes\n\n# pylint: disable=g-direct-tensorflow-import\n# below ops are on a depreciation path in tf, so we temporarily disable pylint\n# to be able to import them: TODO(rybakov) - use direct tf\n\nfrom tensorflow.python.ops import gen_audio_ops as audio_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\ntf.disable_eager_execution()\n\n# If it's available, load the specialized feature generator. If this doesn't\n# work, try building with bazel instead of running the Python script directly.\ntry:\n from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top\nexcept ImportError:\n frontend_op = None\n# pylint: enable=g-direct-tensorflow-import\n\nMAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M\nSILENCE_LABEL = '_silence_'\nSILENCE_INDEX = 0\nUNKNOWN_WORD_LABEL = '_unknown_'\nUNKNOWN_WORD_INDEX = 1\nBACKGROUND_NOISE_DIR_NAME = '_background_noise_'\nRANDOM_SEED = 59185\nMAX_ABS_INT16 = 32768\n\n\ndef prepare_words_list(wanted_words, split_data):\n \"\"\"Prepends common tokens to the custom word list.\n\n Args:\n wanted_words: List of strings containing the custom words.\n split_data: True - split data automatically; False - user splits the data\n\n Returns:\n List with the standard silence and unknown tokens added.\n \"\"\"\n if split_data:\n # with automatic data split we append two more labels\n return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words\n else:\n # data already split by user, no need to add other labels\n return wanted_words\n\n\ndef which_set(filename, validation_percentage, testing_percentage):\n \"\"\"Determines which data partition the file should belong to.\n\n We want to keep files in the same training, validation, or testing sets even\n if new ones are added over time. This makes it less likely that testing\n samples will accidentally be reused in training when long runs are restarted\n for example. To keep this stability, a hash of the filename is taken and used\n to determine which set it should belong to. This determination only depends on\n the name and the set proportions, so it won't change as other files are added.\n\n It's also useful to associate particular files as related (for example words\n spoken by the same person), so anything after '_nohash_' in a filename is\n ignored for set determination. 
This ensures that 'bobby_nohash_0.wav' and\n 'bobby_nohash_1.wav' are always in the same set, for example.\n\n Args:\n filename: File path of the data sample.\n validation_percentage: How much of the data set to use for validation.\n testing_percentage: How much of the data set to use for testing.\n\n Returns:\n String, one of 'training', 'validation', or 'testing'.\n \"\"\"\n base_name = os.path.basename(filename)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put a wav in, so the data set creator has a way of\n # grouping wavs that are close variations of each other.\n hash_name = re.sub(r'_nohash_.*$', '', base_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_WAVS_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_WAVS_PER_CLASS))\n if percentage_hash < validation_percentage:\n result = 'validation'\n elif percentage_hash < (testing_percentage + validation_percentage):\n result = 'testing'\n else:\n result = 'training'\n return result\n\n\ndef load_wav_file(filename):\n \"\"\"Loads an audio file and returns a float PCM-encoded array of samples.\n\n Args:\n filename: Path to the .wav file to load.\n\n Returns:\n Numpy array holding the sample data as floats between -1.0 and 1.0.\n \"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n return sess.run(\n wav_decoder,\n feed_dict={wav_filename_placeholder: filename}).audio.flatten()\n\n\ndef save_wav_file(filename, wav_data, sample_rate):\n \"\"\"Saves audio sample data to a .wav audio file.\n\n Args:\n filename: Path to save the file to.\n wav_data: 2D array of float PCM-encoded audio data.\n sample_rate: Samples per second to encode in the file.\n \"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n sample_rate_placeholder = tf.placeholder(tf.int32, [])\n wav_data_placeholder = tf.placeholder(tf.float32, [None, 1])\n wav_encoder = tf.audio.encode_wav(wav_data_placeholder,\n sample_rate_placeholder)\n wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)\n sess.run(\n wav_saver,\n feed_dict={\n wav_filename_placeholder: filename,\n sample_rate_placeholder: sample_rate,\n wav_data_placeholder: np.reshape(wav_data, (-1, 1))\n })\n\n\nclass AudioProcessor(object):\n \"\"\"Handles loading, partitioning, and preparing audio training data.\n\n Args:\n flags: data and model parameters, described at model_train_eval.py\n \"\"\"\n\n def __init__(self, flags):\n wanted_words = flags.wanted_words.split(',')\n if flags.data_dir:\n self.data_dir = flags.data_dir\n if flags.split_data:\n self.maybe_download_and_extract_dataset(flags.data_url, self.data_dir)\n self.prepare_data_index(flags.silence_percentage,\n flags.unknown_percentage, wanted_words,\n flags.validation_percentage,\n flags.testing_percentage, flags.split_data)\n else:\n 
self.prepare_split_data_index(wanted_words, flags.split_data)\n\n self.prepare_background_data()\n self.prepare_processing_graph(flags)\n\n def maybe_download_and_extract_dataset(self, data_url, dest_directory):\n \"\"\"Download and extract data set tar file.\n\n If the data set we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a\n directory.\n If the data_url is none, don't download anything and expect the data\n directory to contain the correct files already.\n\n Args:\n data_url: Web location of the tar file containing the data set.\n dest_directory: File path to extract data to.\n \"\"\"\n if not data_url:\n return\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n try:\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n except:\n logging.error(\n 'Failed to download URL: %s to folder: %s\\n'\n 'Please make sure you have enough free space and'\n ' an internet connection', data_url, filepath)\n raise\n print()\n statinfo = os.stat(filepath)\n logging.info('Successfully downloaded %s (%d bytes)', filename,\n statinfo.st_size)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n def prepare_data_index(self, silence_percentage, unknown_percentage,\n wanted_words, validation_percentage,\n testing_percentage, split_data):\n \"\"\"Prepares a list of the samples organized by set and label.\n\n The training loop needs a list of all the available data, organized by\n which partition it should belong to, and with ground truth labels attached.\n This function analyzes the folders below the `data_dir`, figures out the\n right\n labels for each file based on the name of the subdirectory it belongs to,\n and uses a stable hash to assign it to a data set partition.\n\n Args:\n silence_percentage: How much of the resulting data should be background.\n unknown_percentage: How much should be audio outside the wanted classes.\n wanted_words: Labels of the classes we want to be able to recognize.\n validation_percentage: How much of the data set to use for validation.\n testing_percentage: How much of the data set to use for testing.\n split_data: True - split data automatically; False - user splits the data\n\n Returns:\n Dictionary containing a list of file information for each set partition,\n and a lookup map for each class to determine its numeric index.\n\n Raises:\n Exception: If expected files are not found.\n \"\"\"\n # Make sure the shuffling and picking of unknowns is deterministic.\n random.seed(RANDOM_SEED)\n wanted_words_index = {}\n for index, wanted_word in enumerate(wanted_words):\n wanted_words_index[wanted_word] = index + 2\n self.data_index = {'validation': [], 'testing': [], 'training': []}\n unknown_index = {'validation': [], 'testing': [], 'training': []}\n all_words = {}\n # Look through all the subfolders to find audio samples\n search_path = os.path.join(self.data_dir, '*', '*.wav')\n for wav_path in gfile.Glob(search_path):\n _, word = os.path.split(os.path.dirname(wav_path))\n word = word.lower()\n # Treat the '_background_noise_' folder as a special case, since we expect\n # it to contain long audio samples we mix in to improve training.\n if 
word == BACKGROUND_NOISE_DIR_NAME:\n continue\n all_words[word] = True\n set_index = which_set(wav_path, validation_percentage, testing_percentage)\n # If it's a known class, store its detail, otherwise add it to the list\n # we'll use to train the unknown label.\n if word in wanted_words_index:\n self.data_index[set_index].append({'label': word, 'file': wav_path})\n else:\n unknown_index[set_index].append({'label': word, 'file': wav_path})\n if not all_words:\n raise Exception('No .wavs found at ' + search_path)\n for index, wanted_word in enumerate(wanted_words):\n if wanted_word not in all_words:\n raise Exception('Expected to find ' + wanted_word +\n ' in labels but only found ' +\n ', '.join(all_words.keys()))\n # We need an arbitrary file to load as the input for the silence samples.\n # It's multiplied by zero later, so the content doesn't matter.\n silence_wav_path = self.data_index['training'][0]['file']\n for set_index in ['validation', 'testing', 'training']:\n set_size = len(self.data_index[set_index])\n silence_size = int(math.ceil(set_size * silence_percentage / 100))\n for _ in range(silence_size):\n self.data_index[set_index].append({\n 'label': SILENCE_LABEL,\n 'file': silence_wav_path\n })\n # Pick some unknowns to add to each partition of the data set.\n random.shuffle(unknown_index[set_index])\n unknown_size = int(math.ceil(set_size * unknown_percentage / 100))\n self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])\n # Make sure the ordering is random.\n for set_index in ['validation', 'testing', 'training']:\n random.shuffle(self.data_index[set_index])\n # Prepare the rest of the result data structure.\n self.words_list = prepare_words_list(wanted_words, split_data)\n self.word_to_index = {}\n for word in all_words:\n if word in wanted_words_index:\n self.word_to_index[word] = wanted_words_index[word]\n else:\n self.word_to_index[word] = UNKNOWN_WORD_INDEX\n self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX\n\n def validate_dir_structure(self, data_dir, dirs):\n for dir_name in dirs + [BACKGROUND_NOISE_DIR_NAME]:\n sub_dir_name = os.path.join(data_dir, dir_name)\n if not os.path.isdir(sub_dir_name):\n raise IOError('Directory is not found ' + sub_dir_name)\n\n def prepare_split_data_index(self, wanted_words, split_data):\n \"\"\"Prepares a list of the samples organized by set and label.\n\n The training loop needs a list of all the available data, organized by\n which partition it should belong to, and with ground truth labels attached.\n This function analyzes the folders below the `data_dir`,\n where `data_dir` has to contain folders (prepared by user):\n testing\n training\n validation\n _background_noise_ - contains data which are used for adding background\n noise to training data only\n\n Args:\n wanted_words: Labels of the classes we want to be able to recognize.\n split_data: True - split data automatically; False - user splits the data\n\n Returns:\n Dictionary containing a list of file information for each set partition,\n and a lookup map for each class to determine its numeric index.\n\n Raises:\n Exception: If expected files are not found.\n \"\"\"\n # Make sure the shuffling and picking of unknowns is deterministic.\n random.seed(RANDOM_SEED)\n\n dirs = ['testing', 'training', 'validation']\n\n self.validate_dir_structure(self.data_dir, dirs)\n\n wanted_words_index = {}\n for index, wanted_word in enumerate(wanted_words):\n wanted_words_index[wanted_word] = index\n\n self.words_list = prepare_words_list(wanted_words, split_data)\n\n 
self.data_index = {'validation': [], 'testing': [], 'training': []}\n\n for set_index in dirs:\n all_words = {}\n # Look through all the subfolders in set_index to find audio samples\n search_path = os.path.join(\n os.path.join(self.data_dir, set_index), '*', '*.wav')\n for wav_path in gfile.Glob(search_path):\n _, word = os.path.split(os.path.dirname(wav_path))\n word = word.lower()\n # Treat the '_background_noise_' folder as a special case,\n # it contains long audio samples we mix in to improve training.\n if word == BACKGROUND_NOISE_DIR_NAME:\n continue\n all_words[word] = True\n\n # If it's a known class, store its detail, otherwise raise error\n if word in wanted_words_index:\n self.data_index[set_index].append({'label': word, 'file': wav_path})\n else:\n raise Exception('Unknown word ' + word)\n\n if not all_words:\n raise IOError('No .wavs found at ' + search_path)\n for index, wanted_word in enumerate(wanted_words):\n if wanted_word not in all_words:\n raise IOError('Expected to find ' + wanted_word +\n ' in labels but only found ' +\n ', '.join(all_words.keys()))\n\n # Make sure the ordering is random.\n for set_index in ['validation', 'testing', 'training']:\n random.shuffle(self.data_index[set_index])\n\n # Prepare the rest of the result data structure.\n self.word_to_index = {}\n for word in all_words:\n if word in wanted_words_index:\n self.word_to_index[word] = wanted_words_index[word]\n else:\n raise Exception('Unknown word ' + word)\n\n def prepare_background_data(self):\n \"\"\"Searches a folder for background noise audio, and loads it into memory.\n\n It's expected that the background audio samples will be in a subdirectory\n named '_background_noise_' inside the 'data_dir' folder, as .wavs that match\n the sample rate of the training data, but can be much longer in duration.\n\n If the '_background_noise_' folder doesn't exist at all, this isn't an\n error, it's just taken to mean that no background noise augmentation should\n be used. 
If the folder does exist, but it's empty, that's treated as an\n error.\n\n Returns:\n List of raw PCM-encoded audio samples of background noise.\n\n Raises:\n Exception: If files aren't found in the folder.\n \"\"\"\n self.background_data = []\n background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)\n if not os.path.exists(background_dir):\n return self.background_data\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,\n '*.wav')\n for wav_path in gfile.Glob(search_path):\n wav_data = sess.run(\n wav_decoder,\n feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()\n self.background_data.append(wav_data)\n if not self.background_data:\n raise Exception('No background wav files were found in ' + search_path)\n\n def prepare_processing_graph(self, flags):\n \"\"\"Builds a TensorFlow graph to apply the input distortions.\n\n Creates a graph that loads a WAVE file, decodes it, scales the volume,\n shifts it in time, adds in background noise, calculates a spectrogram, and\n then builds an MFCC fingerprint from that.\n\n This must be called with an active TensorFlow session running, and it\n creates multiple placeholder inputs, and one output:\n\n - wav_filename_placeholder_: Filename of the WAV to load.\n - foreground_volume_placeholder_: How loud the main clip should be.\n - foreground_resampling_placeholder_: Controls signal stretching/squeezing\n - time_shift_padding_placeholder_: Where to pad the clip.\n - time_shift_offset_placeholder_: How much to move the clip in time.\n - background_data_placeholder_: PCM sample data for background noise.\n - background_volume_placeholder_: Loudness of mixed-in background.\n - output_: Output 2D fingerprint of processed audio or raw audio.\n\n Args:\n flags: data and model parameters, described at model_train.py\n\n Raises:\n ValueError: If the preprocessing mode isn't recognized.\n Exception: If the preprocessor wasn't compiled in.\n \"\"\"\n with tf.get_default_graph().name_scope('data'):\n desired_samples = flags.desired_samples\n self.wav_filename_placeholder_ = tf.placeholder(\n tf.string, [], name='wav_filename')\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = tf.audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(\n tf.float32, [], name='foreground_volume')\n # signal resampling to generate more training data\n # it will stretch or squeeze input signal proportinally to:\n self.foreground_resampling_placeholder_ = tf.placeholder(tf.float32, [])\n\n if self.foreground_resampling_placeholder_ != 1.0:\n image = tf.expand_dims(wav_decoder.audio, 0)\n image = tf.expand_dims(image, 2)\n shape = tf.shape(wav_decoder.audio)\n image_resized = tf.image.resize(\n images=image,\n size=(tf.cast((tf.cast(shape[0], tf.float32) *\n self.foreground_resampling_placeholder_),\n tf.int32), 1),\n preserve_aspect_ratio=False)\n image_resized_cropped = tf.image.resize_with_crop_or_pad(\n image_resized,\n target_height=desired_samples,\n target_width=1,\n )\n image_resized_cropped = tf.squeeze(image_resized_cropped, axis=[0, 3])\n scaled_foreground = tf.multiply(image_resized_cropped,\n self.foreground_volume_placeholder_)\n 
else:\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(\n tf.int32, [2, 2], name='time_shift_padding')\n self.time_shift_offset_placeholder_ = tf.placeholder(\n tf.int32, [2], name='time_shift_offset')\n padded_foreground = tf.pad(\n tensor=scaled_foreground,\n paddings=self.time_shift_padding_placeholder_,\n mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground,\n self.time_shift_offset_placeholder_,\n [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(\n tf.float32, [desired_samples, 1], name='background_data')\n self.background_volume_placeholder_ = tf.placeholder(\n tf.float32, [], name='background_volume')\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n\n if flags.preprocess == 'raw':\n # background_clamp dims: [time, channels]\n # remove channel dim\n self.output_ = tf.squeeze(background_clamp, axis=1)\n # below options are for backward compatibility with previous\n # version of hotword detection on microcontrollers\n # in this case audio feature extraction is done separately from\n # neural net and user will have to manage it.\n elif flags.preprocess == 'mfcc':\n # Run the spectrogram and MFCC ops to get a 2D audio: Short-time FFTs\n # background_clamp dims: [time, channels]\n spectrogram = audio_ops.audio_spectrogram(\n background_clamp,\n window_size=flags.window_size_samples,\n stride=flags.window_stride_samples,\n magnitude_squared=flags.fft_magnitude_squared)\n # spectrogram: [channels/batch, frames, fft_feature]\n\n # extract mfcc features from spectrogram by audio_ops.mfcc:\n # 1 Input is spectrogram frames.\n # 2 Weighted spectrogram into bands using a triangular mel filterbank\n # 3 Logarithmic scaling\n # 4 Discrete cosine transform (DCT), return lowest dct_coefficient_count\n mfcc = audio_ops.mfcc(\n spectrogram=spectrogram,\n sample_rate=flags.sample_rate,\n upper_frequency_limit=flags.mel_upper_edge_hertz,\n lower_frequency_limit=flags.mel_lower_edge_hertz,\n filterbank_channel_count=flags.mel_num_bins,\n dct_coefficient_count=flags.dct_num_features)\n # mfcc: [channels/batch, frames, dct_coefficient_count]\n # remove channel dim\n self.output_ = tf.squeeze(mfcc, axis=0)\n elif flags.preprocess == 'micro':\n if not frontend_op:\n raise Exception(\n 'Micro frontend op is currently not available when running'\n ' TensorFlow directly from Python, you need to build and run'\n ' through Bazel')\n int16_input = tf.cast(\n tf.multiply(background_clamp, MAX_ABS_INT16), tf.int16)\n # audio_microfrontend does:\n # 1. A slicing window function of raw audio\n # 2. Short-time FFTs\n # 3. Filterbank calculations\n # 4. Noise reduction\n # 5. PCAN Auto Gain Control\n # 6. 
Logarithmic scaling\n\n # int16_input dims: [time, channels]\n micro_frontend = frontend_op.audio_microfrontend(\n int16_input,\n sample_rate=flags.sample_rate,\n window_size=flags.window_size_ms,\n window_step=flags.window_stride_ms,\n num_channels=flags.mel_num_bins,\n upper_band_limit=flags.mel_upper_edge_hertz,\n lower_band_limit=flags.mel_lower_edge_hertz,\n out_scale=1,\n out_type=tf.float32)\n # int16_input dims: [frames, num_channels]\n self.output_ = tf.multiply(micro_frontend, (10.0 / 256.0))\n else:\n raise ValueError('Unknown preprocess mode \"%s\" (should be \"raw\", '\n ' \"mfcc\", or \"micro\")' % (flags.preprocess))\n\n def set_size(self, mode):\n \"\"\"Calculates the number of samples in the dataset partition.\n\n Args:\n mode: Which partition, must be 'training', 'validation', or 'testing'.\n\n Returns:\n Number of samples in the partition.\n \"\"\"\n return len(self.data_index[mode])\n\n def get_data(self, how_many, offset, flags, background_frequency,\n background_volume_range, time_shift, mode, resample_offset,\n volume_augmentation_offset, sess):\n \"\"\"Gather samples from the data set, applying transformations as needed.\n\n When the mode is 'training', a random selection of samples will be returned,\n otherwise the first N clips in the partition will be used. This ensures that\n validation always uses the same samples, reducing noise in the metrics.\n\n Args:\n how_many: Desired number of samples to return. -1 means the entire\n contents of this partition.\n offset: Where to start when fetching deterministically.\n flags: data and model parameters, described at model_train.py\n background_frequency: How many clips will have background noise, 0.0 to\n 1.0.\n background_volume_range: How loud the background noise will be.\n time_shift: How much to randomly shift the clips by in time.\n It shifts audio data in range from -time_shift to time_shift.\n mode: Which partition to use, must be 'training', 'validation', or\n 'testing'.\n resample_offset: resample input signal - stretch it or squeeze by 0..0.15\n If 0 - then not resampling.\n volume_augmentation_offset: it is used for raw audio volume control.\n During training volume multiplier will be sampled from\n 1.0 - volume_augmentation_offset ... 
1.0 + volume_augmentation_offset\n sess: TensorFlow session that was active when processor was created.\n\n Returns:\n List of sample data for the transformed samples, and list of label indexes\n\n Raises:\n ValueError: If background samples are too short.\n \"\"\"\n # Pick one of the partitions to choose samples from.\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n if flags.pick_deterministically and mode == 'training':\n # it is a special case:\n sample_count = how_many\n else:\n sample_count = max(0, min(how_many, len(candidates) - offset))\n\n # Data and labels will be populated and returned.\n input_data_shape = modes.get_input_data_shape(flags, modes.Modes.TRAINING)\n data = np.zeros((sample_count,) + input_data_shape)\n labels = np.zeros(sample_count)\n desired_samples = flags.desired_samples\n use_background = self.background_data and (mode == 'training')\n pick_deterministically = (mode !=\n 'training') or flags.pick_deterministically\n # Use the processing graph we created earlier to repeatedly to generate the\n # final output sample data we'll use in training.\n for i in xrange(offset, offset + sample_count):\n # Pick which audio sample to use.\n if how_many == -1 or pick_deterministically:\n # during inference offset is 0,\n # but during training offset can be 0 or\n # training_step * batch_size, so 'i' can go beyond array size\n sample_index = i % len(candidates)\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n # If we're time shifting, set up the offset for this sample.\n if time_shift > 0:\n time_shift_amount = np.random.randint(-time_shift, time_shift)\n else:\n time_shift_amount = 0\n if time_shift_amount > 0:\n time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n time_shift_offset = [0, 0]\n else:\n time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n time_shift_offset = [-time_shift_amount, 0]\n\n resample = 1.0\n if mode == 'training' and resample_offset != 0.0:\n resample = np.random.uniform(\n low=resample - resample_offset, high=resample + resample_offset)\n input_dict = {\n self.wav_filename_placeholder_: sample['file'],\n self.time_shift_padding_placeholder_: time_shift_padding,\n self.time_shift_offset_placeholder_: time_shift_offset,\n self.foreground_resampling_placeholder_: resample,\n }\n # Choose a section of background noise to mix in.\n if use_background:\n background_index = np.random.randint(len(self.background_data))\n background_samples = self.background_data[background_index]\n if len(background_samples) <= flags.desired_samples:\n raise ValueError('Background sample is too short! 
Need more than %d'\n ' samples but only %d were found' %\n (flags.desired_samples, len(background_samples)))\n background_offset = np.random.randint(\n 0,\n len(background_samples) - flags.desired_samples)\n background_clipped = background_samples[background_offset:(\n background_offset + desired_samples)]\n background_reshaped = background_clipped.reshape([desired_samples, 1])\n if np.random.uniform(0, 1) < background_frequency:\n background_volume = np.random.uniform(0, background_volume_range)\n else:\n background_volume = 0\n else:\n background_reshaped = np.zeros([desired_samples, 1])\n background_volume = 0\n input_dict[self.background_data_placeholder_] = background_reshaped\n input_dict[self.background_volume_placeholder_] = background_volume\n # If we want silence, mute out the main sample but leave the background.\n if sample['label'] == SILENCE_LABEL:\n input_dict[self.foreground_volume_placeholder_] = 0\n else:\n foreground_volume = 1.0 # multiplier of audio signal\n # in training mode produce audio data with different volume\n if mode == 'training' and volume_augmentation_offset != 0.0:\n foreground_volume = np.random.uniform(\n low=foreground_volume - volume_augmentation_offset,\n high=foreground_volume + volume_augmentation_offset)\n\n input_dict[self.foreground_volume_placeholder_] = foreground_volume\n # Run the graph to produce the output audio.\n data_tensor = sess.run(self.output_, feed_dict=input_dict)\n data[i - offset, :] = data_tensor\n label_index = self.word_to_index[sample['label']]\n labels[i - offset] = label_index\n return data, labels\n\n def get_features_for_wav(self, wav_filename, flags, sess):\n \"\"\"Applies the feature transformation process to the input_wav.\n\n Runs the feature generation process (generally producing a spectrogram from\n the input samples) on the WAV file. This can be useful for testing and\n verifying implementations being run on other platforms.\n\n Args:\n wav_filename: The path to the input audio file.\n flags: data and model parameters, described at model_train.py\n sess: TensorFlow session that was active when processor was created.\n\n Returns:\n Numpy data array containing the generated features.\n \"\"\"\n desired_samples = flags.desired_samples\n input_dict = {\n self.wav_filename_placeholder_: wav_filename,\n self.time_shift_padding_placeholder_: [[0, 0], [0, 0]],\n self.time_shift_offset_placeholder_: [0, 0],\n self.background_data_placeholder_: np.zeros([desired_samples, 1]),\n self.background_volume_placeholder_: 0,\n self.foreground_volume_placeholder_: 1,\n self.foreground_resampling_placeholder_: 1.0,\n }\n # Run the graph to produce the output audio.\n data_tensor = sess.run([self.output_], feed_dict=input_dict)\n return data_tensor\n\n def get_unprocessed_data(self, how_many, flags, mode):\n \"\"\"Retrieve sample data for the given partition, with no transformations.\n\n Args:\n how_many: Desired number of samples to return. 
-1 means the entire\n contents of this partition.\n flags: data and model parameters, described at model_train.py\n mode: Which partition to use, must be 'training', 'validation', or\n 'testing'.\n\n Returns:\n List of sample data for the samples, and list of labels in one-hot form.\n \"\"\"\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = how_many\n desired_samples = flags.desired_samples\n words_list = self.words_list\n data = np.zeros((sample_count, desired_samples))\n labels = []\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n foreground_volume_placeholder = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n foreground_volume_placeholder)\n for i in range(sample_count):\n if how_many == -1:\n sample_index = i\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n input_dict = {wav_filename_placeholder: sample['file']}\n if sample['label'] == SILENCE_LABEL:\n input_dict[foreground_volume_placeholder] = 0\n else:\n input_dict[foreground_volume_placeholder] = 1\n data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()\n label_index = self.word_to_index[sample['label']]\n labels.append(words_list[label_index])\n return data, labels\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for feature utils.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom etcmodel import feature_utils\n\n\nclass TensorUtilsTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_relative_position_generator_init(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n self.assertEqual(3, relative_pos_gen.max_distance)\n self.assertEqual(False, relative_pos_gen.ignore_direction)\n self.assertEqual(7, relative_pos_gen.relative_vocab_size)\n self.assertEqual(6, relative_pos_gen.left_pad_value)\n self.assertEqual(3, relative_pos_gen.right_pad_value)\n\n def test_relative_position_generator_init_ignore_direction(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(\n max_distance=3, ignore_direction=True)\n\n self.assertEqual(3, relative_pos_gen.max_distance)\n self.assertEqual(True, relative_pos_gen.ignore_direction)\n self.assertEqual(4, relative_pos_gen.relative_vocab_size)\n self.assertEqual(3, relative_pos_gen.left_pad_value)\n self.assertEqual(3, relative_pos_gen.right_pad_value)\n\n def test_relative_position_generator_init_max_distance_0(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=0)\n\n self.assertEqual(0, relative_pos_gen.max_distance)\n self.assertEqual(False, relative_pos_gen.ignore_direction)\n self.assertEqual(1, relative_pos_gen.relative_vocab_size)\n self.assertEqual(0, relative_pos_gen.left_pad_value)\n self.assertEqual(0, relative_pos_gen.right_pad_value)\n\n def test_relative_position_generator_init_invalid_arguments(self):\n with self.assertRaises(ValueError):\n feature_utils.RelativePositionGenerator(max_distance=-1)\n\n def test_make_relative_att_ids_padding_case(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [[\n [0, 1, 2, 3, 3, 3], #\n [4, 0, 1, 2, 3, 3], #\n [5, 4, 0, 1, 2, 3], #\n [6, 5, 4, 0, 1, 2], #\n [6, 6, 5, 4, 0, 1], #\n [6, 6, 6, 5, 4, 0], #\n ]]\n self.assertAllEqual(expected, relative_pos_gen.make_relative_att_ids(6))\n\n def test_make_relative_att_ids_padding_case_ignore_direction(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(\n max_distance=3, ignore_direction=True)\n\n expected = [[\n [0, 1, 2, 3, 3, 3], #\n [1, 0, 1, 2, 3, 3], #\n [2, 1, 0, 1, 2, 3], #\n [3, 2, 1, 0, 1, 2], #\n [3, 3, 2, 1, 0, 1], #\n [3, 3, 3, 2, 1, 0], #\n ]]\n self.assertAllEqual(expected, relative_pos_gen.make_relative_att_ids(6))\n\n def test_make_relative_att_ids_trimming_case(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=9)\n\n expected = [[\n [0, 1, 2, 3, 4], #\n [10, 0, 1, 2, 3], #\n [11, 10, 0, 1, 2], #\n [12, 11, 10, 0, 1], #\n [13, 12, 11, 10, 0], #\n ]]\n self.assertAllEqual(expected, relative_pos_gen.make_relative_att_ids(5))\n\n def test_make_relative_att_ids_no_pad_or_trim_case(self):\n relative_pos_gen = 
feature_utils.RelativePositionGenerator(max_distance=4)\n\n expected = [[\n [0, 1, 2, 3, 4], #\n [5, 0, 1, 2, 3], #\n [6, 5, 0, 1, 2], #\n [7, 6, 5, 0, 1], #\n [8, 7, 6, 5, 0], #\n ]]\n self.assertAllEqual(expected, relative_pos_gen.make_relative_att_ids(5))\n\n def test_make_relative_att_ids_max_distance_0(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=0)\n\n expected = [[\n [0, 0, 0, 0], #\n [0, 0, 0, 0], #\n [0, 0, 0, 0], #\n [0, 0, 0, 0], #\n ]]\n self.assertAllEqual(expected, relative_pos_gen.make_relative_att_ids(4))\n\n def test_make_relative_att_ids_batch_size_2(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ],\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ]\n ]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_relative_att_ids(seq_len=5, batch_size=2))\n\n def test_make_relative_att_ids_batch_size_2_tensor(self):\n dummy_batch = tf.ones([2, 5])\n\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ],\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ]\n ]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_relative_att_ids(\n seq_len=5, batch_size=tf.shape(dummy_batch)[0]))\n\n def test_overwrite_relative_att_ids_outside_segments(self):\n\n # batch_size = 2, seq_len = 5, max_distance = 3\n rel_att_ids = [\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ],\n [\n [0, 1, 2, 3, 3], #\n [4, 0, 1, 2, 3], #\n [5, 4, 0, 1, 2], #\n [6, 5, 4, 0, 1], #\n [6, 6, 5, 4, 0], #\n ]\n ]\n\n segment_ids = [[10, 10, 20, 30, 30], [10, 20, 20, 10, 10]]\n overwrite_value = 100\n\n expected_rel_att_ids = [\n [\n [0, 1, 100, 100, 100], #\n [4, 0, 100, 100, 100], #\n [100, 100, 0, 100, 100], #\n [100, 100, 100, 0, 1], #\n [100, 100, 100, 4, 0], #\n ],\n [\n [0, 100, 100, 3, 3], #\n [100, 0, 1, 100, 100], #\n [100, 4, 0, 100, 100], #\n [6, 100, 100, 0, 1], #\n [6, 100, 100, 4, 0], #\n ]\n ]\n\n self.assertAllEqual(\n expected_rel_att_ids,\n feature_utils.overwrite_relative_att_ids_outside_segments(\n rel_att_ids=rel_att_ids,\n segment_ids=segment_ids,\n overwrite_value=overwrite_value))\n\n def test_make_relative_att_ids_invalid_arguments(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n with self.assertRaises(ValueError):\n relative_pos_gen.make_relative_att_ids(0)\n\n with self.assertRaises(ValueError):\n relative_pos_gen.make_relative_att_ids(seq_len=5, batch_size=0)\n\n def test_make_local_relative_att_ids_padding_case(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [[\n [6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3], #\n [6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3], #\n [6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3], #\n [6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3], #\n ]]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(seq_len=4, local_radius=5))\n\n def test_make_local_relative_att_ids_padding_case_ignore_direction(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(\n max_distance=3, ignore_direction=True)\n\n expected = [[\n [3, 3, 3, 2, 1, 0, 1, 2, 3, 3, 3], 
#\n [3, 3, 3, 2, 1, 0, 1, 2, 3, 3, 3], #\n [3, 3, 3, 2, 1, 0, 1, 2, 3, 3, 3], #\n [3, 3, 3, 2, 1, 0, 1, 2, 3, 3, 3], #\n ]]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(seq_len=4, local_radius=5))\n\n def test_make_local_relative_att_ids_trimming_case(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=9)\n\n expected = [[\n [13, 12, 11, 10, 0, 1, 2, 3, 4], #\n [13, 12, 11, 10, 0, 1, 2, 3, 4], #\n [13, 12, 11, 10, 0, 1, 2, 3, 4], #\n ]]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(seq_len=3, local_radius=4))\n\n def test_make_local_relative_att_ids_no_pad_or_trim_case(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=4)\n\n expected = [[\n [8, 7, 6, 5, 0, 1, 2, 3, 4], #\n [8, 7, 6, 5, 0, 1, 2, 3, 4], #\n [8, 7, 6, 5, 0, 1, 2, 3, 4], #\n ]]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(seq_len=3, local_radius=4))\n\n def test_make_local_relative_att_ids_max_distance_0(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=0)\n\n expected = [[\n [0, 0, 0, 0, 0], #\n [0, 0, 0, 0, 0], #\n ]]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(seq_len=2, local_radius=2))\n\n def test_make_local_relative_att_ids_batch_size_2(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [\n [\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n ],\n [\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n ],\n ]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(\n seq_len=3, local_radius=4, batch_size=2))\n\n def test_make_local_relative_att_ids_batch_size_2_tensor(self):\n dummy_batch = tf.ones([2, 5])\n\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n expected = [\n [\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n ],\n [\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n [6, 6, 5, 4, 0, 1, 2, 3, 3], #\n ],\n ]\n self.assertAllEqual(\n expected,\n relative_pos_gen.make_local_relative_att_ids(\n seq_len=3, local_radius=4, batch_size=tf.shape(dummy_batch)[0]))\n\n def test_make_local_relative_att_ids_invalid_arguments(self):\n relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)\n\n with self.assertRaises(ValueError):\n relative_pos_gen.make_local_relative_att_ids(seq_len=0, local_radius=3)\n\n with self.assertRaises(ValueError):\n relative_pos_gen.make_local_relative_att_ids(seq_len=5, local_radius=0)\n\n with self.assertRaises(ValueError):\n relative_pos_gen.make_local_relative_att_ids(\n seq_len=5, local_radius=3, batch_size=0)\n\n def test_make_att_mask_from_input_mask(self):\n input_mask = [\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0],\n ]\n\n expected = [\n [\n [1, 1, 1, 0, 0, 0], #\n [1, 1, 1, 0, 0, 0], #\n [1, 1, 1, 0, 0, 0], #\n [0, 0, 0, 1, 1, 1], #\n [0, 0, 0, 1, 1, 1], #\n [0, 0, 0, 1, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [0, 0, 0, 0, 0, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 1], #\n [1, 
1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n ], #\n ]\n self.assertAllEqual(expected,\n feature_utils.make_att_mask_from_input_mask(input_mask))\n\n def test_make_segmented_att_mask(self):\n segment_ids = [\n [0, 0, 1, 1, 0, 0],\n [2, 2, 2, 2, 2, 2],\n [0, 0, 3, 0, 3, 0],\n [0, 5, 4, 3, 2, 1],\n ]\n\n expected = [\n [\n [1, 1, 0, 0, 1, 1], #\n [1, 1, 0, 0, 1, 1], #\n [0, 0, 1, 1, 0, 0], #\n [0, 0, 1, 1, 0, 0], #\n [1, 1, 0, 0, 1, 1], #\n [1, 1, 0, 0, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n ], #\n [\n [1, 1, 0, 1, 0, 1], #\n [1, 1, 0, 1, 0, 1], #\n [0, 0, 1, 0, 1, 0], #\n [1, 1, 0, 1, 0, 1], #\n [0, 0, 1, 0, 1, 0], #\n [1, 1, 0, 1, 0, 1], #\n ], #\n [\n [1, 0, 0, 0, 0, 0], #\n [0, 1, 0, 0, 0, 0], #\n [0, 0, 1, 0, 0, 0], #\n [0, 0, 0, 1, 0, 0], #\n [0, 0, 0, 0, 1, 0], #\n [0, 0, 0, 0, 0, 1], #\n ], #\n ]\n self.assertAllEqual(expected,\n feature_utils.make_segmented_att_mask(segment_ids))\n\n def test_make_att_mask_from_breakpoints(self):\n att_breakpoints = [\n [0, 1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1],\n ]\n\n expected = [\n [\n [1, 1, 0, 0, 0, 0], #\n [1, 1, 0, 0, 0, 0], #\n [0, 0, 1, 1, 0, 0], #\n [0, 0, 1, 1, 0, 0], #\n [0, 0, 0, 0, 1, 1], #\n [0, 0, 0, 0, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n ], #\n [\n [1, 0, 0, 0, 0, 0], #\n [0, 1, 1, 1, 1, 1], #\n [0, 1, 1, 1, 1, 1], #\n [0, 1, 1, 1, 1, 1], #\n [0, 1, 1, 1, 1, 1], #\n [0, 1, 1, 1, 1, 1], #\n ], #\n [\n [1, 0, 0, 0, 0, 0], #\n [0, 1, 0, 0, 0, 0], #\n [0, 0, 1, 0, 0, 0], #\n [0, 0, 0, 1, 0, 0], #\n [0, 0, 0, 0, 1, 0], #\n [0, 0, 0, 0, 0, 1], #\n ], #\n ]\n self.assertAllEqual(\n expected, feature_utils.make_att_mask_from_breakpoints(att_breakpoints))\n\n def test_make_att_mask_from_breakpoints_use_starting_breakpoints(self):\n att_breakpoints = [\n [0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1],\n ]\n\n expected = [\n [\n [1, 1, 0, 0, 0, 0], #\n [1, 1, 0, 0, 0, 0], #\n [0, 0, 1, 1, 0, 0], #\n [0, 0, 1, 1, 0, 0], #\n [0, 0, 0, 0, 1, 1], #\n [0, 0, 0, 0, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1], #\n ], #\n [\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0], #\n [0, 0, 0, 0, 0, 1], #\n ], #\n [\n [1, 0, 0, 0, 0, 0], #\n [0, 1, 0, 0, 0, 0], #\n [0, 0, 1, 0, 0, 0], #\n [0, 0, 0, 1, 0, 0], #\n [0, 0, 0, 0, 1, 0], #\n [0, 0, 0, 0, 0, 1], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_att_mask_from_breakpoints(\n att_breakpoints, use_starting_breakpoints=True))\n\n def test_make_local_segmented_att_mask(self):\n segment_ids = [\n [0, 0, 1, 0, 1, 0, 1, 1],\n [2, 2, 2, 2, 2, 2, 2, 2],\n [4, 3, 3, 3, 4, 1, 1, 1],\n [0, 6, 5, 4, 3, 2, 1, 0],\n ]\n\n expected = [\n [\n [0, 0, 1, 1, 0], #\n [0, 1, 1, 0, 1], #\n [0, 0, 1, 0, 1], #\n [1, 0, 1, 0, 1], #\n [1, 0, 1, 0, 1], #\n [1, 0, 1, 0, 0], #\n [1, 0, 1, 1, 0], #\n [0, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 0, 0], #\n 
[0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_local_segmented_att_mask(\n segment_ids, local_radius=2))\n\n def test_make_local_segmented_att_mask_uneven_blocking_case(self):\n segment_ids = [\n [0, 0, 1, 0, 1, 0, 1, 1, 2, 2],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n\n expected = [\n [\n [0, 0, 0, 1, 1, 0, 1], #\n [0, 0, 1, 1, 0, 1, 0], #\n [0, 0, 0, 1, 0, 1, 0], #\n [1, 1, 0, 1, 0, 1, 0], #\n [0, 1, 0, 1, 0, 1, 1], #\n [0, 1, 0, 1, 0, 0, 0], #\n [0, 1, 0, 1, 1, 0, 0], #\n [1, 0, 1, 1, 0, 0, 0], #\n [0, 0, 0, 1, 1, 0, 0], #\n [0, 0, 1, 1, 0, 0, 0], #\n ], #\n [\n [0, 0, 0, 1, 1, 1, 1], #\n [0, 0, 1, 1, 1, 1, 1], #\n [0, 1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1, 1, 0], #\n [1, 1, 1, 1, 1, 0, 0], #\n [1, 1, 1, 1, 0, 0, 0], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_local_segmented_att_mask(\n segment_ids, local_radius=3))\n\n def test_make_local_segmented_att_mask_single_block_case(self):\n segment_ids = [\n [0, 1],\n [0, 0],\n ]\n\n expected = [\n [\n [0, 0, 0, 1, 0, 0, 0], #\n [0, 0, 0, 1, 0, 0, 0], #\n ], #\n [\n [0, 0, 0, 1, 1, 0, 0], #\n [0, 0, 1, 1, 0, 0, 0], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_local_segmented_att_mask(\n segment_ids, local_radius=3))\n\n def test_make_local_segmented_att_mask_static_shape(self):\n # This test is only relevant for TF v1 session mode. 
If the batch size\n # is statically unknown (None), we want to make sure all shapes in the\n # output other than batch size are still statically known.\n\n # We use `placeholder_with_default` to simulate the TF v1 situation where\n # the static `batch_size` is unknown.\n segment_ids = tf.compat.v1.placeholder_with_default(\n np.zeros([1, 8]), shape=[None, 8])\n\n local_radius = 2\n result = feature_utils.make_local_segmented_att_mask(\n segment_ids, local_radius=local_radius)\n\n self.assertAllEqual([8, 2 * local_radius + 1], result.shape.as_list()[1:])\n\n def test_make_local_att_mask_from_breakpoints(self):\n att_breakpoints = [\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n ]\n\n expected = [\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_local_att_mask_from_breakpoints(\n att_breakpoints, local_radius=2))\n\n def test_make_local_att_mask_from_breakpoints_use_starting_breakpoints(self):\n att_breakpoints = [\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n ]\n\n expected = [\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 1], #\n [1, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 1, 1], #\n [0, 1, 1, 1, 0], #\n [1, 1, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n ], #\n [\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n [0, 0, 1, 0, 0], #\n ], #\n ]\n self.assertAllEqual(\n expected,\n feature_utils.make_local_att_mask_from_breakpoints(\n att_breakpoints, local_radius=2, use_starting_breakpoints=True))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.max",
"tensorflow.compat.as_text",
"tensorflow.io.gfile.makedirs",
"numpy.transpose"
],
[
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.expand_dims",
"tensorflow.gather",
"tensorflow.square"
],
[
"tensorflow.io.TFRecordWriter",
"tensorflow.data.experimental.get_single_element",
"tensorflow.device",
"tensorflow.constant",
"tensorflow.Variable",
"numpy.random.choice",
"tensorflow.train.Checkpoint",
"tensorflow.train.load_checkpoint"
],
[
"tensorflow.executing_eagerly",
"tensorflow.train.Checkpoint",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.train.checkpoints_iterator",
"tensorflow.keras.metrics.CategoricalCrossentropy",
"tensorflow.keras.metrics.Accuracy",
"tensorflow.argmax",
"tensorflow.summary.create_file_writer"
],
[
"tensorflow.python.keras.engine.functional.reconstruct_from_config",
"tensorflow.python.keras.models._clone_layers_and_model_config"
],
[
"tensorflow.compat.v1.gfile.Open"
],
[
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.train.get_or_create_global_step"
],
[
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.cumsum",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.to_int32",
"tensorflow.compat.v1.truncated_normal_initializer",
"tensorflow.compat.v1.math.add",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.nn.log_softmax",
"tensorflow.compat.v1.data.Dataset.list_files",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.math.add_n",
"tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.compat.v1.math.top_k",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.nn.bias_add"
],
[
"tensorflow.debugging.assert_equal",
"tensorflow.constant",
"tensorflow.executing_eagerly",
"tensorflow.shape",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.random.uniform",
"tensorflow.io.gfile.makedirs",
"tensorflow.cast",
"tensorflow.keras.backend.batch_flatten",
"tensorflow.reshape",
"tensorflow.saved_model.save",
"tensorflow.map_fn",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.TensorSpec",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.layers.Dense",
"tensorflow.compat.v1.initializers.glorot_uniform"
],
[
"tensorflow.compat.v1.logical_and",
"tensorflow.compat.v1.no_op",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.add",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.pow",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.norm",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.gather_nd",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.nn.zero_fraction",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.div",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.get_collection_ref",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.cast",
"tensorflow.contrib.training.HParams",
"tensorflow.compat.v1.logging.vlog",
"tensorflow.compat.v1.less_equal",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.compat.v1.train.get_global_step",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.summary.histogram",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.squeeze"
],
[
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"numpy.random.randint",
"tensorflow.squeeze",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.gather_nd",
"tensorflow.math.reduce_logsumexp",
"tensorflow.compat.v2.summary.scalar",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.random.categorical",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.log"
],
[
"tensorflow.compat.v1.shape",
"tensorflow.lite.experimental.microfrontend.python.ops.audio_microfrontend_op.audio_microfrontend",
"tensorflow.python.ops.gen_audio_ops.audio_spectrogram",
"tensorflow.compat.v1.add",
"numpy.random.randint",
"tensorflow.python.ops.io_ops.write_file",
"tensorflow.compat.v1.audio.decode_wav",
"numpy.reshape",
"tensorflow.compat.v1.multiply",
"tensorflow.python.ops.io_ops.read_file",
"tensorflow.compat.v1.audio.encode_wav",
"numpy.zeros",
"tensorflow.python.ops.gen_audio_ops.mfcc",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.image.resize_with_crop_or_pad",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.expand_dims",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.placeholder",
"numpy.random.uniform",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.pad"
],
[
"tensorflow.ones",
"tensorflow.test.main",
"numpy.zeros",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
davidinouye/destructive-deep-learning | [
"632add7a9731347e050d271ceebb24251e1d8e01"
] | [
"scripts/icml_2018_experiment.py"
] | [
"\"\"\"ICML 2018 experiment for MNIST and CIFAR-10.\"\"\"\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\nimport warnings\n\nimport numpy as np\nimport scipy.stats # Needed for standard error of the mean scipy.stats.sem\nfrom sklearn.base import clone\nfrom sklearn.decomposition import PCA\n\n# Add the directory of this script\nsys.path.append(os.path.dirname(os.path.realpath(__file__))) # noqa E402\n# Add directory for ddl library\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) # noqa E402\n\n# isort:imports-firstparty\nfrom ddl.base import CompositeDestructor\nfrom ddl.deep import DeepDestructorCV\nfrom ddl.externals.mlpack import MlpackDensityTreeEstimator\nfrom ddl.independent import IndependentDensity, IndependentDestructor, IndependentInverseCdf\nfrom ddl.linear import BestLinearReconstructionDestructor\nfrom ddl.local import FeatureGroupsDestructor, ImageFeaturePairs\nfrom ddl.tree import TreeDensity, TreeDestructor\nfrom ddl.univariate import HistogramUnivariateDensity, ScipyUnivariateDensity\nfrom maf_data import CIFAR10_ALPHA, MNIST_ALPHA, get_maf_data\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_experiment(data_name, model_name, model_kwargs=None):\n \"\"\"\n\n Parameters\n ----------\n data_name :\n model_name :\n model_kwargs :\n\n Returns\n -------\n\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n # Setup\n experiment_filename = model_kwargs['experiment_filename']\n experiment_label = model_kwargs['experiment_label']\n _setup_loggers(experiment_filename)\n try:\n git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']\n ).decode('ascii')[:-1]\n except subprocess.CalledProcessError:\n git_hash = 'unknown'\n logger.debug('Current git hash = %s' % git_hash)\n\n # Load data\n logger.debug('Loading data for %s' % experiment_label)\n data_dict = get_maf_data(data_name)\n X_train, X_validation, X_test = (\n data_dict['X_train'], data_dict['X_validation'], data_dict['X_test'])\n n_train, n_validation, n_test = (_X.shape[0] for _X in (X_train, X_validation, X_test))\n\n # Setup cv and refit parameters\n X_train_val = np.vstack((X_train, X_validation))\n model_kwargs['cv'] = [(np.arange(n_train), n_train + np.arange(n_validation))]\n model_kwargs['refit'] = False\n\n # Load model\n deep_destructor = _get_model(data_name, model_name, model_kwargs=model_kwargs)\n\n # Fit destructor\n logger.debug('Starting training for %s' % experiment_label)\n start_time = time.time()\n deep_destructor.fit(X_train_val, y=None, X_test=X_test)\n train_time = time.time() - start_time\n logger.debug('Finished training for %s' % experiment_label)\n logger.debug('%s: Time to train = %g s or %g minutes or %g hours'\n % (experiment_label, train_time, train_time / 60, train_time / 60 / 60))\n\n # Get test score\n start_time = time.time()\n test_scores = deep_destructor.score_samples(X_test)\n score_time = time.time() - start_time\n test_score = np.mean(test_scores)\n test_score_stderr = scipy.stats.sem(test_scores)\n logger.debug('%s: Final test score=%g with std_err=%g computed in %g s'\n % (experiment_label, float(test_score), test_score_stderr, score_time))\n date_time_completed = time.strftime(\"%Y_%m_%d-%H_%M_%S\")\n logger.debug('Date/time completed (just before saving): %s' % date_time_completed)\n\n # Prepare results in dictionary\n result_dict = dict(\n # Data statistics\n data_name=data_name, 
n_features=X_train.shape[1],\n n_train=n_train, n_validation=n_validation, n_test=n_test,\n # Model\n destructor=deep_destructor, model_name=model_name, model_kwargs=model_kwargs,\n # Time\n train_time=train_time, score_time=score_time, date_time_completed=date_time_completed,\n # Test scores\n test_score=test_score, test_score_stderr=test_score_stderr, test_scores=test_scores,\n git_hash=git_hash,\n )\n\n # Save results to pickle file\n with open(experiment_filename + '.pkl', 'wb') as f:\n pickle.dump(result_dict, f)\n logger.debug('%s: Saved results to file %s' % (experiment_label, experiment_filename))\n return result_dict\n\n\ndef load_experiment_results(data_name, model_name=None, model_kwargs=None, notebook=False):\n \"\"\"\n\n Parameters\n ----------\n data_name :\n model_name :\n model_kwargs :\n notebook :\n\n Returns\n -------\n\n \"\"\"\n experiment_filename, _ = _get_experiment_filename_and_label(data_name, model_name=model_name,\n model_kwargs=model_kwargs)\n if notebook:\n experiment_filename = os.path.join('..', experiment_filename)\n\n with open(experiment_filename + '.pkl', 'rb') as f:\n result_dict = pickle.load(file=f)\n logger.debug('Loaded results from file %s' % experiment_filename)\n return result_dict\n\n\ndef _get_model(data_name, model_name, model_kwargs):\n if 'is_test' not in model_kwargs:\n model_kwargs['is_test'] = False\n # Init destructor is shared with all models\n init_destructor = CompositeDestructor(\n destructors=[\n _get_inverse_logit_destructor(data_name),\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=HistogramUnivariateDensity(\n bins=256, bounds=[0, 1], alpha=1)\n )\n )\n ],\n random_state=0,\n )\n\n # Setup canonical destructor for various models\n if model_name == 'deep-copula':\n deep_stop_tol = 0.001\n canonical_destructor = _get_copula_destructor()\n else:\n deep_stop_tol = 0.0001\n n_jobs = model_kwargs['n_jobs']\n\n # Get pair estimators (i.e. 
pairs of pixels in a spiral pattern)\n pair_estimators = _get_pair_estimators(data_name, n_uniq_dir=8)\n\n # Setup the local/pair destructor\n pair_canonical_destructor = _get_pair_canonical_destructor(model_name)\n\n # Setup a list of canonical destructors that destroy in each pixel direction\n canonical_destructor = [\n FeatureGroupsDestructor(\n groups_estimator=pair_estimator,\n group_canonical_destructor=clone(pair_canonical_destructor),\n n_jobs=n_jobs\n )\n for pair_estimator in pair_estimators\n ]\n\n # Shared DeepDestructorCV\n return DeepDestructorCV(\n init_destructor=init_destructor,\n canonical_destructor=canonical_destructor,\n stop_tol=deep_stop_tol,\n # Either n_extend or max_canonical_destructors must be None\n n_extend=1,\n cv=model_kwargs['cv'],\n refit=model_kwargs['refit'],\n silent=False,\n log_prefix='',\n random_state=0,\n # Set maximum number of layers (None for infinite)\n max_canonical_destructors=None if not model_kwargs['is_test'] else 1,\n )\n\n\ndef _get_inverse_logit_destructor(data_name):\n if data_name == 'mnist':\n alpha = MNIST_ALPHA\n elif data_name == 'cifar10':\n alpha = CIFAR10_ALPHA\n else:\n raise ValueError('dataset should either be mnist or cifar10')\n inverse_logit = CompositeDestructor(\n destructors=[\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=ScipyUnivariateDensity(\n scipy_rv=scipy.stats.logistic,\n scipy_fit_kwargs=dict(floc=0, fscale=1)\n )\n )\n ),\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=ScipyUnivariateDensity(\n scipy_rv=scipy.stats.uniform,\n scipy_fit_kwargs=dict(floc=alpha, fscale=1 - 2 * alpha)\n )\n )\n )\n ]\n )\n return inverse_logit\n\n\ndef _get_copula_destructor(hist_kwargs=None):\n if hist_kwargs is None:\n hist_kwargs = dict(bins=40, bounds=[0, 1], alpha=100)\n return CompositeDestructor(\n destructors=[\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=HistogramUnivariateDensity(**hist_kwargs)\n )\n ),\n IndependentInverseCdf(),\n BestLinearReconstructionDestructor(\n linear_estimator=PCA(),\n destructor=IndependentDestructor(),\n linear_projector_kwargs=dict(fit_bias=False),\n )\n ],\n random_state=0,\n )\n\n\ndef _get_pair_canonical_destructor(model_name):\n if model_name == 'image-pairs-tree':\n return TreeDestructor(\n tree_density=TreeDensity(\n tree_estimator=MlpackDensityTreeEstimator(\n max_depth=None,\n min_samples_leaf=100,\n max_leaf_nodes=50,\n ),\n get_tree=None,\n node_destructor=None,\n uniform_weight=0.5,\n )\n )\n elif model_name == 'image-pairs-copula':\n return _get_copula_destructor()\n else:\n raise ValueError('Invalid model name \"%s\"')\n\n\ndef _get_pair_estimators(data_name, n_uniq_dir):\n \"\"\"Returns `n_uniq_dir` pair estimators in a spiral pattern.\"\"\"\n\n def _generate_pixel_circle(radius=1):\n cur = radius * np.array([1, 1]) # Start in top right\n d = [cur]\n for step in np.array([[0, -1], [-1, 0], [0, 1], [1, 0]]):\n for i in range(2 * radius):\n cur = cur + step\n d.append(cur)\n d.pop(-1) # remove last that is a repeat\n\n def _rotate(a, n):\n return a[n:] + a[:n]\n\n return _rotate(d, radius) # Rotate to make directly east the first direction\n\n def _generate_pixel_spiral(n_spirals=2):\n d = []\n for i in range(n_spirals):\n d.extend(_generate_pixel_circle(radius=i + 1))\n return d\n\n directions = np.array(_generate_pixel_spiral(n_spirals=10))\n\n if data_name == 'mnist':\n directions = directions[:n_uniq_dir]\n return [\n ImageFeaturePairs(\n 
image_shape=(28, 28), relative_position=r,\n init_offset=(0, 0), step=(1, 0), wrap=True\n )\n for r in directions\n ]\n elif data_name == 'cifar10':\n # Make 3d coordinates\n directions = [(d2[0], d2[1], 0) for d2 in directions[:n_uniq_dir]]\n init_offset = [(0, 0, 0) for _ in directions]\n # Handle color channels\n directions.extend([(0, 0, 1), (0, 0, 1), (0, 0, 1)])\n init_offset.extend([(0, 0, 0), (0, 0, 1), (0, 0, 2)])\n return [\n ImageFeaturePairs(\n image_shape=(32, 32, 3), relative_position=r,\n init_offset=io, step=(1, 0, 0), wrap=True\n )\n for r, io in zip(directions, init_offset)\n ]\n else:\n raise RuntimeError('Only mnist and cifar10 are supported')\n\n\ndef _setup_loggers(experiment_filename):\n # Setup log file and console to have same format\n log_formatter = logging.Formatter(\n fmt='%(asctime)s:%(levelname)s:%(name)s:%(process)d: %(message)s')\n log_file = logging.FileHandler(experiment_filename + '.log')\n log_file.setFormatter(log_formatter)\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_formatter)\n\n # Add handlers to root logger\n root_logger = logging.getLogger()\n root_logger.addHandler(console_handler)\n root_logger.addHandler(log_file)\n\n # Adjust settings for loggers\n logging.captureWarnings(True)\n logging.getLogger().setLevel(logging.DEBUG)\n logging.getLogger('ddl').setLevel(logging.DEBUG)\n\n\ndef _get_experiment_filename_and_label(data_name, model_name=None, model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n data_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '..', 'data', 'results')\n try:\n os.makedirs(data_dir)\n except OSError:\n pass\n arg_str = '_'.join(['%s-%s' % (k, str(v)) for k, v in model_kwargs.items()])\n arg_str = arg_str.replace('.', '_')\n if len(arg_str) > 0:\n arg_str = '_' + arg_str\n filename = ('data-%s_model-%s%s'\n % (str(data_name), str(model_name), arg_str))\n pickle_filename = os.path.join(data_dir, filename)\n\n arg_str = ', '.join(['%s=%s' % (k, str(v)) for k, v in model_kwargs.items()])\n if len(arg_str) > 0:\n arg_str = ', ' + arg_str\n experiment_label = '(data=%s, model=%s%s)' % (data_name, str(model_name), arg_str)\n\n return pickle_filename, experiment_label\n\n\n# Add fast sanity-check tests for mnist dataset\ntry:\n # noinspection PyPackageRequirements\n import pytest\nexcept ImportError:\n pass\nelse:\n @pytest.mark.parametrize(\n 'model_name',\n # 'image-pairs-tree' not needed since covered by other tests\n ['deep-copula', 'image-pairs-copula']\n )\n def test_mnist_experiment(model_name):\n data_name = 'mnist'\n model_kwargs = dict(is_test=True, n_jobs=1)\n model_kwargs['experiment_filename'], model_kwargs[\n 'experiment_label'] = _get_experiment_filename_and_label(\n data_name, model_name=model_name, model_kwargs=model_kwargs)\n result_dict = run_experiment(data_name, model_name, model_kwargs=model_kwargs)\n\n # Check if test likelihood/score is as expected\n _model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']\n expected_test_scores = [-1.060270463188296844e+03, -1.155477974922050180e+03,\n -1.134326498390250208e+03]\n ind = _model_names.index(model_name)\n assert (np.abs(expected_test_scores[ind] - result_dict['test_score'])\n / np.abs(expected_test_scores[ind]) < 1e-15)\n\nif __name__ == '__main__':\n # Parse args\n all_data_names = ['mnist', 'cifar10']\n all_model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']\n parser = argparse.ArgumentParser(description='Sets up and/or runs MAF 
experiments.')\n parser.add_argument(\n '--model_names', default=','.join(all_model_names),\n help='One or more model names separated by commas from the list %s' % str(all_model_names))\n parser.add_argument(\n '--data_names', default=','.join(all_data_names),\n help='One or more data names separated by commas from the list %s' % str(all_data_names))\n parser.add_argument(\n '--parallel_subprocesses', default=False, type=bool,\n help='Whether to use parallel subprocesses for each (model, data) experiment '\n 'pair or run directly (default is False).')\n parser.add_argument(\n '--n_jobs', default=1, type=int,\n help='Number of parallel jobs to use for image-pairs models (default is 1).')\n args = parser.parse_args()\n print('Parsed args = %s' % str(args))\n print('----------------------')\n\n # Run experiments\n _model_kwargs = vars(args).copy() # Extract model_kwargs as dictionary\n model_names = _model_kwargs.pop('model_names').split(',')\n data_names = _model_kwargs.pop('data_names').split(',')\n is_parallel = _model_kwargs.pop('parallel_subprocesses')\n processes = []\n for _data_name in data_names:\n # Make sure data has already been cached\n get_maf_data(_data_name)\n for _model_name in model_names:\n _model_kwargs['experiment_filename'], _model_kwargs[\n 'experiment_label'] = _get_experiment_filename_and_label(\n _data_name, model_name=_model_name, model_kwargs=_model_kwargs)\n if not is_parallel:\n # Just run the experiment directly\n try:\n run_experiment(_data_name, _model_name, _model_kwargs)\n except RuntimeError as e:\n if 'mlpack' not in str(e).lower():\n raise e\n else:\n warnings.warn('Skipping %s because of error \"%s\"' % (_model_name, str(e)))\n else:\n # Generate script to run experiment in parallel in separate subprocesses\n script_str = (\n 'import os\\n'\n 'os.chdir(\\'%s\\')\\n'\n 'from icml_2018_experiment import run_experiment\\n'\n 'run_experiment(\\'%s\\', \\'%s\\', model_kwargs=%s)\\n'\n ) % (\n os.path.dirname(os.path.realpath(__file__)),\n _data_name, _model_name, str(_model_kwargs)\n )\n echo_args = ['echo', '-e', script_str]\n\n # Launch subprocess which can run in parallel\n DEVNULL = open(os.devnull, 'w')\n echo = subprocess.Popen(['echo', '-e', script_str], stdout=subprocess.PIPE)\n python = subprocess.Popen(['python'], stdin=echo.stdout, stdout=DEVNULL)\n processes.append(echo)\n processes.append(python)\n print('Started subprocess for experiment %s' % _model_kwargs['experiment_label'])\n print(\n ' Appending to end of log file %s.log' % _model_kwargs['experiment_filename'])\n\n # Remove filenames and labels for next round\n _model_kwargs.pop('experiment_filename')\n _model_kwargs.pop('experiment_label')\n\n if is_parallel:\n # Wait for all processes to finish\n print('Waiting for all subprocesses to finish')\n for p in processes:\n p.wait()\n print('All subprocesses finished!')\n"
] | [
[
"numpy.abs",
"numpy.arange",
"sklearn.base.clone",
"numpy.mean",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guitargeek/pandas | [
"a6c1f6cccee6bbccfb29488a94664ed07db024d9"
] | [
"pandas/tests/scalar/timestamp/test_timestamp.py"
] | [
"\"\"\" test the scalar Timestamp \"\"\"\n\nimport calendar\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport locale\nimport pickle\nimport unicodedata\n\nfrom dateutil.tz import tzutc\nimport numpy as np\nimport pytest\nimport pytz\nfrom pytz import (\n timezone,\n utc,\n)\n\nfrom pandas._libs.tslibs.timezones import (\n dateutil_gettz as gettz,\n get_timezone,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NaT,\n Timedelta,\n Timestamp,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries import offsets\n\n\nclass TestTimestampProperties:\n def test_freq_deprecation(self):\n # GH#41586\n msg = \"The 'freq' argument in Timestamp is deprecated\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # warning issued at construction\n ts = Timestamp(\"2021-06-01\", freq=\"D\")\n ts2 = Timestamp(\"2021-06-01\", freq=\"B\")\n\n msg = \"Timestamp.freq is deprecated\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # warning issued at attribute lookup\n ts.freq\n\n for per in [\"month\", \"quarter\", \"year\"]:\n for side in [\"start\", \"end\"]:\n attr = f\"is_{per}_{side}\"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n getattr(ts2, attr)\n\n # is_(month|quarter|year)_(start|end) does _not_ issue a warning\n # with freq=\"D\" bc the result will be unaffected by the deprecation\n with tm.assert_produces_warning(None):\n getattr(ts, attr)\n\n @pytest.mark.filterwarnings(\"ignore:The 'freq' argument:FutureWarning\")\n @pytest.mark.filterwarnings(\"ignore:Timestamp.freq is deprecated:FutureWarning\")\n def test_properties_business(self):\n ts = Timestamp(\"2017-10-01\", freq=\"B\")\n control = Timestamp(\"2017-10-01\")\n assert ts.dayofweek == 6\n assert ts.day_of_week == 6\n assert not ts.is_month_start # not a weekday\n assert not ts.freq.is_month_start(ts)\n assert ts.freq.is_month_start(ts + Timedelta(days=1))\n assert not ts.is_quarter_start # not a weekday\n assert not ts.freq.is_quarter_start(ts)\n assert ts.freq.is_quarter_start(ts + Timedelta(days=1))\n # Control case: non-business is month/qtr start\n assert control.is_month_start\n assert control.is_quarter_start\n\n ts = Timestamp(\"2017-09-30\", freq=\"B\")\n control = Timestamp(\"2017-09-30\")\n assert ts.dayofweek == 5\n assert ts.day_of_week == 5\n assert not ts.is_month_end # not a weekday\n assert not ts.freq.is_month_end(ts)\n assert ts.freq.is_month_end(ts - Timedelta(days=1))\n assert not ts.is_quarter_end # not a weekday\n assert not ts.freq.is_quarter_end(ts)\n assert ts.freq.is_quarter_end(ts - Timedelta(days=1))\n # Control case: non-business is month/qtr start\n assert control.is_month_end\n assert control.is_quarter_end\n\n @pytest.mark.parametrize(\n \"attr, expected\",\n [\n [\"year\", 2014],\n [\"month\", 12],\n [\"day\", 31],\n [\"hour\", 23],\n [\"minute\", 59],\n [\"second\", 0],\n [\"microsecond\", 0],\n [\"nanosecond\", 0],\n [\"dayofweek\", 2],\n [\"day_of_week\", 2],\n [\"quarter\", 4],\n [\"dayofyear\", 365],\n [\"day_of_year\", 365],\n [\"week\", 1],\n [\"daysinmonth\", 31],\n ],\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_fields(self, attr, expected, tz):\n # GH 10050\n # GH 13303\n ts = Timestamp(\"2014-12-31 23:59:00\", tz=tz)\n result = getattr(ts, attr)\n # that we are int like\n assert isinstance(result, int)\n assert result == expected\n\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_millisecond_raises(self, tz):\n ts = Timestamp(\"2014-12-31 23:59:00\", tz=tz)\n msg = 
\"'Timestamp' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n ts.millisecond\n\n @pytest.mark.parametrize(\n \"start\", [\"is_month_start\", \"is_quarter_start\", \"is_year_start\"]\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_is_start(self, start, tz):\n ts = Timestamp(\"2014-01-01 00:00:00\", tz=tz)\n assert getattr(ts, start)\n\n @pytest.mark.parametrize(\"end\", [\"is_month_end\", \"is_year_end\", \"is_quarter_end\"])\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_is_end(self, end, tz):\n ts = Timestamp(\"2014-12-31 23:59:59\", tz=tz)\n assert getattr(ts, end)\n\n # GH 12806\n @pytest.mark.parametrize(\n \"data\",\n [Timestamp(\"2017-08-28 23:00:00\"), Timestamp(\"2017-08-28 23:00:00\", tz=\"EST\")],\n )\n # error: Unsupported operand types for + (\"List[None]\" and \"List[str]\")\n @pytest.mark.parametrize(\n \"time_locale\", [None] + (tm.get_locales() or []) # type: ignore[operator]\n )\n def test_names(self, data, time_locale):\n # GH 17354\n # Test .day_name(), .month_name\n if time_locale is None:\n expected_day = \"Monday\"\n expected_month = \"August\"\n else:\n with tm.set_locale(time_locale, locale.LC_TIME):\n expected_day = calendar.day_name[0].capitalize()\n expected_month = calendar.month_name[8].capitalize()\n\n result_day = data.day_name(time_locale)\n result_month = data.month_name(time_locale)\n\n # Work around https://github.com/pandas-dev/pandas/issues/22342\n # different normalizations\n expected_day = unicodedata.normalize(\"NFD\", expected_day)\n expected_month = unicodedata.normalize(\"NFD\", expected_month)\n\n result_day = unicodedata.normalize(\"NFD\", result_day)\n result_month = unicodedata.normalize(\"NFD\", result_month)\n\n assert result_day == expected_day\n assert result_month == expected_month\n\n # Test NaT\n nan_ts = Timestamp(NaT)\n assert np.isnan(nan_ts.day_name(time_locale))\n assert np.isnan(nan_ts.month_name(time_locale))\n\n def test_is_leap_year(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH 13727\n dt = Timestamp(\"2000-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n assert isinstance(dt.is_leap_year, bool)\n\n dt = Timestamp(\"1999-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n dt = Timestamp(\"2004-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n\n dt = Timestamp(\"2100-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n def test_woy_boundary(self):\n # make sure weeks at year boundaries are correct\n d = datetime(2013, 12, 31)\n result = Timestamp(d).week\n expected = 1 # ISO standard\n assert result == expected\n\n d = datetime(2008, 12, 28)\n result = Timestamp(d).week\n expected = 52 # ISO standard\n assert result == expected\n\n d = datetime(2009, 12, 31)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 1)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 3)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n result = np.array(\n [\n Timestamp(datetime(*args)).week\n for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]\n ]\n )\n assert (result == [52, 52, 53, 53]).all()\n\n def test_resolution(self):\n # GH#21336, GH#21365\n dt = Timestamp(\"2100-01-01 00:00:00\")\n assert dt.resolution == Timedelta(nanoseconds=1)\n\n # Check that the attribute is available on the class, mirroring\n # the stdlib datetime behavior\n assert 
Timestamp.resolution == Timedelta(nanoseconds=1)\n\n\nclass TestTimestamp:\n def test_tz(self):\n tstr = \"2014-02-01 09:00\"\n ts = Timestamp(tstr)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local == Timestamp(tstr, tz=\"Asia/Tokyo\")\n conv = local.tz_convert(\"US/Eastern\")\n assert conv == Timestamp(\"2014-01-31 19:00\", tz=\"US/Eastern\")\n assert conv.hour == 19\n\n # preserves nanosecond\n ts = Timestamp(tstr) + offsets.Nano(5)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local.nanosecond == 5\n conv = local.tz_convert(\"US/Eastern\")\n assert conv.nanosecond == 5\n assert conv.hour == 19\n\n def test_utc_z_designator(self):\n assert get_timezone(Timestamp(\"2014-11-02 01:00Z\").tzinfo) is utc\n\n def test_asm8(self):\n np.random.seed(7_960_929)\n ns = [Timestamp.min.value, Timestamp.max.value, 1000]\n\n for n in ns:\n assert (\n Timestamp(n).asm8.view(\"i8\") == np.datetime64(n, \"ns\").view(\"i8\") == n\n )\n\n assert Timestamp(\"nat\").asm8.view(\"i8\") == np.datetime64(\"nat\", \"ns\").view(\"i8\")\n\n def test_class_ops_pytz(self):\n def compare(x, y):\n assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(timezone(\"UTC\")))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n msg = \"timezone-aware Timestamp with UTC\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#22451\n ts_utc = Timestamp.utcfromtimestamp(current_time)\n compare(\n ts_utc,\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n compare(\n # Support tz kwarg in Timestamp.fromtimestamp\n Timestamp.fromtimestamp(current_time, \"UTC\"),\n datetime.fromtimestamp(current_time, utc),\n )\n compare(\n # Support tz kwarg in Timestamp.fromtimestamp\n Timestamp.fromtimestamp(current_time, tz=\"UTC\"),\n datetime.fromtimestamp(current_time, utc),\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_class_ops_dateutil(self):\n def compare(x, y):\n assert (\n int(\n np.round(Timestamp(x).value / 1e9)\n - np.round(Timestamp(y).value / 1e9)\n )\n == 0\n )\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(tzutc()))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n\n msg = \"timezone-aware Timestamp with UTC\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#22451\n ts_utc = Timestamp.utcfromtimestamp(current_time)\n\n compare(\n ts_utc,\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_basics_nanos(self):\n val = np.int64(946_684_800_000_000_000).view(\"M8[ns]\")\n stamp = Timestamp(val.view(\"i8\") + 500)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert 
stamp.microsecond == 0\n assert stamp.nanosecond == 500\n\n # GH 14415\n val = np.iinfo(np.int64).min + 80_000_000_000_000\n stamp = Timestamp(val)\n assert stamp.year == 1677\n assert stamp.month == 9\n assert stamp.day == 21\n assert stamp.microsecond == 145224\n assert stamp.nanosecond == 192\n\n @pytest.mark.parametrize(\n \"value, check_kwargs\",\n [\n [946688461000000000, {}],\n [946688461000000000 / 1000, {\"unit\": \"us\"}],\n [946688461000000000 / 1_000_000, {\"unit\": \"ms\"}],\n [946688461000000000 / 1_000_000_000, {\"unit\": \"s\"}],\n [10957, {\"unit\": \"D\", \"h\": 0}],\n [\n (946688461000000000 + 500000) / 1000000000,\n {\"unit\": \"s\", \"us\": 499, \"ns\": 964},\n ],\n [\n (946688461000000000 + 500000000) / 1000000000,\n {\"unit\": \"s\", \"us\": 500000},\n ],\n [(946688461000000000 + 500000) / 1000000, {\"unit\": \"ms\", \"us\": 500}],\n [(946688461000000000 + 500000) / 1000, {\"unit\": \"us\", \"us\": 500}],\n [(946688461000000000 + 500000000) / 1000000, {\"unit\": \"ms\", \"us\": 500000}],\n [946688461000000000 / 1000.0 + 5, {\"unit\": \"us\", \"us\": 5}],\n [946688461000000000 / 1000.0 + 5000, {\"unit\": \"us\", \"us\": 5000}],\n [946688461000000000 / 1000000.0 + 0.5, {\"unit\": \"ms\", \"us\": 500}],\n [946688461000000000 / 1000000.0 + 0.005, {\"unit\": \"ms\", \"us\": 5, \"ns\": 5}],\n [946688461000000000 / 1000000000.0 + 0.5, {\"unit\": \"s\", \"us\": 500000}],\n [10957 + 0.5, {\"unit\": \"D\", \"h\": 12}],\n ],\n )\n def test_unit(self, value, check_kwargs):\n def check(value, unit=None, h=1, s=1, us=0, ns=0):\n stamp = Timestamp(value, unit=unit)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert stamp.day == 1\n assert stamp.hour == h\n if unit != \"D\":\n assert stamp.minute == 1\n assert stamp.second == s\n assert stamp.microsecond == us\n else:\n assert stamp.minute == 0\n assert stamp.second == 0\n assert stamp.microsecond == 0\n assert stamp.nanosecond == ns\n\n check(value, **check_kwargs)\n\n def test_roundtrip(self):\n\n # test value to string and back conversions\n # further test accessors\n base = Timestamp(\"20140101 00:00:00\")\n\n result = Timestamp(base.value + Timedelta(\"5ms\").value)\n assert result == Timestamp(f\"{base}.005000\")\n assert result.microsecond == 5000\n\n result = Timestamp(base.value + Timedelta(\"5us\").value)\n assert result == Timestamp(f\"{base}.000005\")\n assert result.microsecond == 5\n\n result = Timestamp(base.value + Timedelta(\"5ns\").value)\n assert result == Timestamp(f\"{base}.000000005\")\n assert result.nanosecond == 5\n assert result.microsecond == 0\n\n result = Timestamp(base.value + Timedelta(\"6ms 5us\").value)\n assert result == Timestamp(f\"{base}.006005\")\n assert result.microsecond == 5 + 6 * 1000\n\n result = Timestamp(base.value + Timedelta(\"200ms 5us\").value)\n assert result == Timestamp(f\"{base}.200005\")\n assert result.microsecond == 5 + 200 * 1000\n\n def test_hash_equivalent(self):\n d = {datetime(2011, 1, 1): 5}\n stamp = Timestamp(datetime(2011, 1, 1))\n assert d[stamp] == 5\n\n @pytest.mark.parametrize(\n \"timezone, year, month, day, hour\",\n [[\"America/Chicago\", 2013, 11, 3, 1], [\"America/Santiago\", 2021, 4, 3, 23]],\n )\n def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):\n # see gh-33931\n test_timezone = gettz(timezone)\n transition_1 = Timestamp(\n year=year,\n month=month,\n day=day,\n hour=hour,\n minute=0,\n fold=0,\n tzinfo=test_timezone,\n )\n transition_2 = Timestamp(\n year=year,\n month=month,\n day=day,\n hour=hour,\n minute=0,\n 
fold=1,\n tzinfo=test_timezone,\n )\n assert hash(transition_1) == hash(transition_2)\n\n def test_tz_conversion_freq(self, tz_naive_fixture):\n # GH25241\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n t1 = Timestamp(\"2019-01-01 10:00\", freq=\"H\")\n assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n t2 = Timestamp(\"2019-01-02 12:00\", tz=\"UTC\", freq=\"T\")\n assert t2.tz_convert(tz=\"UTC\").freq == t2.freq\n\n def test_pickle_freq_no_warning(self):\n # GH#41949 we don't want a warning on unpickling\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n ts = Timestamp(\"2019-01-01 10:00\", freq=\"H\")\n\n out = pickle.dumps(ts)\n with tm.assert_produces_warning(None):\n res = pickle.loads(out)\n\n assert res._freq == ts._freq\n\n\nclass TestTimestampNsOperations:\n def test_nanosecond_string_parsing(self):\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\")\n # GH 7878\n expected_repr = \"2013-05-01 07:15:45.123456789\"\n expected_value = 1_367_392_545_123_456_789\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789+09:00\", tz=\"Asia/Tokyo\")\n assert ts.value == expected_value - 9 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", tz=\"UTC\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", tz=\"US/Eastern\")\n assert ts.value == expected_value + 4 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n # GH 10041\n ts = Timestamp(\"20130501T071545.123456789\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n def test_nanosecond_timestamp(self):\n # GH 7610\n expected = 1_293_840_000_000_000_005\n t = Timestamp(\"2011-01-01\") + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(\"2011-01-01 00:00:00.000000005\")\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n expected = 1_293_840_000_000_000_010\n t = t + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(\"2011-01-01 00:00:00.000000010\")\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n\nclass TestTimestampToJulianDate:\n def test_compare_1700(self):\n r = Timestamp(\"1700-06-23\").to_julian_date()\n assert r == 2_342_145.5\n\n def test_compare_2000(self):\n r = Timestamp(\"2000-04-12\").to_julian_date()\n assert r == 2_451_646.5\n\n def test_compare_2100(self):\n r = Timestamp(\"2100-08-12\").to_julian_date()\n assert r == 2_488_292.5\n\n def test_compare_hour01(self):\n r = Timestamp(\"2000-08-12T01:00:00\").to_julian_date()\n assert r == 2_451_768.5416666666666666\n\n def test_compare_hour13(self):\n r = Timestamp(\"2000-08-12T13:00:00\").to_julian_date()\n assert r == 2_451_769.0416666666666666\n\n\nclass TestTimestampConversion:\n def 
test_conversion(self):\n # GH#9255\n ts = Timestamp(\"2000-01-01\")\n\n result = ts.to_pydatetime()\n expected = datetime(2000, 1, 1)\n assert result == expected\n assert type(result) == type(expected)\n\n result = ts.to_datetime64()\n expected = np.datetime64(ts.value, \"ns\")\n assert result == expected\n assert type(result) == type(expected)\n assert result.dtype == expected.dtype\n\n def test_to_pydatetime_nonzero_nano(self):\n ts = Timestamp(\"2011-01-01 9:00:00.123456789\")\n\n # Warn the user of data loss (nanoseconds).\n with tm.assert_produces_warning(UserWarning):\n expected = datetime(2011, 1, 1, 9, 0, 0, 123456)\n result = ts.to_pydatetime()\n assert result == expected\n\n def test_timestamp_to_datetime(self):\n stamp = Timestamp(\"20090415\", tz=\"US/Eastern\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_dateutil(self):\n stamp = Timestamp(\"20090415\", tz=\"dateutil/US/Eastern\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_explicit_pytz(self):\n stamp = Timestamp(\"20090415\", tz=pytz.timezone(\"US/Eastern\"))\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n @td.skip_if_windows\n def test_timestamp_to_datetime_explicit_dateutil(self):\n stamp = Timestamp(\"20090415\", tz=gettz(\"US/Eastern\"))\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_to_datetime_bijective(self):\n # Ensure that converting to datetime and back only loses precision\n # by going from nanoseconds to microseconds.\n exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning):\n pydt_max = Timestamp.max.to_pydatetime()\n\n assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000\n\n exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning):\n pydt_min = Timestamp.min.to_pydatetime()\n\n # The next assertion can be enabled once GH#39221 is merged\n # assert pydt_min < Timestamp.min # this is bc nanos are dropped\n tdus = timedelta(microseconds=1)\n assert pydt_min + tdus > Timestamp.min\n\n assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000\n\n def test_to_period_tz_warning(self):\n # GH#21333 make sure a warning is issued when timezone\n # info is lost\n ts = Timestamp(\"2009-04-15 16:17:18\", tz=\"US/Eastern\")\n with tm.assert_produces_warning(UserWarning):\n # warning that timezone info will be lost\n ts.to_period(\"D\")\n\n def test_to_numpy_alias(self):\n # GH 24653: alias .to_numpy() for scalars\n ts = Timestamp(datetime.now())\n assert ts.to_datetime64() == ts.to_numpy()\n\n # GH#44460\n msg = \"dtype and copy arguments are ignored\"\n with pytest.raises(ValueError, match=msg):\n ts.to_numpy(\"M8[s]\")\n with pytest.raises(ValueError, match=msg):\n ts.to_numpy(copy=True)\n\n\nclass SubDatetime(datetime):\n pass\n\n\[email protected](\n \"lh,rh\",\n [\n (SubDatetime(2000, 1, 1), Timedelta(hours=1)),\n (Timedelta(hours=1), SubDatetime(2000, 1, 1)),\n ],\n)\ndef test_dt_subclass_add_timedelta(lh, rh):\n # GH#25851\n # ensure that subclassed datetime works for\n # Timedelta operations\n result = lh + rh\n expected = SubDatetime(2000, 1, 1, 1)\n assert result == expected\n"
] | [
[
"pandas.Timestamp.fromtimestamp",
"pandas._testing.get_locales",
"numpy.iinfo",
"pandas.Timestamp.today",
"pandas.Timestamp.utcfromtimestamp",
"pandas.Timestamp.combine",
"pandas.Timestamp.min.to_pydatetime",
"pandas.Timestamp.utcnow",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_produces_warning",
"pandas.Timedelta",
"numpy.int64",
"numpy.random.seed",
"pandas.Timestamp.max.to_pydatetime",
"numpy.datetime64",
"pandas.tseries.offsets.Nano",
"pandas._testing.set_locale",
"pandas.Timestamp.now",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JeffreyDF/Lasagne | [
"6dd88f5fada20768087f29ae89cbd83980fe0a4e",
"6dd88f5fada20768087f29ae89cbd83980fe0a4e"
] | [
"lasagne/tests/layers/test_conv.py",
"lasagne/tests/layers/test_shape.py"
] | [
"import numpy as np\nimport pytest\nimport importlib\nimport theano\n\nimport lasagne\nfrom lasagne.utils import floatX, as_tuple\n\n\ndef conv2d(input, kernel, pad):\n \"\"\"Execute a 2D convolution.\n\n Parameters\n ----------\n input : numpy array\n kernel : numpy array\n pad : {0, 'valid', 'same', 'full'}\n\n Returns\n -------\n numpy array\n \"\"\"\n if pad not in ['valid', 'same', 'full']:\n pad = as_tuple(pad, 2, int)\n input = np.pad(input,\n ((0, 0), (0, 0), (pad[0], pad[0]), (pad[1], pad[1])),\n mode='constant')\n pad = 'valid'\n\n output = np.zeros((input.shape[0],\n kernel.shape[0],\n input.shape[2] + kernel.shape[2] - 1,\n input.shape[3] + kernel.shape[3] - 1,\n ))\n\n for i in range(kernel.shape[2]):\n for j in range(kernel.shape[3]):\n k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]\n output[:, :, i:i + input.shape[2],\n j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)\n\n if pad == 'valid':\n trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)\n output = output[:,\n :,\n trim[0]:-trim[0] or None,\n trim[1]:-trim[1] or None]\n\n elif pad == 'same':\n shift_x = (kernel.shape[2] - 1) // 2\n shift_y = (kernel.shape[3] - 1) // 2\n output = output[:, :, shift_x:input.shape[2] + shift_x,\n shift_y:input.shape[3] + shift_y]\n return output\n\n\ndef conv2d_test_sets():\n def _convert(input, kernel, output, kwargs):\n return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]\n\n for pad in [0, 'full', 'same']:\n for stride in [1, 2, 3]:\n for filter_size in [1, 3]:\n if stride > filter_size:\n continue\n input = np.random.random((3, 1, 16, 23))\n kernel = np.random.random((16, 1, filter_size, filter_size))\n output = conv2d(input, kernel, pad=pad)\n output = output[:, :, ::stride, ::stride]\n yield _convert(input, kernel, output, {'pad': pad,\n 'stride': stride\n })\n\n # bias-less case\n input = np.random.random((3, 1, 16, 23))\n kernel = np.random.random((16, 1, 3, 3))\n output = conv2d(input, kernel, pad='valid')\n yield _convert(input, kernel, output, {'b': None})\n # pad='valid' case\n yield _convert(input, kernel, output, {'pad': 'valid'})\n\n\ndef conv1d(input, kernel, pad):\n if pad not in ['valid', 'same', 'full']:\n input = np.pad(input,\n ((0, 0), (0, 0), (int(pad), int(pad))),\n mode='constant')\n pad = 'valid'\n\n output = []\n for b in input:\n temp = []\n for c in kernel:\n temp.append(\n np.convolve(b[0, :], c[0, :], mode=pad))\n output.append(temp)\n return np.array(output)\n\n\ndef conv1d_test_sets():\n def _convert(input, kernel, output, kwargs):\n return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]\n\n for pad in [0, 1, 2, 'full', 'same']:\n for stride in [1, 2, 3]:\n for filter_size in [1, 3]:\n if stride > filter_size:\n continue\n input = np.random.random((3, 1, 23))\n kernel = np.random.random((16, 1, filter_size))\n output = conv1d(input, kernel, pad)\n output = output[:, :, ::stride]\n yield _convert(input, kernel, output, {'pad': pad,\n 'stride': stride,\n })\n\n # bias-less case\n input = np.random.random((3, 1, 23))\n kernel = np.random.random((16, 1, 3))\n output = conv1d(input, kernel, pad='valid')\n yield _convert(input, kernel, output, {'b': None})\n # pad='valid' case\n yield _convert(input, kernel, output, {'pad': 'valid'})\n\n\ndef test_conv_output_length():\n from lasagne.layers.conv import conv_output_length\n\n assert conv_output_length(13, 5, 3, 'valid') == 3\n assert conv_output_length(13, 5, 3, 0) == 3\n assert conv_output_length(13, 5, 3, 'full') == 6\n assert conv_output_length(13, 5, 3, 'same') == 
5\n assert conv_output_length(13, 5, 3, 2) == 5\n\n with pytest.raises(ValueError) as exc:\n conv_output_length(13, 5, 3, '_nonexistent_mode')\n assert \"Invalid pad: \" in exc.value.args[0]\n\n\[email protected]\ndef DummyInputLayer():\n def factory(shape):\n from lasagne.layers.input import InputLayer\n return InputLayer(shape)\n return factory\n\n\nclass TestConv1DLayer:\n\n @pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv1d_test_sets()))\n @pytest.mark.parametrize(\"extra_kwargs\", [\n {},\n {'untie_biases': True},\n ])\n def test_defaults(self, DummyInputLayer,\n input, kernel, output, kwargs, extra_kwargs):\n kwargs.update(extra_kwargs)\n b, c, w = input.shape.eval()\n input_layer = DummyInputLayer((b, c, w))\n try:\n from lasagne.layers.conv import Conv1DLayer\n layer = Conv1DLayer(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pass\n\n def test_init_none_nonlinearity_bias(self, DummyInputLayer):\n from lasagne.layers.conv import Conv1DLayer\n input_layer = DummyInputLayer((1, 2, 3))\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),\n nonlinearity=None, b=None)\n assert layer.nonlinearity == lasagne.nonlinearities.identity\n assert layer.b is None\n\n def test_invalid_pad(self, DummyInputLayer):\n from lasagne.layers.conv import Conv1DLayer\n input_layer = DummyInputLayer((1, 2, 3))\n with pytest.raises(TypeError) as exc:\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),\n pad='_nonexistent_mode')\n assert \"iterable of int\" in exc.value.args[0]\n\n with pytest.raises(NotImplementedError) as exc:\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(4,),\n pad='same')\n assert \"requires odd filter size\" in exc.value.args[0]\n\n\nclass TestConv2DLayerImplementations:\n\n @pytest.fixture(\n params=[\n ('lasagne.layers', 'Conv2DLayer', {}),\n ('lasagne.layers.cuda_convnet',\n 'Conv2DCCLayer',\n {'flip_filters': True}),\n ('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}),\n ('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}),\n ],\n )\n def Conv2DImpl(self, request):\n impl_module_name, impl_name, impl_default_kwargs = request.param\n try:\n mod = importlib.import_module(impl_module_name)\n except ImportError:\n pytest.skip(\"{} not available\".format(impl_module_name))\n\n impl = getattr(mod, impl_name)\n\n def wrapper(*args, **kwargs):\n kwargs2 = impl_default_kwargs.copy()\n kwargs2.update(kwargs)\n return impl(*args, **kwargs2)\n\n wrapper.__name__ = impl_name\n return wrapper\n\n @pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv2d_test_sets()))\n @pytest.mark.parametrize(\"extra_kwargs\", [\n {},\n {'untie_biases': True},\n ])\n def test_defaults(self, Conv2DImpl, DummyInputLayer,\n input, kernel, output, kwargs, extra_kwargs):\n kwargs.update(extra_kwargs)\n b, c, h, w = input.shape.eval()\n input_layer = DummyInputLayer((b, c, h, w))\n try:\n layer = Conv2DImpl(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2:],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pytest.skip()\n\n 
@pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv2d_test_sets()))\n def test_with_nones(self, Conv2DImpl, DummyInputLayer,\n input, kernel, output, kwargs):\n b, c, h, w = input.shape.eval()\n input_layer = DummyInputLayer((None, c, None, None))\n try:\n layer = Conv2DImpl(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2:],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n\n assert layer.output_shape == (None,\n kernel.shape[0],\n None,\n None)\n assert actual.shape == output.shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pytest.skip()\n\n def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((1, 2, 3, 3))\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),\n nonlinearity=None, b=None)\n assert layer.nonlinearity == lasagne.nonlinearities.identity\n assert layer.b is None\n\n def test_invalid_pad(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((1, 2, 3))\n with pytest.raises(TypeError) as exc:\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),\n pad='_nonexistent_mode')\n assert \"iterable of int\" in exc.value.args[0]\n\n with pytest.raises(NotImplementedError) as exc:\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(4, 4),\n pad='same')\n assert \"requires odd filter size\" in exc.value.args[0]\n\n def test_get_params(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((128, 3, 32, 32))\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3))\n assert layer.get_params() == [layer.W, layer.b]\n assert layer.get_params(regularizable=False) == [layer.b]\n assert layer.get_params(regularizable=True) == [layer.W]\n assert layer.get_params(trainable=True) == [layer.W, layer.b]\n assert layer.get_params(trainable=False) == []\n assert layer.get_params(_nonexistent_tag=True) == []\n assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]\n\n\nclass TestConv2DDNNLayer:\n def test_import_without_gpu_or_cudnn_raises(self):\n from theano.sandbox.cuda import dnn\n if theano.config.device.startswith(\"gpu\") and dnn.dnn_available():\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.dnn\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.dnn import Conv2DDNNLayer\n except ImportError:\n pytest.skip(\"dnn not available\")\n\n input_layer = DummyInputLayer((1, 2, 3, 3))\n\n layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (1, 4, 7, 7)\n\n\nclass TestConv2DMMLayer:\n def test_import_without_gpu_raises(self):\n if theano.config.device.startswith(\"gpu\"):\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.corrmm\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.corrmm import Conv2DMMLayer\n except ImportError:\n pytest.skip(\"corrmm not available\")\n\n input_layer = DummyInputLayer((1, 2, 3, 3))\n\n layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (1, 4, 7, 7)\n\n\nclass TestConv2DCCLayer:\n def test_import_without_gpu_raises(self):\n if theano.config.device.startswith(\"gpu\"):\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.cuda_convnet\n\n def test_unsupported_settings(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except 
ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((128, 3, 32, 32))\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 5))\n assert (\"Conv2DCCLayer only supports square filters\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3), stride=(1, 2))\n assert (\"Conv2DCCLayer only supports square strides\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=15,\n filter_size=(3, 3))\n assert (\"Conv2DCCLayer requires num_filters to be a multiple of 16\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3), pad=(1, 2))\n assert (\"Conv2DCCLayer only supports square padding\" in\n exc.value.args[0])\n\n input_layer = DummyInputLayer((128, 7, 32, 32))\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3))\n assert (\"Conv2DCCLayer requires the number of input channels to be \"\n \"1, 2, 3 or a multiple of 4\" in exc.value.args[0])\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((128, 3, 32, 32))\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (128, 16, 36, 36)\n\n def test_dimshuffle_false_shapes(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((4, 32, 32, 128)) # c01b instead of bc01\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False)\n assert layer.W.get_value().shape == (4, 3, 3, 16)\n assert layer.b.get_value().shape == (16,)\n\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False, untie_biases=True)\n assert layer.W.get_value().shape == (4, 3, 3, 16)\n assert layer.b.get_value().shape == (16, 30, 30)\n\n def test_dimshuffle_false_get_output_for(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n # this implementation is tested against FilterActs instead of\n # theano.tensor.nnet.conv.conv2d because using the latter leads to\n # numerical precision errors.\n from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs\n filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)\n\n input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))\n kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))\n\n input_layer = DummyInputLayer((4, 5, 5, 8)) # c01b instead of bc01\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False, W=kernel, b=None,\n nonlinearity=None)\n\n output = np.array(filter_acts(input, kernel).eval())\n\n actual = layer.get_output_for(input).eval()\n actual = np.array(actual)\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n\nclass TestShuffleLayers:\n def test_bc01_to_c01b(self):\n from lasagne.layers.input import InputLayer\n try:\n from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer\n 
except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = InputLayer((1, 2, 3, 4))\n layer = ShuffleBC01ToC01BLayer(input_layer)\n assert layer.output_shape == (2, 3, 4, 1)\n\n input = floatX(np.random.random((1, 2, 3, 4)))\n output = input.transpose(1, 2, 3, 0)\n actual = layer.get_output_for(theano.shared(input)).eval()\n assert np.allclose(output, actual)\n\n def test_c01b_to_bc01(self):\n from lasagne.layers.input import InputLayer\n try:\n from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = InputLayer((1, 2, 3, 4))\n layer = ShuffleC01BToBC01Layer(input_layer)\n assert layer.output_shape == (4, 1, 2, 3)\n\n input = floatX(np.random.random((1, 2, 3, 4)))\n output = input.transpose(3, 0, 1, 2)\n actual = layer.get_output_for(theano.shared(input)).eval()\n assert np.allclose(output, actual)\n",
"import numpy as np\nimport pytest\nimport theano\n\nfrom mock import Mock\n\n\nclass TestFlattenLayer:\n @pytest.fixture\n def layer(self):\n from lasagne.layers.shape import FlattenLayer\n return FlattenLayer(Mock(output_shape=(None,)))\n\n @pytest.fixture\n def layer_outdim3(self):\n from lasagne.layers.shape import FlattenLayer\n return FlattenLayer(Mock(output_shape=(None,)), outdim=3)\n\n @pytest.fixture\n def layer_outdim1(self):\n from lasagne.layers.shape import FlattenLayer\n return FlattenLayer(Mock(output_shape=(None,)), outdim=1)\n\n def test_get_output_shape_for(self, layer):\n input_shape = (2, 3, 4, 5)\n assert layer.get_output_shape_for(input_shape) == (2, 3 * 4 * 5)\n\n def test_get_output_for(self, layer):\n input = np.random.random((2, 3, 4, 5))\n result = layer.get_output_for(theano.shared(input)).eval()\n assert (result == input.reshape((input.shape[0], -1))).all()\n\n def test_get_output_shape_for_outdim3(self, layer_outdim3):\n input_shape = (2, 3, 4, 5)\n assert layer_outdim3.get_output_shape_for(input_shape) == (2, 3, 4 * 5)\n\n def test_get_output_for_outdim3(self, layer_outdim3):\n input = np.random.random((2, 3, 4, 5))\n result = layer_outdim3.get_output_for(theano.shared(input)).eval()\n assert (result == input.reshape(\n (input.shape[0], input.shape[1], -1))).all()\n\n def test_get_output_shape_for_outdim1(self, layer_outdim1):\n input_shape = (2, 3, 4, 5)\n assert layer_outdim1.get_output_shape_for(input_shape) == (\n 2 * 3 * 4 * 5, )\n\n def test_get_output_for_outdim1(self, layer_outdim1):\n input = np.random.random((2, 3, 4, 5))\n result = layer_outdim1.get_output_for(theano.shared(input)).eval()\n assert (result == input.reshape(-1)).all()\n\n def test_dim0_raises(self):\n from lasagne.layers.shape import FlattenLayer\n with pytest.raises(ValueError):\n FlattenLayer((2, 3, 4), outdim=0)\n\n\nclass TestPadLayer:\n @pytest.fixture\n def layerclass(self):\n from lasagne.layers.shape import PadLayer\n return PadLayer\n\n @pytest.mark.parametrize(\n \"width, input_shape, output_shape\",\n [(3, (2, 3, 4, 5), (2, 3, 10, 11)),\n ((2, 3), (2, 3, 4, 5), (2, 3, 8, 11)),\n (((1, 2), (3, 4)), (2, 3, 4, 5), (2, 3, 7, 12)),\n ])\n def test_get_output_shape_for(self, layerclass,\n width, input_shape, output_shape):\n layer = layerclass(Mock(output_shape=(None,)), width=width)\n assert layer.get_output_shape_for(input_shape) == output_shape\n\n def test_get_output_for(self, layerclass):\n layer = layerclass(Mock(output_shape=(None,)), width=2)\n input = np.zeros((1, 2, 10))\n trimmed = theano.shared(input[:, :, 2:-2])\n result = layer.get_output_for(trimmed).eval()\n\n assert (result == input).all()\n\n\nclass TestReshapeLayer:\n @pytest.fixture\n def layerclass(self):\n from lasagne.layers.shape import ReshapeLayer\n return ReshapeLayer\n\n @pytest.fixture\n def two_unknown(self):\n from lasagne.layers.input import InputLayer\n shape = (16, 3, None, None, 10)\n return (InputLayer(shape),\n theano.shared(np.ones((16, 3, 5, 7, 10))))\n\n def test_no_reference(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, (16, 3, 5, 7, 2, 5))\n assert layer.output_shape == (16, 3, 5, 7, 2, 5)\n result = layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 5, 7, 2, 5)\n\n def test_reference_both(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, (-1, [1], [2], [3], 2, 5))\n assert layer.output_shape == (16, 3, None, None, 2, 5)\n result = 
layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 5, 7, 2, 5)\n\n def test_reference_one(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, (-1, [1], [2], 7, 2, 5))\n assert layer.output_shape == (None, 3, None, 7, 2, 5)\n result = layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 5, 7, 2, 5)\n\n def test_reference_twice(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, (-1, [1], [2], [3], 2, [2]))\n assert layer.output_shape == (None, 3, None, None, 2, None)\n result = layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 5, 7, 2, 5)\n\n def test_merge_with_unknown(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, ([0], [1], [2], -1))\n assert layer.output_shape == (16, 3, None, None)\n result = layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 5, 70)\n\n def test_merge_two_unknowns(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n layer = layerclass(inputlayer, ([0], [1], -1, [4]))\n assert layer.output_shape == (16, 3, None, 10)\n result = layer.get_output_for(inputdata).eval()\n assert result.shape == (16, 3, 35, 10)\n\n def test_size_mismatch(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n with pytest.raises(ValueError) as excinfo:\n layerclass(inputlayer, (17, 3, [2], [3], -1))\n assert 'match' in str(excinfo.value)\n\n def test_invalid_spec(self, layerclass, two_unknown):\n inputlayer, inputdata = two_unknown\n with pytest.raises(ValueError):\n layerclass(inputlayer, (-16, 3, 5, 7, 10))\n with pytest.raises(ValueError):\n layerclass(inputlayer, (-1, 3, 5, 7, -1))\n with pytest.raises(ValueError):\n layerclass(inputlayer, ([-1], 3, 5, 7, 10))\n with pytest.raises(ValueError):\n layerclass(inputlayer, ([0, 1], 3, 5, 7, 10))\n with pytest.raises(ValueError):\n layerclass(inputlayer, (None, 3, 5, 7, 10))\n with pytest.raises(ValueError):\n layerclass(inputlayer, (16, 3, 5, 7, [5]))\n with pytest.raises(ValueError):\n layerclass(inputlayer, (16, 3, theano.tensor.vector(), 7, 10))\n\n def test_symbolic_shape(self):\n from lasagne.layers import InputLayer, ReshapeLayer, get_output\n x = theano.tensor.tensor3()\n batch_size, seq_len, num_features = x.shape\n l_inp = InputLayer((None, None, None))\n l_rshp2 = ReshapeLayer(l_inp, (batch_size*seq_len, [2]))\n\n # we cannot infer any of the output shapes because they are symbolic.\n output_shape = l_rshp2.get_output_shape_for(\n (batch_size, seq_len, num_features))\n assert output_shape == (None, None)\n\n output = get_output(l_rshp2, x)\n out1 = output.eval({x: np.ones((3, 5, 6), dtype='float32')})\n out2 = output.eval({x: np.ones((4, 5, 7), dtype='float32')})\n\n assert out1.shape == (3*5, 6)\n assert out2.shape == (4*5, 7)\n\n\nclass TestDimshuffleLayer:\n @pytest.fixture\n def input_shape(self):\n return (2, 3, 1, 5, 7)\n\n @pytest.fixture\n def input_var(self):\n InputTensorType = theano.tensor.TensorType(\n 'float64', broadcastable=(False, False, True, False, False),\n name='DimShuffleTestTensor')\n return InputTensorType(name='x')\n\n @pytest.fixture\n def input_layer(self, input_shape, input_var):\n from lasagne.layers.input import InputLayer\n return InputLayer(input_shape, input_var)\n\n @pytest.fixture\n def input_shape_with_None(self):\n return (2, 3, None, 5, 7)\n\n @pytest.fixture\n def input_layer_with_None(self, input_shape_with_None, 
input_var):\n from lasagne.layers.input import InputLayer\n return InputLayer(input_shape_with_None, input_var)\n\n @pytest.fixture\n def input_data(self, input_shape):\n return np.ones(input_shape)\n\n def test_rearrange(self, input_data, input_var, input_layer):\n from lasagne.layers.shape import DimshuffleLayer\n ds = DimshuffleLayer(input_layer, [4, 3, 2, 1, 0])\n assert ds.output_shape == (7, 5, 1, 3, 2)\n assert ds.get_output_for(input_var).eval(\n {input_var: input_data}).shape == (7, 5, 1, 3, 2)\n\n def test_broadcast(self, input_data, input_var, input_layer):\n from lasagne.layers.shape import DimshuffleLayer\n ds = DimshuffleLayer(input_layer, [0, 1, 2, 3, 4, 'x'])\n assert ds.output_shape == (2, 3, 1, 5, 7, 1)\n assert ds.get_output_for(input_var).eval(\n {input_var: input_data}).shape == (2, 3, 1, 5, 7, 1)\n\n def test_collapse(self, input_data, input_var, input_layer):\n from lasagne.layers.shape import DimshuffleLayer\n ds_ok = DimshuffleLayer(input_layer, [0, 1, 3, 4])\n assert ds_ok.output_shape == (2, 3, 5, 7)\n assert ds_ok.get_output_for(input_var).eval(\n {input_var: input_data}).shape == (2, 3, 5, 7)\n with pytest.raises(ValueError):\n DimshuffleLayer(input_layer, [0, 1, 2, 4])\n\n def test_collapse_None(self, input_data, input_var, input_layer_with_None):\n from lasagne.layers.shape import DimshuffleLayer\n ds_ok = DimshuffleLayer(input_layer_with_None, [0, 1, 3, 4])\n assert ds_ok.output_shape == (2, 3, 5, 7)\n assert ds_ok.get_output_for(input_var).eval(\n {input_var: input_data}).shape == (2, 3, 5, 7)\n with pytest.raises(ValueError):\n DimshuffleLayer(input_layer_with_None, [0, 1, 2, 4])\n\n def test_invalid_pattern(self, input_data, input_var, input_layer):\n from lasagne.layers.shape import DimshuffleLayer\n with pytest.raises(ValueError):\n DimshuffleLayer(input_layer, ['q'])\n with pytest.raises(ValueError):\n DimshuffleLayer(input_layer, [0, 0, 1, 3, 4])\n with pytest.raises(ValueError):\n # There is no dimension 42\n DimshuffleLayer(input_layer, [0, 1, 2, 4, 42])\n\n\ndef test_slice_layer():\n from lasagne.layers import SliceLayer, InputLayer, get_output_shape,\\\n get_output\n from numpy.testing import assert_array_almost_equal as aeq\n in_shp = (3, 5, 2)\n l_inp = InputLayer(in_shp)\n l_slice_ax0 = SliceLayer(l_inp, axis=0, indices=0)\n l_slice_ax1 = SliceLayer(l_inp, axis=1, indices=slice(3, 5))\n l_slice_ax2 = SliceLayer(l_inp, axis=-1, indices=-1)\n\n x = np.arange(np.prod(in_shp)).reshape(in_shp).astype('float32')\n x1 = x[0]\n x2 = x[:, 3:5]\n x3 = x[:, :, -1]\n\n assert get_output_shape(l_slice_ax0) == x1.shape\n assert get_output_shape(l_slice_ax1) == x2.shape\n assert get_output_shape(l_slice_ax2) == x3.shape\n\n aeq(get_output(l_slice_ax0, x).eval(), x1)\n aeq(get_output(l_slice_ax1, x).eval(), x2)\n aeq(get_output(l_slice_ax2, x).eval(), x3)\n\n # test slicing None dimension\n in_shp = (2, None, 2)\n l_inp = InputLayer(in_shp)\n l_slice_ax1 = SliceLayer(l_inp, axis=1, indices=slice(3, 5))\n assert get_output_shape(l_slice_ax1) == (2, None, 2)\n aeq(get_output(l_slice_ax1, x).eval(), x2)\n"
] | [
[
"numpy.convolve",
"numpy.random.random",
"numpy.pad",
"numpy.allclose",
"numpy.array",
"numpy.zeros"
],
[
"numpy.prod",
"numpy.random.random",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
antoszy/RoboND-perception-exercises | [
"8c725e77316162ae485ccee94085fd2314be7ae0"
] | [
"Exercise-3/sensor_stick/src/sensor_stick/features.py"
] | [
"import matplotlib.colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pcl_helper import *\nnbinscol = 32\nnbinsnor = 20\n\ndef rgb_to_hsv(rgb_list):\n rgb_normalized = [1.0*rgb_list[0]/255, 1.0*rgb_list[1]/255, 1.0*rgb_list[2]/255]\n hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]\n return hsv_normalized\n\n\ndef compute_color_histograms(cloud, using_hsv=False):\n\t# Compute histograms for the clusters\n\tpoint_colors_list = []\n\n\t# Step through each point in the point cloud\n\tfor point in pc2.read_points(cloud, skip_nans=True):\n\t\trgb_list = float_to_rgb(point[3])\n\t\tif using_hsv:\n\t\t point_colors_list.append(rgb_to_hsv(rgb_list) * 255)\n\t\telse:\n\t\t point_colors_list.append(rgb_list)\n\n\t# Populate lists with color values\n\tchannel_1_vals = []\n\tchannel_2_vals = []\n\tchannel_3_vals = []\n\n\tfor color in point_colors_list:\n\t\tchannel_1_vals.append(color[0])\n\t\tchannel_2_vals.append(color[1])\n\t\tchannel_3_vals.append(color[2])\n\n\t# TODO: Compute histograms\n\n\thist_1 = np.histogram(channel_1_vals, bins = nbinscol, range = (0, 256))\n\thist_2 = np.histogram(channel_2_vals, bins = nbinscol, range = (0, 256))\n\thist_3 = np.histogram(channel_3_vals, bins = nbinscol, range = (0, 256))\n\n\t# TODO: Concatenate and normalize the histograms\n\tfeatures = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)\n\tnormed_features = features/np.sum(features)\n\n\treturn normed_features \n\n\ndef compute_normal_histograms(normal_cloud):\n\tnorm_x_vals = []\n\tnorm_y_vals = []\n\tnorm_z_vals = []\n\n\tfor norm_component in pc2.read_points(normal_cloud,\n\t\t field_names = ('normal_x', 'normal_y', 'normal_z'),\n\t\t skip_nans=True):\n\t\tnorm_x_vals.append(norm_component[0])\n\t\tnorm_y_vals.append(norm_component[1])\n\t\tnorm_z_vals.append(norm_component[2])\n\n\t# TODO: Compute histograms of normal values (just like with color)\n\thist_1 = np.histogram(norm_x_vals, bins = nbinsnor, range = (0, 256))\n\thist_2 = np.histogram(norm_y_vals, bins = nbinsnor, range = (0, 256))\n\thist_3 = np.histogram(norm_z_vals, bins = nbinsnor, range = (0, 256))\n\n\t# TODO: Concatenate and normalize the histograms\n\tfeatures = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)\n\tnormed_features = features/np.sum(features)\n\n\treturn normed_features\n"
] | [
[
"numpy.concatenate",
"numpy.histogram",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hrutkabence/tutorials | [
"bd76294860804aee8ecda5e1445464506bf02ee0",
"bd76294860804aee8ecda5e1445464506bf02ee0",
"bd76294860804aee8ecda5e1445464506bf02ee0"
] | [
"english/data_processing/lessons/code/vslide1.py",
"english/data_processing/lessons/code/sphere.py",
"english/img_processing/code/img_corr.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom math import hypot, atan2, sin, cos, pi, degrees\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef vplain(x1, y1, x2, y2):\n \"\"\" set up line equation\n vp[0] * x + vp[1] * y + vp[2] = 0\n\n x1, y1 - horizontal coordinates of the start point of the section\n x2, y2 - horizontal coordinates of the end point of the section\n returns a numpy array with coefficients of the vertical plane\n \"\"\"\n\n vp = np.zeros((3,))\n vp[0] = y1 - y2\n vp[1] = x2 - x1\n vp[2] = x1 * y2 - x2 * y1\n vp = vp / hypot(vp[0], vp[1]) # normalize\n return vp\n\ndef section(pc, x1, y1, x2, y2, tol):\n \"\"\" Select point from a point cloud near to a line\n\n pc - point cloud in a numpy array\n x1, y1 - horizontal coordinates of the start point of the section\n x2, y2 - horizontal coordinates of the end point of the section\n tol - tolerance distance from the section\n returns a numpy array with points near to the section\n \"\"\"\n pc1 = pc.copy()\n pc1[:, 2] = 1 # change to homogenous coordinates\n vp = vplain(x1, y1, x2, y2) # equation of vertical plain\n sec = pc[np.abs(np.dot(pc1, vp)) < tol] # select points close to the section\n\n return sec\n\ndef tr(e1, n1, e2, n2):\n \"\"\" set up transformation matrix for homogenous coordinates\n\n Parameters:\n e1, n1 - start point of the section line\n e2, n2 - end point of the section section line\n returns the transformation matrix\n \"\"\"\n de = e2 - e1\n dn = n2 - n1\n\n a = atan2(dn, de)\n ca = cos(a)\n sa = sin(a)\n return np.dot(np.array([[1, 0, 0], [0, 1, 0], [-e1, -n1, 1]]),\n np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1]]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 7:\n pc = np.loadtxt('lidar.txt', delimiter=',') ;# load point cloud\n x1 = 548060.0\n y1 = 5129130.0\n x2 = 549850.0\n y2 = 5129030.0\n #x1 = 549400\n #y1 = 5128900\n #x2 = 549200\n #y2 = 5129300\n tol = 1.0\n else:\n pc = np.loadtxt(sys.argv[1], delimiter=',') ;# load point cloud\n x1 = float(sys.argv[2])\n y1 = float(sys.argv[3])\n x2 = float(sys.argv[4])\n y2 = float(sys.argv[5])\n tol = float(sys.argv[6])\n # set up equation for vertical plain a * x + b * y + c = 0\n vp = vplain(x1, y1, x2, y2)\n sec = section(pc,x1,y1,x2,y2,tol) \n # transformation matrix\n trm = tr(x1, y1, x2, y2)\n if abs(np.dot(np.array([x1, y1, 1]), trm)[1]) > 1e-5 or \\\n abs(np.dot(np.array([x2, y2, 1]), trm)[1]) > 1e-5:\n print(\"tr error\")\n # make a copy of section points for homogenous transformation\n pc1 = sec.copy()\n pc1[:, 2] = 1\n pc1 = np.dot(pc1, trm) # rotate points into the section plain\n pc1[:, 2] = sec[:, 2] # copy back elevations to transformed points\n\n plt.plot(pc1[:,0], pc1[:,2], 'o')\n plt.xlabel('chainage (m)')\n plt.ylabel('elevation (m)')\n plt.axis('equal')\n plt.grid('on')\n plt.show() \n",
"import numpy as np\nfrom math import sqrt\nfrom sys import argv\n\ndef sphere(x_, y_, z_):\n \"\"\"\n calculate best fitting sphere (LSM) on points\n :param returns: x0, y0, z0, R\n \"\"\"\n n_ = x_.shape[0]\n a = np.c_[x_, y_, z_, np.full(n_, 1, 'float64')]\n b = -np.square(x_) - np.square(y_) - np.square(z_)\n res = np.linalg.lstsq(a, b, rcond=None)[0]\n return -0.5 * res[0], -0.5 * res[1], -0.5 * res[2], \\\n sqrt((res[0]**2 + res[1]**2 + res[2]**2) / 4 - res[3])\n\nif __name__ == \"__main__\":\n if len(argv) > 1:\n file_names = argv[1:]\n else:\n file_names = ['sphere1.txt']\n for file_name in file_names:\n pnts = np.genfromtxt(file_name, 'float64', delimiter=',')\n if pnts.shape[1] > 3:\n pnts = pnts[:,1:4] # skip first column (point id)\n sph = sphere(pnts[:,0], pnts[:,1], pnts[:,2])\n print(\"x0: {:.3f} y0: {:.3f} z0: {:.3f} R: {:.3f}\".format(sph[0], sph[1], sph[2], sph[3]))\n dr = np.sqrt(np.sum(np.square(pnts - sph[:3]), 1)) - sph[3] # difference in radius direction\n RMS = sqrt(np.sum(np.square(dr)) / pnts.shape[0])\n print(\"RMS: {:.3f}\".format(RMS))\n",
"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport sys\n\ndef img_correlation(img, templ):\n \"\"\" find most similar part to templ in img\n returns upper left corner of templ in image and statistic (square of differences)\n \"\"\"\n rows, cols = img.shape # image sizes\n trows, tcols = templ.shape # template sizes\n row = col = None # for best match position\n mins = trows * tcols * 255**2 # initial value statistic\n\n for i in range(rows - trows): # scan image rows\n i1 = i + trows # row for the bottom of template\n for j in range(cols - tcols): # scan image columns\n j1 = j + tcols # column for the right of template\n s = np.sum(np.square(templ - img[i:i1, j:j1])) # pixel wise scatistic\n if s < mins: # better statistic found?\n mins = s # store the actual best match\n row = i\n col = j\n return (col, row, s) # return position and statistic of best match\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(f'Usage: {sys.argv[0]} image_to_scan template_to_find')\n sys.exit()\n img = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)\n if img is None:\n print(f'Image not found or failed to read: {sys.argv[1]}')\n sys.exit()\n templ = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)\n if templ is None:\n print(f'Template not found or failed to read: {sys.argv[2]}')\n sys.exit()\n print(img_correlation(img, templ))\n"
] | [
[
"numpy.dot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
],
[
"numpy.square",
"numpy.linalg.lstsq",
"numpy.full",
"numpy.genfromtxt"
],
[
"numpy.square"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MrJohnsson77/bat-country | [
"c0d29a0b32c196ca3d4c40fbaf960432b507e8bb"
] | [
"demo_guided.py"
] | [
"# USAGE\n# python demo_guided.py --base-model $CAFFE_ROOT/models/bvlc_googlenet \\\n#\t--image initial_images/clouds.jpg \\\n#\t--guide-image initial_images/seed_images/starry_night.jpg \\\n#\t--output examples/output/seeded/clouds_and_starry_night.jpg\n\n# import the necessary packages\nfrom batcountry import BatCountry\nfrom PIL import Image\nimport numpy as np\nimport argparse\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-b\", \"--base-model\", required=True, help=\"base model path\")\nap.add_argument(\"-l\", \"--layer\", type=str, default=\"inception_4c/output\",\n\thelp=\"layer of CNN to use\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to base image\")\nap.add_argument(\"-g\", \"--guide-image\", required=True, help=\"path to guide image\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"path to output image\")\nargs = ap.parse_args()\n\n# we can't stop here...\nbc = BatCountry(args.base_model)\nfeatures = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)\nimage = bc.dream(np.float32(Image.open(args.image)), end=args.layer,\n\titer_n=20, objective_fn=BatCountry.guided_objective,\n\tobjective_features=features,)\nbc.cleanup()\n\n# write the output image to file\nresult = Image.fromarray(np.uint8(image))\nresult.save(args.output)"
] | [
[
"numpy.uint8"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |