repo_name (string, 8–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
rajgiriUW/pyUSID | [
"064dcd81d9c42f4eb4782f0a41fd437b3f56f50c"
] | [
"pyUSID/io/hdf_utils/simple.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nLower-level and simpler USID-specific HDF5 utilities that facilitate higher-level data operations\n\nCreated on Tue Nov 3 21:14:25 2015\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nimport collections\nfrom warnings import warn\nimport sys\nimport h5py\nimport numpy as np\nimport dask.array as da\nfrom sidpy.hdf.hdf_utils import get_auxiliary_datasets, link_h5_obj_as_alias, \\\n write_simple_attrs, is_editable_h5, validate_h5_objs_in_same_h5_file, \\\n get_attr\nfrom sidpy.hdf.dtype_utils import validate_dtype\nfrom sidpy.hdf import hdf_utils as hut\nfrom sidpy.base.string_utils import validate_single_string_arg, validate_list_of_strings\nfrom sidpy.base.num_utils import contains_integers\nfrom sidpy.base.string_utils import clean_string_att\n\nfrom ..anc_build_utils import build_ind_val_matrices, INDICES_DTYPE, VALUES_DTYPE\nfrom ..dimension import DimType, Dimension\nfrom .base import write_book_keeping_attrs\n\nif sys.version_info.major == 3:\n unicode = str\n\"\"\"\n__all__ = ['assign_group_index', 'check_and_link_ancillary', 'check_for_matching_attrs', 'check_for_old',\n 'check_if_main', 'copy_attributes', 'copy_main_attributes']\n\"\"\"\n\n\ndef get_all_main(parent, verbose=False):\n \"\"\"\n Simple function to recursively print the contents of an hdf5 group\n\n Parameters\n ----------\n parent : :class:`h5py.Group`\n HDF5 Group to search within\n verbose : bool, optional. Default = False\n If true, extra print statements (usually for debugging) are enabled\n\n Returns\n -------\n main_list : list of h5py.Dataset\n The datasets found in the file that meet the 'Main Data' criteria.\n\n \"\"\"\n if not isinstance(parent, (h5py.Group, h5py.File)):\n raise TypeError('parent should be a h5py.File or h5py.Group object')\n\n from ..usi_data import USIDataset\n\n main_list = list()\n\n def __check(name, obj):\n if verbose:\n print(name, obj)\n if isinstance(obj, h5py.Dataset):\n if verbose:\n print(name, 'is an HDF5 Dataset.')\n ismain = check_if_main(obj)\n if ismain:\n if verbose:\n print(name, 'is a `Main` dataset.')\n main_list.append(USIDataset(obj))\n\n if verbose:\n print('Checking the group {} for `Main` datasets.'.format(parent.name))\n parent.visititems(__check)\n\n return main_list\n\n\ndef find_dataset(h5_group, dset_name):\n \"\"\"\n Uses visit() to find all datasets with the desired name\n\n Parameters\n ----------\n h5_group : :class:`h5py.Group`\n Group to search within for the Dataset\n dset_name : str\n Name of the dataset to search for\n\n Returns\n -------\n datasets : list\n List of [Name, object] pairs corresponding to datasets that match `ds_name`.\n\n \"\"\"\n from ..usi_data import USIDataset\n\n datasets = list()\n for obj in hut.find_dataset(h5_group, dset_name):\n try:\n datasets.append(USIDataset(obj))\n except TypeError:\n datasets.append(obj)\n\n return datasets\n\n\ndef find_results_groups(h5_main, tool_name, h5_parent_group=None):\n \"\"\"\n Finds a list of all groups containing results of the process of name\n `tool_name` being applied to the dataset\n\n Parameters\n ----------\n h5_main : h5 dataset reference\n Reference to the target dataset to which the tool was applied\n tool_name : String / unicode\n Name of the tool applied to the target dataset\n h5_parent_group : h5py.Group, optional. Default = None\n Parent group under which the results group will be searched for. 
Use\n this option when the results groups are contained in different HDF5\n file compared to `h5_main`. BY default, this function will search\n within the same group that contains `h5_main`\n\n Returns\n -------\n groups : list of references to :class:`h5py.Group` objects\n groups whose name contains the tool name and the dataset name\n\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if h5_parent_group is not None:\n if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_group = h5_main.parent\n\n dset_name = h5_main.name.split('/')[-1]\n groups = []\n for key in h5_parent_group.keys():\n if dset_name in key and tool_name in key and isinstance(h5_parent_group[key], h5py.Group):\n groups.append(h5_parent_group[key])\n return groups\n\n\ndef check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):\n \"\"\"\n This function will add references to auxilliary datasets as attributes\n of an input dataset.\n If the entries in anc_refs are valid references, they will be added\n as attributes with the name taken from the corresponding entry in\n anc_names.\n If an entry in anc_refs is not a valid reference, the function will\n attempt to get the attribute with the same name from the h5_main\n dataset\n\n Parameters\n ----------\n h5_dset : HDF5 Dataset\n dataset to which the attributes will be written\n anc_names : list of str\n the attribute names to be used\n h5_main : HDF5 Dataset, optional\n dataset from which attributes will be copied if `anc_refs` is None\n anc_refs : list of HDF5 Object References, optional\n references that correspond to the strings in `anc_names`\n\n Returns\n -------\n None\n\n Notes\n -----\n Either `h5_main` or `anc_refs` MUST be provided and `anc_refs` has the\n higher priority if both are present.\n\n \"\"\"\n if not isinstance(h5_dset, h5py.Dataset):\n raise TypeError('h5_dset should be a h5py.Dataset object')\n\n if isinstance(anc_names, (str, unicode)):\n anc_names = [anc_names]\n if isinstance(anc_refs, (h5py.Dataset, h5py.Group, h5py.File,\n h5py.Reference)):\n anc_refs = [anc_refs]\n\n if not isinstance(anc_names, (list, tuple)):\n raise TypeError('anc_names should be a list / tuple')\n if h5_main is not None:\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n validate_h5_objs_in_same_h5_file(h5_dset, h5_main)\n if anc_refs is not None:\n if not isinstance(anc_refs, (list, tuple)):\n raise TypeError('anc_refs should be a list / tuple')\n\n if anc_refs is None and h5_main is None:\n raise ValueError('No objected provided to link as ancillary')\n\n def __check_and_link_single(h5_obj_ref, target_ref_name):\n if isinstance(h5_obj_ref, h5py.Reference):\n # TODO: Same HDF5 file?\n h5_dset.attrs[target_ref_name] = h5_obj_ref\n elif isinstance(h5_obj_ref, (h5py.Dataset, h5py.Group, h5py.File)):\n validate_h5_objs_in_same_h5_file(h5_obj_ref, h5_dset)\n h5_dset.attrs[target_ref_name] = h5_obj_ref.ref\n elif h5_main is not None:\n h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])\n if len(h5_anc) == 1:\n link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)\n else:\n warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))\n warn(warnstring)\n\n if bool(np.iterable(anc_refs) and not 
isinstance(anc_refs, h5py.Dataset)):\n \"\"\"\n anc_refs can be iterated over\n \"\"\"\n for ref_name, h5_ref in zip(anc_names, anc_refs):\n __check_and_link_single(h5_ref, ref_name)\n elif anc_refs is not None:\n \"\"\"\n anc_refs is just a single value\n \"\"\"\n __check_and_link_single(anc_refs, anc_names)\n elif isinstance(anc_names, str) or isinstance(anc_names, unicode):\n \"\"\"\n Single name provided\n \"\"\"\n __check_and_link_single(None, anc_names)\n else:\n \"\"\"\n Iterable of names provided\n \"\"\"\n for name in anc_names:\n __check_and_link_single(None, name)\n\n h5_dset.file.flush()\n\n\ndef validate_main_dset(h5_main, must_be_h5):\n \"\"\"\n Checks to make sure that the provided object is a USID main dataset\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n h5_main : h5py.Dataset or numpy.ndarray or Dask.array.core.array\n object that represents the USID main data\n must_be_h5 : bool\n Set to True if the expecting an h5py.Dataset object.\n Set to False if expecting a numpy.ndarray or Dask.array.core.array\n\n Returns\n -------\n\n \"\"\"\n # Check that h5_main is a dataset\n if must_be_h5:\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))\n else:\n if not isinstance(h5_main, (np.ndarray, da.core.Array)):\n raise TypeError('raw_data should either be a np.ndarray or a da.core.Array')\n\n # Check dimensionality\n if len(h5_main.shape) != 2:\n raise ValueError('Main data is not 2D. Provided object has shape: {}'.format(h5_main.shape))\n\n\ndef validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):\n \"\"\"\n Checks ancillary HDF5 datasets against shape of a main dataset.\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n h5_inds : h5py.Dataset\n HDF5 dataset corresponding to the ancillary Indices dataset\n h5_vals : h5py.Dataset\n HDF5 dataset corresponding to the ancillary Values dataset\n main_shape : array-like\n Shape of the main dataset expressed as a tuple or similar\n is_spectroscopic : bool, Optional. Default = True\n set to True if ``dims`` correspond to Spectroscopic Dimensions.\n False otherwise.\n \"\"\"\n if not isinstance(h5_inds, h5py.Dataset):\n raise TypeError('h5_inds must be a h5py.Dataset object')\n if not isinstance(h5_vals, h5py.Dataset):\n raise TypeError('h5_vals must be a h5py.Dataset object')\n if h5_inds.shape != h5_vals.shape:\n raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '\n 'shape'.format(h5_inds.shape, h5_vals.shape))\n if isinstance(main_shape, (list, tuple)):\n if not contains_integers(main_shape, min_val=1) or \\\n len(main_shape) != 2:\n raise ValueError(\"'main_shape' must be a valid HDF5 dataset shape\")\n else:\n raise TypeError('main_shape should be of the following types:'\n 'h5py.Dataset, tuple, or list. {} provided'\n ''.format(type(main_shape)))\n\n if h5_inds.shape[is_spectroscopic] != main_shape[is_spectroscopic]:\n raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '\n 'should be equal'.format(int(is_spectroscopic),\n h5_inds.shape, main_shape))\n\n\ndef validate_dims_against_main(main_shape, dims, is_spectroscopic=True):\n \"\"\"\n Checks Dimension objects against a given shape for main datasets.\n Errors in parameters will result in Exceptions\n\n Parameters\n ----------\n main_shape : array-like\n Tuple or list with the shape of the main data\n dims : iterable\n List of Dimension objects\n is_spectroscopic : bool, Optional. 
Default = True\n set to True if ``dims`` correspond to Spectroscopic Dimensions.\n False otherwise.\n \"\"\"\n if not isinstance(main_shape, (list, tuple)):\n raise TypeError('main_shape should be a list or tuple. Provided object'\n ' was of type: {}'.format(type(main_shape)))\n if len(main_shape) != 2:\n raise ValueError('\"main_shape\" should be of length 2')\n contains_integers(main_shape, min_val=1)\n\n if isinstance(dims, Dimension):\n dims = [dims]\n elif not isinstance(dims, (list, tuple)):\n raise TypeError('\"dims\" must be a list or tuple of usid.Dimension '\n 'objects. Provided object was of type: {}'\n ''.format(type(dims)))\n if not all([isinstance(obj, Dimension) for obj in dims]):\n raise TypeError('One or more objects in \"dims\" was not usid.Dimension')\n\n if is_spectroscopic:\n main_dim = 1\n dim_category = 'Spectroscopic'\n else:\n main_dim = 0\n dim_category = 'Position'\n\n # TODO: This is where the dimension type will need to be taken into account\n lhs = main_shape[main_dim]\n rhs = np.product([len(x.values) for x in dims])\n if lhs != rhs:\n raise ValueError(dim_category +\n ' dimensions in main data of size: {} do not match '\n 'with product of values in provided Dimension objects'\n ': {}'.format(lhs, rhs))\n\n\ndef check_if_main(h5_main, verbose=False):\n \"\"\"\n Checks the input dataset to see if it has all the necessary\n features to be considered a Main dataset. This means it is\n 2D and has the following attributes:\n\n * Position_Indices\n * Position_Values\n * Spectroscopic_Indices\n * Spectroscopic_Values\n * quantity\n * units\n\n In addition, the shapes of the ancillary matrices should match with that of\n h5_main\n\n Parameters\n ----------\n h5_main : HDF5 Dataset\n Dataset of interest\n verbose : Boolean (Optional. Default = False)\n Whether or not to print statements\n\n Returns\n -------\n success : Boolean\n True if all tests pass\n\n \"\"\"\n try:\n validate_main_dset(h5_main, True)\n except Exception as exep:\n if verbose:\n print(exep)\n return False\n\n h5_name = h5_main.name.split('/')[-1]\n\n success = True\n\n # Check for Datasets\n dset_names = ['Position_Indices', 'Position_Values',\n 'Spectroscopic_Indices', 'Spectroscopic_Values']\n for name in dset_names:\n try:\n h5_anc_dset = h5_main.file[h5_main.attrs[name]]\n success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])\n except:\n if verbose:\n print('{} not found as an attribute of {}.'.format(name, h5_name))\n return False\n\n attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])\n if not attr_success:\n if verbose:\n print('{} does not have the mandatory \"quantity\" and \"units\" attributes'.format(h5_main.name))\n return False\n\n for attr_name in ['quantity', 'units']:\n val = get_attr(h5_main, attr_name)\n if not isinstance(val, (str, unicode)):\n if verbose:\n print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))\n return False\n\n # Blindly linking four datasets is still not sufficient. 
The sizes need to match:\n anc_shape_match = list()\n h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]\n h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]\n anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))\n for anc_dset in [h5_pos_vals, h5_pos_inds]:\n anc_shape_match.append(np.all(h5_main.shape[0] == anc_dset.shape[0]))\n if not np.all(anc_shape_match):\n if verbose:\n print('The shapes of the Position indices:{}, values:{} datasets did not match with that of the main '\n 'dataset: {}'.format(h5_pos_inds.shape, h5_pos_vals.shape, h5_main.shape))\n return False\n\n anc_shape_match = list()\n h5_spec_inds = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]\n h5_spec_vals = h5_main.file[h5_main.attrs['Spectroscopic_Values']]\n anc_shape_match.append(np.all(h5_spec_inds.shape == h5_spec_vals.shape))\n for anc_dset in [h5_spec_inds, h5_spec_vals]:\n anc_shape_match.append(np.all(h5_main.shape[1] == anc_dset.shape[1]))\n if not np.all(anc_shape_match):\n if verbose:\n print('The shapes of the Spectroscopic indices:{}, values:{} datasets did not match with that of the main '\n 'dataset: {}'.format(h5_spec_inds.shape, h5_spec_vals.shape, h5_main.shape))\n return False\n\n try:\n validate_anc_dset_attrs(h5_pos_inds, h5_pos_vals, is_spec=False)\n except ValueError:\n if verbose:\n print('Attributes of Position datasets did not match')\n return False\n try:\n validate_anc_dset_attrs(h5_spec_inds, h5_spec_vals, is_spec=True)\n except ValueError:\n if verbose:\n print('Attributes of Spectroscopic datasets did not match')\n return False\n\n return success\n\n\ndef validate_anc_dset_attrs(h5_inds, h5_vals, is_spec=True):\n \"\"\"\n Validates the attributes of a pair of indices and values datasets.\n Throws ValueErrors if any rule is not satisfied\n\n Parameters\n ----------\n h5_inds : h5py.Dataset\n Indices dataset\n h5_vals : h5py.Dataset\n Values Dataset\n is_spec : bool, optional. Default = True\n Set to True if spectroscopic. 
Else - Position datasets\n \"\"\"\n def lists_match(left, right):\n if len(left) != len(right):\n return False\n return all([l_it == r_it for l_it, r_it in zip(left, right)])\n\n v_names = get_attr(h5_vals, 'labels')\n v_units = get_attr(h5_vals, 'units')\n i_names = get_attr(h5_inds, 'labels')\n i_units = get_attr(h5_inds, 'units')\n\n for names, units, dset_type in zip([v_names, i_names], [v_units, i_units],\n ['Values', 'Indices']):\n if len(names) != len(units):\n raise ValueError('Length of labels: {} and units: {} for the {} '\n 'dataset do not match'\n ''.format(len(names), len(units), dset_type))\n for i_item, v_item, prop in zip([i_names, i_units], [v_names, v_units],\n ['labels', 'units']):\n if not lists_match(i_item, v_item):\n raise ValueError('The \"{}\" values of the Indices: {} and Values: '\n '{} datasets do not match'.format(prop, i_item,\n v_item))\n\n # Now check the rows / cols nums against size of any attr:\n if h5_inds.shape != h5_vals.shape:\n raise ValueError('Shape of Indices: {} and Values: {} datasets do '\n 'not match'.format(h5_inds.shape, h5_vals.shape))\n dim_ind = 1\n if is_spec:\n dim_ind = 0\n if h5_inds.shape[dim_ind] != len(v_names):\n raise ValueError('Length of mandatory attributes: {} did not match '\n 'dimension: {} of the ancillary dataset of shape: {}'\n ''.format(len(v_names), dim_ind, h5_inds.shape))\n\ndef link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals):\n \"\"\"\n Links the object references to the four position and spectroscopic datasets as\n attributes of `h5_main`\n\n Parameters\n ----------\n h5_main : h5py.Dataset\n 2D Dataset which will have the references added as attributes\n h5_pos_inds : h5py.Dataset\n Dataset that will be linked with the name 'Position_Indices'\n h5_pos_vals : h5py.Dataset\n Dataset that will be linked with the name 'Position_Values'\n h5_spec_inds : h5py.Dataset\n Dataset that will be linked with the name 'Spectroscopic_Indices'\n h5_spec_vals : h5py.Dataset\n Dataset that will be linked with the name 'Spectroscopic_Values'\n\n Returns\n -------\n pyUSID.USIDataset\n USIDataset version of h5_main now that it is a USID Main dataset\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n\n validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, h5_main.shape,\n is_spectroscopic=False)\n validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, h5_main.shape,\n is_spectroscopic=True)\n\n link_h5_obj_as_alias(h5_main, h5_pos_inds, 'Position_Indices')\n link_h5_obj_as_alias(h5_main, h5_pos_vals, 'Position_Values')\n link_h5_obj_as_alias(h5_main, h5_spec_inds, 'Spectroscopic_Indices')\n link_h5_obj_as_alias(h5_main, h5_spec_vals, 'Spectroscopic_Values')\n\n from ..usi_data import USIDataset\n try:\n # If all other conditions are satisfied\n return USIDataset(h5_main)\n except TypeError:\n # If some other conditions are yet to be satisfied\n return h5_main\n\n\ndef check_for_old(h5_base, tool_name, new_parms=None, target_dset=None,\n h5_parent_goup=None, verbose=False):\n \"\"\"\n Check to see if the results of a tool already exist and if they\n were performed with the same parameters.\n\n Parameters\n ----------\n h5_base : h5py.Dataset object\n Dataset on which the tool is being applied to\n tool_name : str\n process or analysis name\n new_parms : dict, optional\n Parameters with which this tool will be performed.\n target_dset : str, optional, default = None\n Name of the dataset whose attributes will be compared against new_parms.\n Default - 
checking against the group\n h5_parent_goup : h5py.Group, optional. Default = None\n The group to search under. Use this option when `h5_base` and\n the potential results groups (within `h5_parent_goup` are located\n in different HDF5 files. Default - search within h5_base.parent\n verbose : bool, optional, default = False\n Whether or not to print debugging statements\n\n Returns\n -------\n group : list\n List of all :class:`h5py.Group` objects with parameters matching those in `new_parms`\n \"\"\"\n if not isinstance(h5_base, h5py.Dataset):\n raise TypeError('h5_base should be a h5py.Dataset object')\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if h5_parent_goup is not None:\n if not isinstance(h5_parent_goup, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_goup = h5_base.parent\n\n if new_parms is None:\n new_parms = dict()\n else:\n if not isinstance(new_parms, dict):\n raise TypeError('new_parms should be a dict')\n if target_dset is not None:\n target_dset = validate_single_string_arg(target_dset, 'target_dset')\n\n matching_groups = []\n groups = find_results_groups(h5_base, tool_name,\n h5_parent_group=h5_parent_goup)\n\n for group in groups:\n if verbose:\n print('Looking at group - {}'.format(group.name.split('/')[-1]))\n\n h5_obj = group\n if target_dset is not None:\n if target_dset in group.keys():\n h5_obj = group[target_dset]\n else:\n if verbose:\n print('{} did not contain the target dataset: {}'.format(group.name.split('/')[-1],\n target_dset))\n continue\n\n if check_for_matching_attrs(h5_obj, new_parms=new_parms, verbose=verbose):\n # return group\n matching_groups.append(group)\n\n return matching_groups\n\n\ndef get_source_dataset(h5_group):\n \"\"\"\n Find the name of the source dataset used to create the input `h5_group`,\n so long as the source dataset is in the same HDF5 file\n\n Parameters\n ----------\n h5_group : :class:`h5py.Group`\n Child group whose source dataset will be returned\n\n Returns\n -------\n h5_source : USIDataset object\n Main dataset from which this group was generated\n\n \"\"\"\n if not isinstance(h5_group, h5py.Group):\n raise TypeError('h5_group should be a h5py.Group object')\n\n h5_parent_group = h5_group.parent\n group_name = h5_group.name.split('/')[-1]\n # What if the group name was not formatted according to Pycroscopy rules?\n name_split = group_name.split('-')\n if len(name_split) != 2:\n raise ValueError(\"The provided group's name could not be split by '-' as expected in \"\n \"SourceDataset-ProcessName_000\")\n h5_source = h5_parent_group[name_split[0]]\n\n if not isinstance(h5_source, h5py.Dataset):\n raise ValueError('Source object was not a dataset!')\n\n from ..usi_data import USIDataset\n\n return USIDataset(h5_source)\n\n\ndef assign_group_index(h5_parent_group, base_name, verbose=False):\n \"\"\"\n Searches the parent h5 group to find the next available index for the group\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` object\n Parent group under which the new group object will be created\n base_name : str or unicode\n Base name of the new group without index\n verbose : bool, optional. 
Default=False\n Whether or not to print debugging statements\n\n Returns\n -------\n base_name : str or unicode\n Base name of the new group with the next available index as a suffix\n\n \"\"\"\n if not isinstance(h5_parent_group, h5py.Group):\n raise TypeError('h5_parent_group should be a h5py.Group object')\n base_name = validate_single_string_arg(base_name, 'base_name')\n\n if len(base_name) == 0:\n raise ValueError('base_name should not be an empty string')\n\n if not base_name.endswith('_'):\n base_name += '_'\n\n temp = [key for key in h5_parent_group.keys()]\n if verbose:\n print('Looking for group names starting with {} in parent containing items: '\n '{}'.format(base_name, temp))\n previous_indices = []\n for item_name in temp:\n if isinstance(h5_parent_group[item_name], h5py.Group) and item_name.startswith(base_name):\n previous_indices.append(int(item_name.replace(base_name, '')))\n previous_indices = np.sort(previous_indices)\n if verbose:\n print('indices of existing groups with the same prefix: {}'.format(previous_indices))\n if len(previous_indices) == 0:\n index = 0\n else:\n index = previous_indices[-1] + 1\n return base_name + '{:03d}'.format(index)\n\n\ndef create_indexed_group(h5_parent_group, base_name):\n \"\"\"\n Creates a group with an indexed name (eg - 'Measurement_012') under h5_parent_group using the provided base_name\n as a prefix for the group's name\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`\n File or group within which the new group will be created\n base_name : str or unicode\n Prefix for the group name. This need not end with a '_'. It will be added automatically\n\n Returns\n -------\n\n \"\"\"\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should be a h5py.File or Group object')\n base_name = validate_single_string_arg(base_name, 'base_name')\n\n group_name = assign_group_index(h5_parent_group, base_name)\n h5_new_group = h5_parent_group.create_group(group_name)\n write_book_keeping_attrs(h5_new_group)\n return h5_new_group\n\n\ndef create_results_group(h5_main, tool_name, h5_parent_group=None):\n \"\"\"\n Creates a h5py.Group object autoindexed and named as 'DatasetName-ToolName_00x'\n\n Parameters\n ----------\n h5_main : h5py.Dataset object\n Reference to the dataset based on which the process / analysis is being performed\n tool_name : string / unicode\n Name of the Process / Analysis applied to h5_main\n h5_parent_group : h5py.Group, optional. Default = None\n Parent group under which the results group will be created. Use this\n option to write results into a new HDF5 file. By default, results will\n be written into the same group containing `h5_main`\n\n Returns\n -------\n h5_group : :class:`h5py.Group`\n Results group which can now house the results datasets\n\n \"\"\"\n if not isinstance(h5_main, h5py.Dataset):\n raise TypeError('h5_main should be a h5py.Dataset object')\n if h5_parent_group is not None:\n if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):\n raise TypeError(\"'h5_parent_group' should either be a h5py.File \"\n \"or h5py.Group object\")\n else:\n h5_parent_group = h5_main.parent\n\n tool_name = validate_single_string_arg(tool_name, 'tool_name')\n\n if '-' in tool_name:\n warn('tool_name should not contain the \"-\" character. 
Reformatted name from:{} to '\n '{}'.format(tool_name, tool_name.replace('-', '_')))\n tool_name = tool_name.replace('-', '_')\n\n group_name = h5_main.name.split('/')[-1] + '-' + tool_name + '_'\n group_name = assign_group_index(h5_parent_group, group_name)\n\n h5_group = h5_parent_group.create_group(group_name)\n\n write_book_keeping_attrs(h5_group)\n\n # Also add some basic attributes like source and tool name. This will allow relaxation of nomenclature restrictions:\n # this are NOT being used right now but will be in the subsequent versions of pyUSID\n write_simple_attrs(h5_group, {'tool': tool_name, 'num_source_dsets': 1})\n # in this case, there is only one source\n if h5_parent_group.file == h5_main.file:\n for dset_ind, dset in enumerate([h5_main]):\n h5_group.attrs['source_' + '{:03d}'.format(dset_ind)] = dset.ref\n\n return h5_group\n\n\ndef copy_main_attributes(h5_main, h5_new):\n \"\"\"\n Copies the units and quantity name from one dataset to another\n\n Parameters\n ----------\n h5_main : h5py.Dataset\n Dataset containing the target attributes\n h5_new : h5py.Dataset\n Dataset to which the target attributes are to be copied\n\n \"\"\"\n for param, param_name in zip([h5_main, h5_new], ['h5_main', 'h5_new']):\n if not isinstance(param, h5py.Dataset):\n raise TypeError(param_name + ' should be a h5py.Dataset object')\n\n for att_name in ['quantity', 'units']:\n if att_name not in h5_main.attrs:\n raise KeyError('Attribute: {} does not exist in {}'.format(att_name, h5_main))\n val = get_attr(h5_main, att_name)\n h5_new.attrs[att_name] = clean_string_att(val)\n\n\ndef create_empty_dataset(source_dset, dtype, dset_name, h5_group=None,\n new_attrs=None, skip_refs=False):\n \"\"\"\n Creates an empty dataset in the h5 file based on the provided dataset in\n the same or specified group\n\n Parameters\n ----------\n source_dset : h5py.Dataset object\n Source object that provides information on the group and shape of the dataset\n dtype : dtype\n Data type of the fit / guess datasets\n dset_name : String / Unicode\n Name of the dataset\n h5_group : :class:`h5py.Group`, optional. Default = None\n Group within which this dataset will be created\n new_attrs : dictionary (Optional)\n Any new attributes that need to be written to the dataset\n skip_refs : boolean, optional\n Should ObjectReferences be skipped when copying attributes from the\n `source_dset`\n\n Returns\n -------\n h5_new_dset : h5py.Dataset object\n Newly created dataset\n\n \"\"\"\n if not isinstance(source_dset, h5py.Dataset):\n raise TypeError('source_deset should be a h5py.Dataset object')\n _ = validate_dtype(dtype)\n if new_attrs is not None:\n if not isinstance(new_attrs, dict):\n raise TypeError('new_attrs should be a dictionary')\n else:\n new_attrs = dict()\n\n if h5_group is None:\n h5_group = source_dset.parent\n else:\n if not isinstance(h5_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_group should be a h5py.Group or h5py.File object')\n\n if source_dset.file != h5_group.file and not skip_refs:\n # Cannot carry over references\n warn('H5 object references will not be copied over since {} is in '\n 'a different HDF5 file as {}'.format(h5_group, source_dset))\n skip_refs = True\n\n dset_name = validate_single_string_arg(dset_name, 'dset_name')\n if '-' in dset_name:\n warn('dset_name should not contain the \"-\" character. 
Reformatted name from:{} to '\n '{}'.format(dset_name, dset_name.replace('-', '_')))\n dset_name = dset_name.replace('-', '_')\n\n kwargs = {'shape': source_dset.shape, 'dtype': dtype, 'compression': source_dset.compression,\n 'chunks': source_dset.chunks}\n\n if source_dset.file.driver == 'mpio':\n if kwargs.pop('compression', None) is not None:\n warn('This HDF5 file has been opened wth the \"mpio\" communicator. '\n 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')\n\n if dset_name in h5_group.keys():\n if isinstance(h5_group[dset_name], h5py.Dataset):\n warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))\n h5_new_dset = h5_group[dset_name]\n # Make sure it has the correct shape and dtype\n if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):\n warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '\n 'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,\n source_dset.shape,\n h5_new_dset.dtype,\n dtype))\n del h5_new_dset, h5_group[dset_name]\n h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)\n else:\n raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),\n h5_group.name))\n\n else:\n h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)\n\n # This should link the ancillary datasets correctly\n h5_new_dset = hut.copy_attributes(source_dset, h5_new_dset,\n skip_refs=skip_refs)\n if source_dset.file != h5_group.file:\n hut.copy_linked_objects(source_dset, h5_new_dset)\n h5_new_dset.attrs.update(new_attrs)\n\n if check_if_main(h5_new_dset):\n from ..usi_data import USIDataset\n\n h5_new_dset = USIDataset(h5_new_dset)\n # update book keeping attributes\n write_book_keeping_attrs(h5_new_dset)\n\n return h5_new_dset\n\n\ndef check_for_matching_attrs(h5_obj, new_parms=None, verbose=False):\n \"\"\"\n Compares attributes in the given H5 object against those in the provided dictionary and returns True if\n the parameters match, and False otherwise\n\n Parameters\n ----------\n h5_obj : h5py object (Dataset or :class:`h5py.Group`)\n Object whose attributes will be compared against new_parms\n new_parms : dict, optional. default = empty dictionary\n Parameters to compare against the attributes present in h5_obj\n verbose : bool, optional, default = False\n Whether or not to print debugging statements\n\n Returns\n -------\n tests: bool\n Whether or not all paramters in new_parms matched with those in h5_obj's attributes\n\n \"\"\"\n if not isinstance(h5_obj, (h5py.Dataset, h5py.Group, h5py.File)):\n raise TypeError('h5_obj should be a h5py.Dataset, h5py.Group, or h5py.File object')\n if new_parms is None:\n new_parms = dict()\n else:\n if not isinstance(new_parms, dict):\n raise TypeError('new_parms should be a dictionary')\n\n tests = []\n for key in new_parms.keys():\n\n if verbose:\n print('Looking for new attribute named: {}'.format(key))\n\n # HDF5 cannot store None as an attribute anyway. 
ignore\n if new_parms[key] is None:\n continue\n\n try:\n old_value = get_attr(h5_obj, key)\n except KeyError:\n # if parameter was not found assume that something has changed\n if verbose:\n print('New parm: {} \\t- new parm not in group *****'.format(key))\n tests.append(False)\n break\n\n if isinstance(old_value, np.ndarray):\n if not isinstance(new_parms[key], collections.Iterable):\n if verbose:\n print('New parm: {} \\t- new parm not iterable unlike old parm *****'.format(key))\n tests.append(False)\n break\n new_array = np.array(new_parms[key])\n if old_value.size != new_array.size:\n if verbose:\n print('New parm: {} \\t- are of different sizes ****'.format(key))\n tests.append(False)\n else:\n try:\n answer = np.allclose(old_value, new_array)\n except TypeError:\n # comes here when comparing string arrays\n # Not sure of a better way\n answer = []\n for old_val, new_val in zip(old_value, new_array):\n answer.append(old_val == new_val)\n answer = np.all(answer)\n if verbose:\n print('New parm: {} \\t- match: {}'.format(key, answer))\n tests.append(answer)\n else:\n \"\"\"if isinstance(new_parms[key], collections.Iterable):\n if verbose:\n print('New parm: {} \\t- new parm is iterable unlike old parm *****'.format(key))\n tests.append(False)\n break\"\"\"\n answer = np.all(new_parms[key] == old_value)\n if verbose:\n print('New parm: {} \\t- match: {}'.format(key, answer))\n tests.append(answer)\n if verbose:\n print('')\n\n return all(tests)\n\n\ndef write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=False, base_name=None,\n slow_to_fast=False):\n \"\"\"\n Creates h5py.Datasets for the position OR spectroscopic indices and values of the data.\n Remember that the contents of the dataset can be changed if need be after the creation of the datasets.\n For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific\n dimension in the Spectroscopic_Values dataset can be manually overwritten.\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`\n Group under which the indices and values datasets will be created\n dimensions : Dimension or array-like of Dimension objects\n Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values\n datasets\n is_spectral : bool, optional. default = True\n Spectroscopic (True) or Position (False)\n verbose : Boolean, optional\n Whether or not to print statements for debugging purposes\n base_name : str or unicode, optional\n Prefix for the datasets. Default: 'Position' when is_spectral is False, 'Spectroscopic' otherwise\n slow_to_fast : bool, Optional. 
Default=False\n Set to True if the dimensions are arranged from slowest varying to fastest varying.\n Set to False otherwise.\n\n Returns\n -------\n h5_spec_inds : h5py.Dataset\n Dataset containing the position indices\n h5_spec_vals : h5py.Dataset\n Dataset containing the value at each position\n\n Notes\n -----\n `steps`, `initial_values`, `labels`, and 'units' must be the same length as\n `dimensions` when they are specified.\n\n Dimensions should be in the order from fastest varying to slowest.\n\n \"\"\"\n if isinstance(dimensions, Dimension):\n dimensions = [dimensions]\n if not isinstance(dimensions, (list, np.ndarray, tuple)):\n raise TypeError('dimensions should be array-like ')\n if not np.all([isinstance(x, Dimension) for x in dimensions]):\n raise TypeError('dimensions should be a sequence of Dimension objects')\n\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should be a h5py.File or Group object')\n if not is_editable_h5(h5_parent_group):\n raise ValueError('The provided h5 object is not valid / open')\n\n if base_name is not None:\n base_name = validate_single_string_arg(base_name, 'base_name')\n if not base_name.endswith('_'):\n base_name += '_'\n else:\n base_name = 'Position_'\n if is_spectral:\n base_name = 'Spectroscopic_'\n\n if not slow_to_fast:\n warn('In the future write_ind_val_dsets will default to requiring dimensions to be arranged from slowest to fastest varying')\n\n # check if the datasets already exist. If they do, there's no point in going any further\n for sub_name in ['Indices', 'Values']:\n if base_name + sub_name in h5_parent_group.keys():\n raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,\n h5_parent_group.name))\n modes = [dim.mode for dim in dimensions]\n sing_mode = np.unique(modes)\n\n if sing_mode.size > 1:\n raise NotImplementedError('Cannot yet work on combinations of modes for Dimensions. 
Consider doing manually')\n\n sing_mode = sing_mode[0]\n\n if sing_mode == DimType.DEFAULT:\n if slow_to_fast:\n # Ensure that the dimensions are arranged from fast to slow instead\n dimensions = dimensions[::-1]\n indices, values = build_ind_val_matrices([dim.values for dim in dimensions],\n is_spectral=is_spectral)\n\n # At this point, dimensions and unit values are arranged from fastest to slowest\n # We want dimensions to be arranged from slowest to fastest:\n rev_func = np.flipud if is_spectral else np.fliplr\n dimensions = dimensions[::-1]\n indices = rev_func(indices)\n values = rev_func(values)\n\n elif sing_mode == DimType.INCOMPLETE:\n lengths = np.unique([len(dim.values) for dim in dimensions])\n if len(lengths) > 1:\n raise ValueError('Values for dimensions not of same length')\n single_dim = np.arange(lengths[0], dtype=INDICES_DTYPE)\n indices = np.tile(single_dim, (2, 1)).T\n values = np.dstack(tuple([dim.values for dim in dimensions])).squeeze()\n\n if is_spectral:\n indices = indices.T\n values = values.T\n else:\n raise NotImplementedError('Cannot yet work on Dependent dimensions')\n\n if verbose:\n print('Indices:')\n print(indices)\n print('Values:')\n print(values)\n\n # Create the Datasets for both Indices and Values\n h5_indices = h5_parent_group.create_dataset(base_name + 'Indices', data=INDICES_DTYPE(indices), dtype=INDICES_DTYPE)\n h5_values = h5_parent_group.create_dataset(base_name + 'Values', data=VALUES_DTYPE(values), dtype=VALUES_DTYPE)\n\n for h5_dset in [h5_indices, h5_values]:\n write_simple_attrs(h5_dset, {'units': [x.units for x in dimensions], 'labels': [x.name for x in dimensions],\n 'type': [dim.mode.value for dim in dimensions]})\n\n warn('pyUSID.io.hdf_utils.simple.write_ind_val_dsets no longer creates'\n 'region references for each dimension. Please use '\n 'pyUSID.io.reg_ref.write_region_references to manually create region '\n 'references')\n\n return h5_indices, h5_values\n\n\ndef write_reduced_anc_dsets(h5_parent_group, h5_inds, h5_vals, dim_name, basename=None, is_spec=None,\n verbose=False):\n \"\"\"\n Creates new Ancillary Indices and Values datasets from the input datasets by dropping the specified dimensions\n\n Parameters\n ----------\n h5_parent_group : :class:`h5py.Group` or h5py.File\n Group under which the indices and values datasets will be created\n h5_inds : HDF5 Dataset\n Spectroscopic or Positions indices dataset\n h5_vals : HDF5 Dataset\n Spectroscopic or Positions values dataset\n dim_name : str or unicode or list of strings\n Names of the dimension(s) to remove\n basename : str or unicode, Optional\n String to which '_Indices' and '_Values' will be appended to get the names of the new datasets.\n Default = 'Position' or 'Spectroscopic'\n is_spec : bool, optional\n Whether or not the provided ancillary datasets are position or spectroscopic\n The user is recommended to supply this parameter whenever it is known or possible.\n By default, this function will attempt to recognize the answer based on the shape of the datasets.\n verbose : bool, optional. Default = False\n Whether or not to print debugging print statements\n\n Returns\n -------\n h5_inds_new : h5py.Dataset\n Reduced indices dataset\n h5_vals_new : h5py.Dataset\n Reduces values dataset\n\n \"\"\"\n if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):\n raise TypeError('h5_parent_group should either be a h5py. 
Group or File object')\n\n for param, param_name in zip([h5_inds, h5_vals], ['h5_inds', 'h5_vals']):\n if not isinstance(param, h5py.Dataset):\n raise TypeError(param_name + ' should be a h5py.Dataset object')\n if dim_name is not None:\n dim_name = validate_list_of_strings(dim_name, 'dim_name')\n\n all_dim_names = list(get_attr(h5_inds, 'labels'))\n for item in dim_name:\n if item not in all_dim_names:\n raise KeyError('Requested dimension: {} not in the list of labels: {}'.format(item, all_dim_names))\n\n ind_mat = h5_inds[()]\n val_mat = h5_vals[()]\n\n if is_spec is None:\n # Attempt to recognize the type automatically\n is_spec = False\n if ind_mat.shape[0] == ind_mat.shape[1]:\n raise ValueError('Unable automatically guess whether the provided datasets are position or '\n 'spectroscopic. Please explicitely specify via the \"is_spec\" boolean kwarg')\n if ind_mat.shape[0] < ind_mat.shape[1]:\n is_spec = True\n else:\n if not isinstance(is_spec, bool):\n raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))\n\n if basename is not None:\n basename = validate_single_string_arg(basename, 'basename')\n if basename.endswith('_'):\n basename = basename[:-1]\n else:\n if is_spec:\n basename = 'Spectroscopic'\n else:\n basename = 'Position'\n\n for sub_name in ['_Indices', '_Values']:\n if basename + sub_name in h5_parent_group.keys():\n raise KeyError('Dataset: {} already exists in provided group: {}'.format(basename + sub_name,\n h5_parent_group.name))\n\n if set(dim_name) != set(all_dim_names):\n # At least one dimension will remain\n\n if verbose:\n print('All Dimensions: {}. Dimensions to be removed: {}'.format(all_dim_names, dim_name))\n\n if not is_spec:\n # Convert to spectral shape\n ind_mat = np.transpose(ind_mat)\n val_mat = np.transpose(val_mat)\n\n # For all dimensions, find where the index = 0\n # basically, we are indexing all dimensions to 0\n first_indices = []\n keep_dim = np.ones(len(all_dim_names), dtype=bool)\n for cur_dim in dim_name:\n dim_ind = all_dim_names.index(cur_dim)\n keep_dim[dim_ind] = False\n # check equality against the minimum value instead of 0 to account for cases when a dimension does not start\n # from 0 (already been sliced) - think of multi-dimensional slicing!\n first_indices.append(ind_mat[dim_ind] == np.min(ind_mat[dim_ind]))\n first_indices = np.vstack(first_indices)\n\n if verbose:\n print('Raw first_indices:')\n print(first_indices)\n print('Dimensions to keep: {}'.format(keep_dim))\n\n step_starts = np.all(first_indices, axis=0)\n\n if verbose:\n print('Columns in dataset to keep:')\n print(step_starts)\n\n '''\n Extract all rows that we want to keep from input indices and values\n '''\n # TODO: handle TypeError: Indexing elements must be in increasing order\n ind_mat = ind_mat[keep_dim, :][:, step_starts]\n val_mat = val_mat[keep_dim, :][:, step_starts]\n\n if not is_spec:\n # Convert back to position shape\n ind_mat = np.transpose(ind_mat)\n val_mat = np.transpose(val_mat)\n\n '''\n Create new Datasets to hold the data\n Name them based on basename\n '''\n h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=ind_mat, dtype=h5_inds.dtype)\n h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=val_mat, dtype=h5_vals.dtype)\n # Extracting the labels from the original spectroscopic data sets\n labels = h5_inds.attrs['labels'][keep_dim]\n # Creating the dimension slices for the new spectroscopic data sets\n\n # Adding the labels and units to the new spectroscopic 
data sets\n for dset in [h5_inds_new, h5_vals_new]:\n write_simple_attrs(dset, {'labels': labels, 'units': h5_inds.attrs['units'][keep_dim]})\n\n else:\n # Remove all dimensions:\n h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=np.array([[0]]), dtype=INDICES_DTYPE)\n h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=np.array([[0]]), dtype=VALUES_DTYPE)\n\n for dset in [h5_inds_new, h5_vals_new]:\n write_simple_attrs(dset, {'labels': ['Single_Step'], 'units': ['a. u.']})\n\n return h5_inds_new, h5_vals_new\n"
] | [
[
"numpy.vstack",
"numpy.tile",
"numpy.allclose",
"numpy.transpose",
"numpy.iterable",
"numpy.arange",
"numpy.all",
"numpy.min",
"numpy.sort",
"numpy.array",
"numpy.unique"
]
] |
Jovian-Dsouza/Avenger_FaceNet | [
"e8bdffd017c9c27d4dc0f347f6992f760f1af5db"
] | [
"test.py"
] | [
"import os\nimport torch\nimport numpy as np\nfrom torchvision import transforms \nfrom torch import nn\nfrom torch.nn import Softmax\nfrom facenet_pytorch import MTCNN\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom loadOpenFace import prepareOpenFace\nfrom collections import OrderedDict\nimport argparse\n\n# Check if CUDA GPU is available\nuseCuda = torch.cuda.is_available()\nif useCuda:\n print('CUDA is avialable')\n device = torch.device('cuda:0')\nelse:\n print('CUDA is not avialable')\n device = torch.device('cpu')\n\ndef load_model_from_chk(chk_path):\n '''Returns model and idx_to_class dictionary'''\n try:\n # Load checkpoint \n checkpoint = torch.load(chk_path, map_location=torch.device('cpu'))\n idx_to_class = checkpoint['idx_to_class']\n\n # Load the inception model\n model = prepareOpenFace(useCuda)\n model.eval()\n n_classes = len(idx_to_class)\n\n # Initialize the classifier model\n classifier_model = nn.Sequential(OrderedDict([\n (\"nn4_small_v2\", model),\n (\"fc\", nn.Linear(736, n_classes))\n ]))\n\n # load the trained parameters\n classifier_model.load_state_dict(checkpoint['model_state_dict'])\n print(\"Model Loaded from %s\" % chk_path)\n return classifier_model, idx_to_class\n\n except FileNotFoundError:\n print(\"Model checkpoint not found %s\" % chk_path)\n return None\n\n# Load mtcnn to align and crop images\nmtcnn = MTCNN(\n image_size=160, margin=0, min_face_size=20,\n thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,\n device=device\n)\n\n# tranfomation applied to croped image\nface_transform = transforms.Compose([transforms.Resize(96),\n transforms.ToTensor()])\n\nsoftmax = Softmax(dim=1)\n\n# Load the model \nchk_path = 'models/AvengersClassifier.pth'\nclassifier_model, idx_to_class = load_model_from_chk(chk_path)\nclassifier_model = classifier_model.to(device)\nclassifier_model.eval()\n\n\ndef predict(img_path, prob_theshold = 0.9):\n try:\n img = Image.open(img_path)\n except FileNotFoundError:\n return \n\n # Crop, Align and standardize the Image \n mtcnn_img = mtcnn(img.convert('RGB'))\n\n # If no face then return\n if mtcnn_img is None:\n plt.show()\n print(\"ERROR, Could not detect a face in image\")\n return\n \n # Convert to PIL image\n mtcnn_img = Image.fromarray(np.array(mtcnn_img.permute(1, 2, 0).numpy(), dtype=np.uint8))\n\n # Do the Prediction\n mtcnn_img = face_transform(mtcnn_img).unsqueeze(0)\n mtcnn_img = mtcnn_img.to(device)\n\n with torch.no_grad():\n label = classifier_model(mtcnn_img)\n label = softmax(label) # To Convert the logit to probabilities\n\n prob, pred = label.data.max(1, keepdim=True)\n prob, pred = float(prob), int(pred)\n\n if prob < prob_theshold:\n print(\"UNKNOWN FACE, but similar to %s with %0.2f%% probability\" %\n (idx_to_class[pred], 100 * prob))\n else:\n print(\"%s with %0.2f%% probability\" %\n (idx_to_class[pred], 100 * prob))\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Takes in image path and does prediction')\n parser.add_argument('-p', '--path', help='Image path')\n\n args = parser.parse_args()\n img_path = args.path\n\n print()\n predict(img_path)"
] | [
[
"torch.nn.Linear",
"torch.nn.Softmax",
"torch.no_grad",
"matplotlib.pyplot.show",
"torch.cuda.is_available",
"torch.device"
]
] |
ajinkyakhoche/Object-Detection-Project | [
"3964fd5b445957581205478bb46db58fba3a9fc3"
] | [
"ssd_keras/ssd7_training_inferenceonvideo.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"ssd7_training_inferenceonvideo.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1gMZm_sCuKq7g_cZIGfEcYyKoVw-U7jTX\n\"\"\"\n\n# from IPython.display import clear_output\n\n# ! rm -rf *\n# ! wget -O repo.zip https://github.com/pierluigiferrari/ssd_keras/archive/master.zip\n# ! unzip -o repo.zip\n# ! mv ssd_keras-master/* .\n# ! pip install tqdm\n# ! rm -rf ssd_keras-master\n# clear_output()\n# ! wget https://drive.google.com/uc?export=download&confirm=m0XG&id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D\n# ! rm *.md *.ipynb *.txt *.zip\n# ! ls\n\n\"\"\"# SSD7 Training Tutorial\n\nThis tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.\n\nDisclaimer about SSD7:\nAs you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.\n\"\"\"\n\nfrom keras.optimizers import Adam , SGD\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd7 import build_model\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\nfrom data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\nfrom data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n\n# %matplotlib inline\n\n\"\"\"## 1. Set the model configuration parameters\n\nThe cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to to match ground truth and anchor boxes during the training.\n\nHere are just some comments on a few of the parameters, read the documentation for more details:\n\n* Set the height, width, and number of color channels to whatever you want the model to accept as image input. 
If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. to convert your images to the model input size during training. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.\n* The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.\n* The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. The difference between latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.\n* The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in tSSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.\n* `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer. This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.\n* If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.\n* If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries. It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.\n* In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. 
Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.\n* `normalize_coords` converts all coordinates from absolute coordinate to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.\n\"\"\"\n\nimg_height = 300 # Height of the input images\nimg_width = 480 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nintensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nn_classes = 5 # Number of positive classes\nscales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\naspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\ntwo_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\nsteps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\noffsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled\nnormalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size\n\n\"\"\"## 2. Build or load the model\n\nYou will want to execute either of the two code cells in the subsequent two sub-sections, not both.\n\n### 2.1 Create a new model\n\nIf you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.\n\nThe code cell below does the following things:\n1. It calls the function `build_model()` to build the model.\n2. It optionally loads some weights into the model.\n3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n\n`SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. 
`neg_pos_ratio` and `alpha` are set as in the paper.\n\"\"\"\n\n# 1: Build the Keras model\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = build_model(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n aspect_ratios_per_layer=None,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=intensity_mean,\n divide_by_stddev=intensity_range)\n\n# 2: Optional: Load some weights\n\n#model.load_weights('./ssd7_weights.h5', by_name=True)\n\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model\n\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nsgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False) # Recommed to fix bug [https://github.com/pierluigiferrari/ssd_keras/issues/84]\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)\n\n\"\"\"### 2.2 Load a saved model\n\nIf you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.\n\nThe SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.\n\nThis next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.\n\"\"\"\n\nLOAD_MODEL = True\n\nif LOAD_MODEL:\n # TODO: Set the path to the `.h5` file of the model to be loaded.\n model_path = '../udacity_data/SavedModels/training1/ssd7_epoch-05_loss-2.5061_val_loss-2.5454.h5'\n\n # We need to create an SSDLoss object in order to pass that to the model loader.\n ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\n K.clear_session() # Clear previous models from memory.\n\n model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'compute_loss': ssd_loss.compute_loss})\n\n\"\"\"## 3. Set up the data generators for the training\n\nThe code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.\n\nNote that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. 
Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.\n\nSet the batch size to your preference and to what your GPU memory allows; it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.\n\nThe `DataGenerator` itself is fairly generic. It doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n\nThe image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for uniform-size images could look like. Feel free to put together other image processing chains; you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiating an `SSDDataAugmentation` object and passing that to the generator instead. This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.\n\nAn `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.\n\n### Note:\n\nThe example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).\n\"\"\"\n\n# ! 
wget --header 'Host: doc-08-64-docs.googleusercontent.com' --user-agent 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-GB,en;q=0.5' --referer 'https://drive.google.com/uc?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D&export=download' --header 'Cookie: AUTH_jnah6s13kkbb9peqjnhhrvs24bcqfb6v=06338804252926118732|1535551200000|ag6qrtoegj3b578klq9mv59em3e2u2ll' --header 'Upgrade-Insecure-Requests: 1' 'https://doc-08-64-docs.googleusercontent.com/docs/securesc/dbqrqv6dp9ts3hf02kejajr0k5nf0854/g19v9tjp4on3gskf6gjiibmlmfk52r5q/1535551200000/01021765827329596762/06338804252926118732/1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D?e=download' --output-document 'udacity_driving_datasets.zip'\n# ! unzip udacity_driving_datasets.zip\n# #clear_output()\n# ! rm *.zip\n# ! ls\n\n# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets.\n\n# TODO: Set the paths to your dataset here.\n\n# Images\nimages_dir = '../udacity_data/udacity_driving_datasets/'\n\n# Ground truth\ntrain_labels_filename = '../udacity_data/udacity_driving_datasets/labels_train.csv'\nval_labels_filename = '../udacity_data/udacity_driving_datasets/labels_val.csv'\n\ntrain_dataset.parse_csv(images_dir=images_dir,\n labels_filename=train_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.\n include_classes='all')\n\nval_dataset.parse_csv(images_dir=images_dir,\n labels_filename=val_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],\n include_classes='all')\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. 
If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\n#train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n#val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n\n# 3: Set the batch size.\n\nbatch_size = 16\n\n# 4: Define the image processing chain.\n\ndata_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),\n random_contrast=(0.5, 1.8, 0.5),\n random_saturation=(0.5, 1.8, 0.5),\n random_hue=(18, 0.5),\n random_flip=0.5,\n random_translate=((0.03,0.5), (0.03,0.5), 0.5),\n random_scale=(0.5, 2.0, 0.5),\n n_trials_max=3,\n clip_boxes=True,\n overlap_criterion='area',\n bounds_box_filter=(0.3, 1.0),\n bounds_validator=(0.5, 1.0),\n n_boxes_min=1,\n background=(0,0,0))\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n model.get_layer('classes5').output_shape[1:3],\n model.get_layer('classes6').output_shape[1:3],\n model.get_layer('classes7').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.3,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[data_augmentation_chain],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n\"\"\"## 4. Set the remaining training parameters and train the model\n\nWe've already chosen an optimizer and a learning rate and set the batch size above, now let's set the remaining training parameters.\n\nI'll set a few Keras callbacks below, one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. 
Feel free to add more callbacks if you want TensorBoard summaries or whatever.\n\"\"\"\n\n# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the weights.\nmodel_checkpoint = ModelCheckpoint(filepath='../udacity_data/SavedModels/training2/ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n                                   monitor='val_loss',\n                                   verbose=1,\n                                   save_best_only=True,\n                                   save_weights_only=False,\n                                   mode='auto',\n                                   period=1)\n\ncsv_logger = CSVLogger(filename='ssd7_training_log.csv',\n                       separator=',',\n                       append=True)\n\nearly_stopping = EarlyStopping(monitor='val_loss',\n                               min_delta=0.0,\n                               patience=10,\n                               verbose=1)\n\nreduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',\n                                         factor=0.2,\n                                         patience=8,\n                                         verbose=1,\n                                         epsilon=0.001,\n                                         cooldown=0,\n                                         min_lr=0.00001)\n\ncallbacks = [model_checkpoint,\n             csv_logger,\n             early_stopping,\n             reduce_learning_rate]\n\n\"\"\"I'll set one epoch to consist of 1,000 training steps. I'll arbitrarily set the number of epochs to 20 here. This does not imply that 20,000 training steps is the right number. Depending on the model, the dataset, the learning rate, etc., you might have to train much longer to achieve convergence, or maybe less.\n\nInstead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.\n\nIn order to only run a partial training and resume smoothly later on, there are a few things you should note:\n1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n3. 
Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want to save their state somehow if you want to pick up a training exactly where you left off.\n\"\"\"\n\n# TODO: Set the epochs to train for.\n# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 25\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n                              steps_per_epoch=steps_per_epoch,\n                              epochs=final_epoch,\n                              callbacks=callbacks,\n                              validation_data=val_generator,\n                              validation_steps=ceil(val_dataset_size/batch_size),\n                              initial_epoch=initial_epoch)\n\n\"\"\"Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:\"\"\"\n\nplt.figure(figsize=(20,12))\nplt.plot(history.history['loss'], label='loss')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.legend(loc='upper right', prop={'size': 24});\n\n\"\"\"The validation loss has been decreasing at a similar pace as the training loss, indicating that our model has been learning effectively over the last 30 epochs. We could try to train longer and see if the validation loss can be decreased further. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we will want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss.\n\n## 5. Make predictions\n\nNow let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator which we've already set up above. Feel free to change the batch size.\n\nYou can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.\n\"\"\"\n\n# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n                                         shuffle=True,\n                                         transformations=[],\n                                         label_encoder=None,\n                                         returns={'processed_images',\n                                                  'processed_labels',\n                                                  'filenames'},\n                                         keep_images_without_gt=False)\n\n# 2: Generate samples\n\nbatch_images, batch_labels, batch_filenames = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(batch_labels[i])\n\n# 3: Make a prediction\n\ny_pred = model.predict(batch_images)\n\n\"\"\"Now let's decode the raw predictions in `y_pred`.\n\nHad we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).\n\n`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. 
In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.\n\"\"\"\n\n# 4: Decode the raw prediction `y_pred`\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded[i])\n\n\"\"\"Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison.\"\"\"\n\n# 5: Draw the predicted boxes onto the image\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_images[i])\n\ncurrent_axis = plt.gca()\n\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\nclasses = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\n\n# Draw the ground truth boxes in green (omit the label for more clarity)\nfor box in batch_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\n# Draw the predicted boxes in blue\nfor box in y_pred_decoded[i]:\n xmin = box[-4]\n ymin = box[-3]\n xmax = box[-2]\n ymax = box[-1]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n\n# !pip install pytube==9.1.0\n\n# from pytube import YouTube\n# YouTube('https://www.youtube.com/watch?v=_Ad7Co55alM').streams.first().download()\n\n# ! ls\n\n# ! 
mkdir output_frames\n\n# Offline video processing\n\n# i = 0\n\n# cap = cv2.VideoCapture('test_videos/Driving in Russia 4K video Car driving in winter.mp4')\n# width = int(cap.get(3))\n# height = int(cap.get(4))\n# property_id = int(cv2.CAP_PROP_FRAME_COUNT) \n# fps = cap.get(cv2.CAP_PROP_FPS)\n# total_frames = int(cv2.VideoCapture.get(cap, property_id))\n\n# # Define the codec and create VideoWriter object\n# fourcc = cv2.VideoWriter_fourcc(*'XVID')\n# out = cv2.VideoWriter('output.avi',fourcc, fps, (width,height))\n\n# # Read until video is completed\n# for j in range(total_frames):\n# print(str(j)+'/'+str(total_frames))\n# # Capture frame-by-frame\n# ret, frame = cap.read()\n# if ret == True:\n \n# frame = frame[...,::-1]\n# frame_resized = cv2.resize(frame, (480, 300)) \n# frame_tensor = np.expand_dims(frame_resized, axis=0)\n# y_pred = model.predict(frame_tensor)\n# y_pred_decoded = decode_detections(y_pred,\n# confidence_thresh=0.5,\n# iou_threshold=0.45,\n# top_k=200,\n# normalize_coords=normalize_coords,\n# img_height=img_height,\n# img_width=img_width)\n \n# plt.figure(figsize=(20,12))\n# plt.imshow(frame_resized)\n\n# current_axis = plt.gca()\n\n# colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\n# classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\n\n# # Draw the predicted boxes in blue\n# for box in y_pred_decoded[i]:\n# xmin = box[-4]\n# ymin = box[-3]\n# xmax = box[-2]\n# ymax = box[-1]\n# color = colors[int(box[0])]\n# label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n \n \n# plt.savefig('output_frames/video_frame'+str(j)+'.png')\n# plt.close('all')\n \n# if j % 10 == 0:\n# clear_output()\n \n \n# # Break the loop\n# else: \n# break\n\n# out.release()\n# cap.release()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"numpy.set_printoptions",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.Rectangle"
]
] |
altaregos/NiaPy | [
"74f1b2827778d9086603f4a8cb523f6b5537212a",
"74f1b2827778d9086603f4a8cb523f6b5537212a"
] | [
"niapy/problems/zakharov.py",
"niapy/algorithms/modified/saba.py"
] | [
"# encoding=utf8\n\"\"\"Implementations of Zakharov function.\"\"\"\n\nimport numpy as np\nfrom niapy.problems.problem import Problem\n\n__all__ = ['Zakharov']\n\n\nclass Zakharov(Problem):\n r\"\"\"Implementations of Zakharov functions.\n\n Date: 2018\n\n Author: Klemen Berkovič\n\n License: MIT\n\n Function:\n **Zakharov Function**\n\n :math:`f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4`\n\n **Input domain:**\n The function can be defined on any input domain but it is usually\n evaluated on the hypercube :math:`x_i ∈ [-5, 10]`, for all :math:`i = 1, 2,..., D`.\n\n **Global minimum:**\n :math:`f(\\textbf{x}^*) = 0` at :math:`\\textbf{x}^* = (0, \\cdots, 0)`\n\n LaTeX formats:\n Inline:\n $f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4$\n\n Equation:\n \\begin{equation} f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4 \\end{equation}\n\n Domain:\n $-5 \\leq x_i \\leq 10$\n\n Reference:\n https://www.sfu.ca/~ssurjano/zakharov.html\n\n \"\"\"\n\n def __init__(self, dimension=4, lower=-5.0, upper=10.0, *args, **kwargs):\n r\"\"\"Initialize Zakharov problem..\n\n Args:\n dimension (Optional[int]): Dimension of the problem.\n lower (Optional[Union[float, Iterable[float]]]): Lower bounds of the problem.\n upper (Optional[Union[float, Iterable[float]]]): Upper bounds of the problem.\n\n See Also:\n :func:`niapy.problems.Problem.__init__`\n\n \"\"\"\n super().__init__(dimension, lower, upper, *args, **kwargs)\n\n @staticmethod\n def latex_code():\n r\"\"\"Return the latex code of the problem.\n\n Returns:\n str: Latex code.\n\n \"\"\"\n return r'''$f(\\textbf{x}) = \\sum_{i = 1}^D x_i^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^2 + \\left( \\sum_{i = 1}^D 0.5 i x_i \\right)^4$'''\n\n def _evaluate(self, x):\n sum1 = np.sum(x * x)\n sum2 = np.sum(0.5 * np.arange(1, self.dimension + 1) * x)\n return sum1 + sum2 ** 2 + sum2 ** 4\n",
"# encoding=utf8\nimport logging\n\nimport numpy as np\n\nfrom niapy.algorithms.algorithm import Algorithm\n\nlogging.basicConfig()\nlogger = logging.getLogger('niapy.algorithms.modified')\nlogger.setLevel('INFO')\n\n__all__ = ['AdaptiveBatAlgorithm', 'SelfAdaptiveBatAlgorithm']\n\n\nclass AdaptiveBatAlgorithm(Algorithm):\n r\"\"\"Implementation of Adaptive bat algorithm.\n\n Algorithm:\n Adaptive bat algorithm\n\n Date:\n April 2019\n\n Authors:\n Klemen Berkovič\n\n License:\n MIT\n\n Attributes:\n Name (List[str]): List of strings representing algorithm name.\n epsilon (float): Scaling factor.\n alpha (float): Constant for updating loudness.\n pulse_rate (float): Pulse rate.\n min_frequency (float): Minimum frequency.\n max_frequency (float): Maximum frequency.\n\n See Also:\n * :class:`niapy.algorithms.Algorithm`\n\n \"\"\"\n\n Name = ['AdaptiveBatAlgorithm', 'ABA']\n\n @staticmethod\n def info():\n r\"\"\"Get basic information about the algorithm.\n\n Returns:\n str: Basic information.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.info`\n\n \"\"\"\n return r\"\"\"TODO\"\"\"\n\n def __init__(self, population_size=100, starting_loudness=0.5, epsilon=0.001, alpha=1.0, pulse_rate=0.5,\n min_frequency=0.0, max_frequency=2.0, *args, **kwargs):\n \"\"\"Initialize AdaptiveBatAlgorithm.\n\n Args:\n population_size (Optional[int]): Population size.\n starting_loudness (Optional[float]): Starting loudness.\n epsilon (Optional[float]): Scaling factor.\n alpha (Optional[float]): Constant for updating loudness.\n pulse_rate (Optional[float]): Pulse rate.\n min_frequency (Optional[float]): Minimum frequency.\n max_frequency (Optional[float]): Maximum frequency.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.__init__`\n\n \"\"\"\n super().__init__(population_size, *args, **kwargs)\n self.starting_loudness = starting_loudness\n self.epsilon = epsilon\n self.alpha = alpha\n self.pulse_rate = pulse_rate\n self.min_frequency = min_frequency\n self.max_frequency = max_frequency\n\n def set_parameters(self, population_size=100, starting_loudness=0.5, epsilon=0.001, alpha=1.0, pulse_rate=0.5,\n min_frequency=0.0, max_frequency=2.0, **kwargs):\n r\"\"\"Set the parameters of the algorithm.\n\n Args:\n population_size (Optional[int]): Population size.\n starting_loudness (Optional[float]): Starting loudness.\n epsilon (Optional[float]): Scaling factor.\n alpha (Optional[float]): Constant for updating loudness.\n pulse_rate (Optional[float]): Pulse rate.\n min_frequency (Optional[float]): Minimum frequency.\n max_frequency (Optional[float]): Maximum frequency.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.set_parameters`\n\n \"\"\"\n super().set_parameters(population_size=population_size, **kwargs)\n self.starting_loudness = starting_loudness\n self.epsilon = epsilon\n self.alpha = alpha\n self.pulse_rate = pulse_rate\n self.min_frequency = min_frequency\n self.max_frequency = max_frequency\n\n def get_parameters(self):\n r\"\"\"Get algorithm parameters.\n\n Returns:\n Dict[str, Any]: Arguments values.\n\n See Also:\n * :func:`niapy.algorithms.algorithm.Algorithm.get_parameters`\n\n \"\"\"\n d = super().get_parameters()\n d.update({\n 'starting_loudness': self.starting_loudness,\n 'epsilon': self.epsilon,\n 'alpha': self.alpha,\n 'pulse_rate': self.pulse_rate,\n 'min_frequency': self.min_frequency,\n 'max_frequency': self.max_frequency\n })\n return d\n\n def init_population(self, task):\n r\"\"\"Initialize the starting population.\n\n Args:\n task (Task): Optimization task\n\n Returns:\n 
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population.\n 2. New population fitness/function values.\n 3. Additional arguments:\n * loudness (float): Loudness.\n * velocities (numpy.ndarray[float]): Velocity.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.init_population`\n\n \"\"\"\n population, fitness, d = super().init_population(task)\n loudness = np.full(self.population_size, self.starting_loudness)\n velocities = np.zeros((self.population_size, task.dimension))\n d.update({'loudness': loudness, 'velocities': velocities})\n return population, fitness, d\n\n def local_search(self, best, loudness, task, **kwargs):\n r\"\"\"Improve the best solution according to the Yang (2010).\n\n Args:\n best (numpy.ndarray): Global best individual.\n loudness (float): Loudness.\n task (Task): Optimization task.\n\n Returns:\n numpy.ndarray: New solution based on global best individual.\n\n \"\"\"\n return task.repair(best + self.epsilon * loudness * self.normal(0, 1, task.dimension), rng=self.rng)\n\n def update_loudness(self, loudness):\n r\"\"\"Update loudness when the prey is found.\n\n Args:\n loudness (float): Loudness.\n\n Returns:\n float: New loudness.\n\n \"\"\"\n new_loudness = loudness * self.alpha\n return new_loudness if new_loudness > 1e-13 else self.starting_loudness\n\n def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):\n r\"\"\"Core function of Bat Algorithm.\n\n Args:\n task (Task): Optimization task.\n population (numpy.ndarray): Current population\n population_fitness (numpy.ndarray[float]): Current population fitness/function values\n best_x (numpy.ndarray): Current best individual\n best_fitness (float): Current best individual function/fitness value\n params (Dict[str, Any]): Additional algorithm arguments\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population\n 2. New population fitness/function values\n 3. Additional arguments:\n * loudness (numpy.ndarray[float]): Loudness.\n * velocities (numpy.ndarray[float]): Velocities.\n\n \"\"\"\n loudness = params.pop('loudness')\n velocities = params.pop('velocities')\n\n for i in range(self.population_size):\n frequency = self.min_frequency + (self.max_frequency - self.min_frequency) * self.random()\n velocities[i] += (population[i] - best_x) * frequency\n if self.random() > self.pulse_rate:\n solution = self.local_search(best=best_x, loudness=loudness[i], task=task, i=i, Sol=population)\n else:\n solution = task.repair(population[i] + velocities[i], rng=self.rng)\n new_fitness = task.eval(solution)\n if (new_fitness <= population_fitness[i]) and (self.random() < loudness[i]):\n population[i], population_fitness[i] = solution, new_fitness\n if new_fitness <= best_fitness:\n best_x, best_fitness, loudness[i] = solution.copy(), new_fitness, self.update_loudness(loudness[i])\n return population, population_fitness, best_x, best_fitness, {'loudness': loudness, 'velocities': velocities}\n\n\nclass SelfAdaptiveBatAlgorithm(AdaptiveBatAlgorithm):\n r\"\"\"Implementation of Hybrid bat algorithm.\n\n Algorithm:\n Self Adaptive Bat Algorithm\n\n Date:\n April 2019\n\n Author:\n Klemen Berkovič\n\n License:\n MIT\n\n Reference paper:\n Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniški vestnik, 2013. 
1-7.\n\n Attributes:\n Name (List[str]): List of strings representing algorithm name.\n A_l (Optional[float]): Lower limit of loudness.\n A_u (Optional[float]): Upper limit of loudness.\n r_l (Optional[float]): Lower limit of pulse rate.\n r_u (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :class:`niapy.algorithms.basic.BatAlgorithm`\n\n \"\"\"\n\n Name = ['SelfAdaptiveBatAlgorithm', 'SABA']\n\n @staticmethod\n def info():\n r\"\"\"Get basic information about the algorithm.\n\n Returns:\n str: Basic information.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.info`\n\n \"\"\"\n return r\"\"\"Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniški vestnik, 2013. 1-7.\"\"\"\n\n def __init__(self, min_loudness=0.9, max_loudness=1.0, min_pulse_rate=0.001, max_pulse_rate=0.1, tao_1=0.1,\n tao_2=0.1, *args, **kwargs):\n \"\"\"Initialize SelfAdaptiveBatAlgorithm.\n\n Args:\n min_loudness (Optional[float]): Lower limit of loudness.\n max_loudness (Optional[float]): Upper limit of loudness.\n min_pulse_rate (Optional[float]): Lower limit of pulse rate.\n max_pulse_rate (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.__init__`\n\n \"\"\"\n super().__init__(*args, **kwargs)\n self.min_loudness = min_loudness\n self.max_loudness = max_loudness\n self.min_pulse_rate = min_pulse_rate\n self.max_pulse_rate = max_pulse_rate\n self.tao_1 = tao_1\n self.tao_2 = tao_2\n\n def set_parameters(self, min_loudness=0.9, max_loudness=1.0, min_pulse_rate=0.001, max_pulse_rate=0.1, tao_1=0.1, tao_2=0.1, **kwargs):\n r\"\"\"Set core parameters of HybridBatAlgorithm algorithm.\n\n Args:\n min_loudness (Optional[float]): Lower limit of loudness.\n max_loudness (Optional[float]): Upper limit of loudness.\n min_pulse_rate (Optional[float]): Lower limit of pulse rate.\n max_pulse_rate (Optional[float]): Upper limit of pulse rate.\n tao_1 (Optional[float]): Learning rate for loudness.\n tao_2 (Optional[float]): Learning rate for pulse rate.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.set_parameters`\n\n \"\"\"\n super().set_parameters(**kwargs)\n self.min_loudness = min_loudness\n self.max_loudness = max_loudness\n self.min_pulse_rate = min_pulse_rate\n self.max_pulse_rate = max_pulse_rate\n self.tao_1 = tao_1\n self.tao_2 = tao_2\n\n def get_parameters(self):\n r\"\"\"Get parameters of the algorithm.\n\n Returns:\n Dict[str, Any]: Parameters of the algorithm.\n\n See Also:\n * :func:`niapy.algorithms.modified.AdaptiveBatAlgorithm.get_parameters`\n\n \"\"\"\n d = AdaptiveBatAlgorithm.get_parameters(self)\n d.update({\n 'min_loudness': self.min_loudness,\n 'max_loudness': self.max_loudness,\n 'min_pulse_rate': self.min_pulse_rate,\n 'max_pulse_rate': self.max_pulse_rate,\n 'tao_1': self.tao_1,\n 'tao_2': self.tao_2\n })\n return d\n\n def init_population(self, task):\n population, fitness, d = super().init_population(task)\n pulse_rates = np.full(self.population_size, self.pulse_rate)\n d.update({'pulse_rates': pulse_rates})\n return population, fitness, d\n\n def self_adaptation(self, loudness, pulse_rate):\n r\"\"\"Adaptation step.\n\n Args:\n loudness (float): Current loudness.\n pulse_rate (float): Current pulse rate.\n\n Returns:\n 
Tuple[float, float]:\n 1. New loudness.\n 2. Nwq pulse rate.\n\n \"\"\"\n return self.min_loudness + self.random() * (\n self.max_loudness - self.min_loudness) if self.random() < self.tao_1 else loudness, self.min_pulse_rate + self.random() * (\n self.max_pulse_rate - self.min_pulse_rate) if self.random() < self.tao_2 else pulse_rate\n\n def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):\n r\"\"\"Core function of Bat Algorithm.\n\n Args:\n task (Task): Optimization task.\n population (numpy.ndarray): Current population\n population_fitness (numpy.ndarray[float]): Current population fitness/function values\n best_x (numpy.ndarray): Current best individual\n best_fitness (float): Current best individual function/fitness value\n params (Dict[str, Any]): Additional algorithm arguments\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:\n 1. New population\n 2. New population fitness/function values\n 3. Additional arguments:\n * loudness (numpy.ndarray[float]): Loudness.\n * pulse_rates (numpy.ndarray[float]): Pulse rate.\n * velocities (numpy.ndarray[float]): Velocities.\n\n \"\"\"\n loudness = params.pop('loudness')\n pulse_rates = params.pop('pulse_rates')\n velocities = params.pop('velocities')\n\n for i in range(self.population_size):\n loudness[i], pulse_rates[i] = self.self_adaptation(loudness[i], pulse_rates[i])\n frequency = self.min_frequency + (self.max_frequency - self.min_frequency) * self.random()\n velocities[i] += (population[i] - best_x) * frequency\n if self.random() > pulse_rates[i]:\n solution = self.local_search(best=best_x, loudness=loudness[i], task=task, i=i, population=population)\n else:\n solution = task.repair(population[i] + velocities[i], rng=self.rng)\n new_fitness = task.eval(solution)\n if (new_fitness <= population_fitness[i]) and (self.random() < (self.min_loudness - loudness[i]) / self.starting_loudness):\n population[i], population_fitness[i] = solution, new_fitness\n if new_fitness <= best_fitness:\n best_x, best_fitness = solution.copy(), new_fitness\n return population, population_fitness, best_x, best_fitness, {'loudness': loudness, 'pulse_rates': pulse_rates, 'velocities': velocities}\n"
] | [
[
"numpy.arange",
"numpy.sum"
],
[
"numpy.full",
"numpy.zeros"
]
] |
patrickbook/models | [
"718fb2c0d478ab6c9906a3dbf44099942a2c6426"
] | [
"official/nlp/modeling/networks/packed_sequence_embedding.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An embedding network supporting packed sequences and position ids.\"\"\"\n# pylint: disable=g-classes-have-attributes\nimport collections\nimport tensorflow as tf\n\nfrom official.modeling import tf_utils\nfrom official.nlp import keras_nlp\nfrom official.nlp.modeling import layers\n\n\[email protected]_keras_serializable(package='Text')\nclass PackedSequenceEmbedding(tf.keras.Model):\n \"\"\"An embedding network supporting packed sequences and position ids.\n\n This network implements an embedding layer similar to the one described in\n \"BERT: Pre-training of Deep Bidirectional Transformers for Language\n Understanding\" (https://arxiv.org/abs/1810.04805). On top of it, it supports\n to (1) pack multiple sequences into one sequence and (2) allow additional\n \"position_ids\" as input.\n\n Args:\n vocab_size: The size of the token vocabulary.\n type_vocab_size: The size of the type vocabulary.\n embedding_width: Width of token embeddings.\n hidden_size: The output size for this encoder.\n max_seq_length: The maximum sequence length for this encoder.\n initializer: The initializer for the embedding portion of this encoder.\n dropout_rate: The dropout rate to apply before the encoding layers.\n pack_multiple_sequences: If True, we can feed multiple sequences into one\n sequence for training and inference (they don't impact each other).\n use_position_id: Whether to expect `position_ids` as an input to the\n network. 
If False, the `position_ids` will be inferred: (1) when\n pack_multiple_sequences is False, we assume the position ids are 0, 1,\n 2, ..., seq_length - 1; (2) when pack_multiple_sequences is True, there\n may be multiple sub sequences, and for each sub sequence, its position\n ids start from 0, 1, 2, ...\n \"\"\"\n\n def __init__(self,\n vocab_size,\n type_vocab_size,\n embedding_width,\n hidden_size,\n max_seq_length,\n initializer,\n dropout_rate,\n use_position_id=False,\n pack_multiple_sequences=False,\n **kwargs):\n initializer = tf.keras.initializers.get(initializer)\n config_dict = {\n 'vocab_size': vocab_size,\n 'type_vocab_size': type_vocab_size,\n 'embedding_width': embedding_width,\n 'hidden_size': hidden_size,\n 'max_seq_length': max_seq_length,\n 'initializer': tf.keras.initializers.serialize(initializer),\n 'dropout_rate': dropout_rate,\n 'use_position_id': use_position_id,\n 'pack_multiple_sequences': pack_multiple_sequences,\n }\n\n word_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_word_ids')\n mask = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_mask')\n type_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='input_type_ids')\n inputs = {\n 'input_word_ids': word_ids,\n 'input_mask': mask,\n 'input_type_ids': type_ids,\n }\n if use_position_id:\n position_ids = tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name='position_ids')\n inputs['position_ids'] = position_ids\n else:\n position_ids = None\n\n if pack_multiple_sequences:\n sub_seq_mask = PackedSequenceMask()(word_ids)\n else:\n sub_seq_mask = None\n\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n name='word_embeddings')\n word_embeddings = embedding_layer(word_ids)\n\n # Always uses dynamic slicing for simplicity.\n position_embedding_layer = PositionEmbeddingWithSubSeqMask(\n initializer=initializer,\n use_dynamic_slicing=True,\n max_sequence_length=max_seq_length,\n name='position_embedding')\n position_embeddings = position_embedding_layer(\n word_embeddings, position_ids, sub_seq_mask)\n\n type_embeddings = (\n layers.OnDeviceEmbedding(\n vocab_size=type_vocab_size,\n embedding_width=embedding_width,\n initializer=initializer,\n use_one_hot=True,\n name='type_embeddings')(type_ids))\n\n embeddings = tf.keras.layers.Add()(\n [word_embeddings, position_embeddings, type_embeddings])\n embeddings = tf.keras.layers.LayerNormalization(\n name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(\n embeddings)\n embeddings = tf.keras.layers.Dropout(\n rate=dropout_rate, dtype=tf.float32)(\n embeddings)\n\n if embedding_width != hidden_size:\n embeddings = tf.keras.layers.experimental.EinsumDense(\n '...x,xy->...y',\n output_shape=hidden_size,\n bias_axes=None,\n kernel_initializer=initializer,\n name='embedding_projection')(\n embeddings)\n\n attention_mask = keras_nlp.layers.SelfAttentionMask()(embeddings, mask)\n if sub_seq_mask is not None:\n attention_mask = tf.keras.layers.Lambda(\n lambda x: x[0] * tf.cast(x[1], x[0].dtype))(\n [attention_mask, sub_seq_mask])\n\n outputs = [embeddings, attention_mask]\n super(PackedSequenceEmbedding, self).__init__(\n inputs=inputs, outputs=outputs, **kwargs)\n # TF does not track immutable attrs which do not contain Trackables,\n # so by creating a config namedtuple instead of a dict we avoid tracking it.\n config_cls = collections.namedtuple('Config', config_dict.keys())\n self._config = 
config_cls(**config_dict)\n self._embedding_layer = embedding_layer\n self._position_embedding_layer = position_embedding_layer\n\n def get_embedding_table(self):\n return self._embedding_layer.embeddings\n\n def get_config(self):\n return dict(self._config._asdict())\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\[email protected]_keras_serializable(package='Text')\nclass PackedSequenceMask(tf.keras.layers.Layer):\n \"\"\"A layer to create a mask to indicate multiple sub sequences.\"\"\"\n\n def call(self, input_ids):\n \"\"\"Implements call() for the layer.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n\n Returns:\n boolean Tensor of shape [batch_size, seq_length, seq_length]. [x, y, z]\n is True if for x'th instance in a batch, y'th token and z'th token are\n from the same sub sequence.\n \"\"\"\n # Suppose\n # - the first token in the parent sequence is [CLS].\n # - every sequence starts from [CLS].\n # - every sequence only contains one [CLS].\n seq_start_token = input_ids[:, 0:1]\n seq_start_loc = tf.cast(tf.equal(input_ids, seq_start_token), tf.int32)\n # Set different ids for different sub sequences.\n seq_ids = tf.expand_dims(tf.cumsum(seq_start_loc, -1), -1)\n return tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1]))\n\n\[email protected]_keras_serializable(package='Text')\nclass PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):\n \"\"\"Creates a positional embedding with sub-sequence masking.\n\n This layer creates a positional embedding as described in \"BERT: Pre-training\n of Deep Bidirectional Transformers for Language Understanding\"\n (https://arxiv.org/abs/1810.04805). On top of it, it supports\n `position_ids` and `sub_sequence_mask` tensors.\n\n This layer can be set up to either create a statically shaped slice or a\n dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor\n can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the\n input size must be fixed.\n\n Args:\n initializer: The initializer to use for the embedding weights. Defaults to\n \"glorot_uniform\".\n use_dynamic_slicing: Whether to use the dynamic slicing path.\n max_sequence_length: The maximum size of the dynamic sequence. 
Only\n applicable if `use_dynamic_slicing` is True.\n \"\"\"\n\n def __init__(self,\n initializer='glorot_uniform',\n use_dynamic_slicing=False,\n max_sequence_length=None,\n **kwargs):\n # We need to have a default dtype of float32, since the inputs (which Keras\n # usually uses to infer the dtype) will always be int32.\n if 'dtype' not in kwargs:\n kwargs['dtype'] = 'float32'\n\n super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)\n if use_dynamic_slicing and max_sequence_length is None:\n raise ValueError(\n 'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'\n )\n self._max_sequence_length = max_sequence_length\n self._initializer = tf.keras.initializers.get(initializer)\n self._use_dynamic_slicing = use_dynamic_slicing\n\n def get_config(self):\n config = {\n 'max_sequence_length': self._max_sequence_length,\n 'initializer': tf.keras.initializers.serialize(self._initializer),\n 'use_dynamic_slicing': self._use_dynamic_slicing,\n }\n base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dimension_list = input_shape.as_list()\n\n if len(dimension_list) != 3:\n raise ValueError('PositionEmbedding expects a 3-dimensional input tensor '\n 'of shape [batch, sequence, width]')\n seq_length = dimension_list[1]\n width = dimension_list[2]\n\n # If we are not using dynamic slicing, we must assume that the sequence\n # length is fixed and max_sequence_length should not be specified.\n if not self._use_dynamic_slicing:\n if seq_length is None:\n raise ValueError(\n 'PositionEmbedding must have `use_dynamic_slicing` set '\n 'to True (and max_sequence_length set) when the '\n 'sequence (1st) dimension of the input is None.')\n if self._max_sequence_length is not None:\n raise ValueError(\n 'When `use_dynamic_slicing` is False, max_sequence_length should '\n 'not be specified and we ought to use seq_length to get the '\n 'variable shape.')\n\n if self._max_sequence_length is not None:\n weight_sequence_length = self._max_sequence_length\n else:\n weight_sequence_length = seq_length\n\n self._position_embeddings = self.add_weight(\n 'embeddings',\n shape=[weight_sequence_length, width],\n initializer=self._initializer)\n\n super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)\n\n def call(self, inputs, position_ids=None, sub_sequence_mask=None):\n \"\"\"Implements call() for the layer.\n\n When `position_ids` is specified, it will return the position embeddings\n corresponding to this `position_ids`; otherwise, `position_ids` will be\n inferred in the following way:\n\n (1) When `sub_sequence_mask` is None, we assume the position ids are\n 0, 1, 2, ..., seq_length - 1.\n (2) When `sub_sequence_mask` is specified, there may be multiple sub\n sequences, and for each sub sequence, its position ids start from\n 0, 1, 2, ...\n\n Args:\n inputs: Word embeddings in shape [batch, seq_length, embedding_dim].\n position_ids: An optional int32 tensor in shape [batch, seq_length].\n sub_sequence_mask: An optional bool tensor in shape [batch, seq_length,\n seq_length]. 
[x, y, z] is True if for x'th instance in a batch, y'th\n token and z'th token are from the same sub sequence.\n\n Returns:\n The position embeddings in shape [batch, seq_length, embedding_dim].\n \"\"\"\n input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)\n if self._use_dynamic_slicing:\n position_embeddings = self._position_embeddings[:input_shape[1], :]\n else:\n position_embeddings = self._position_embeddings\n\n if position_ids is not None:\n return tf.gather(position_embeddings, position_ids)\n\n if sub_sequence_mask is None:\n return tf.broadcast_to(position_embeddings, input_shape)\n else:\n sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32)\n # For each sub sequence, its position ids start from 0, 1, 2, ...\n position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1\n return tf.gather(position_embeddings, position_ids)\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.cumsum",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.Dropout",
"tensorflow.equal",
"tensorflow.broadcast_to",
"tensorflow.keras.layers.experimental.EinsumDense",
"tensorflow.cast",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.layers.Add",
"tensorflow.keras.initializers.serialize",
"tensorflow.gather",
"tensorflow.transpose",
"tensorflow.keras.layers.Input"
]
] |
konstin/esm | [
"a39894c079ce314e1c0aaa607e8ae498111910a0"
] | [
"esm/model.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .modules import TransformerLayer, PositionalEmbedding # noqa\n\n\nclass ProteinBertModel(nn.Module):\n @classmethod\n def add_args(cls, parser):\n parser.add_argument(\n \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\"\n )\n parser.add_argument(\n \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\"\n )\n parser.add_argument(\n \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to logits\"\n )\n parser.add_argument(\n \"--ffn_embed_dim\",\n default=5120,\n type=int,\n metavar=\"N\",\n help=\"embedding dimension for FFN\",\n )\n parser.add_argument(\n \"--attention_heads\",\n default=20,\n type=int,\n metavar=\"N\",\n help=\"number of attention heads\",\n )\n\n def __init__(self, args, alphabet_size, padding_idx):\n super().__init__()\n self.args = args\n self.alphabet_size = alphabet_size\n self.padding_idx = padding_idx\n self.embed_scale = math.sqrt(self.args.embed_dim)\n self._init_submodules()\n\n def _init_submodules(self):\n self.embed_tokens = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.embed_positions = PositionalEmbedding(self.args.embed_dim, self.padding_idx)\n self.layers = nn.ModuleList(\n [\n TransformerLayer(\n self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads\n )\n for _ in range(self.args.layers)\n ]\n )\n self.embed_out = nn.Parameter(\n torch.zeros((self.alphabet_size, self.args.embed_dim))\n )\n self.embed_out_bias = None\n if self.args.final_bias:\n self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))\n\n def forward(self, tokens, repr_layers=[]):\n assert tokens.ndim == 2\n padding_mask = tokens.eq(self.padding_idx)\n if not padding_mask.any():\n padding_mask = None\n\n x = self.embed_scale * self.embed_tokens(tokens)\n x = x + self.embed_positions(tokens)\n\n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n # (B, T, E) => (T, B, E)\n x = x.transpose(0, 1)\n\n for layer_idx, layer in enumerate(self.layers):\n x, _ = layer(x, self_attn_padding_mask=padding_mask)\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.transpose(0, 1)\n\n x = F.linear(x, self.embed_out, bias=self.embed_out_bias)\n\n # (T, B, E) => (B, T, E)\n x = x.transpose(0, 1)\n\n result = {\"logits\": x, \"representations\": hidden_representations}\n\n return result\n\n @property\n def num_layers(self):\n return self.args.layers\n"
] | [
[
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.functional.linear"
]
] |
AryamanSrii/Mecha-Karen | [
"4a5c7318f8c458495eee72a13be5db8a0113ed28"
] | [
"Bot/src/funhouse/image.py"
] | [
"# !/usr/bin/python\n\n\"\"\"\nCopyright ©️: 2020 Seniatical / _-*™#7519\nLicense: Apache 2.0\nA permissive license whose main conditions require preservation of copyright and license notices.\nContributors provide an express grant of patent rights.\nLicensed works, modifications, and larger works may be distributed under different terms and without source code.\nFULL LICENSE CAN BE FOUND AT:\n https://www.apache.org/licenses/LICENSE-2.0.html\nAny violation to the license, will result in moderate action\nYou are legally required to mention (original author, license, source and any changes made)\n\"\"\"\n\nimport typing\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import BucketType\nfrom PIL import Image, ImageDraw\nfrom io import BytesIO\nimport aiohttp\nimport MK\nimport numpy as np\nimport random\nimport cv2\n\nfrom core._ import extract_\nfrom core._.image.effects import *\nfrom core._.image._ import sort_size, save_image\nfrom core._.image.cloud import APISESSION\n\nclass _Image(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.converter = commands.MemberConverter()\n self.vac_api = APISESSION.Client()\n self.client = MK.Async.Client(bot.env('API_TOKEN'))\n self.ses = aiohttp.ClientSession()\n self.cache = bot.cache\n self.loop = bot.loop\n self.beard_image = Image.open('./storage/images/beard.png')\n self.wasted_template = Image.open('./storage/images/wasted.png').resize((900, 900))\n\n self.emoji_c = commands.PartialEmojiConverter()\n\n bot.api_c = self.client\n\n @staticmethod\n def pixelate(image_to_pixelate: Image) -> Image:\n return image_to_pixelate.resize((32, 32), resample=Image.NEAREST).resize((1024, 1024), resample=Image.NEAREST)\n\n @staticmethod\n def quantize(image_to_quantize: Image) -> Image:\n return image_to_quantize.quantize()\n\n @commands.command(name='Trash')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def trash(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/trash.jpg')\n\n author = Image.open(_author).convert('RGBA').resize((130, 134))\n member = Image.open(_user).convert('RGBA').resize((105, 109))\n\n im.paste(author, (260, 120))\n im.paste(member, (105, 7))\n\n with BytesIO() as b:\n im.save(b, 'PNG')\n b.seek(0)\n file = discord.File(fp=b, filename='trash.png')\n return file\n\n author_av = BytesIO(await ctx.author.avatar.read())\n user_av = await extract_.get_stream(ctx, query=argument)\n\n if not user_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n await ctx.send(\n embed=discord.Embed(title='Hes getting recycled', colour=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://trash.png'),\n file=future.result())\n\n @commands.command(name='Slap')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def slap(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/slap.jpg')\n\n author = Image.open(_author).convert('RGBA').resize((310, 310))\n member = Image.open(_user).convert('RGBA').resize((320, 320))\n\n im = im.copy()\n im.paste(author, (465, 70))\n im.paste(member, (810, 350))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n return discord.File(buffer, filename='slapped.png')\n\n author_av = 
BytesIO(await ctx.author.avatar.read())\n user_av = await extract_.get_stream(ctx, query=argument)\n\n if not user_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='He just got SLAPPED!',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://slapped.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Spank')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def spank(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/spank.jpg').convert('RGBA')\n\n author = Image.open(_author).convert('RGBA').resize((230, 230))\n member = Image.open(_user).convert('RGBA').resize((320, 320))\n\n im = im.copy()\n im.paste(member, (750, 25))\n im.paste(author, (1200, 455))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='spanked.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n user_av = BytesIO(await ctx.author.avatar.read())\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='Who\\'s being a naughty boy',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://spanked.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Boot')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def boot(self, ctx, *, argument: str = None):\n\n def execute(_author, _user):\n im = Image.open('./storage/images/boot.jpg')\n\n _author = Image.open(_author).convert('RGBA').resize((50, 54))\n _user = Image.open(_user).convert('RGBA').resize((50, 54))\n\n im = im.copy()\n im.paste(_author, (183, 13))\n im.paste(_user, (33, 12))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='booted.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n user_av = BytesIO(await ctx.author.avatar.read())\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av, user_av)\n await future\n\n embed = discord.Embed(title='Right in the sacks',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://booted.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Obese')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def obese(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/obese.jpg').convert('RGBA').resize((900, 900))\n\n _author = Image.open(_author).convert('RGBA').resize((220, 220))\n im.paste(_author, (457, 135))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='obese.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='He\\'s not that fat 
*yet*.',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://obese.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Bird')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def bird(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/bird.jpg').convert('RGBA').resize((900, 900))\n _author = Image.open(_author).convert('RGBA').resize((220, 220))\n im.paste(_author, (555, 60))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='bird.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='Somebody is preparing to migrate',\n colour=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://bird.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Delete')\n @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def delete(self, ctx, *, argument: str = None):\n\n def execute(_author):\n im = Image.open('./storage/images/delete.jpg').convert('RGB')\n\n _author = Image.open(_author).convert('RGBA').resize((196, 196))\n im.paste(_author, (121, 137))\n\n with BytesIO() as buffer:\n im.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='delete.png')\n return file\n\n author_av = await extract_.get_stream(ctx, query=argument)\n\n if not author_av:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, author_av)\n await future\n\n embed = discord.Embed(title='Moving file to the recycle bin',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://delete.png')\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(name='Invert')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def invert(self, ctx, argument: str = None, animate: str = '--true', *size) -> typing.Union[discord.MessageReference, discord.Embed]:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.invert, stream, animate, *size)\n embed = discord.Embed(title='Inverted!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Equalize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def equalize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.equalize, stream, animate, *size)\n embed = discord.Embed(title='Equalized!', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Grayscale')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def grayscale(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.grayscale, stream, animate, *size)\n embed = discord.Embed(title='Grayscaled!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Mirror')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def mirror(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.mirror, stream, animate, *size)\n embed = discord.Embed(title='Mirrored!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Posterize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def posterize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.posterize, stream, animate, *size, {'bits': 1})\n embed = discord.Embed(title='Posterized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Solarize')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def solarize(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.solarize, stream, animate, *size, {'threshold': 255})\n embed = discord.Embed(title='Solarized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n 
except Exception:\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Transpose')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def transpose(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.exif_transpose, stream, animate, *size)\n embed = discord.Embed(title='Transposed!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Flip')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def flip(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.flip, stream, animate, *size)\n embed = discord.Embed(title='Flipped!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n\n try:\n await ctx.message.reply(file=file, embed=embed)\n except Exception:\n ## FILE TOO LARGE\n return await ctx.message.reply(content='Oh No, This file was too large!')\n\n @commands.command(name='Gamma')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def gamma(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('gamma', str(img))\n except Exception as e:\n print(e)\n return await ctx.send('Invalid image URL passed.')\n\n file = discord.File(fp=img, filename='gamma.png')\n embed = discord.Embed(title='Gammafied!', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://gamma.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Rainbow')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def rainbow(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('rainbow', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='autumn.png')\n embed = discord.Embed(title='Autumn Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://autumn.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Autumn')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def autumn(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('autumn', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='autumn.png')\n embed = discord.Embed(title='Autumn Filter', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://autumn.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Inferno')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def inferno(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('hsv', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='inferno.png')\n embed = discord.Embed(title='Inferno Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://inferno.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Twilight')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def twilight(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('twilight', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='twilight.png')\n embed = discord.Embed(title='Twilight Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://twilight.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Warp')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def warp(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('warp', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='warp.png')\n embed = discord.Embed(title='Warped Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://warp.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Blur')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, BucketType.user)\n async def blur(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('blur', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='blur.png')\n embed = discord.Embed(title='You now look like a foggy mirror!',\n color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://blur.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Swirl')\n @commands.bot_has_guild_permissions(send_messages=True, embed_links=True)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def swirl(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('swirl', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n file = discord.File(fp=img, filename='swirl.png')\n embed = discord.Embed(title='Round and a round', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://swirl.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command(name='Achievement')\n @commands.cooldown(1, 10, BucketType.user)\n async def achievement(self, ctx, *, message: str = None):\n message = 'Nothing.' 
if not message else message\n message = message.replace(' ', '%20')\n url = 'https://minecraftskinstealer.com/achievement/{}/Achievement%20Earned!/{}'.format(random.randrange(40),\n message)\n embed = discord.Embed(colour=discord.Colour.red()).set_image(url=url)\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cartoon(self, ctx, *, argument: str = None):\n img = await extract_.get_url(ctx, query=argument)\n try:\n img = await self.client.image('cartoon', str(img))\n except Exception:\n return await ctx.send('Invalid image URL passed.')\n\n file = discord.File(fp=img, filename='cartoon.png')\n embed = discord.Embed(title='Cartoon Filter', color=ctx.author.color).set_image(url='attachment://cartoon.png')\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def beard(self, ctx, *args):\n if not args:\n user = ctx.author\n pos_x: str = '290'\n pos_y: str = '250'\n beard_x: str = '300'\n beard_y = '300'\n else:\n try:\n user = await self.converter.convert(ctx, args[0])\n except commands.errors.MemberNotFound:\n user = ctx.author\n if len(args) > 1:\n pos_x = args[1]\n else:\n pos_x = '290'\n if len(args) > 2:\n pos_y = args[2]\n else:\n pos_y = '250'\n if len(args) > 3:\n beard_x = args[3]\n else:\n beard_x = '300'\n if len(args) > 4:\n beard_y = args[4]\n else:\n beard_y = '300'\n try:\n positions = [pos_x, pos_y, beard_x, beard_y]\n new_pos = list(map(int, positions))\n if any([i for i in new_pos if i > 900 or i < 1]):\n return await ctx.send('Markers cannot be larger than 900 or less than 1')\n except ValueError:\n return await ctx.send('Markers to place or resize the beard must be numbers!')\n user = user or ctx.author\n\n raw_beard = self.beard_image\n\n beard = raw_beard.resize((new_pos[2], new_pos[3]))\n\n avatar = Image.open(BytesIO(await user.avatar.with_format(format='png').read())).convert(\n 'RGBA').resize((900, 900))\n avatar.paste(beard, (new_pos[0], new_pos[1]), beard)\n\n with BytesIO() as buffer:\n avatar.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='bearded.jpg')\n\n embed = discord.Embed(title=f'Given {user.display_name} a nice beard', color=user.color).set_image(\n url='attachment://bearded.jpg')\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def wasted(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n\n def execute(image):\n img = Image.open(image).convert('RGB').resize((900, 900))\n img = img.point(lambda p: p * 0.5)\n\n img.paste(self.wasted_template, (0, 0), self.wasted_template)\n\n with BytesIO() as buffer:\n img.save(buffer, 'PNG')\n buffer.seek(0)\n file = discord.File(fp=buffer, filename='wasted.jpg')\n return file\n\n image = await self.loop.run_in_executor(None,\n execute,\n BytesIO(await user.avatar.with_format(format='png').read())\n )\n await ctx.send(embed=discord.Embed(title='Wasted', colour=user.colour).set_image(url='attachment://wasted.jpg'),\n file=image)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def gayify(self, ctx, argument: str = None, animate: str = '--true', *size):\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.send('Invalid image provided')\n\n file = await self.loop.run_in_executor(None, gayify_, stream, animate, *size)\n embed = discord.Embed(title=f'Gay Filter', 
color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://{}'.format(file.filename))\n await ctx.send(file=file, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def distracted(self, ctx, user1: discord.Member = None, user2: discord.Member = None,\n user3: discord.Member = None):\n m1 = user1 or ctx.author\n m2 = user2 or ctx.author\n m3 = user3 or ctx.author\n user = await self.vac_api.distracted_bf(m1.avatar.with_format(format='png'),\n m2.avatar.with_format(format='png'),\n m3.avatar.with_format(format='png'))\n image_out = discord.File(fp=await user.read(), filename=\"distracted.png\")\n embed = discord.Embed(title=f'Oh no.', color=random.randint(0x000000, 0xFFFFFF)).set_image(\n url='attachment://distracted.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def dos(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.dock_of_shame(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"dockofshame.png\")\n embed = discord.Embed(title=f'SHAME THEM!', color=user.colour).set_image(url='attachment://dockofshame.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def drip(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.drip(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"drip.png\")\n embed = discord.Embed(title=f'Speechless', color=user.colour).set_image(url='attachment://drip.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cr(self, ctx, *, text: str):\n user = await self.vac_api.car_reverse(text)\n image_out = discord.File(fp=await user.read(), filename=\"carreverse.png\")\n embed = discord.Embed(title=f'Car Reverse Meme', color=ctx.author.colour).set_image(\n url='attachment://carreverse.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cmm(self, ctx, *, text: str):\n user = await self.vac_api.change_my_mind(text)\n image_out = discord.File(fp=await user.read(), filename=\"changemymind.png\")\n embed = discord.Embed(title=f'Change My Mind.', color=ctx.author.colour).set_image(\n url='attachment://changemymind.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def heaven(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.heaven(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"heaven.png\")\n embed = discord.Embed(title=f'They have ascended.', color=user.colour).set_image(url='attachment://heaven.png')\n await ctx.send(file=image_out, embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def table_flip(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n data = await self.vac_api.table_flip(user.avatar.with_format(format='png'))\n image_out = discord.File(fp=await data.read(), filename=\"tableflip.png\")\n embed = discord.Embed(title=f'{user.display_name} looks fiesty.', color=user.colour).set_image(\n url='attachment://tableflip.png')\n await 
ctx.send(file=image_out, embed=embed)\n\n @commands.command(aliases=['color'], name='Colour')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def get_colour(self, ctx, colour):\n try:\n colour = int((str((await self.converter.convert(ctx, colour)).colour)).replace('#', '0x'), 16)\n except Exception:\n try:\n colour = int(colour.replace('#', '0x'), 16)\n except Exception:\n return await ctx.send('Invalid hex code provided.')\n with BytesIO() as b:\n new = Image.new(mode='RGB', size=(900, 900), color=colour)\n new.save(b, 'PNG')\n b.seek(0)\n await ctx.send(file=discord.File(fp=b, filename='{}.png'.format(colour)),\n embed=discord.Embed(title='Created new colour:', colour=colour).set_image(\n url='attachment://{}.png'.format(colour)))\n\n @commands.command(name='8bit')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def bittify(self, ctx, argument: str = None, animate: str = '--true', *size) -> discord.Embed:\n _io = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(_io, animate, size):\n avatar = Image.open(_io)\n duration = avatar.info.get('duration')\n loops = avatar.info.get('loop')\n\n if not size and not getattr(_io, 'discord', False):\n size = avatar.size\n else:\n size = sort_size(*size)\n\n if getattr(avatar, 'is_animated', False) and animate.lower() == '--true':\n frames = []\n for _ in range(avatar.n_frames):\n avatar.seek(_)\n frames.append(self.quantize(self.pixelate(avatar)).resize(size))\n return save_image(frames, filename='8bit.gif', duration=duration, loop=loops)\n\n eightbit = self.pixelate(avatar)\n eightbit = self.quantize(eightbit).resize(size)\n\n with BytesIO() as buffer:\n eightbit.save(buffer, format=\"PNG\")\n buffer.seek(0)\n\n file = discord.File(buffer, filename=\"8bit.png\")\n return file\n\n if not _io:\n return await ctx.send('Invalid image provided')\n\n future = self.loop.run_in_executor(None, execute, _io, animate, size)\n await future\n\n embed = discord.Embed(\n title=\"8-Bit filter\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://{}\".format(future.result().filename))\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def oil(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n oil = cv2.xphoto.oilPainting(image, 7, 1)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(oil)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='oilpainting.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! 
Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Oil Painting\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://oilpainting.png\")\n await ctx.send(file=future.result(), embed=embed)\n\n @commands.command(aliases=['watercolor'])\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def watercolour(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n water_colour = cv2.stylization(image, sigma_s=60, sigma_r=0.6)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(water_colour)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='watercolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Watercolour Painting\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://watercolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @commands.group(invoke_without_command=True)\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def sketch(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(dst_gray)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='sketchnocolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Sketched your image\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://sketchnocolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @sketch.command(aliases=['color'], name='colour')\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def sketch_colour(self, ctx, *, argument: str = None):\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.send('Invalid image provided')\n\n def execute(image):\n image.seek(0)\n\n file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)\n image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.waitKey(1)\n\n try:\n dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)\n except Exception:\n return False\n\n with BytesIO() as buffer:\n image = Image.fromarray(dst_color)\n image.save(buffer, format='PNG')\n buffer.seek(0)\n file = discord.File(buffer, filename='sketchcolour.png')\n return file\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n if not future.result():\n return await ctx.send('Oh No! 
Looks like your image cannot be drawn.')\n\n embed = discord.Embed(\n title=\"Sketched your image\",\n colour=ctx.author.colour\n )\n embed.set_image(url=\"attachment://sketchcolour.png\")\n return await ctx.send(file=future.result(), embed=embed)\n\n @commands.command()\n async def expand(self, ctx, user: discord.Member = None):\n user = user or ctx.author\n\n message = await ctx.send(embed=discord.Embed(description='<a:online:834143953221582927> | Building GIF',\n colour=discord.Colour.green()))\n\n def execute(image):\n images = []\n\n width = 900\n center = width // 2\n color_1 = (0, 255, 0)\n background_colour = (255, 255, 255)\n max_radius = int(center * 1.5)\n step = 55\n\n avatar = Image.open(image).convert('RGB')\n\n for i in range(1, max_radius, step):\n im = Image.new('RGB', (width, width), background_colour)\n\n image = avatar.resize((width, width))\n\n npImage = np.array(image)\n h, w = im.size\n\n alpha = Image.new('L', image.size, 0)\n draw = ImageDraw.Draw(alpha)\n draw.pieslice((center - i, center - i, center + i, center + i), 0, 360, fill=255)\n\n npAlpha = np.array(alpha)\n npImage = np.dstack((npImage, npAlpha))\n\n image = Image.fromarray(npImage).convert('RGBA')\n\n im.paste(image, (0, 0), image)\n\n images.append(im)\n\n with BytesIO() as buffer:\n images[0].save(buffer, format='GIF', optimize=False, duration=150, append_images=images[1:],\n save_all=True, quality=1, loop=0)\n buffer.seek(0)\n return discord.File(buffer, filename='expand.gif')\n\n image = BytesIO(await user.avatar.with_format(format='jpg').read())\n\n future = self.loop.run_in_executor(None, execute, image)\n await future\n\n gif_message = await ctx.send(file=future.result())\n\n return await message.edit(embed=discord.Embed(\n description='<:Done:835812226345598986> | [Message Link]({}) | [Image Link]({})'.format(\n gif_message.jump_url, gif_message.attachments[0].url),\n colour=discord.Colour.green()))\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def glitch(self, ctx, argument: str = None, level: str = 'low', animated: str = '--true',\n *size) -> typing.Union[typing.Optional[discord.Embed], discord.MessageReference]:\n image = await extract_.get_stream(ctx, query=argument)\n\n if not image:\n return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')\n\n levels = {\n 'low': 2,\n 'medium': 5,\n 'high': 10\n }\n try:\n level = levels.get(level.lower()) if level.lower() in levels else float(level)\n except Exception:\n level = 2\n\n if level < 0 or level > 10:\n return await ctx.send('Max level for glitching images starts at 0 and is capped at 10!')\n\n future = self.loop.run_in_executor(None, glitch_, image, level, animated, size)\n await future\n try:\n return await ctx.send(embed=discord.Embed(\n title='Glitch Effect',\n colour=random.randint(0x000000, 0xFFFFFF)\n ).set_image(url='attachment://glitched.gif'), file=future.result())\n except Exception:\n return await ctx.send('Oops, this level was abit too high for your image - please retry with a lower level')\n\n @commands.command()\n @commands.cooldown(1, 30, commands.BucketType.user)\n async def image(self, ctx, *, query: str = None):\n if not query:\n return await ctx.send('Need to give an image to search for!')\n url = 'https://api.pexels.com/v1/search?query={}&per_page={}'.format(query, random.randint(1, 100))\n auth = self.bot.env('PEXEL_API_TOKEN')\n r = requests.get(url, headers={'Authorization': auth}).json()\n try:\n await ctx.send(\n embed=discord.Embed(\n 
title='Search results for {}'.format(\n query.title()\n ),\n colour=discord.Color.red(),\n ).set_image(url=random.choice(r['photos'])['src']['large2x'])\n )\n except IndexError:\n return await ctx.send('No Image was Found Under the Context **{}**'.format(query.title()))\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def spin(self, ctx, argument: str = None, animate: str = '--true') -> discord.Message:\n image = await extract_.get_stream(ctx, query=argument)\n if not image:\n return await ctx.send('Invalid image provided')\n\n future = await self.loop.run_in_executor(None, spin_, image, animate)\n return await ctx.send(embed=discord.Embed(\n title='Spun around and around',\n colour=random.randint(0x000000, 0xFFFFFF)\n ).set_image(url='attachment://spin.gif'), file=future)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def evilpatrick(self, ctx, argument: str = None) -> discord.MessageReference:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Invalid image provided')\n\n def execute(stream):\n image = Image.open(stream).resize((150, 150)).convert('RGB')\n frames = []\n\n with BytesIO() as buffer:\n with Image.open('./storage/images/evil.gif') as _base:\n for _ in range(_base.n_frames):\n _base.seek(_)\n\n temp = _base.copy().convert('RGBA')\n temp.paste(image, (205, 20))\n\n frames.append(temp)\n\n frames[0].save(\n buffer, 'GIF',\n append_images=frames[1:],\n loop=0, duration=(_base.info.get('duration') or 0),\n save_all=True\n )\n buffer.seek(0)\n return discord.File(fp=buffer, filename='evil.gif')\n image = await self.loop.run_in_executor(None, execute, stream)\n return await ctx.message.reply(\n embed=discord.Embed(\n title='Evil!',\n colour=discord.Colour.red()\n ).set_image(url='attachment://evil.gif'), file=image)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def salt(self, ctx, argument: str = None) -> discord.MessageReference:\n stream = await extract_.get_stream(ctx, query=argument)\n\n if not stream:\n return await ctx.message.reply(content='Invalid image provided')\n\n def execute(stream):\n image = Image.open(stream).resize((300, 300)).convert('RGB')\n frames = []\n\n with BytesIO() as buffer:\n with Image.open('./storage/images/salty.gif') as _base:\n for _ in range(_base.n_frames):\n _base.seek(_)\n\n temp = _base.copy().resize((200, 200)).convert('RGBA')\n image_ = image.copy()\n image_.paste(temp, (120, 10), temp)\n\n frames.append(image_)\n\n frames[0].save(\n buffer, 'GIF',\n append_images=frames[1:],\n loop=0, duration=(_base.info.get('duration') or 0),\n save_all=True\n )\n buffer.seek(0)\n return discord.File(fp=buffer, filename='salty.gif')\n image = await self.loop.run_in_executor(None, execute, stream)\n return await ctx.message.reply(\n embed=discord.Embed(\n title='Salty!',\n colour=discord.Colour.red()\n ).set_image(url='attachment://salty.gif'), file=image)\n\ndef setup(bot):\n bot.add_cog(_Image(bot))\n"
] | [
[
"numpy.array",
"numpy.dstack"
]
] |
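The row above embeds a Discord image-manipulation cog; its `_Image.pixelate` and `_Image.quantize` helpers implement the "8bit" filter by resizing down and back up with nearest-neighbour sampling and then reducing the palette. Below is a minimal, self-contained sketch of that pipeline using Pillow alone — the `eightbit` function name and the generated gradient test image are illustrative additions, not part of the source file.

from io import BytesIO
from PIL import Image


def eightbit(im: Image.Image, small: int = 32, out: int = 1024) -> Image.Image:
    """Pixelate by down/up-sampling with NEAREST, then reduce the palette."""
    pixelated = (
        im.convert("RGB")
          .resize((small, small), resample=Image.NEAREST)
          .resize((out, out), resample=Image.NEAREST)
    )
    return pixelated.quantize()  # same palette reduction as _Image.quantize


if __name__ == "__main__":
    # Self-contained demo: a generated gradient stands in for a Discord avatar.
    src = Image.new("RGB", (256, 256))
    src.putdata([(x, 128, 255 - x) for _ in range(256) for x in range(256)])

    result = eightbit(src)
    buffer = BytesIO()                 # in-memory save, as the cog does before
    result.save(buffer, format="PNG")  # wrapping the bytes in discord.File
    print("8-bit PNG:", len(buffer.getvalue()), "bytes")

Nearest-neighbour resampling is what preserves the hard, blocky pixel edges; a smoothing filter such as BILINEAR would blur them away and defeat the effect.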
NunoEdgarGFlowHub/pyfolio | [
"68efdcc2e2d0f140ddbc408a260c6318ac8b06d3"
] | [
"pyfolio/tears.py"
] | [
"#\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import division\n\nfrom time import time\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport scipy.stats\nimport pandas as pd\n\nfrom . import timeseries\nfrom . import utils\nfrom . import pos\nfrom . import txn\nfrom . import round_trips\nfrom . import plotting\nfrom . import _seaborn as sns\nfrom .plotting import plotting_context\n\ntry:\n from . import bayesian\nexcept ImportError:\n warnings.warn(\n \"Could not import bayesian submodule due to missing pymc3 dependency.\",\n ImportWarning)\n\n\ndef timer(msg_body, previous_time):\n current_time = time()\n run_time = current_time - previous_time\n message = \"\\nFinished \" + msg_body + \" (required {:.2f} seconds).\"\n print(message.format(run_time))\n\n return current_time\n\n\ndef create_full_tear_sheet(returns,\n positions=None,\n transactions=None,\n benchmark_rets=None,\n gross_lev=None,\n slippage=None,\n live_start_date=None,\n sector_mappings=None,\n bayesian=False,\n round_trips=False,\n hide_positions=False,\n cone_std=(1.0, 1.5, 2.0),\n bootstrap=False,\n set_context=True):\n \"\"\"\n Generate a number of tear sheets that are useful\n for analyzing a strategy's performance.\n\n - Fetches benchmarks if needed.\n - Creates tear sheets for returns, and significant events.\n If possible, also creates tear sheets for position analysis,\n transaction analysis, and Bayesian analysis.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - Time series with decimal returns.\n - Example:\n 2015-07-16 -0.012143\n 2015-07-17 0.045350\n 2015-07-20 0.030957\n 2015-07-21 0.004902\n positions : pd.DataFrame, optional\n Daily net position values.\n - Time series of dollar amount invested in each position and cash.\n - Days where stocks are not held can be represented by 0 or NaN.\n - Non-working capital is labelled 'cash'\n - Example:\n index 'AAPL' 'MSFT' cash\n 2004-01-09 13939.3800 -14012.9930 711.5585\n 2004-01-12 14492.6300 -14624.8700 27.1821\n 2004-01-13 -13853.2800 13653.6400 -43.6375\n transactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indicies.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\n gross_lev : pd.Series, optional\n The leverage of a strategy.\n - Time series of the sum of long and short exposure per share\n divided by net asset value.\n - Example:\n 2009-12-04 0.999932\n 2009-12-07 0.999783\n 2009-12-08 0.999880\n 2009-12-09 1.000283\n slippage : int/float, optional\n Basis points of slippage to apply to returns before generating\n tearsheet stats and plots.\n If a value is provided, slippage parameter sweep\n plots will be generated from the unadjusted returns.\n 
Transactions and positions must also be passed.\n - See txn.adjust_returns_for_slippage for more details.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period. This datetime should be normalized.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n bayesian: boolean, optional\n If True, causes the generation of a Bayesian tear sheet.\n round_trips: boolean, optional\n If True, causes the generation of a round trip tear sheet.\n cone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\n bootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\n set_context : boolean, optional\n If True, set default plotting style context.\n - See plotting.context().\n \"\"\"\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n\n # If the strategy's history is longer than the benchmark's, limit strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n if slippage is not None and transactions is not None:\n turnover = txn.get_turnover(positions, transactions,\n period=None, average=False)\n unadjusted_returns = returns.copy()\n returns = txn.adjust_returns_for_slippage(returns, turnover, slippage)\n else:\n unadjusted_returns = None\n\n create_returns_tear_sheet(\n returns,\n live_start_date=live_start_date,\n cone_std=cone_std,\n benchmark_rets=benchmark_rets,\n bootstrap=bootstrap,\n set_context=set_context)\n\n create_interesting_times_tear_sheet(returns,\n benchmark_rets=benchmark_rets,\n set_context=set_context)\n\n if positions is not None:\n create_position_tear_sheet(returns, positions,\n gross_lev=gross_lev,\n hide_positions=hide_positions,\n set_context=set_context,\n sector_mappings=sector_mappings)\n\n if transactions is not None:\n create_txn_tear_sheet(returns, positions, transactions,\n unadjusted_returns=unadjusted_returns,\n set_context=set_context)\n if round_trips:\n create_round_trip_tear_sheet(\n positions=positions,\n transactions=transactions,\n sector_mappings=sector_mappings)\n\n if bayesian:\n create_bayesian_tear_sheet(returns,\n live_start_date=live_start_date,\n benchmark_rets=benchmark_rets,\n set_context=set_context)\n\n\n@plotting_context\ndef create_returns_tear_sheet(returns, live_start_date=None,\n cone_std=(1.0, 1.5, 2.0),\n benchmark_rets=None,\n bootstrap=False,\n return_fig=False):\n \"\"\"\n Generate a number of plots for analyzing a strategy's returns.\n\n - Fetches benchmarks, then creates the plots on a single figure.\n - Plots: rolling returns (with cone), rolling beta, rolling sharpe,\n rolling Fama-French risk factors, drawdowns, underwater plot, monthly\n and annual return plots, daily similarity plots,\n and return quantile box plot.\n - Will also print the start and end dates of the strategy,\n performance statistics, drawdown periods, and the return range.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period.\n cone_std : float, or tuple, optional\n If float, The 
standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\n benchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n bootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n \"\"\"\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n # If the strategy's history is longer than the benchmark's, limit\n # strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n df_cum_rets = timeseries.cum_returns(returns, starting_value=1)\n print(\"Entire data start date: \" + str(df_cum_rets\n .index[0].strftime('%Y-%m-%d')))\n print(\"Entire data end date: \" + str(df_cum_rets\n .index[-1].strftime('%Y-%m-%d')))\n\n print('\\n')\n\n plotting.show_perf_stats(returns, benchmark_rets,\n bootstrap=bootstrap,\n live_start_date=live_start_date)\n\n if live_start_date is not None:\n vertical_sections = 11\n live_start_date = utils.get_utc_timestamp(live_start_date)\n else:\n vertical_sections = 10\n\n if bootstrap:\n vertical_sections += 1\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_rolling_returns = plt.subplot(gs[:2, :])\n ax_rolling_returns_vol_match = plt.subplot(gs[2, :],\n sharex=ax_rolling_returns)\n ax_rolling_beta = plt.subplot(gs[3, :], sharex=ax_rolling_returns)\n ax_rolling_sharpe = plt.subplot(gs[4, :], sharex=ax_rolling_returns)\n ax_rolling_risk = plt.subplot(gs[5, :], sharex=ax_rolling_returns)\n ax_drawdown = plt.subplot(gs[6, :], sharex=ax_rolling_returns)\n ax_underwater = plt.subplot(gs[7, :], sharex=ax_rolling_returns)\n ax_monthly_heatmap = plt.subplot(gs[8, 0])\n ax_annual_returns = plt.subplot(gs[8, 1])\n ax_monthly_dist = plt.subplot(gs[8, 2])\n ax_return_quantiles = plt.subplot(gs[9, :])\n\n plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=cone_std,\n ax=ax_rolling_returns)\n ax_rolling_returns.set_title(\n 'Cumulative Returns')\n\n plotting.plot_rolling_returns(\n returns,\n factor_returns=benchmark_rets,\n live_start_date=live_start_date,\n cone_std=None,\n volatility_match=True,\n legend_loc=None,\n ax=ax_rolling_returns_vol_match)\n ax_rolling_returns_vol_match.set_title(\n 'Cumulative returns volatility matched to benchmark.')\n\n plotting.plot_rolling_beta(\n returns, benchmark_rets, ax=ax_rolling_beta)\n\n plotting.plot_rolling_sharpe(\n returns, ax=ax_rolling_sharpe)\n\n plotting.plot_rolling_fama_french(\n returns, ax=ax_rolling_risk)\n\n # Drawdowns\n plotting.plot_drawdown_periods(\n returns, top=5, ax=ax_drawdown)\n\n plotting.plot_drawdown_underwater(\n returns=returns, ax=ax_underwater)\n\n plotting.show_worst_drawdown_periods(returns)\n\n df_weekly = timeseries.aggregate_returns(returns, 'weekly')\n df_monthly = timeseries.aggregate_returns(returns, 'monthly')\n\n print('\\n')\n plotting.show_return_range(returns, df_weekly)\n\n plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)\n plotting.plot_annual_returns(returns, 
ax=ax_annual_returns)\n plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)\n\n plotting.plot_return_quantiles(\n returns,\n df_weekly,\n df_monthly,\n ax=ax_return_quantiles)\n\n if bootstrap:\n ax_bootstrap = plt.subplot(gs[10, :])\n plotting.plot_perf_stats(returns, benchmark_rets,\n ax=ax_bootstrap)\n\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_position_tear_sheet(returns, positions, gross_lev=None,\n show_and_plot_top_pos=2, hide_positions=False,\n return_fig=False, sector_mappings=None):\n \"\"\"\n Generate a number of plots for analyzing a\n strategy's positions and holdings.\n\n - Plots: gross leverage, exposures, top positions, and holdings.\n - Will also print the top positions held.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n gross_lev : pd.Series, optional\n The leverage of a strategy.\n - See full explanation in create_full_tear_sheet.\n show_and_plot_top_pos : int, optional\n By default, this is 2, and both prints and plots the\n top 10 positions.\n If this is 0, it will only plot; if 1, it will only print.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n Overrides show_and_plot_top_pos to 0 to suppress text output.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n \"\"\"\n\n if hide_positions:\n show_and_plot_top_pos = 0\n vertical_sections = 6 if sector_mappings is not None else 5\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_gross_leverage = plt.subplot(gs[0, :])\n ax_exposures = plt.subplot(gs[1, :], sharex=ax_gross_leverage)\n ax_top_positions = plt.subplot(gs[2, :], sharex=ax_gross_leverage)\n ax_max_median_pos = plt.subplot(gs[3, :], sharex=ax_gross_leverage)\n ax_holdings = plt.subplot(gs[4, :], sharex=ax_gross_leverage)\n\n positions_alloc = pos.get_percent_alloc(positions)\n\n if gross_lev is not None:\n plotting.plot_gross_leverage(returns, gross_lev, ax=ax_gross_leverage)\n\n plotting.plot_exposures(returns, positions_alloc, ax=ax_exposures)\n\n plotting.show_and_plot_top_positions(\n returns,\n positions_alloc,\n show_and_plot=show_and_plot_top_pos,\n hide_positions=hide_positions,\n ax=ax_top_positions)\n\n plotting.plot_max_median_position_concentration(positions,\n ax=ax_max_median_pos)\n\n plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)\n\n if sector_mappings is not None:\n sector_exposures = pos.get_sector_exposures(positions, sector_mappings)\n if len(sector_exposures.columns) > 1:\n sector_alloc = pos.get_percent_alloc(sector_exposures)\n sector_alloc = sector_alloc.drop('cash', axis='columns')\n ax_sector_alloc = plt.subplot(gs[5, :], sharex=ax_gross_leverage)\n plotting.plot_sector_allocations(returns, sector_alloc,\n ax=ax_sector_alloc)\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_txn_tear_sheet(returns, positions, transactions,\n 
unadjusted_returns=None, return_fig=False):\n \"\"\"\n Generate a number of plots for analyzing a strategy's transactions.\n\n Plots: turnover, daily volume, and a histogram of daily volume.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n unadjusted_returns : pd.Series, optional\n Daily unadjusted returns of the strategy, noncumulative.\n Will plot additional swippage sweep analysis.\n - See pyfolio.plotting.plot_swippage_sleep and\n pyfolio.plotting.plot_slippage_sensitivity\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n \"\"\"\n vertical_sections = 5 if unadjusted_returns is not None else 3\n\n fig = plt.figure(figsize=(14, vertical_sections * 6))\n gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)\n ax_turnover = plt.subplot(gs[0, :])\n ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)\n ax_turnover_hist = plt.subplot(gs[2, :])\n\n plotting.plot_turnover(\n returns,\n transactions,\n positions,\n ax=ax_turnover)\n\n plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)\n\n try:\n plotting.plot_daily_turnover_hist(transactions, positions,\n ax=ax_turnover_hist)\n except ValueError:\n warnings.warn('Unable to generate turnover plot.', UserWarning)\n\n if unadjusted_returns is not None:\n ax_slippage_sweep = plt.subplot(gs[3, :])\n plotting.plot_slippage_sweep(unadjusted_returns,\n transactions,\n positions,\n ax=ax_slippage_sweep\n )\n ax_slippage_sensitivity = plt.subplot(gs[4, :])\n plotting.plot_slippage_sensitivity(unadjusted_returns,\n transactions,\n positions,\n ax=ax_slippage_sensitivity\n )\n for ax in fig.axes:\n plt.setp(ax.get_xticklabels(), visible=True)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_round_trip_tear_sheet(positions, transactions,\n sector_mappings=None,\n return_fig=False):\n \"\"\"\n Generate a number of figures and plots describing the duration,\n frequency, and profitability of trade \"round trips.\"\n A round trip is started when a new long or short position is\n opened and is only completed when the number of shares in that\n position returns to or crosses zero.\n\n Parameters\n ----------\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in create_full_tear_sheet.\n sector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n \"\"\"\n\n transactions_closed = round_trips.add_closing_transactions(positions,\n transactions)\n trades = round_trips.extract_round_trips(transactions_closed)\n\n if len(trades) < 5:\n warnings.warn(\n \"\"\"Fewer than 5 round-trip trades made.\n Skipping round trip tearsheet.\"\"\", UserWarning)\n return\n\n ndays = len(positions)\n\n print(trades.drop(['open_dt', 'close_dt', 'symbol'],\n axis='columns').describe())\n print('Percent of round trips profitable = {:.4}%'.format(\n (trades.pnl > 0).mean() * 100))\n\n winning_round_trips = trades[trades.pnl > 0]\n losing_round_trips = trades[trades.pnl < 0]\n print('Mean return per winning round trip = {:.4}'.format(\n winning_round_trips.returns.mean()))\n print('Mean return per losing round trip = {:.4}'.format(\n losing_round_trips.returns.mean()))\n\n print('A decision is made every {:.4} days.'.format(ndays / len(trades)))\n print('{:.4} trading decisions per day.'.format(len(trades) * 1. / ndays))\n print('{:.4} trading decisions per month.'.format(\n len(trades) * 1. / (ndays / 21)))\n\n plotting.show_profit_attribution(trades)\n\n if sector_mappings is not None:\n sector_trades = round_trips.apply_sector_mappings_to_round_trips(\n trades, sector_mappings)\n plotting.show_profit_attribution(sector_trades)\n\n fig = plt.figure(figsize=(14, 3 * 6))\n\n fig = plt.figure(figsize=(14, 3 * 6))\n gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)\n\n ax_trade_lifetimes = plt.subplot(gs[0, :])\n ax_prob_profit_trade = plt.subplot(gs[1, 0])\n ax_holding_time = plt.subplot(gs[1, 1])\n ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])\n ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])\n\n plotting.plot_round_trip_life_times(trades, ax=ax_trade_lifetimes)\n\n plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)\n\n trade_holding_times = [x.days for x in trades['duration']]\n sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)\n ax_holding_time.set(xlabel='holding time in days')\n\n sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)\n ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')\n\n sns.distplot(trades.returns * 100, kde=False,\n ax=ax_pnl_per_round_trip_pct)\n ax_pnl_per_round_trip_pct.set(\n xlabel='Round-trip returns in %')\n\n gs.tight_layout(fig)\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_interesting_times_tear_sheet(\n returns, benchmark_rets=None, legend_loc='best', return_fig=False):\n \"\"\"\n Generate a number of returns plots around interesting points in time,\n like the flash crash and 9/11.\n\n Plots: returns around the dotcom bubble burst, Lehmann Brothers' failure,\n 9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing\n bubble burst, EZB IR, Great Recession (August 2007, March and September\n of 2008, Q1 & Q2 2009), flash crash, April and October 2014.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n benchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n legend_loc : plt.legend_loc, optional\n The legend's location.\n return_fig : boolean, 
optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n \"\"\"\n rets_interesting = timeseries.extract_interesting_date_ranges(returns)\n\n if len(rets_interesting) == 0:\n warnings.warn('Passed returns do not overlap with any '\n 'interesting times.', UserWarning)\n return\n\n print('\\nStress Events')\n print(np.round(pd.DataFrame(rets_interesting).describe().transpose().loc[\n :, ['mean', 'min', 'max']], 3))\n\n if benchmark_rets is None:\n benchmark_rets = utils.get_symbol_rets('SPY')\n # If the strategy's history is longer than the benchmark's, limit\n # strategy\n if returns.index[0] < benchmark_rets.index[0]:\n returns = returns[returns.index > benchmark_rets.index[0]]\n\n bmark_interesting = timeseries.extract_interesting_date_ranges(\n benchmark_rets)\n\n num_plots = len(rets_interesting)\n # 2 plots, 1 row; 3 plots, 2 rows; 4 plots, 2 rows; etc.\n num_rows = int((num_plots + 1) / 2.0)\n fig = plt.figure(figsize=(14, num_rows * 6.0))\n gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)\n\n for i, (name, rets_period) in enumerate(rets_interesting.items()):\n\n # i=0 -> 0, i=1 -> 0, i=2 -> 1 ;; i=0 -> 0, i=1 -> 1, i=2 -> 0\n ax = plt.subplot(gs[int(i / 2.0), i % 2])\n timeseries.cum_returns(rets_period).plot(\n ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)\n timeseries.cum_returns(bmark_interesting[name]).plot(\n ax=ax, color='gray', label='SPY', alpha=0.6)\n ax.legend(['algo',\n 'SPY'],\n loc=legend_loc)\n ax.set_title(name, size=14)\n ax.set_ylabel('Returns')\n ax.set_xlabel('')\n\n plt.show()\n if return_fig:\n return fig\n\n\n@plotting_context\ndef create_bayesian_tear_sheet(returns, benchmark_rets=None,\n live_start_date=None, samples=2000,\n return_fig=False, stoch_vol=False):\n \"\"\"\n Generate a number of Bayesian distributions and a Bayesian\n cone plot of returns.\n\n Plots: Sharpe distribution, annual volatility distribution,\n annual alpha distribution, beta distribution, predicted 1 and 5\n day returns distributions, and a cumulative returns cone plot.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\n benchmark_rets : pd.Series or pd.DataFrame, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\n live_start_date : datetime, optional\n The point in time when the strategy began live\n trading, after its backtest period.\n samples : int, optional\n Number of posterior samples to draw.\n return_fig : boolean, optional\n If True, returns the figure that was plotted on.\n set_context : boolean, optional\n If True, set default plotting style context.\n stoch_vol : boolean, optional\n If True, run and plot the stochastic volatility model\n \"\"\"\n\n if live_start_date is None:\n raise NotImplementedError(\n 'Bayesian tear sheet requires setting of live_start_date'\n )\n\n # benchmark defaults to the S&P 500 (SPY)\n fama_french = False\n if benchmark_rets is None:\n benchmark_rets = pd.DataFrame(\n utils.get_symbol_rets('SPY',\n start=returns.index[0],\n end=returns.index[-1]))\n # unless the user indicates otherwise\n elif isinstance(benchmark_rets, str) and (benchmark_rets ==\n 'Fama-French'):\n fama_french = True\n rolling_window = utils.APPROX_BDAYS_PER_MONTH * 6\n benchmark_rets = timeseries.rolling_fama_french(\n returns, rolling_window=rolling_window)\n\n live_start_date = utils.get_utc_timestamp(live_start_date)\n df_train = 
returns.loc[returns.index < live_start_date]\n df_test = returns.loc[returns.index >= live_start_date]\n\n # Run T model with missing data\n print(\"Running T model\")\n previous_time = time()\n # track the total run time of the Bayesian tear sheet\n start_time = previous_time\n\n trace_t, ppc_t = bayesian.run_model('t', df_train,\n returns_test=df_test,\n samples=samples, ppc=True)\n previous_time = timer(\"T model\", previous_time)\n\n # Compute BEST model\n print(\"\\nRunning BEST model\")\n trace_best = bayesian.run_model('best', df_train,\n returns_test=df_test,\n samples=samples)\n previous_time = timer(\"BEST model\", previous_time)\n\n # Plot results\n\n fig = plt.figure(figsize=(14, 10 * 2))\n gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)\n\n axs = []\n row = 0\n\n # Plot Bayesian cone\n ax_cone = plt.subplot(gs[row, :])\n bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)\n previous_time = timer(\"plotting Bayesian cone\", previous_time)\n\n # Plot BEST results\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n axs.append(plt.subplot(gs[row, 0]))\n axs.append(plt.subplot(gs[row, 1]))\n row += 1\n # Effect size across two\n axs.append(plt.subplot(gs[row, :]))\n\n bayesian.plot_best(trace=trace_best, axs=axs)\n previous_time = timer(\"plotting BEST results\", previous_time)\n\n # Compute Bayesian predictions\n row += 1\n ax_ret_pred_day = plt.subplot(gs[row, 0])\n ax_ret_pred_week = plt.subplot(gs[row, 1])\n day_pred = ppc_t[:, 0]\n p5 = scipy.stats.scoreatpercentile(day_pred, 5)\n sns.distplot(day_pred,\n ax=ax_ret_pred_day\n )\n ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)\n ax_ret_pred_day.set_xlabel('Predicted returns 1 day')\n ax_ret_pred_day.set_ylabel('Frequency')\n ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=ax_ret_pred_day.transAxes)\n previous_time = timer(\"computing Bayesian predictions\", previous_time)\n\n # Plot Bayesian VaRs\n week_pred = (\n np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]\n p5 = scipy.stats.scoreatpercentile(week_pred, 5)\n sns.distplot(week_pred,\n ax=ax_ret_pred_week\n )\n ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)\n ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')\n ax_ret_pred_week.set_ylabel('Frequency')\n ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,\n verticalalignment='bottom',\n horizontalalignment='right',\n transform=ax_ret_pred_week.transAxes)\n previous_time = timer(\"plotting Bayesian VaRs estimate\", previous_time)\n\n # Run alpha beta model\n print(\"\\nRunning alpha beta model\")\n benchmark_rets = benchmark_rets.loc[df_train.index]\n trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,\n bmark=benchmark_rets,\n samples=samples)\n previous_time = timer(\"running alpha beta model\", previous_time)\n\n # Plot alpha and beta\n row += 1\n ax_alpha = plt.subplot(gs[row, 0])\n ax_beta = plt.subplot(gs[row, 1])\n if fama_french:\n sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,\n ax=ax_alpha)\n betas = ['SMB', 'HML', 'UMD']\n nbeta = trace_alpha_beta['beta'].shape[1]\n for i in range(nbeta):\n sns.distplot(trace_alpha_beta['beta'][100:, i], ax=ax_beta,\n label=betas[i])\n plt.legend()\n else:\n sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,\n ax=ax_alpha)\n sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)\n 
ax_alpha.set_xlabel('Annual Alpha')\n ax_alpha.set_ylabel('Belief')\n ax_beta.set_xlabel('Beta')\n ax_beta.set_ylabel('Belief')\n previous_time = timer(\"plotting alpha beta model\", previous_time)\n\n if stoch_vol:\n # run stochastic volatility model\n returns_cutoff = 400\n print(\n \"\\nRunning stochastic volatility model on \"\n \"most recent {} days of returns.\".format(returns_cutoff)\n )\n if df_train.size > returns_cutoff:\n df_train_truncated = df_train[-returns_cutoff:]\n _, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)\n previous_time = timer(\n \"running stochastic volatility model\", previous_time)\n\n # plot log(sigma) and log(nu)\n print(\"\\nPlotting stochastic volatility model\")\n row += 1\n ax_sigma_log = plt.subplot(gs[row, 0])\n ax_nu_log = plt.subplot(gs[row, 1])\n sigma_log = trace_stoch_vol['sigma_log']\n sns.distplot(sigma_log, ax=ax_sigma_log)\n ax_sigma_log.set_xlabel('log(Sigma)')\n ax_sigma_log.set_ylabel('Belief')\n nu_log = trace_stoch_vol['nu_log']\n sns.distplot(nu_log, ax=ax_nu_log)\n ax_nu_log.set_xlabel('log(nu)')\n ax_nu_log.set_ylabel('Belief')\n\n # plot latent volatility\n row += 1\n ax_volatility = plt.subplot(gs[row, :])\n bayesian.plot_stoch_vol(\n df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)\n previous_time = timer(\n \"plotting stochastic volatility model\", previous_time)\n\n total_time = time() - start_time\n print(\"\\nTotal runtime was {:.2f} seconds.\".format(total_time))\n\n gs.tight_layout(fig)\n\n plt.show()\n if return_fig:\n return fig\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.cumprod",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.gridspec.GridSpec"
]
] |
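The round-trip statistics printed by create_round_trip_tear_sheet above (percent profitable, mean return per winning and losing round trip, decisions per day) come down to a few pandas aggregations. A minimal sketch under stated assumptions: the trades frame and the backtest length ndays below are invented, only the column names mirror the tear sheet.

import pandas as pd

# Hypothetical extracted round trips; 'pnl' and 'returns' match the columns used above.
trades = pd.DataFrame({
    'pnl': [120.0, -45.0, 30.0, -10.0, 80.0],
    'returns': [0.012, -0.004, 0.003, -0.001, 0.008],
})
ndays = 60  # assumed number of trading days in the positions index

print('Percent of round trips profitable = {:.4}%'.format(
    (trades.pnl > 0).mean() * 100))
print('Mean return per winning round trip = {:.4}'.format(
    trades.loc[trades.pnl > 0, 'returns'].mean()))
print('Mean return per losing round trip = {:.4}'.format(
    trades.loc[trades.pnl < 0, 'returns'].mean()))
print('{:.4} trading decisions per day.'.format(len(trades) / ndays))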
brsr/mapproj | [
"1ec1694149a69da6393ecb94650f7164e3cfd2e1"
] | [
"bin/circlepack.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 14 14:15:06 2021\n\n@author: brsr\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mapproj\nimport fiona\nfrom shapely.geometry import Point, LineString, MultiPolygon, Polygon\nimport geopandas\nimport pyproj\ngeod = pyproj.Geod(a=1, f=0)\nn = 9\na = np.arctan(1/2)/np.pi*180\nactrlpts3 = np.array([[15+0, 15+36, 15-36],\n [-a, a, a]])\n#actrlpts3 = np.array([[ 0, 0, 90],\n# [90, 0, 0]])\nctrlpoly3 = mapproj.geodesics(actrlpts3[0], actrlpts3[1], geod, includepts=True)\ntgtpts3 = mapproj.complex_to_float2d(1j*np.exp(2j/3*np.arange(3)*np.pi)).T\nbp = mapproj.Barycentric(tgtpts3)\n\ngrid3 = mapproj.Barycentric.grid(1/8)\ngridp3 = mapproj.Barycentric.gridpolys(n=9)\n#%%\ngridbary = mapproj.transeach(bp.transform, gridp3)\nconformal = mapproj.ConformalTri3(actrlpts3, tgtpts3)\ninvframe = mapproj.transeach(conformal.invtransform, gridbary)#slooooow\ninvframev = mapproj.transeach(mapproj.UnitVector.transform, invframe)\ninvframe.plot()\n\n#%%\nres = geod.inv(actrlpts3[0], actrlpts3[1],\n np.roll(actrlpts3[0], -1), np.roll(actrlpts3[1], -1))\ncornerangle = np.pi/180*(res[0] - np.roll(res[1], 1)).mean() #np.pi*2/5 #\nedgelength = res[2].mean()\n\ninitial = conformal.ctrlpts_v\nanglesumtarget = np.ones(shape=(n+1,n+1))\nanglesumtarget = np.tril(anglesumtarget, -1)[::-1]\n#anglesumtarget[..., 0] = 0\n#anglesumtarget[-1] = 0\nanglesumtarget[anglesumtarget == 0] = np.nan\nind = np.arange(0,n)\nedgeweight = np.ones(n)*2\nedgeweight[[0, -1]] = 1\nedge1 = (ind, 0)\nedge2 = (0, ind)\nedge3 = (ind,ind[::-1])\nanglesumtarget[edge1] = 1/2\nanglesumtarget[edge2] = 1/2\nanglesumtarget[edge3] = 1/2\nanglesumtarget *= 2*np.pi\nanglesumtarget[0, 0] = cornerangle\nanglesumtarget[-2, 0] = cornerangle\nanglesumtarget[0, -2] = cornerangle\n\nmsplitframe = np.array([[0, 1, 2],\n [2, 0, 1]])\nmsplit1 = np.tile(msplitframe, (3, n, n))[..., :n,:n]\nmsplit = (msplit1 + np.arange(3)[:, np.newaxis, np.newaxis]) % 3\nmsplit = msplit == 0\nmsplit[:, ~np.isfinite(anglesumtarget[:-1,:-1])] = False\n#neighbors like this\n# n n\n# n x n\n# n n\n\nneighbors = np.array([[ 1, 1, 0, -1, -1, 0],\n [ 0, -1, -1, 0, 1, 1]])\ngrindex = np.array(np.meshgrid(ind, ind))\n\nneighborhood = neighbors[..., np.newaxis, np.newaxis] + grindex[:,np.newaxis]\n\nfindex = np.array(np.where(np.isfinite(anglesumtarget))).T\nr = np.ones(shape=anglesumtarget.shape, dtype=float)*cornerangle/(2*n-2)\nr[~np.isfinite(anglesumtarget)] = np.nan\nr[[0, -2, 0], [0, 0, -2]] /= 3\n#%%\nfor i in range(128):\n x = r[:-1, :-1]\n y = r[neighborhood[0], neighborhood[1]]\n z = np.roll(y, 1, axis=0)\n if np.any(x+y+z > np.pi):\n break\n locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/\n (np.sin(x+y)*np.sin(x+z)))\n #locos_x_yz = np.arccos(((x+y)**2 + (x+z)**2 - (y+z)**2)/\n # (2*(x+y)*(x+z)))\n anglesum = np.nansum(locos_x_yz, axis=0)\n pctdiff = (anglesum/anglesumtarget[:-1,:-1])\n pctdiff /= np.nanmean(pctdiff)\n #pctdiff -= np.clip(pctdiff, 0.9, 1.1)\n #pctdiff /= np.nanmean(pctdiff)\n #ind = np.unravel_index(np.nanargmax(abs(pctdiff)), pctdiff.shape)\n r[:-1, :-1] *= pctdiff\n r *= edgelength/(r[edge1]@edgeweight)\n print(i, np.nanmax(abs(pctdiff-1)))\n if np.nanmax(abs(pctdiff-1)) < 1E-7:\n break\n #print(ind, r[ind], pctdiff[ind])\n\n#print(r[edge1]@edgeweight, edgelength)\nprint(np.round(r[:-1,:-1], 3))\n#%%0.9999999999999746 1.0000000000000149\n#%%\nfor i in range(36*256):\n ind = findex[i % findex.shape[0]]\n x = r[ind[0], ind[1]]\n y = r[neighbors[0] + 
ind[0], neighbors[1] + ind[1]]\n z = np.roll(y, 1, axis=0)\n locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/\n (np.sin(x+y)*np.sin(x+z)))\n anglesum = np.nansum(locos_x_yz, axis=0)\n pctdiff = anglesum/anglesumtarget[ind[0],ind[1]]#np.clip(, 0.8, 1.2)\n r[ind[0], ind[1]] *= pctdiff\n r *= edgelength/(r[edge1]@edgeweight)\n #print(ind, r[ind[0], ind[1]], pctdiff)\n\nprint(r[edge1]@edgeweight, np.pi/2)\nprint(np.round(r[:-1,:-1], 3))\n#%%\nvertices = np.ones((3,n+1,n+1))*np.nan\nvertices[:,0,0] = initial[:,0]\nvertices[:,-2,0] = initial[:,1]\nvertices[:,0,-2] = initial[:,2]\n\nr1 = r[edge1]\nt = (r1[:-1] + r1[1:]).cumsum()/edgelength\nt = np.concatenate([[0,], t])\ne1 = mapproj.slerp(initial[:,0], initial[:,1], t[:, np.newaxis]).T\ne2 = mapproj.slerp(initial[:,0], initial[:,2], t[:, np.newaxis]).T\ne3 = mapproj.slerp(initial[:,2], initial[:,1], t[:, np.newaxis]).T\nvertices[:,edge1[0], edge1[1]] = e1\nvertices[:,edge2[0], edge2[1]] = e2\nvertices[:,edge3[0], edge3[1]] = e3\n#%%\nfor i in range(1, n-1):\n for j in range(1, n-i-1):\n index = np.array([i, j])\n indexnb = index[:,np.newaxis] + neighbors\n vertexnb = vertices[:, indexnb[0], indexnb[1]]\n rnb = r[indexnb[0], indexnb[1]]\n ri = r[i, j]\n filled = np.all(np.isfinite(vertexnb), axis=0)\n vertexnb = vertexnb[:, filled]\n rnb = rnb[filled]\n cl = np.cos(rnb+ri)\n lq = np.linalg.lstsq(vertexnb.T, cl)\n v = lq[0]\n norm = np.linalg.norm(v)\n v /= norm\n vertices[:, i, j] = v\n print(i, j, filled.sum(), lq, norm)\n\nvindex = np.all(np.isfinite(vertices), axis=0)\nresult = mapproj.UnitVector.invtransform_v(vertices)\n#%%\nfig, axes = plt.subplots(ncols = 3, figsize=(10, 8), sharex=True, sharey=True)\naxes[0].plot(vertices[0], vertices[1])\naxes[1].plot(vertices[0], vertices[2])\naxes[2].plot(vertices[1], vertices[2])\nfor ax in axes:\n ax.set_aspect('equal')\n#%%\nfig, ax = plt.subplots(figsize=(10, 8))\ninvframe.plot(ax=ax)\nax.scatter(*result, color='k')\nax.scatter(*actrlpts3, color='y')\n#%%\ntriframe = np.array([[[0,0,1],\n [0,1,0]],\n [[1,0,1],\n [1,1,0]]])\ntris = []\nfor i in range(n-1):\n for j in range(n-i-1):\n for tf in triframe:\n xy = result[:,i+tf[0], j+tf[1]]\n if np.all(np.isfinite(xy)):\n tris.append(Polygon(xy.T))\n\ngptris = geopandas.GeoSeries(tris)\n#use geopandas.intersect to determine which grid cell a point lands in"
] | [
[
"numpy.ones",
"numpy.any",
"numpy.nansum",
"numpy.meshgrid",
"numpy.tril",
"numpy.isfinite",
"numpy.nanmean",
"numpy.cos",
"numpy.round",
"numpy.tile",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.linalg.norm",
"numpy.roll",
"numpy.arctan",
"numpy.linalg.lstsq",
"numpy.array",
"numpy.sin",
"numpy.concatenate"
]
] |
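The relaxation loop in circlepack.py repeatedly evaluates the angle subtended at a circle of radius x by two mutually tangent circles of radii y and z, via the spherical law of cosines on the triangle with side lengths x+y, x+z, y+z. A self-contained sketch of just that step; the radii passed in are made up for illustration.

import numpy as np

def tangency_angle(x, y, z):
    # Spherical law of cosines on the triangle with sides x+y, x+z, y+z;
    # returns the angle at the vertex belonging to the circle of radius x.
    num = np.cos(y + z) - np.cos(x + y) * np.cos(x + z)
    den = np.sin(x + y) * np.sin(x + z)
    return np.arccos(num / den)

# Equal radii give an angle slightly above pi/3, approaching pi/3 as the radii shrink.
print(tangency_angle(0.1, 0.1, 0.1))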
odedzewi/coremltools | [
"fdd5630c423c0fc4f1a04c3f5a3c17b808a15505",
"fdd5630c423c0fc4f1a04c3f5a3c17b808a15505"
] | [
"coremltools/converters/mil/mil/ops/defs/scatter_gather.py",
"coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py"
] | [
"# Copyright (c) 2020, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nimport numpy as np\nimport numbers\n\nfrom coremltools.converters.mil.mil import Operation, types\nfrom coremltools.converters.mil.mil.input_type import (\n DefaultInputs,\n InputSpec,\n IntInputType,\n IntTensorInputType,\n TensorInputType,\n StringInputType,\n)\nfrom coremltools.converters.mil.mil.operation import precondition\nfrom coremltools.converters.mil.mil.ops.defs._op_reqs import register_op\nfrom coremltools.converters.mil.mil.types.symbolic import is_compatible_symbolic_vector, is_symbolic\n\nfrom coremltools.converters.mil.mil.operation import (\n SYMBOL,\n VALUE\n)\n\n\n@register_op(doc_str=\"\")\nclass gather(Operation):\n \"\"\"\n Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,\n similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.\n\n * If ``indices`` is scalar (0-D):\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]\n\n Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.\n\n * If ``indices`` is 1-D tensor:\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]\n\n The output has rank ``rank(x)``.\n\n * In general:\n\n .. math::\n output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =\n .. math::\n x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]\n\n Where ``M = rank(x)``.\n\n Parameters\n ----------\n x: tensor<\\*D,T> (Required)\n indices: tensor<\\*N,i32> (Required)\n * Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``.\n axis: const i32 (Optional. Default=``0``)\n * Negative axis is supported.\n\n Returns\n -------\n tensor<\\*K,T>\n * Where ``K = D[:axis] + N + D[axis+1:]``.\n\n Attributes\n ----------\n T: fp32\n\n References\n ----------\n See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.\n\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntInputType(),\n axis=IntInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n )\n\n def __init__(self, **kwargs):\n super(gather, self).__init__(**kwargs)\n\n @precondition(allow=VALUE | SYMBOL)\n def value_inference(self):\n x = self.x.sym_val\n indices = self.indices.val\n if indices is None:\n # only allow x to be symbolic. 
indices cannot.\n return None\n scalar_indices = isinstance(indices, numbers.Integral)\n axis = self.axis.val\n if scalar_indices:\n res = np.take(x, [indices], axis)\n res2 = np.squeeze(res, axis=axis)\n if isinstance(res2, np.ndarray) and len(res2.shape) == 0:\n # res2 is a scalar, but represented as np.array(symbol,\n # dtype=np.object) which np.squeeze can't remove.\n return res2.item()\n return res2\n return np.take(x, indices, axis)\n\n def type_inference(self):\n out_type = self.x.dtype\n\n if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n output_rank = self.x.rank - 1 + self.indices.rank\n if output_rank == 0:\n # output scalar\n return out_type\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.x.rank\n out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]\n return types.tensor(out_type, out_shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``\n by operation ``mode``.\n\n Example: ``mode == update``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Example: ``mode == add``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +\n .. math::\n x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n data: tensor<\\*D, T> (Required)\n indices: tensor<[C],T> (Required)\n * 1-D tensor.\n updates: tensor<\\*K, T> (Required)\n * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.\n axis: const i32 (Optional)\n * Default to ``0``.\n mode: const string (Optional)\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n * Default value is ``update``.\n\n Returns\n -------\n tensor<\\*D, T>\n * With the same type and shape as input ``x``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n axis=IntInputType(const=True, optional=True),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter, self).__init__(**kwargs)\n\n def type_inference(self):\n if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.data.rank\n expected_updates_shape = (\n self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]\n )\n\n err = \"Updates shape {} is incorrect. 
It should be {}.\".format(self.updates.shape, expected_updates_shape)\n assert is_compatible_symbolic_vector(\n self.updates.shape, tuple(expected_updates_shape)\n ), err\n\n return self.data.sym_type\n\n\n@register_op(doc_str=\"\")\nclass gather_along_axis(Operation):\n \"\"\"\n Take the values along ``axis`` at locations ``indices``.\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n x: tensor<\\*D, T> (Required)\n indices: tensor<\\*K, T> (Required)\n * ``rank(indices) == rank(x)``.\n axis: const i32 (Optional):\n * Default to ``0``.\n\n Returns\n -------\n tensor<\\*D, T>:\n * Output tensor has the same shape as ``indices``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntTensorInputType(),\n axis=IntInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n )\n\n def __init__(self, **kwargs):\n super(gather_along_axis, self).__init__(**kwargs)\n\n @precondition(allow=VALUE)\n def value_inference(self):\n x = self.x.val\n indices = self.indices.val\n axis = self.axis.val\n return np.take_along_axis(x, indices, axis)\n\n def type_inference(self):\n\n if self.x.rank != self.indices.rank:\n raise ValueError(\n \"Rank mismatch between input and indices. \\\n Input rank: {}, indices rank: {}\".format(\n self.x.rank, self.indices.rank\n )\n )\n\n if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.x.rank\n\n for i in range(self.x.rank):\n if i != axis:\n assert self.x.shape[i] == self.indices.shape[i]\n\n return types.tensor(self.x.dtype, self.indices.shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter_along_axis(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``\n by operation ``mode``.\n\n Example: ``mode == update``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Example: ``mode == add``.\n\n * For ``i`` in ``[0, len(indices)]``:\n\n .. math::\n idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]\n .. math::\n output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =\n .. math::\n updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +\n .. math::\n x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]\n\n * For ``j! = i``:\n\n .. math::\n output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =\n .. 
math::\n data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]\n\n Parameters\n ----------\n data: tensor<\\*D, T> (Required)\n indices: tensor<\\*K,T> (Required)\n * ``rank(indices) == rank(data)``.\n updates: tensor<\\*K, T> (Required)\n * Must be the same shape as ``indices``.\n axis: const i32 (Optional)\n * Default to ``0``.\n mode: const string (Optional)\n * Default to ``add``.\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n\n Returns\n -------\n tensor<\\*D, T>\n * With the same type and shape as input ``x``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n axis=IntInputType(const=True, optional=True),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n axis=0,\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter_along_axis, self).__init__(**kwargs)\n\n @precondition(allow=VALUE)\n def value_inference(self):\n data = np.copy(self.data.val)\n indices = self.indices.val\n updates = self.updates.val\n axis = self.axis.val\n np_output = data\n np.put_along_axis(np_output, indices, updates, axis=axis)\n return np_output\n\n def type_inference(self):\n if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:\n raise IndexError(\n \"Axis value {} is out of bounds for {} node {}\".format(\n self.axis.val, self.op_type, self.name\n )\n )\n\n axis = self.axis.val\n axis = axis if axis >= 0 else axis + self.data.rank\n\n assert is_compatible_symbolic_vector(\n self.indices.shape, self.updates.shape\n )\n assert self.data.rank == self.indices.rank\n for i in range(self.data.rank):\n if i != axis:\n assert self.data.shape[i] == self.indices.shape[i]\n\n return self.data.sym_type\n\n\n@register_op(doc_str=\"\")\nclass gather_nd(Operation):\n \"\"\"\n Gather slices from ``x`` according to ``indices``, similar to `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.\n\n The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice\n of ``x``:\n\n .. 
math::\n output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]\n\n Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank\n ``rank(x) - indices.shape[-1]``.\n\n Parameters\n ----------\n x: tensor<\\*D,T> (Required)\n indices: tensor<\\*K,i32> (Required)\n\n Returns\n -------\n tensor<\\*V,T>\n * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.\n\n Attributes\n ----------\n T: fp32\n\n References\n ----------\n See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.\n \"\"\"\n\n input_spec = InputSpec(\n x=TensorInputType(),\n indices=IntTensorInputType(),\n )\n\n def __init__(self, **kwargs):\n super(gather_nd, self).__init__(**kwargs)\n\n def type_inference(self):\n assert self.indices.shape[-1] <= self.x.rank\n out_type = self.x.dtype\n out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]\n return types.tensor(out_type, out_shape)\n\n\n@register_op(doc_str=\"\")\nclass scatter_nd(Operation):\n \"\"\"\n Scatter ``updates`` to ``data`` at locations ``indices``.\n\n The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a\n slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``\n has rank ``rank(data) - indices.shape[-1]``.\n\n * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and\n the op updates ``output`` as follows:\n\n .. math::\n output[indices[i_0, ..., i_{K-2}]]= updates[indices[i_0, ..., i_{K-2}]]\n\n * Example: ``mode == add``. The update rule is:\n\n .. math::\n output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]\n\n Parameters\n ----------\n data: tensor<\\*D,T> (Required)\n indices: tensor<\\*K,i32> (Required)\n updates: tensor<\\*K, T> (Required)\n * Must be the shape as ``K[:-1]+data.shape[K[-1]:]``.\n mode: const string (Optional)\n * Default to ``add``.\n * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,\n ``div``, ``max``, ``min``.\n\n Returns\n -------\n tensor<\\*D,T>\n * A tensor with the same shape and type as ``data``.\n\n Attributes\n ----------\n T: fp32\n \"\"\"\n\n input_spec = InputSpec(\n data=TensorInputType(),\n indices=IntTensorInputType(),\n updates=TensorInputType(),\n mode=StringInputType(const=True, optional=True),\n )\n\n def default_inputs(self):\n return DefaultInputs(\n mode=\"add\",\n )\n\n def __init__(self, **kwargs):\n super(scatter_nd, self).__init__(**kwargs)\n\n def type_inference(self):\n assert self.indices.shape[-1] <= self.data.rank\n expected_updates_shape = (\n self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]\n )\n assert is_compatible_symbolic_vector(\n self.updates.shape, tuple(expected_updates_shape)\n )\n return self.data.sym_type\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2020, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nfrom coremltools.converters.mil.mil.passes.pass_registry import register_pass\nfrom coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass\nfrom coremltools.converters.mil.input_types import ImageType\n# import mil internal ops to add it to the builder\nfrom coremltools.converters.mil.mil.ops import defs as _ops\nfrom coremltools.converters.mil.mil import Builder as mb\nfrom coremltools.converters.mil.mil.types import nptype_from_builtin\n\nimport numpy as np\n\n@register_pass(namespace=\"mil_backend\")\nclass insert_image_preprocessing_ops(AbstractGraphPass):\n \"\"\"\n Insert preprocessing ops, right after the input if its of type Image\n \"\"\"\n def apply(self, prog):\n for f_name, f in prog.functions.items():\n if f_name == 'main':\n _insert_image_preprocessing_ops(f, prog)\n\n\ndef _insert_image_preprocessing_ops(block, prog):\n input_types = list(prog.main_input_types)\n\n for input_type in input_types:\n if isinstance(input_type, ImageType):\n if input_type.name not in block.inputs:\n continue\n\n input_var = block.inputs[input_type.name]\n placeholder_op = block.placeholder_inputs[input_type.name]\n first_op = block.operations[0]\n old_var = placeholder_op.outputs[0]\n has_bias = np.any(np.array(input_type.bias) != 0)\n with block:\n last_output = input_var\n input_nptype = nptype_from_builtin(type(last_output.dtype()))\n if input_type.scale != 1:\n last_output = mb.mul(x=last_output,\n y=np.array(input_type.scale, dtype=input_nptype),\n before_op=first_op, name=input_var.name + \"__scaled__\")\n if has_bias:\n if input_type.color_layout == \"G\":\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype),\n before_op=first_op, name=input_var.name + \"__biased__\")\n else:\n if len(last_output.shape) == 3:\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]),\n before_op=first_op, name=input_var.name + \"__biased__\")\n elif len(last_output.shape) == 4:\n last_output = mb.add(x=last_output,\n y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]),\n before_op=first_op, name=input_var.name + \"__biased__\")\n else:\n raise TypeError(\"Unsupported rank for image input type.\")\n\n if last_output != input_var:\n block.replace_uses_of_var_after_op(anchor_op=last_output.op,\n old_var=old_var,\n new_var=last_output)\n"
] | [
[
"numpy.squeeze",
"numpy.take",
"numpy.copy",
"numpy.take_along_axis",
"numpy.put_along_axis"
],
[
"numpy.array"
]
] |
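The gather op's type_inference above computes the output shape as x.shape[:axis] + indices.shape + x.shape[axis+1:], and its value_inference defers to np.take. A quick NumPy check of that shape rule; the tensor sizes are arbitrary.

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
indices = np.array([[2, 1], [0, 3]])          # index tensor of rank 2
axis = 2
out = np.take(x, indices, axis=axis)          # same call value_inference relies on
expected = x.shape[:axis] + indices.shape + x.shape[axis + 1:]
assert out.shape == expected                  # (2, 3, 2, 2)
print(out.shape)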
Tabor-Research-Group/ChemOS | [
"50117f572e95e68dc4dccb624cedb28dbfc6e419"
] | [
"ParamGenerator/Spearmint/spearmint/utils/compression.py"
] | [
"# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\nimport zlib\nimport numpy as np\n\nCOMPRESS_TYPE = 'compressed array'\n\n# TODO: see if there is a better way to encode this than base64\n# It takes about 0.65 seconds to compress a 1000x1000 array on a 2011 Macbook air\ndef compress_array(a):\n return {'ctype' : COMPRESS_TYPE,\n 'shape' : list(a.shape),\n 'value' : (zlib.compress(a))}#.encode('base64'))}\n\n# It takes about 0.15 seconds to decompress a 1000x1000 array on a 2011 Macbook air\ndef decompress_array(a):\n# return np.fromstring(zlib.decompress(a['value'].decode('base64'))).reshape(a['shape'])\n return np.fromstring(zlib.decompress(a['value'])).reshape(a['shape'])\n\ndef compress_nested_container(u_container):\n if isinstance(u_container, dict):\n cdict = {}\n for key, value in u_container.items():\n if isinstance(value, dict) or isinstance(value, list):\n cdict[key] = compress_nested_container(value)\n else:\n if isinstance(value, np.ndarray):\n cdict[key] = compress_array(value)\n else:\n cdict[key] = value\n\n return cdict\n elif isinstance(u_container, list):\n clist = []\n for value in u_container:\n if isinstance(value, dict) or isinstance(value, list):\n clist.append(compress_nested_container(value))\n else:\n if isinstance(value, np.ndarray):\n clist.append(compress_array(value))\n else:\n clist.append(value)\n\n return clist\n\ndef decompress_nested_container(c_container):\n if isinstance(c_container, dict):\n# if c_container.has_key('ctype') and c_container['ctype'] == COMPRESS_TYPE:\n if 'ctype' in c_container.keys() and c_container['ctype'] == COMPRESS_TYPE:\n try:\n return decompress_array(c_container)\n except:\n raise Exception('Container does not contain a valid array.')\n else:\n udict = {}\n for key, value in c_container.items():\n if isinstance(value, dict) or isinstance(value, list):\n udict[key] = decompress_nested_container(value)\n else:\n udict[key] = value\n\n return udict\n elif isinstance(c_container, list):\n ulist = []\n for value in c_container:\n if isinstance(value, dict) or isinstance(value, list):\n ulist.append(decompress_nested_container(value))\n else:\n ulist.append(value)\n\n return ulist\n\ndef test_compression():\n b = np.random.randn(10)\n c = np.random.randn(5,1)\n e = np.random.randn(2,3)\n f = np.random.randn(1,2)\n g = np.random.randn(4,2,3)\n\n d = {'a': {'b': b, 'c': c}, 'e': [e,[f,g]]}\n\n dc = compress_nested_container(d)\n du = decompress_nested_container(dc)\n\n v1 = [d['a']['b'], d['a']['c'], d['e'][0], d['e'][1][0], d['e'][1][1]]\n v2 = [du['a']['b'], du['a']['c'], du['e'][0], du['e'][1][0], du['e'][1][1]]\n\n comp = [np.all(i==j) for i,j in zip(v1,v2)]\n\n return np.all(comp)\n\nif __name__ == '__main__':\n test_compression()\n"
] | [
[
"numpy.all",
"numpy.random.randn"
]
] |
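compress_array and decompress_array above pair zlib with the array's raw buffer (np.fromstring is the now-deprecated spelling of the read side). A hedged sketch of the same round trip using tobytes and np.frombuffer instead, on a made-up float64 array; the dict keys simply mirror the format used above.

import zlib
import numpy as np

a = np.random.randn(4, 3)
packed = {'ctype': 'compressed array',
          'shape': list(a.shape),
          'value': zlib.compress(a.tobytes())}
# np.frombuffer defaults to float64, which matches the array created above.
restored = np.frombuffer(zlib.decompress(packed['value'])).reshape(packed['shape'])
assert np.allclose(a, restored)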
Hanscal/unlp | [
"93a630cac7957f1ddd38f34403ec6577a277e10a"
] | [
"unlp/unsupervised/Word2Vec/get_file.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@description: Download file.\n\"\"\"\n\nimport hashlib\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport time\nimport typing\nimport zipfile\nfrom pathlib import Path\n\nimport numpy as np\nimport six\nfrom six.moves.urllib.error import HTTPError\nfrom six.moves.urllib.error import URLError\nfrom six.moves.urllib.request import urlretrieve\n\n\nclass Progbar(object):\n \"\"\"\n Displays a progress bar.\n\n :param target: Total number of steps expected, None if unknown.\n :param width: Progress bar width on screen.\n :param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n :param stateful_metrics: Iterable of string names of metrics that\n should *not* be averaged over time. Metrics in this list\n will be displayed as-is. All others will be averaged\n by the progbar before display.\n :param interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n\n def __init__(\n self,\n target,\n width=30,\n verbose=1,\n interval=0.05,\n ):\n \"\"\"Init.\"\"\"\n self.target = target\n self.width = width\n self.verbose = verbose\n self.interval = interval\n\n self._dynamic_display = ((hasattr(sys.stdout,\n 'isatty') and sys.stdout.isatty()\n ) or 'ipykernel' in sys.modules)\n self._total_width = 0\n self._seen_so_far = 0\n self._start = time.time()\n self._last_update = 0\n\n def update(self, current):\n \"\"\"Updates the progress bar.\"\"\"\n self._seen_so_far = current\n\n now = time.time()\n info = ' - {0:.0f}s'.format(now - self._start)\n if self.verbose == 1:\n if (now - self._last_update < self.interval and self.target is not\n None and current < self.target):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.floor(np.log10(self.target))) + 1\n bar = '{2:{0:d}d}/{1} ['.format(\n numdigits, self.target, current)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '{0:7d}/Unknown'.format(current)\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n if self.target is not None and current < self.target:\n eta = int(time_per_unit * (self.target - current))\n if eta > 3600:\n eta_format = ('{0:d}:{1:02d}:{2:02d}'.format(\n eta // 3600, (eta % 3600) // 60, eta % 60))\n elif eta > 60:\n eta_format = '{0:d}:{1:02d}'.format(eta // 60, eta % 60)\n else:\n eta_format = '{0:d}s'.format(eta)\n\n info = ' - ETA: {0}'.format(eta_format)\n else:\n if time_per_unit >= 1:\n info += ' {0:.0f}s/step'.format(time_per_unit)\n elif time_per_unit >= 1e-3:\n info += ' {0:.0f}ms/step'.format(time_per_unit * 1e3)\n else:\n info += ' {0:.0f}us/step'.format(time_per_unit * 1e6)\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if self.target is not None and current >= self.target:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is None or current >= self.target:\n info += '\\n'\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now\n\n\ndef _extract_archive(file_path, path='.', archive_format='auto'):\n \"\"\"\n Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.\n\n :param file_path: path to the archive file\n :param path: path to extract the archive file\n :param archive_format: Archive format to try for extracting the file.\n Options are 'auto', 'tar', 'zip', and None.\n 'tar' includes tar, tar.gz, and tar.bz files.\n The default 'auto' is ['tar', 'zip'].\n None or an empty list will return no matches found.\n\n :return: True if a match was found and an archive extraction was completed,\n False otherwise.\n \"\"\"\n if archive_format is None:\n return False\n if archive_format == 'auto':\n archive_format = ['tar', 'zip']\n if isinstance(archive_format, six.string_types):\n archive_format = [archive_format]\n\n for archive_type in archive_format:\n if archive_type == 'tar':\n open_fn = tarfile.open\n is_match_fn = tarfile.is_tarfile\n if archive_type == 'zip':\n open_fn = zipfile.ZipFile\n is_match_fn = zipfile.is_zipfile\n\n if is_match_fn(file_path):\n with open_fn(file_path) as archive:\n try:\n archive.extractall(path)\n except (tarfile.TarError, RuntimeError,\n KeyboardInterrupt):\n if os.path.exists(path):\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n raise\n return True\n return False\n\n\ndef get_file(\n fname: str = None,\n origin: str = None,\n untar: bool = False,\n extract: bool = False,\n md5_hash: typing.Any = None,\n file_hash: typing.Any = None,\n hash_algorithm: str = 'auto',\n archive_format: str = 'auto',\n cache_subdir: typing.Union[Path, str] = 'data',\n cache_dir: typing.Union[Path, str] = 'dataset',\n verbose: int = 1\n) -> str:\n \"\"\"\n Downloads a file from a URL if it not already in the cache.\n\n By default the file at the url `origin` is downloaded to the\n cache_dir `~/.project/datasets`, placed in the cache_subdir `data`,\n and given the filename `fname`. The final location of a file\n `example.txt` would therefore be `~/.project/datasets/data/example.txt`.\n\n Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.\n Passing a hash will verify the file after download. 
The command line\n programs `shasum` and `sha256sum` can compute the hash.\n\n :param fname: Name of the file. If an absolute path `/path/to/file.txt` is\n specified the file will be saved at that location.\n :param origin: Original URL of the file.\n :param untar: Deprecated in favor of 'extract'. Boolean, whether the file\n should be decompressed.\n :param md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file\n for verification.\n :param file_hash: The expected hash string of the file after download.\n The sha256 and md5 hash algorithms are both supported.\n :param cache_subdir: Subdirectory under the cache dir where the file is\n saved. If an absolute path `/path/to/folder` is specified the file\n will be saved at that location.\n :param hash_algorithm: Select the hash algorithm to verify the file.\n options are 'md5', 'sha256', and 'auto'. The default 'auto' detects\n the hash algorithm in use.\n :papram extract: True tries extracting the file as an Archive, like tar\n or zip.\n :param archive_format: Archive format to try for extracting the file.\n Options are 'auto', 'tar', 'zip', and None.\n 'tar' includes tar, tar.gz, and tar.bz files.\n The default 'auto' is ['tar', 'zip'].\n None or an empty list will return no matches found.\n :param cache_dir: Location to store cached files, when None it defaults to\n the [project.USER_DATA_DIR](~/.project/datasets).\n :param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n\n :return: Path to the downloaded file.\n \"\"\"\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.text2vec')\n datadir = os.path.join(datadir_base, cache_subdir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated because the file hash '\n 'does not match the original value of file_hash.'\n ' We will re-download the data.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(\n target=total_size, verbose=verbose)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {} : {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt):\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath\n\n\ndef validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n \"\"\"\n Validates a file against a sha256 or md5 hash.\n\n :param fpath: path 
to the file being validated\n :param file_hash: The expected hash string of the file.\n The sha256 and md5 hash algorithms are both supported.\n :param algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.\n The default 'auto' detects the hash algorithm in use.\n :param chunk_size: Bytes to read at a time, important for large files.\n\n :return: Whether the file is valid.\n \"\"\"\n if ((algorithm == 'sha256') or (algorithm == 'auto' and len(\n file_hash) == 64)):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False\n\n\ndef hash_file(fpath, algorithm='sha256', chunk_size=65535):\n \"\"\"\n Calculates a file sha256 or md5 hash.\n\n :param fpath: path to the file being validated\n :param algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.\n The default 'auto' detects the hash algorithm in use.\n :param chunk_size: Bytes to read at a time, important for large files.\n\n :return: The file hash.\n \"\"\"\n if algorithm == 'sha256':\n hasher = hashlib.sha256()\n else:\n hasher = hashlib.md5()\n\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n\n return hasher.hexdigest()\n"
] | [
[
"numpy.log10"
]
] |
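validate_file above chooses the algorithm from the digest length: a 64-character hex string is treated as sha256, anything else falls back to md5, and the file itself is hashed in fixed-size chunks. A small sketch of that selection rule on an in-memory payload; the payload bytes are invented.

import hashlib

payload = b'example payload'
sha = hashlib.sha256(payload).hexdigest()   # 64 hex characters -> 'sha256' branch
md5 = hashlib.md5(payload).hexdigest()      # 32 hex characters -> 'md5' branch
assert len(sha) == 64 and len(md5) == 32
print(sha[:16], md5[:16])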
hahahawu/Tagger | [
"180a0412abf571797638d024b8dacf9d776ee6f9"
] | [
"scripts/input_converter.py"
] | [
"# input_converter.py\n# author: Playinf\n# email: [email protected]\n\n\nimport os\nimport six\nimport json\nimport random\nimport argparse\nimport tensorflow as tf\n\n\ndef load_vocab(filename):\n fd = open(filename, \"r\")\n\n count = 0\n vocab = {}\n for line in fd:\n word = line.strip()\n vocab[word] = count\n count += 1\n\n fd.close()\n return vocab\n\n\ndef to_json(dictionary):\n \"\"\" Convert python dictionary to JSON format \"\"\"\n return json.dumps(dictionary)\n\n\ndef to_dictionary(example):\n \"\"\" Convert JSON/tf.train.Example to python dictionary \"\"\"\n if isinstance(example, str):\n dictionary = json.loads(example)\n elif isinstance(example, tf.train.Example):\n dictionary = {}\n keys = example.features.feature.keys()\n values = example.features.feature.values()\n\n for (k, v) in zip(keys, values):\n int64_list = list(v.int64_list.value)\n float_list = list(v.float_list.value)\n bytes_list = list(v.bytes_list.value)\n\n if int64_list:\n dictionary[k] = int64_list\n elif float_list:\n dictionary[k] = float_list\n elif bytes_list:\n dictionary[k] = bytes_list\n else:\n raise ValueError(\"All lists are empty.\")\n else:\n raise ValueError(\"Unsupported format\")\n\n return dictionary\n\n\ndef to_example(dictionary):\n \"\"\" Convert python dictionary to tf.train.Example \"\"\"\n features = {}\n\n for (k, v) in six.iteritems(dictionary):\n if not v:\n raise ValueError(\"Empty generated field: %s\", str((k, v)))\n\n if isinstance(v[0], six.integer_types):\n int64_list = tf.train.Int64List(value=v)\n features[k] = tf.train.Feature(int64_list=int64_list)\n elif isinstance(v[0], float):\n float_list = tf.train.FloatList(value=v)\n features[k] = tf.train.Feature(float_list=float_list)\n elif isinstance(v[0], six.string_types):\n bytes_list = tf.train.BytesList(value=v)\n features[k] = tf.train.Feature(bytes_list=bytes_list)\n else:\n raise ValueError(\"Value is neither an int nor a float; \"\n \"v: %s type: %s\" % (str(v[0]), str(type(v[0]))))\n\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n\ndef read_records(filename):\n \"\"\" Read TensorFlow record \"\"\"\n reader = tf.python_io.tf_record_iterator(filename)\n records = []\n\n for record in reader:\n records.append(record)\n if len(records) % 10000 == 0:\n tf.logging.info(\"read: %d\", len(records))\n\n return records\n\n\ndef write_records(records, out_filename):\n \"\"\" Write to TensorFlow record \"\"\"\n writer = tf.python_io.TFRecordWriter(out_filename)\n\n for count, record in enumerate(records):\n writer.write(record)\n if count % 10000 == 0:\n tf.logging.info(\"write: %d\", count)\n\n writer.close()\n\n\ndef convert_record_to_json(pattern, output_name, output_dir, num_shards=1):\n \"\"\" Convert TensorFlow record to JSON format \"\"\"\n output_files = []\n writers = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.gfile.GFile(output_file, \"w\"))\n\n filenames = tf.gfile.Glob(pattern)\n records = []\n\n for filename in filenames:\n records.extend(read_records(filename))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n example = tf.train.Example()\n example.ParseFromString(record)\n features = to_dictionary(example)\n json_str = to_json(features)\n writers[shard].write(json_str + \"\\n\")\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\n# format:\n# pred-pos 
tokens ||| labels\ndef convert_plain_to_json(name, vocabs, output_name, output_dir, num_shards,\n lower=True, shuffle=True):\n \"\"\" Convert plain SRL data to TensorFlow record \"\"\"\n vocab_token = load_vocab(vocabs[0])\n vocab_label = load_vocab(vocabs[1])\n records = []\n unk = vocab_token[\"<unk>\"]\n\n with open(name) as fd:\n for line in fd:\n features, labels = line.strip().split(\"|||\")\n features = features.strip().split(\" \")\n labels = labels.strip().split(\" \")\n pred_pos = features[0]\n inputs = features[1:]\n\n if lower:\n inputs = [item.lower() for item in inputs]\n\n inputs = [vocab_token[item] if item in vocab_token else unk\n for item in inputs]\n labels = [vocab_label[item] for item in labels]\n preds = [0 for _ in inputs]\n preds[int(pred_pos)] = 1\n\n feature = {\n \"inputs\": inputs,\n \"preds\": preds,\n \"targets\": labels\n }\n records.append(feature)\n\n if shuffle:\n random.shuffle(records)\n\n writers = []\n output_files = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.gfile.GFile(output_file, \"w\"))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n features = record\n json_str = to_json(features)\n writers[shard].write(json_str + \"\\n\")\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\n# format:\n# pred-pos tokens ||| labels\ndef convert_plain_to_record(name, vocabs, output_name, output_dir, num_shards,\n lower=True, shuffle=True):\n \"\"\" Convert plain SRL data to TensorFlow record \"\"\"\n vocab_token = load_vocab(vocabs[0])\n vocab_label = load_vocab(vocabs[1])\n records = []\n unk = vocab_token[\"<unk>\"]\n\n with open(name) as fd:\n for line in fd:\n features, labels = line.strip().split(\"|||\")\n features = features.strip().split()\n labels = labels.strip().split()\n pred_pos = features[0]\n inputs = features[1:]\n\n if lower:\n inputs = [item.lower() for item in inputs]\n\n inputs = [vocab_token[item] if item in vocab_token else unk\n for item in inputs]\n labels = [vocab_label[item] for item in labels]\n preds = [0 for _ in inputs]\n preds[int(pred_pos)] = 1\n\n feature = {\n \"inputs\": inputs,\n \"preds\": preds,\n \"targets\": labels\n }\n records.append(feature)\n\n if shuffle:\n random.shuffle(records)\n\n output_files = []\n writers = []\n\n for shard in xrange(num_shards):\n output_filename = \"%s-%.5d-of-%.5d\" % (output_name, shard, num_shards)\n output_file = os.path.join(output_dir, output_filename)\n output_files.append(output_file)\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n counter, shard = 0, 0\n\n for record in records:\n counter += 1\n example = to_example(record)\n writers[shard].write(example.SerializeToString())\n shard = (shard + 1) % num_shards\n\n for writer in writers:\n writer.close()\n\n\ndef parse_args():\n msg = \"convert srl data to TensorFlow record format\"\n usage = \"srl_input_converter.py [<args>] [-h | --help]\"\n parser = argparse.ArgumentParser(description=msg, usage=usage)\n\n msg = \"path of source file\"\n parser.add_argument(\"--input_path\", required=True, type=str, help=msg)\n msg = \"output name\"\n parser.add_argument(\"--output_name\", required=True, type=str, help=msg)\n msg = \"output directory\"\n parser.add_argument(\"--output_dir\", required=True, type=str, help=msg)\n msg = \"path of vocabulary\"\n parser.add_argument(\"--vocab\", type=str, 
nargs=2, help=msg)\n msg = \"number of output shards\"\n parser.add_argument(\"--num_shards\", default=100, type=int, help=msg)\n msg = \"shuffle inputs\"\n parser.add_argument(\"--shuffle\", action=\"store_true\", help=msg)\n msg = \"use lowercase\"\n parser.add_argument(\"--lower\", action=\"store_true\", help=msg)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n convert_plain_to_record(args.input_path, args.vocab, args.output_name,\n args.output_dir, args.num_shards, args.lower,\n args.shuffle)\n"
] | [
[
"tensorflow.logging.info",
"tensorflow.gfile.GFile",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Int64List",
"tensorflow.gfile.Glob",
"tensorflow.train.Feature",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.BytesList",
"tensorflow.train.Example"
]
] |
dmachlanski/ce807 | [
"17c9b7ddd71906c018cd213a674f37cbed36856d"
] | [
"run.py"
] | [
"import numpy as np\nimport pandas as pd\nimport re, argparse, datetime\nfrom timeit import default_timer\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\nfrom sklearn.model_selection import train_test_split, cross_validate\nfrom sklearn.metrics import f1_score, make_scorer\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n\ndef get_parser():\n \"\"\" Builds the argument parser for the program. \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', type=str, dest='clf_key', default='dt', choices=['dt', 'xts', 'rf'], help='A classifier to use.')\n parser.add_argument('-m', type=str, dest='mode', default='test', choices=['cv', 'test'], help='Mode to run the program in (cross-validation or test).')\n parser.add_argument('-k', type=int, dest='cv', default=5, help='Number of folds in KFold cross-validation.')\n parser.add_argument('-d', '--data', type=str, dest='data_name', default='econbiz', help='Name of the dataset to use (econbiz or pubmed).')\n parser.add_argument('-f', type=float, dest='data_fraction', default=0.1, help='The fraction of the data to be used (0, 1>.')\n parser.add_argument('-t', type=float, dest='test_size', default=0.1, help='Test size (0, 1>.')\n parser.add_argument('--max_depth', type=int, dest='max_depth', default=None, help='The maximum depth of the tree.')\n parser.add_argument('--min_ss', type=int, dest='min_ss', default=2, help='The minimum number of samples required to split an internal tree node.')\n parser.add_argument('--max_features', type=str, dest='max_features', default=None, help='The number of features to consider when looking for the best split in the tree.')\n parser.add_argument('-n', type=int, dest='n_estimators', default=10, help='The number of estimators in the ensemble.')\n parser.add_argument('-j', type=int, dest='n_jobs', default=-1, help='The number of jobs to run in parallel.')\n parser.add_argument('-v', type=int, dest='verbose', default=0, help='Verbosity of the program.')\n parser.add_argument('-b', '--batch', dest='is_batch_mode', action='store_true', default=False, help='Whether the program runs in a batch mode (affects file locations).')\n\n return parser\n\ndef get_data(options):\n \"\"\" Loads and pre-processes the data. \"\"\"\n if options.verbose > 0:\n print(f'Loading data [dataset: {options.data_name}, fraction: {options.data_fraction}, test size: {options.test_size}]')\n \n # Load the data.\n location_prefix = '../../' if options.is_batch_mode else ''\n data = pd.read_csv(f'{location_prefix}data/{options.data_name}.csv')\n\n # Get raw values from the DataFrame.\n X_all = data['title'].values\n # Labels are separated by a '\\t' character. Convert them into a list of labels per each data row.\n Y_all = [x.split('\\t') for x in data['labels'].values]\n\n # Get only a fraction of the data if necessary\n if options.data_fraction < 1.0:\n data_slice = int(options.data_fraction * X_all.shape[0])\n X_raw, Y_raw = X_all[:data_slice], Y_all[:data_slice]\n else:\n X_raw, Y_raw = X_all, Y_all\n\n # Allow for tokens fitting into the following pattern only.\n word_regexp = r\"(?u)\\b[a-zA-Z_][a-zA-Z_]+\\b\"\n # Take only the most frequent 25k words. 
Use unigrams.\n terms = CountVectorizer(input='content', stop_words='english', binary=False, token_pattern=word_regexp, max_features=25000, ngram_range=(1, 1))\n X = terms.fit_transform(X_raw)\n\n # Binrize the labels (convert them into a sparse matrix of one-hot vectors).\n mlb = MultiLabelBinarizer(sparse_output=True)\n Y = mlb.fit_transform(Y_raw)\n\n return train_test_split(X, Y, test_size=options.test_size)\n\ndef get_model(options):\n \"\"\" Prepare a classifier for training. \"\"\"\n classifiers = {\n \"dt\" : DecisionTreeClassifier(max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features),\n \"xts\" : ExtraTreesClassifier(n_estimators=options.n_estimators,\n n_jobs=options.n_jobs,\n max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features),\n \"rf\" : RandomForestClassifier(n_estimators=options.n_estimators,\n n_jobs=options.n_jobs,\n max_depth=options.max_depth,\n min_samples_split=options.min_ss,\n max_features=options.max_features)\n }\n\n # Prepare the pipeline that consists of TF-IDF representation and a classifier.\n trf = TfidfTransformer(sublinear_tf=False, use_idf=True, norm='l2')\n clf = Pipeline([(\"trf\", trf), (\"clf\", classifiers[options.clf_key])])\n\n return clf\n\nif __name__ == \"__main__\":\n # Get and parse passed arguments.\n parser = get_parser()\n options = parser.parse_args()\n\n if options.verbose > 0:\n print('### Starting ###')\n print('Arguments:', options)\n\n X_train, X_test, Y_train, Y_test = get_data(options)\n\n clf = get_model(options)\n\n # The program can be run in either a 'cross-validation' or a 'test' mode.\n # The former performs k-fold cross-validation, while the latter fits the selected model\n # on the training data and runs predictions against the test set.\n # Both modes report samples-based F1-score, fitting time and prediction time (in seconds).\n if options.mode == 'cv':\n if options.verbose > 0:\n print(f'Running {options.cv}-fold cross-validation')\n\n scores = cross_validate(clf, X_train.toarray(), Y_train.toarray(), cv=options.cv,\n scoring=make_scorer(f1_score, average='samples'), n_jobs=options.n_jobs, verbose=options.verbose)\n\n test_score = scores['test_score']\n fit_time = scores['fit_time']\n score_time = scores['score_time']\n print(\"F1-score: %0.2f (+/- %0.2f)\" % (test_score.mean(), test_score.std()))\n print(\"Fit time: %0.2f (+/- %0.2f)\" % (fit_time.mean(), fit_time.std()))\n print(\"Prediction time: %0.2f (+/- %0.2f)\" % (score_time.mean(), score_time.std()))\n else:\n if options.verbose > 0:\n print('Training the model')\n \n fit_time_start = default_timer()\n clf.fit(X_train.toarray(), Y_train.toarray())\n fit_time_end = default_timer()\n\n if options.verbose > 0:\n print('Running predictions')\n\n pred_time_start = default_timer()\n Y_pred = clf.predict(X_test.toarray())\n pred_time_end = default_timer()\n\n test_score = f1_score(Y_test.toarray(), Y_pred, average='samples')\n print(\"F1-score: %0.2f\" % (test_score))\n print(\"Fit time: %0.2f\" % (fit_time_end - fit_time_start))\n print(\"Prediction time: %0.2f\" % (pred_time_end - pred_time_start))"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.preprocessing.MultiLabelBinarizer",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.pipeline.Pipeline",
"sklearn.model_selection.train_test_split"
]
] |
josephgalestian/taichiV2-master | [
"12a63a05fdccc824205b1ee6545e4706bf473405"
] | [
"python/taichi/lang/kernel_impl.py"
] | [
"import ast\nimport functools\nimport inspect\nimport re\nimport sys\nimport textwrap\n\nimport numpy as np\nimport taichi.lang\nfrom taichi._lib import core as _ti_core\nfrom taichi.lang import impl, runtime_ops\nfrom taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,\n transform_tree)\nfrom taichi.lang.enums import Layout\nfrom taichi.lang.exception import (TaichiCompilationError,\n TaichiRuntimeTypeError, TaichiSyntaxError)\nfrom taichi.lang.expr import Expr\nfrom taichi.lang.matrix import MatrixType\nfrom taichi.lang.shell import _shell_pop_print, oinspect\nfrom taichi.lang.util import has_pytorch, to_taichi_type\nfrom taichi.linalg.sparse_matrix import sparse_matrix_builder\nfrom taichi.types import any_arr, primitive_types, template\n\nfrom taichi import _logging\n\nif has_pytorch():\n import torch\n\n\ndef func(fn):\n \"\"\"Marks a function as callable in Taichi-scope.\n\n This decorator transforms a Python function into a Taichi one. Taichi\n will JIT compile it into native instructions.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> @ti.func\n >>> def foo(x):\n >>> return x + 2\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> print(foo(40)) # 42\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n\n fun = Func(fn, _classfunc=is_classfunc)\n\n @functools.wraps(fn)\n def decorated(*args):\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef pyfunc(fn):\n \"\"\"Marks a function as callable in both Taichi and Python scopes.\n\n When called inside the Taichi scope, Taichi will JIT compile it into\n native instructions. Otherwise it will be invoked directly as a\n Python function.\n\n See also :func:`~taichi.lang.kernel_impl.func`.\n\n Args:\n fn (Callable): The Python function to be decorated\n\n Returns:\n Callable: The decorated function\n \"\"\"\n is_classfunc = _inside_class(level_of_class_stackframe=3)\n fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True)\n\n @functools.wraps(fn)\n def decorated(*args):\n return fun.__call__(*args)\n\n decorated._is_taichi_function = True\n return decorated\n\n\ndef _get_tree_and_ctx(self,\n excluded_parameters=(),\n is_kernel=True,\n arg_features=None,\n args=None,\n ast_builder=None):\n file = oinspect.getsourcefile(self.func)\n src, start_lineno = oinspect.getsourcelines(self.func)\n src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]\n tree = ast.parse(textwrap.dedent(\"\\n\".join(src)))\n\n func_body = tree.body[0]\n func_body.decorator_list = []\n\n global_vars = _get_global_vars(self.func)\n\n for i, arg in enumerate(func_body.args.args):\n anno = arg.annotation\n if isinstance(anno, ast.Name):\n global_vars[anno.id] = self.argument_annotations[i]\n\n if isinstance(func_body.returns, ast.Name):\n global_vars[func_body.returns.id] = self.return_type\n\n if is_kernel or impl.get_runtime().experimental_real_function:\n # inject template parameters into globals\n for i in self.template_slot_locations:\n template_var_name = self.argument_names[i]\n global_vars[template_var_name] = args[i]\n\n return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,\n is_kernel=is_kernel,\n func=self,\n arg_features=arg_features,\n global_vars=global_vars,\n argument_data=args,\n src=src,\n start_lineno=start_lineno,\n file=file,\n ast_builder=ast_builder)\n\n\nclass Func:\n function_counter = 0\n\n def __init__(self, _func, _classfunc=False, _pyfunc=False):\n 
self.func = _func\n self.func_id = Func.function_counter\n Func.function_counter += 1\n self.compiled = None\n self.classfunc = _classfunc\n self.pyfunc = _pyfunc\n self.argument_annotations = []\n self.argument_names = []\n self.return_type = None\n self.extract_arguments()\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n self.argument_annotations, self.template_slot_locations)\n self.taichi_functions = {} # The |Function| class in C++\n\n def __call__(self, *args):\n if not impl.inside_kernel():\n if not self.pyfunc:\n raise TaichiSyntaxError(\n \"Taichi functions cannot be called from Python-scope.\"\n \" Use @ti.pyfunc if you wish to call Taichi functions \"\n \"from both Python-scope and Taichi-scope.\")\n return self.func(*args)\n\n if impl.get_runtime().experimental_real_function:\n if impl.get_runtime().current_kernel.is_grad:\n raise TaichiSyntaxError(\n \"Real function in gradient kernels unsupported.\")\n instance_id, _ = self.mapper.lookup(args)\n key = _ti_core.FunctionKey(self.func.__name__, self.func_id,\n instance_id)\n if self.compiled is None:\n self.compiled = {}\n if key.instance_id not in self.compiled:\n self.do_compile(key=key, args=args)\n return self.func_call_rvalue(key=key, args=args)\n tree, ctx = _get_tree_and_ctx(\n self,\n is_kernel=False,\n args=args,\n ast_builder=impl.get_runtime().prog.current_ast_builder())\n ret = transform_tree(tree, ctx)\n if not impl.get_runtime().experimental_real_function:\n if self.return_type and not ctx.returned:\n raise TaichiSyntaxError(\n \"Function has a return type but does not have a return statement\"\n )\n return ret\n\n def func_call_rvalue(self, key, args):\n # Skip the template args, e.g., |self|\n assert impl.get_runtime().experimental_real_function\n non_template_args = []\n for i, anno in enumerate(self.argument_annotations):\n if not isinstance(anno, template):\n non_template_args.append(args[i])\n non_template_args = impl.make_expr_group(non_template_args)\n return Expr(\n _ti_core.make_func_call_expr(\n self.taichi_functions[key.instance_id], non_template_args))\n\n def do_compile(self, key, args):\n tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)\n fn = impl.get_runtime().prog.create_function(key)\n\n def func_body():\n ctx.ast_builder = fn.ast_builder()\n transform_tree(tree, ctx)\n\n self.taichi_functions[key.instance_id] = fn\n self.compiled[key.instance_id] = func_body\n self.taichi_functions[key.instance_id].set_function_body(func_body)\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise TaichiSyntaxError(\n 'Taichi functions do not support variable positional parameters (i.e., *args)'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise TaichiSyntaxError(\n 'Taichi functions do not support keyword parameters')\n if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi functions only support \"positional or keyword\" 
parameters'\n )\n annotation = param.annotation\n if annotation is inspect.Parameter.empty:\n if i == 0 and self.classfunc:\n annotation = template()\n # TODO: pyfunc also need type annotation check when real function is enabled,\n # but that has to happen at runtime when we know which scope it's called from.\n elif not self.pyfunc and impl.get_runtime(\n ).experimental_real_function:\n raise TaichiSyntaxError(\n f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'\n )\n else:\n if not id(annotation\n ) in primitive_types.type_ids and not isinstance(\n annotation, template):\n raise TaichiSyntaxError(\n f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n\nclass TaichiCallableTemplateMapper:\n def __init__(self, annotations, template_slot_locations):\n self.annotations = annotations\n self.num_args = len(annotations)\n self.template_slot_locations = template_slot_locations\n self.mapping = {}\n\n @staticmethod\n def extract_arg(arg, anno):\n if isinstance(anno, template):\n if isinstance(arg, taichi.lang.snode.SNode):\n return arg.ptr\n if isinstance(arg, taichi.lang.expr.Expr):\n return arg.ptr.get_underlying_ptr_address()\n if isinstance(arg, _ti_core.Expr):\n return arg.get_underlying_ptr_address()\n if isinstance(arg, tuple):\n return tuple(\n TaichiCallableTemplateMapper.extract_arg(item, anno)\n for item in arg)\n return arg\n if isinstance(anno, any_arr):\n if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):\n anno.check_element_dim(arg, 0)\n anno.check_element_shape(())\n anno.check_field_dim(len(arg.shape))\n return arg.dtype, len(arg.shape), (), Layout.AOS\n if isinstance(arg, taichi.lang.matrix.VectorNdarray):\n anno.check_element_dim(arg, 1)\n anno.check_element_shape((arg.n, ))\n anno.check_field_dim(len(arg.shape))\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout\n if isinstance(arg, taichi.lang.matrix.MatrixNdarray):\n anno.check_element_dim(arg, 2)\n anno.check_element_shape((arg.n, arg.m))\n anno.check_field_dim(len(arg.shape))\n anno.check_layout(arg)\n return arg.dtype, len(arg.shape) + 2, (arg.n,\n arg.m), arg.layout\n # external arrays\n element_dim = 0 if anno.element_dim is None else anno.element_dim\n layout = Layout.AOS if anno.layout is None else anno.layout\n shape = tuple(arg.shape)\n if len(shape) < element_dim:\n raise ValueError(\n f\"Invalid argument into ti.any_arr() - required element_dim={element_dim}, \"\n f\"but the argument has only {len(shape)} dimensions\")\n element_shape = (\n ) if element_dim == 0 else shape[:\n element_dim] if layout == Layout.SOA else shape[\n -element_dim:]\n return to_taichi_type(arg.dtype), len(shape), element_shape, layout\n # Use '#' as a placeholder because other kinds of arguments are not involved in template instantiation\n return '#'\n\n def extract(self, args):\n extracted = []\n for arg, anno in zip(args, self.annotations):\n extracted.append(self.extract_arg(arg, anno))\n return tuple(extracted)\n\n def lookup(self, args):\n if len(args) != self.num_args:\n raise TypeError(\n f'{self.num_args} argument(s) needed but {len(args)} provided.'\n )\n\n key = self.extract(args)\n if key not in self.mapping:\n count = len(self.mapping)\n self.mapping[key] = count\n return self.mapping[key], key\n\n\ndef _get_global_vars(_func):\n # Discussions: https://github.com/taichi-dev/taichi/issues/282\n global_vars = 
_func.__globals__.copy()\n\n freevar_names = _func.__code__.co_freevars\n closure = _func.__closure__\n if closure:\n freevar_values = list(map(lambda x: x.cell_contents, closure))\n for name, value in zip(freevar_names, freevar_values):\n global_vars[name] = value\n\n return global_vars\n\n\nclass Kernel:\n counter = 0\n\n def __init__(self, _func, is_grad, _classkernel=False):\n self.func = _func\n self.kernel_counter = Kernel.counter\n Kernel.counter += 1\n self.is_grad = is_grad\n self.grad = None\n self.argument_annotations = []\n self.argument_names = []\n self.return_type = None\n self.classkernel = _classkernel\n self.extract_arguments()\n self.template_slot_locations = []\n for i, anno in enumerate(self.argument_annotations):\n if isinstance(anno, template):\n self.template_slot_locations.append(i)\n self.mapper = TaichiCallableTemplateMapper(\n self.argument_annotations, self.template_slot_locations)\n impl.get_runtime().kernels.append(self)\n self.reset()\n self.kernel_cpp = None\n\n def reset(self):\n self.runtime = impl.get_runtime()\n if self.is_grad:\n self.compiled_functions = self.runtime.compiled_grad_functions\n else:\n self.compiled_functions = self.runtime.compiled_functions\n\n def extract_arguments(self):\n sig = inspect.signature(self.func)\n if sig.return_annotation not in (inspect._empty, None):\n self.return_type = sig.return_annotation\n params = sig.parameters\n arg_names = params.keys()\n for i, arg_name in enumerate(arg_names):\n param = params[arg_name]\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'\n )\n if param.kind == inspect.Parameter.VAR_POSITIONAL:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support variable positional parameters (i.e., *args)'\n )\n if param.default is not inspect.Parameter.empty:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support default values for arguments'\n )\n if param.kind == inspect.Parameter.KEYWORD_ONLY:\n raise TaichiSyntaxError(\n 'Taichi kernels do not support keyword parameters')\n if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise TaichiSyntaxError(\n 'Taichi kernels only support \"positional or keyword\" parameters'\n )\n annotation = param.annotation\n if param.annotation is inspect.Parameter.empty:\n if i == 0 and self.classkernel: # The |self| parameter\n annotation = template()\n else:\n raise TaichiSyntaxError(\n 'Taichi kernels parameters must be type annotated')\n else:\n if isinstance(annotation, (template, any_arr)):\n pass\n elif id(annotation) in primitive_types.type_ids:\n pass\n elif isinstance(annotation, sparse_matrix_builder):\n pass\n elif isinstance(annotation, MatrixType):\n pass\n else:\n raise TaichiSyntaxError(\n f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'\n )\n self.argument_annotations.append(annotation)\n self.argument_names.append(param.name)\n\n def materialize(self, key=None, args=None, arg_features=None):\n if key is None:\n key = (self.func, 0)\n self.runtime.materialize()\n if key in self.compiled_functions:\n return\n grad_suffix = \"\"\n if self.is_grad:\n grad_suffix = \"_grad\"\n kernel_name = f\"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}\"\n _logging.trace(f\"Compiling kernel {kernel_name}...\")\n\n tree, ctx = _get_tree_and_ctx(\n self,\n args=args,\n excluded_parameters=self.template_slot_locations,\n arg_features=arg_features)\n\n if self.is_grad:\n 
KernelSimplicityASTChecker(self.func).visit(tree)\n\n # Do not change the name of 'taichi_ast_generator'\n # The warning system needs this identifier to remove unnecessary messages\n def taichi_ast_generator(kernel_cxx):\n if self.runtime.inside_kernel:\n raise TaichiSyntaxError(\n \"Kernels cannot call other kernels. I.e., nested kernels are not allowed. \"\n \"Please check if you have direct/indirect invocation of kernels within kernels. \"\n \"Note that some methods provided by the Taichi standard library may invoke kernels, \"\n \"and please move their invocations to Python-scope.\")\n self.runtime.inside_kernel = True\n self.runtime.current_kernel = self\n try:\n ctx.ast_builder = kernel_cxx.ast_builder()\n transform_tree(tree, ctx)\n if not impl.get_runtime().experimental_real_function:\n if self.return_type and not ctx.returned:\n raise TaichiSyntaxError(\n \"Kernel has a return type but does not have a return statement\"\n )\n finally:\n self.runtime.inside_kernel = False\n self.runtime.current_kernel = None\n\n taichi_kernel = impl.get_runtime().prog.create_kernel(\n taichi_ast_generator, kernel_name, self.is_grad)\n\n self.kernel_cpp = taichi_kernel\n\n assert key not in self.compiled_functions\n self.compiled_functions[key] = self.get_function_body(taichi_kernel)\n\n def get_torch_callbacks(self, v, has_torch, is_ndarray=True):\n callbacks = []\n\n def get_call_back(u, v):\n def call_back():\n u.copy_(v)\n\n return call_back\n\n assert has_torch\n assert isinstance(v, torch.Tensor)\n if v._is_view():\n raise ValueError(\n \"Torch view tensors are not supported, please call tensor.clone() before passing it into taichi kernel.\"\n )\n tmp = v\n taichi_arch = self.runtime.prog.config.arch\n # Ndarray means its memory is allocated on the specified taichi arch.\n # Since torch only supports CPU & CUDA, torch-base ndarray only supports\n # taichi cpu/cuda backend as well.\n # Note I put x64/arm64/cuda here to be more specific.\n assert not is_ndarray or taichi_arch in (\n _ti_core.Arch.cuda, _ti_core.Arch.x64, _ti_core.Arch.arm64\n ), \"Torch-based ndarray is only supported on taichi x64/arm64/cuda backend.\"\n\n if str(v.device).startswith('cuda'):\n # External tensor on cuda\n if taichi_arch != _ti_core.Arch.cuda:\n # copy data back to cpu\n host_v = v.to(device='cpu', copy=True)\n tmp = host_v\n callbacks.append(get_call_back(v, host_v))\n else:\n # External tensor on cpu\n if taichi_arch == _ti_core.Arch.cuda:\n gpu_v = v.cuda()\n tmp = gpu_v\n callbacks.append(get_call_back(v, gpu_v))\n return tmp, callbacks\n\n def get_function_body(self, t_kernel):\n # The actual function body\n def func__(*args):\n assert len(args) == len(\n self.argument_annotations\n ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'\n\n tmps = []\n callbacks = []\n has_external_arrays = False\n has_torch = has_pytorch()\n ndarray_use_torch = impl.get_runtime().ndarray_use_torch\n\n actual_argument_slot = 0\n launch_ctx = t_kernel.make_launch_context()\n for i, v in enumerate(args):\n needed = self.argument_annotations[i]\n if isinstance(needed, template):\n continue\n provided = type(v)\n # Note: do not use sth like \"needed == f32\". 
That would be slow.\n if id(needed) in primitive_types.real_type_ids:\n if not isinstance(v, (float, int)):\n raise TaichiRuntimeTypeError(i, needed.to_string(),\n provided)\n launch_ctx.set_arg_float(actual_argument_slot, float(v))\n elif id(needed) in primitive_types.integer_type_ids:\n if not isinstance(v, int):\n raise TaichiRuntimeTypeError(i, needed.to_string(),\n provided)\n launch_ctx.set_arg_int(actual_argument_slot, int(v))\n elif isinstance(needed, sparse_matrix_builder):\n # Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument\n launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())\n elif isinstance(needed, any_arr) and isinstance(\n v, taichi.lang._ndarray.Ndarray):\n has_external_arrays = True\n v = v.arr\n if ndarray_use_torch:\n is_ndarray = True\n tmp, torch_callbacks = self.get_torch_callbacks(\n v, has_torch, is_ndarray)\n callbacks += torch_callbacks\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement(), v.shape)\n else:\n launch_ctx.set_arg_ndarray(actual_argument_slot, v)\n elif isinstance(needed, any_arr) and (self.match_ext_arr(v)):\n has_external_arrays = True\n is_numpy = isinstance(v, np.ndarray)\n if is_numpy:\n tmp = np.ascontiguousarray(v)\n # Purpose: DO NOT GC |tmp|!\n tmps.append(tmp)\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.ctypes.data),\n tmp.nbytes, v.shape)\n else:\n is_ndarray = False\n tmp, torch_callbacks = self.get_torch_callbacks(\n v, has_torch, is_ndarray)\n callbacks += torch_callbacks\n launch_ctx.set_arg_external_array_with_shape(\n actual_argument_slot, int(tmp.data_ptr()),\n tmp.element_size() * tmp.nelement(), v.shape)\n\n elif isinstance(needed, MatrixType):\n if id(needed.dtype) in primitive_types.real_type_ids:\n for a in range(needed.n):\n for b in range(needed.m):\n if not isinstance(v[a, b], (int, float)):\n raise TaichiRuntimeTypeError(\n i, needed.dtype.to_string(),\n type(v[a, b]))\n launch_ctx.set_arg_float(\n actual_argument_slot, float(v[a, b]))\n actual_argument_slot += 1\n elif id(needed.dtype) in primitive_types.integer_type_ids:\n for a in range(needed.n):\n for b in range(needed.m):\n if not isinstance(v[a, b], int):\n raise TaichiRuntimeTypeError(\n i, needed.dtype.to_string(),\n type(v[a, b]))\n launch_ctx.set_arg_int(actual_argument_slot,\n int(v[a, b]))\n actual_argument_slot += 1\n else:\n raise ValueError(\n f'Matrix dtype {needed.dtype} is not integer type or real type.'\n )\n continue\n else:\n raise ValueError(\n f'Argument type mismatch. Expecting {needed}, got {type(v)}.'\n )\n actual_argument_slot += 1\n # Both the class kernels and the plain-function kernels are unified now.\n # In both cases, |self.grad| is another Kernel instance that computes the\n # gradient. 
For class kernels, args[0] is always the kernel owner.\n if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:\n self.runtime.target_tape.insert(self, args)\n\n t_kernel(launch_ctx)\n\n ret = None\n ret_dt = self.return_type\n has_ret = ret_dt is not None\n\n if has_ret or (impl.current_cfg().async_mode\n and has_external_arrays):\n runtime_ops.sync()\n\n if has_ret:\n if id(ret_dt) in primitive_types.integer_type_ids:\n ret = t_kernel.get_ret_int(0)\n else:\n ret = t_kernel.get_ret_float(0)\n\n if callbacks:\n for c in callbacks:\n c()\n\n return ret\n\n return func__\n\n @staticmethod\n def match_ext_arr(v):\n has_array = isinstance(v, np.ndarray)\n if not has_array and has_pytorch():\n has_array = isinstance(v, torch.Tensor)\n return has_array\n\n def ensure_compiled(self, *args):\n instance_id, arg_features = self.mapper.lookup(args)\n key = (self.func, instance_id)\n self.materialize(key=key, args=args, arg_features=arg_features)\n return key\n\n # For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__\n # Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)\n @_shell_pop_print\n def __call__(self, *args, **kwargs):\n if self.is_grad and impl.current_cfg().opt_level == 0:\n _logging.warn(\n \"\"\"opt_level = 1 is enforced to enable gradient computation.\"\"\"\n )\n impl.current_cfg().opt_level = 1\n assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'\n key = self.ensure_compiled(*args)\n return self.compiled_functions[key](*args)\n\n\n# For a Taichi class definition like below:\n#\n# @ti.data_oriented\n# class X:\n# @ti.kernel\n# def foo(self):\n# ...\n#\n# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is\n# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',\n# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class\n# inherits, i.e. 
class X(object):, then in both versions, |code_context| is\n# 'class X(object):'...\n_KERNEL_CLASS_STACKFRAME_STMT_RES = [\n re.compile(r'@(\\w+\\.)?data_oriented'),\n re.compile(r'class '),\n]\n\n\ndef _inside_class(level_of_class_stackframe):\n try:\n maybe_class_frame = sys._getframe(level_of_class_stackframe)\n statement_list = inspect.getframeinfo(maybe_class_frame)[3]\n first_statment = statement_list[0].strip()\n for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:\n if pat.match(first_statment):\n return True\n except:\n pass\n return False\n\n\ndef _kernel_impl(_func, level_of_class_stackframe, verbose=False):\n # Can decorators determine if a function is being defined inside a class?\n # https://stackoverflow.com/a/8793684/12003165\n is_classkernel = _inside_class(level_of_class_stackframe + 1)\n\n if verbose:\n print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')\n primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)\n adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)\n # Having |primal| contains |grad| makes the tape work.\n primal.grad = adjoint\n\n if is_classkernel:\n # For class kernels, their primal/adjoint callables are constructed\n # when the kernel is accessed via the instance inside\n # _BoundedDifferentiableMethod.\n # This is because we need to bind the kernel or |grad| to the instance\n # owning the kernel, which is not known until the kernel is accessed.\n #\n # See also: _BoundedDifferentiableMethod, data_oriented.\n @functools.wraps(_func)\n def wrapped(*args, **kwargs):\n # If we reach here (we should never), it means the class is not decorated\n # with @ti.data_oriented, otherwise getattr would have intercepted the call.\n clsobj = type(args[0])\n assert not hasattr(clsobj, '_data_oriented')\n raise TaichiSyntaxError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n else:\n\n @functools.wraps(_func)\n def wrapped(*args, **kwargs):\n try:\n return primal(*args, **kwargs)\n except TaichiCompilationError as e:\n raise type(e)('\\n' + str(e)) from None\n\n wrapped.grad = adjoint\n\n wrapped._is_wrapped_kernel = True\n wrapped._is_classkernel = is_classkernel\n wrapped._primal = primal\n wrapped._adjoint = adjoint\n return wrapped\n\n\ndef kernel(fn):\n \"\"\"Marks a function as a Taichi kernel.\n\n A Taichi kernel is a function written in Python, and gets JIT compiled by\n Taichi into native CPU/GPU instructions (e.g. 
a series of CUDA kernels).\n The top-level ``for`` loops are automatically parallelized, and distributed\n to either a CPU thread pool or massively parallel GPUs.\n\n Kernel's gradient kernel would be generated automatically by the AutoDiff system.\n\n See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.\n\n Args:\n fn (Callable): the Python function to be decorated\n\n Returns:\n Callable: The decorated function\n\n Example::\n\n >>> x = ti.field(ti.i32, shape=(4, 8))\n >>>\n >>> @ti.kernel\n >>> def run():\n >>> # Assigns all the elements of `x` in parallel.\n >>> for i in x:\n >>> x[i] = i\n \"\"\"\n return _kernel_impl(fn, level_of_class_stackframe=3)\n\n\nclass _BoundedDifferentiableMethod:\n def __init__(self, kernel_owner, wrapped_kernel_func):\n clsobj = type(kernel_owner)\n if not getattr(clsobj, '_data_oriented', False):\n raise TaichiSyntaxError(\n f'Please decorate class {clsobj.__name__} with @ti.data_oriented'\n )\n self._kernel_owner = kernel_owner\n self._primal = wrapped_kernel_func._primal\n self._adjoint = wrapped_kernel_func._adjoint\n self._is_staticmethod = wrapped_kernel_func._is_staticmethod\n self.__name__ = None\n\n def __call__(self, *args, **kwargs):\n if self._is_staticmethod:\n return self._primal(*args, **kwargs)\n return self._primal(self._kernel_owner, *args, **kwargs)\n\n def grad(self, *args, **kwargs):\n return self._adjoint(self._kernel_owner, *args, **kwargs)\n\n\ndef data_oriented(cls):\n \"\"\"Marks a class as Taichi compatible.\n\n To allow for modularized code, Taichi provides this decorator so that\n Taichi kernels can be defined inside a class.\n\n See also https://docs.taichi.graphics/lang/articles/advanced/odop\n\n Example::\n\n >>> @ti.data_oriented\n >>> class TiArray:\n >>> def __init__(self, n):\n >>> self.x = ti.field(ti.f32, shape=n)\n >>>\n >>> @ti.kernel\n >>> def inc(self):\n >>> for i in self.x:\n >>> self.x[i] += 1.0\n >>>\n >>> a = TiArray(32)\n >>> a.inc()\n\n Args:\n cls (Class): the class to be decorated\n\n Returns:\n The decorated class.\n \"\"\"\n def _getattr(self, item):\n method = cls.__dict__.get(item, None)\n is_property = method.__class__ == property\n is_staticmethod = method.__class__ == staticmethod\n if is_property:\n x = method.fget\n else:\n x = super(cls, self).__getattribute__(item)\n if hasattr(x, '_is_wrapped_kernel'):\n if inspect.ismethod(x):\n wrapped = x.__func__\n else:\n wrapped = x\n wrapped._is_staticmethod = is_staticmethod\n assert inspect.isfunction(wrapped)\n if wrapped._is_classkernel:\n ret = _BoundedDifferentiableMethod(self, wrapped)\n ret.__name__ = wrapped.__name__\n if is_property:\n return ret()\n return ret\n if is_property:\n return x(self)\n return x\n\n cls.__getattribute__ = _getattr\n cls._data_oriented = True\n\n return cls\n\n\n__all__ = [\"data_oriented\", \"func\", \"kernel\"]\n"
] | [
[
"numpy.ascontiguousarray"
]
] |
AI-Huang/XOR_Gate_NN | [
"d97c7fd7e5b046e84bd862081ab800b9ccbb1672"
] | [
"xor_gate_nn/datasets/keras_fn/datasets.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Date : Feb-09-21 22:23\n# @Author : Kelly Hwong ([email protected])\n\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass XOR_Dataset(tf.keras.utils.Sequence):\n \"\"\"XOR_Dataset.\"\"\"\n\n def __init__(\n self,\n batch_size=1,\n shuffle=False,\n seed=42,\n ):\n self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n self.y = np.array([[0], [1], [1], [0]])\n\n assert batch_size <= 4\n self.batch_size = batch_size # one by one learning\n self.index = self._set_index_array()\n self.shuffle = shuffle\n\n def __getitem__(self, batch_index):\n \"\"\"Gets batch at batch_index `batch_index`.\n\n Arguments:\n batch_index: batch_index of the batch in the Sequence.\n\n Returns:\n batch_x, batch_y: a batch of sequence data.\n \"\"\"\n batch_size = self.batch_size\n\n sample_index = \\\n self.index[batch_index * batch_size:(batch_index+1) * batch_size]\n\n batch_x = np.empty((batch_size, 2))\n batch_y = np.empty(batch_size)\n\n for _, i in enumerate(sample_index):\n batch_x[_, ] = self.X[i, :]\n batch_y[_] = self.y[i, :]\n\n return batch_x, batch_y\n\n def __len__(self):\n \"\"\"Number of batches in the Sequence.\n Returns:\n The number of batches in the Sequence.\n \"\"\"\n return int(np.ceil(self.index.shape[0] / self.batch_size))\n\n def __iter__(self):\n \"\"\"Create a generator that iterate over the Sequence.\"\"\"\n for item in (self[i] for i in range(len(self))):\n yield item\n\n def _set_index_array(self):\n \"\"\"_set_index_array\n \"\"\"\n N = 4\n return np.arange(0, N)\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array",
"numpy.arange",
"numpy.ceil",
"numpy.empty"
]
] |
austinpeel/jax | [
"ca766caa02296023bd6714bb7fdba064a45e2258"
] | [
"jax/experimental/loops.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loops is an **experimental** module for syntactic sugar for loops and control-flow.\n\nThe current implementation should convert loops correctly to JAX internal\nrepresentation, and most transformations should work (see below), but we have\nnot yet fine-tuned the performance of the resulting XLA compilation!\n\nBy default, loops and control-flow in JAX are executed and inlined during tracing.\nFor example, in the following code the `for` loop is unrolled during JAX tracing::\n\n arr = np.zeros(5)\n for i in range(arr.shape[0]):\n arr[i] += 2.\n if i % 2 == 0:\n arr[i] += 1.\n\nIn order to capture the structured control-flow one has to use the higher-order\nJAX operations, which require you to express the body of the loops and\nconditionals as functions, and the array updates using a functional style that\nreturns an updated array, e.g.::\n\n arr = np.zeros(5)\n def loop_body(i, acc_arr):\n arr1 = ops.index_update(acc_arr, i, acc_arr[i] + 2.)\n return lax.cond(i % 2 == 0,\n arr1,\n lambda arr1: ops.index_update(arr1, i, arr1[i] + 1),\n arr1,\n lambda arr1: arr1)\n arr = lax.fori_loop(0, arr.shape[0], loop_body, arr)\n\nThe default notation quickly gets unreadable with deeper nested loops.\nWith the utilities in this module you can write loops and conditionals that\nlook closer to plain Python, as long as you keep the loop-carried state in a\nspecial `loops.scope` object and use `for` loops over special\n`scope.range` iterators::\n\n from jax.experimental import loops\n with loops.Scope() as s:\n s.arr = np.zeros(5) # Create the mutable state of the loop as `scope` fields.\n for i in s.range(s.arr.shape[0]):\n s.arr = ops.index_update(s.arr, i, s.arr[i] + 2.)\n for _ in s.cond_range(i % 2 == 0): # Conditionals as loops with 0 or 1 iterations\n s.arr = ops.index_update(s.arr, i, s.arr[i] + 1.)\n\nLoops constructed with `range` must have literal constant bounds. If you need\nloops with dynamic bounds, you can use the more general `while_range` iterator.\nHowever, in that case that `grad` transformation is not supported::\n\n s.idx = start\n for _ in s.while_range(lambda: s.idx < end):\n s.idx += 1\n\nNotes:\n * Loops and conditionals to be functionalized can appear only inside scopes\n constructed with `loops.Scope` and they must use one of the `Scope.range`\n iterators. All other loops are unrolled during tracing, as usual in JAX.\n * Only scope data (stored in fields of the scope object) is functionalized.\n All other state, e.g., in other Python variables, will not be considered as\n being part of the loop output. 
All references to the mutable state should be\n through the scope: `s.arr`.\n * Conceptually, this model is still \"functional\" in the sense that a loop over\n a `Scope.range` behaves as a function whose input and output is the scope data.\n * Scopes should be passed down to callees that need to use loop\n functionalization, or they may be nested.\n * The programming model is that the loop body over a `scope.range` is traced\n only once, using abstract shape values, similar to how JAX traces function\n bodies.\n\nRestrictions:\n * The tracing of the loop body should not exit prematurely with `return`,\n `exception`, `break`. This would be detected and reported as errors when we\n encounter unnested scopes.\n * The loop index variable should not be used after the loop. Similarly, one\n should not use outside the loop data computed in the loop body, except data\n stored in fields of the scope object.\n * No new mutable state can be created inside a loop to be functionalized.\n All mutable state must be created outside all loops and conditionals.\n * For a `while` loop, the conditional function is not allowed to modify the\n scope state. This is a checked error. Also, for `while` loops the `grad`\n transformation does not work. An alternative that allows `grad` is a bounded\n loop (`range`).\n\nTransformations:\n * All transformations are supported, except `grad` is not supported for\n `Scope.while_range` loops.\n * `vmap` is very useful for such loops because it pushes more work into the\n inner-loops, which should help performance for accelerators.\n\nFor usage example, see tests/loops_test.py.\n\"\"\"\n\n\nimport copy\nfrom functools import partial\nimport itertools\nimport numpy as np\nimport traceback\nfrom typing import Any, List, cast\n\nfrom jax import abstract_arrays\nfrom jax import lax, core\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax import tree_util\nfrom jax import numpy as jnp\nfrom jax.interpreters import partial_eval as pe\nfrom jax.util import safe_map\nfrom jax.config import config\n\n\nclass Scope(object):\n \"\"\"A scope context manager to keep the state of loop bodies for functionalization.\n\n Usage::\n\n with Scope() as s:\n s.data = 0.\n for i in s.range(5):\n s.data += 1.\n return s.data\n\n \"\"\"\n\n def __init__(self):\n self._mutable_state = {} # state to be functionalized, indexed by name.\n self._active_ranges = [] # stack of active ranges, last one is the innermost.\n self._count_subtraces = 0 # How many net started subtraces, for error recovery\n\n def range(self, first, second=None, third=None):\n \"\"\"Creates an iterator for bounded iterations to be functionalized.\n\n The body is converted to a `lax.scan`, for which all JAX transformations work.\n The `first`, `second`, and `third` arguments must be integer literals.\n\n Usage::\n\n range(5) # start=0, end=5, step=1\n range(1, 5) # start=1, end=5, step=1\n range(1, 5, 2) # start=1, end=5, step=2\n\n s.out = 1.\n for i in scope.range(5):\n s.out += 1.\n \"\"\"\n if third is not None:\n start = int(first)\n stop = int(second)\n step = int(third)\n else:\n step = 1\n if second is not None:\n start = int(first)\n stop = int(second)\n else:\n start = 0\n stop = int(first)\n return _BodyTracer(self, _BoundedLoopBuilder(start, stop, step))\n\n def cond_range(self, pred):\n \"\"\"Creates a conditional iterator with 0 or 1 iterations based on the boolean.\n\n The body is converted to a `lax.cond`. 
All JAX transformations work.\n\n Usage::\n\n for _ in scope.cond_range(s.field < 0.):\n s.field = - s.field\n \"\"\"\n # TODO: share these checks with lax_control_flow.cond\n if len(np.shape(pred)) != 0:\n raise TypeError(\n \"Pred must be a scalar, got {} of shape {}.\".format(pred, np.shape(pred)))\n\n try:\n pred_dtype = np.result_type(pred)\n except TypeError as err:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred)) from err\n\n if pred_dtype.kind != 'b':\n if pred_dtype.kind in 'iuf':\n pred = pred != 0\n else:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred_dtype))\n\n return _BodyTracer(self, _CondBuilder(pred))\n\n def while_range(self, cond_func):\n \"\"\"Creates an iterator that continues as long as `cond_func` returns true.\n\n The body is converted to a `lax.while_loop`.\n The `grad` transformation does not work.\n\n Usage::\n\n for _ in scope.while_range(lambda: s.loss > 1.e-5):\n s.loss = loss(...)\n\n Args:\n cond_func: a lambda with no arguments, the condition for the \"while\".\n \"\"\"\n return _BodyTracer(self, _WhileBuilder(cond_func))\n\n def _push_range(self, range_):\n for ar in self._active_ranges:\n if ar is range_:\n raise ValueError(\"Range is reused nested inside itself.\")\n self._active_ranges.append(range_)\n\n def _pop_range(self, range_):\n if not (range_ is self._active_ranges[-1]):\n self._error_premature_exit_range()\n self._active_ranges.pop()\n\n def _error_premature_exit_range(self):\n \"\"\"Raises error about premature exit from a range\"\"\"\n msg = \"Some ranges have exited prematurely. The innermost such range is at\\n{}\"\n raise ValueError(msg.format(self._active_ranges[-1].location()))\n\n def __getattr__(self, key):\n \"\"\"Accessor for scope data.\n\n Called only if the attribute is not found, which will happen when we read\n scope data that has been stored in self._mutable_state.\n \"\"\"\n mt_val = self._mutable_state.get(key)\n if mt_val is None:\n raise AttributeError(\n \"Reading uninitialized data '{}' from the scope.\".format(key))\n return mt_val\n\n def __setattr__(self, key, value):\n \"\"\"Update scope data to be functionalized.\n\n Called for *all* attribute setting.\n \"\"\"\n if key in [\"_active_ranges\", \"_mutable_state\", \"_count_subtraces\"]:\n object.__setattr__(self, key, value)\n else:\n if self._active_ranges and key not in self._mutable_state:\n raise ValueError(\n \"New mutable state '{}' cannot be created inside a loop.\".format(key))\n self._mutable_state[key] = value\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n try:\n if exc_type is None:\n if self._active_ranges: # We have some ranges that we did not exit properly\n self._error_premature_exit_range()\n return True\n else:\n # The exception may come from inside one or more ranges. We let the current\n # exception propagate, assuming it terminates the tracing. 
If not, the\n # tracers may be left in an inconsistent state.\n return False # re-raise\n finally:\n # Ensure we leave the global trace_state as we found it\n while self._count_subtraces > 0:\n self.end_subtrace()\n\n def start_subtrace(self):\n \"\"\"Starts a nested trace, returns the Trace object.\"\"\"\n # TODO: This follows the __enter__ part of core.new_main.\n if config.omnistaging_enabled:\n level = core.thread_local_state.trace_state.trace_stack.next_level()\n main = core.MainTrace(level, pe.JaxprTrace)\n core.thread_local_state.trace_state.trace_stack.push(main)\n self._count_subtraces += 1\n return pe.JaxprTrace(main, core.cur_sublevel())\n else:\n level = core.thread_local_state.trace_state.trace_stack.next_level(False)\n main = core.MainTrace(level, pe.JaxprTrace)\n core.thread_local_state.trace_state.trace_stack.push(main, False)\n self._count_subtraces += 1\n return pe.JaxprTrace(main, core.cur_sublevel())\n\n def end_subtrace(self):\n # TODO: This follows the __exit__ part of core.new_main\n if config.omnistaging_enabled:\n core.thread_local_state.trace_state.trace_stack.pop()\n else:\n core.thread_local_state.trace_state.trace_stack.pop(False)\n self._count_subtraces -= 1\n\n\nclass _BodyTracer(object):\n \"\"\"Traces the body of the loop and builds a functional control-flow representation.\n\n This class is also an iterator, only the first iteration is traced.\n \"\"\"\n\n def __init__(self, scope, loop_builder):\n \"\"\"\n Params:\n scope: the current scope\n loop_builder: instance of _LoopBuilder\n \"\"\"\n self.scope = scope\n self.loop_builder = loop_builder\n self.first_iteration = True # If we are tracing the first iteration\n # Stack trace, without this line and the s.range function\n self.stack = traceback.StackSummary.from_list(\n cast(List[Any], traceback.extract_stack()[:-2]))\n\n # Next are state kept from the start of the first iteration to the end of the iteration.\n self.carried_state_initial = {}\n # The parameters that were created for state upon entering an arbitrary iteration.\n self.carried_state_vars = {}\n\n self.trace = None\n # List of scope fields carried through the loop\n self.carried_state_names = None\n self.init_tree = None # The PyTreeDef corresponding to carried_state_names\n self.init_vals = None # The values corresponding to self.init_tree\n\n def location(self):\n \"\"\"A multiline string representing the source location of the range.\"\"\"\n if self.stack is not None:\n return \" \".join(self.stack.format())\n else:\n return \"\"\n\n def __iter__(self):\n \"\"\"Called before starting the first iteration.\"\"\"\n self.first_iteration = True # In case we reuse the range\n return self\n\n def __next__(self):\n if self.first_iteration:\n self.first_iteration = False\n self.scope._push_range(self)\n self.start_tracing_body()\n return self._index_var\n else:\n self.end_tracing_body()\n self.scope._pop_range(self)\n raise StopIteration # Trace only one iteration.\n\n def next(self): # For PY2\n return self.__next__()\n\n def start_tracing_body(self):\n \"\"\"Called upon starting the tracing of the loop body.\"\"\"\n # Make a copy of the current value of the mutable state\n self.carried_state_initial = copy.copy(self.scope._mutable_state)\n # The entire state is carried.\n self.carried_state_names = sorted(self.scope._mutable_state.keys())\n\n # TODO: This is the first part of partial_eval.trace_to_subjaxpr. 
Share.\n self.trace = self.scope.start_subtrace()\n # Set the scope._mutable_state to new tracing variables.\n for key, initial in self.carried_state_initial.items():\n mt_aval = _BodyTracer.abstractify(initial)\n mt_pval = pe.PartialVal.unknown(mt_aval)\n mt_var = self.trace.new_arg(mt_pval)\n self.carried_state_vars[key] = mt_var\n self.scope._mutable_state[key] = mt_var\n\n index_var_aval = _BodyTracer.abstractify(0)\n index_var_pval = pe.PartialVal.unknown(index_var_aval)\n self._index_var = self.trace.new_arg(index_var_pval)\n\n def end_tracing_body(self):\n \"\"\"Called when we are done tracing one iteration of the body.\"\"\"\n # We will turn the body of the loop into a function that takes some values\n # for the scope state (carried_state_names) and returns the values for the\n # same state fields after one execution of the body. For some of the ranges,\n # e.g., scope.range, the function will also take the index_var as last parameter.\n in_tracers = [self.carried_state_vars[ms] for ms in self.carried_state_names]\n if self.loop_builder.can_use_index_var():\n in_tracers += [self._index_var]\n\n # Make the jaxpr for the body of the loop\n # TODO: See which mutable state was changed in the one iteration.\n # For now, we assume all state changes.\n body_out_tracers = tuple([self.scope._mutable_state[ms]\n for ms in self.carried_state_names])\n try:\n # If the body actually uses the index variable, and is not allowed to\n # (e.g., cond_range and while_range), then in_tracers will not contain\n # the tracer for the index_var, and trace_to_jaxpr_finalize will throw\n # an assertion error.\n body_closed_jaxpr, body_const_vals = _BodyTracer.trace_to_jaxpr_finalize(\n in_tracers=in_tracers,\n out_tracers=body_out_tracers,\n trace=self.trace)\n except core.UnexpectedTracerError as e:\n if \"Tracer not among input tracers\" in str(e):\n raise ValueError(\"Body of cond_range or while_range should not use the \"\n \"index variable returned by iterator.\") from e\n raise\n # End the subtrace for the loop body, before we trace the condition\n self.scope.end_subtrace()\n\n carried_init_val = tuple([self.carried_state_initial[ms]\n for ms in self.carried_state_names])\n carried_init_vals, carried_tree = tree_util.tree_flatten(carried_init_val)\n\n carried_out_vals = self.loop_builder.build_output_vals(\n self.scope, self.carried_state_names, carried_tree,\n carried_init_vals, body_closed_jaxpr, body_const_vals)\n carried_mutable_state_unflattened = tree_util.tree_unflatten(carried_tree,\n carried_out_vals)\n\n # Update the mutable state with the values of the changed vars, after the loop.\n for ms, mv in zip(self.carried_state_names, carried_mutable_state_unflattened):\n self.scope._mutable_state[ms] = mv\n\n @staticmethod\n def abstractify(x):\n return abstract_arrays.raise_to_shaped(core.get_aval(x))\n\n @staticmethod\n def trace_to_jaxpr_finalize(in_tracers, out_tracers, trace, instantiate=True):\n # TODO: This is the final part of the partial_eval.trace_to_subjaxpr. Share.\n instantiate = [instantiate] * len(out_tracers)\n out_tracers = safe_map(trace.full_raise, safe_map(core.full_lower, out_tracers))\n out_tracers = safe_map(partial(pe.instantiate_const_at, trace),\n instantiate, out_tracers)\n jaxpr, consts, env = pe.tracers_to_jaxpr(in_tracers, out_tracers)\n assert not env # TODO: this is from partial_eval.trace_to_jaxpr. 
Share.\n closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())\n return closed_jaxpr, consts\n\n\nclass _LoopBuilder(object):\n \"\"\"Abstract superclass for the loop builders\"\"\"\n\n def can_use_index_var(self):\n \"\"\"Whether this kind of loop can use the index var returned by the range iterator.\"\"\"\n raise NotImplementedError\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n \"\"\"Builds the output values for the loop carried state.\n\n Params:\n scope: the current Scope object.\n carried_state_names: the list of names of mutable state fields that is\n carried through the body.\n carried_tree: the PyTreeDef for the tuple of carried_state_names.\n init_vals: the initial values on body entry corresponding to the init_tree.\n body_closed_jaxpr: the Jaxpr for the body returning the new values of\n carried_state_names.\n body_const_vals: the constant values for the body.\n\n Returns:\n the output tracer corresponding to the lax primitive representing the loop.\n \"\"\"\n raise NotImplementedError\n\n def __str__(self):\n raise NotImplementedError\n\n\nclass _BoundedLoopBuilder(_LoopBuilder):\n \"\"\"Builds a lax operation corresponding to a bounded range iteration.\"\"\"\n\n def __init__(self, start, stop, step):\n self.start = start\n self.stop = stop\n self.step = step\n self._index_var = None # The parameter for the index variable\n\n def can_use_index_var(self):\n return True\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n arange_val = jnp.arange(self.start, stop=self.stop, step=self.step)\n return lax_control_flow.scan_p.bind(*itertools.chain(body_const_vals,\n init_vals, [arange_val]),\n reverse=False, length=arange_val.shape[0],\n jaxpr=body_closed_jaxpr,\n num_consts=len(body_const_vals),\n num_carry=len(init_vals),\n linear=(False,) * (len(body_const_vals) +\n len(init_vals) + 1),\n unroll=1)\n\n\nclass _CondBuilder(_LoopBuilder):\n \"\"\"Builds a lax.cond operation.\"\"\"\n\n def __init__(self, pred):\n self.index = lax.convert_element_type(pred, np.int32)\n\n def can_use_index_var(self):\n return False\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n # Simulate a pass-through false branch\n in_vals, in_tree = tree_util.tree_flatten(\n (body_const_vals, tree_util.tree_unflatten(carried_tree, init_vals)))\n in_avals = safe_map(_BodyTracer.abstractify, in_vals)\n pass_through_closed_jaxpr, pass_through_const_vals, _ = (\n lax_control_flow._initial_style_jaxpr(\n lambda *args: args[1],\n in_tree,\n tuple(in_avals)))\n assert len(pass_through_const_vals) == 0\n args = list(itertools.chain(body_const_vals, init_vals))\n return lax_control_flow.cond_p.bind(\n self.index, *args,\n branches=(pass_through_closed_jaxpr, body_closed_jaxpr),\n linear=(False,) * len(args))\n\n\nclass _WhileBuilder(_LoopBuilder):\n \"\"\"Builds a lax.while operation.\"\"\"\n\n def __init__(self, cond_func):\n self.cond_func = cond_func # Function with 0 arguments (can reference the scope)\n\n def can_use_index_var(self):\n return False\n\n def build_output_vals(self, scope, carried_state_names, carried_tree,\n init_vals, body_closed_jaxpr, body_const_vals):\n # Trace the conditional function. cond_func takes 0 arguments, but\n # for lax.while we need a conditional function that takes the\n # carried_state_names. 
_initial_style_jaxpr will start its own trace and\n # will create tracers for all the carried state. We must put these values\n # in the scope._mutable_state before we trace the conditional\n # function.\n def cond_func_wrapped(*args):\n assert len(args) == len(carried_state_names)\n for ms, init_ms in zip(carried_state_names, args):\n scope._mutable_state[ms] = init_ms\n res = self.cond_func()\n # Conditional function is not allowed to modify the scope state\n for ms, init_ms in zip(carried_state_names, args):\n if not (scope._mutable_state[ms] is init_ms):\n msg = \"Conditional function modifies scope.{} field.\"\n raise ValueError(msg.format(ms))\n return res\n\n init_avals = safe_map(_BodyTracer.abstractify, init_vals)\n cond_jaxpr, cond_consts, cond_tree = (\n lax_control_flow._initial_style_jaxpr(cond_func_wrapped,\n carried_tree,\n tuple(init_avals)))\n # TODO: share these checks with lax_control_flow.while\n if not tree_util.treedef_is_leaf(cond_tree):\n msg = \"cond_fun must return a boolean scalar, but got pytree {}.\"\n raise TypeError(msg.format(cond_tree))\n if cond_jaxpr.out_avals != [abstract_arrays.ShapedArray((), np.bool_)]:\n msg = \"cond_fun must return a boolean scalar, but got output type(s) {}.\"\n raise TypeError(msg.format(cond_jaxpr.out_avals))\n\n return lax_control_flow.while_p.bind(*itertools.chain(cond_consts,\n body_const_vals,\n init_vals),\n cond_nconsts=len(cond_consts),\n cond_jaxpr=cond_jaxpr,\n body_nconsts=len(body_const_vals),\n body_jaxpr=body_closed_jaxpr)\n"
] | [
[
"numpy.result_type",
"numpy.shape"
]
] |
ko-ya346/python_asr | [
"251d8a4ff810fbeb5f7b63229139944195ab7cb5"
] | [
"04dnn_hmm/02_train_dnn.py"
] | [
"# -*- coding: utf-8 -*-\n\n#\n# DNNを学習します.\n#\n\n# Pytorchを用いた処理に必要なモジュールをインポート\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch import optim\n\n# 作成したDatasetクラスをインポート\nfrom my_dataset import SequenceDataset\n\n# 数値演算用モジュール(numpy)をインポート\nimport numpy as np\n\n# プロット用モジュール(matplotlib)をインポート\nimport matplotlib.pyplot as plt\n\n# hmmfunc.pyからMonoPhoneHMMクラスをインポート\nfrom hmmfunc import MonoPhoneHMM\n\n# モデルの定義をインポート\nfrom my_model import MyDNN\n\n# json形式の入出力を行うモジュールをインポート\nimport json\n\n# os, sys, shutilモジュールをインポート\nimport os\nimport sys\nimport shutil\n\n#\n# メイン関数\n#\nif __name__ == \"__main__\":\n \n #\n # 設定ここから\n #\n\n # 訓練データの特徴量リスト\n train_feat_scp = \\\n '../01compute_features/mfcc/train_small/feats.scp'\n # 訓練データのラベル(アライメント)ファイル\n train_label_file = \\\n './exp/data/train_small/alignment'\n \n # 訓練データから計算された\n # 特徴量の平均/標準偏差ファイル\n mean_std_file = \\\n '../01compute_features/mfcc/train_small/mean_std.txt'\n\n # 開発データの特徴量リスト\n dev_feat_scp = \\\n '../01compute_features/mfcc/dev/feats.scp'\n # 開発データのラベル(アライメント)ファイル\n dev_label_file = \\\n './exp/data/dev/alignment'\n\n # HMMファイル\n # HMMファイルは音素数と状態数の\n # 情報を得るためだけに使う\n hmm_file = '../03gmm_hmm/exp/model_3state_2mix/10.hmm'\n\n # 学習結果を出力するディレクトリ\n output_dir = os.path.join('exp', 'model_dnn')\n\n # ミニバッチに含める発話数\n batch_size = 5\n\n # 最大エポック数\n max_num_epoch = 60\n\n # 中間層のレイヤー数\n num_layers = 4\n\n # 中間層の次元数\n hidden_dim = 1024\n\n # splice: 前後 n フレームの特徴量を結合する\n # 次元数は(splice*2+1)倍になる\n splice = 5\n\n # 初期学習率\n initial_learning_rate = 0.008\n\n # 学習率の減衰やEarly stoppingの\n # 判定を開始するエポック数\n # (= 最低限このエポックまではどれだけ\n # validation結果が悪くても学習を続ける)\n lr_decay_start_epoch = 7\n\n # 学習率を減衰する割合\n # (減衰後学習率 <- 現在の学習率*lr_decay_factor)\n # 1.0以上なら,減衰させない\n lr_decay_factor = 0.5\n\n # Early stoppingの閾値\n # 最低損失値を更新しない場合が\n # 何エポック続けば学習を打ち切るか\n early_stop_threshold = 3\n\n #\n # 設定ここまで\n #\n\n # 出力ディレクトリが存在しない場合は作成する\n os.makedirs(output_dir, exist_ok=True)\n\n # 設定を辞書形式にする\n config = {'num_layers': num_layers, \n 'hidden_dim': hidden_dim,\n 'splice': splice,\n 'batch_size': batch_size,\n 'max_num_epoch': max_num_epoch,\n 'initial_learning_rate': initial_learning_rate,\n 'lr_decay_start_epoch': lr_decay_start_epoch, \n 'lr_decay_factor': lr_decay_factor,\n 'early_stop_threshold': early_stop_threshold}\n\n # 設定をJSON形式で保存する\n conf_file = os.path.join(output_dir, 'config.json')\n with open(conf_file, mode='w') as f:\n json.dump(config, f, indent=4)\n\n # 特徴量の平均/標準偏差ファイルを読み込む\n with open(mean_std_file, mode='r') as f:\n # 全行読み込み\n lines = f.readlines()\n # 1行目(0始まり)が平均値ベクトル(mean),\n # 3行目が標準偏差ベクトル(std)\n mean_line = lines[1]\n std_line = lines[3]\n # スペース区切りのリストに変換\n feat_mean = mean_line.split()\n feat_std = std_line.split()\n # numpy arrayに変換\n feat_mean = np.array(feat_mean, \n dtype=np.float32)\n feat_std = np.array(feat_std, \n dtype=np.float32)\n # 平均/標準偏差ファイルをコピーする\n shutil.copyfile(mean_std_file,\n os.path.join(output_dir, 'mean_std.txt'))\n\n # 次元数の情報を得る\n feat_dim = np.size(feat_mean)\n\n # DNNの出力層の次元数を得るために,\n # HMMの音素数と状態数を得る\n # MonoPhoneHMMクラスを呼び出す\n hmm = MonoPhoneHMM()\n # HMMを読み込む\n hmm.load_hmm(hmm_file)\n # DNNの出力層の次元数は音素数x状態数\n dim_out = hmm.num_phones * hmm.num_states\n # バッチデータ作成の際にラベルを埋める値\n # はdim_out以上の値にする\n pad_index = dim_out\n \n # ニューラルネットワークモデルを作成する\n # 入力特徴量の次元数は\n # feat_dim * (2*splice+1)\n dim_in = feat_dim * (2*splice+1)\n model = MyDNN(dim_in=dim_in,\n dim_hidden=hidden_dim,\n dim_out=dim_out, \n num_layers=num_layers)\n print(model)\n\n # オプティマイザを定義\n # ここでは momentum stochastic 
gradient descent\n # を使用\n optimizer = optim.SGD(model.parameters(), \n lr=initial_learning_rate,\n momentum=0.99)\n\n # 訓練データのデータセットを作成する\n # padding_indexはdim_out以上の値に設定する\n train_dataset = SequenceDataset(train_feat_scp,\n train_label_file,\n feat_mean,\n feat_std,\n pad_index,\n splice)\n # 開発データのデータセットを作成する\n dev_dataset = SequenceDataset(dev_feat_scp,\n dev_label_file,\n feat_mean,\n feat_std,\n pad_index,\n splice)\n \n # 訓練データのDataLoaderを呼び出す\n # 訓練データはシャッフルして用いる\n # (num_workerは大きい程処理が速くなりますが,\n # PCに負担が出ます.PCのスペックに応じて\n # 設定してください)\n train_loader = DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n # 開発データのDataLoaderを呼び出す\n # 開発データはデータはシャッフルしない\n dev_loader = DataLoader(dev_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4)\n\n # クロスエントロピーを損失関数として用いる\n criterion = \\\n nn.CrossEntropyLoss(ignore_index=pad_index)\n\n # CUDAが使える場合はモデルパラメータをGPUに,\n # そうでなければCPUに配置する\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n model = model.to(device)\n\n # モデルをトレーニングモードに設定する\n model.train()\n\n # 訓練データの処理と開発データの処理を\n # for でシンプルに記述するために,辞書データ化しておく\n dataset_loader = {'train': train_loader,\n 'validation': dev_loader}\n\n # 各エポックにおける損失値と誤り率の履歴\n loss_history = {'train': [],\n 'validation': []}\n error_history = {'train': [],\n 'validation': []}\n \n # 本プログラムでは,validation時の損失値が\n # 最も低かったモデルを保存する.\n # そのため,最も低い損失値,\n # そのときのモデルとエポック数を記憶しておく\n best_loss = -1\n best_model = None\n best_epoch = 0\n # Early stoppingフラグ.Trueになると学習を打ち切る\n early_stop_flag = False\n # Early stopping判定用(損失値の最低値が\n # 更新されないエポックが何回続いているか)のカウンタ\n counter_for_early_stop = 0\n\n # ログファイルの準備\n log_file = open(os.path.join(output_dir,\n 'log.txt'),\n mode='w')\n log_file.write('epoch\\ttrain loss\\t'\\\n 'train err\\tvalid loss\\tvalid err')\n\n # エポックの数だけループ\n for epoch in range(max_num_epoch):\n # early stopフラグが立っている場合は,\n # 学習を打ち切る\n if early_stop_flag:\n print(' Early stopping.'\\\n ' (early_stop_threshold = %d)' \\\n % (early_stop_threshold))\n log_file.write('\\n Early stopping.'\\\n ' (early_stop_threshold = %d)' \\\n % (early_stop_threshold))\n break\n\n # エポック数を表示\n print('epoch %d/%d:' % (epoch+1, max_num_epoch))\n log_file.write('\\n%d\\t' % (epoch+1))\n\n # trainフェーズとvalidationフェーズを交互に実施する\n for phase in ['train', 'validation']:\n # このエポックにおける累積損失値と発話数\n total_loss = 0\n total_utt = 0\n # このエポックにおける累積認識誤り文字数と総文字数\n total_error = 0\n total_frames = 0\n\n # 各フェーズのDataLoaderから1ミニバッチ\n # ずつ取り出して処理する.\n # これを全ミニバッチ処理が終わるまで繰り返す.\n # ミニバッチに含まれるデータは,\n # 音声特徴量,ラベル,フレーム数,\n # ラベル長,発話ID\n for (features, labels, feat_len,\n label_len, utt_ids) \\\n in dataset_loader[phase]:\n\n # CUDAが使える場合はデータをGPUに,\n # そうでなければCPUに配置する\n features, labels = \\\n features.to(device), labels.to(device)\n\n # 勾配をリセット\n optimizer.zero_grad()\n\n # モデルの出力を計算(フォワード処理)\n outputs = model(features)\n\n # この時点でoutputsは\n # [バッチサイズ, フレーム数, ラベル数]\n # の3次元テンソル.\n # CrossEntropyLossを使うためには\n # [サンプル数, ラベル数]の2次元テンソル\n # にする必要があるので,viewを使って\n # 変形する\n b_size, f_size, _ = outputs.size()\n outputs = outputs.view(b_size * f_size,\n dim_out)\n # labelsは[バッチサイズ, フレーム]の\n # 2次元テンソル.\n # CrossEntropyLossを使うためには\n # [サンプル数]の1次元テンソルにする\n # 必要があるので.viewを使って変形する.\n # 1次元への変形はview(-1)で良い.\n # (view(b_size*f_size)でも良い)\n labels = labels.view(-1)\n \n # 損失値を計算する.\n loss = criterion(outputs, labels)\n \n # 訓練フェーズの場合は,\n # 誤差逆伝搬を実行し,\n # モデルパラメータを更新する\n if phase == 'train':\n # 勾配を計算する\n loss.backward()\n # オプティマイザにより,\n # パラメータを更新する\n optimizer.step()\n\n # 損失値を累積する\n 
total_loss += loss.item()\n # 処理した発話数をカウントする\n total_utt += b_size\n\n #\n # フレーム単位の誤り率を計算する\n #\n # 推定ラベルを得る\n _, hyp = torch.max(outputs, 1)\n # ラベルにpad_indexを埋めた\n # フレームを取り除く\n hyp = hyp[labels != pad_index]\n ref = labels[labels != pad_index]\n # 推定ラベルと正解ラベルが不一致な\n # フレーム数を得る\n error = (hyp != ref).sum()\n\n # 誤りフレーム数を累積する\n total_error += error\n # 総フレーム数を累積する\n total_frames += len(ref)\n \n #\n # このフェーズにおいて,1エポック終了\n # 損失値,認識エラー率,モデルの保存等を行う\n # \n\n # 損失値の累積値を,処理した発話数で割る\n epoch_loss = total_loss / total_utt\n # 画面とログファイルに出力する\n print(' %s loss: %f' \\\n % (phase, epoch_loss))\n log_file.write('%.6f\\t' % (epoch_loss))\n # 履歴に加える\n loss_history[phase].append(epoch_loss)\n\n # 総誤りフレーム数を,総フレーム数で\n # 割ってエラー率に換算\n epoch_error = 100.0 * total_error \\\n / total_frames\n # 画面とログファイルに出力する\n print(' %s error rate: %f %%' \\\n % (phase, epoch_error))\n log_file.write('%.6f\\t' % (epoch_error))\n # 履歴に加える\n error_history[phase].append(epoch_error)\n\n #\n # validationフェーズ特有の処理\n #\n if phase == 'validation':\n if epoch == 0 or best_loss > epoch_loss:\n # 損失値が最低値を更新した場合は,\n # その時のモデルを保存する\n best_loss = epoch_loss\n torch.save(model.state_dict(),\n output_dir+'/best_model.pt')\n best_epoch = epoch\n # Early stopping判定用の\n # カウンタをリセットする\n counter_for_early_stop = 0\n else:\n # 最低値を更新しておらず,\n if epoch+1 >= lr_decay_start_epoch:\n # かつlr_decay_start_epoch以上の\n # エポックに達している場合\n if counter_for_early_stop+1 \\\n >= early_stop_threshold:\n # 更新していないエポックが,\n # 閾値回数以上続いている場合,\n # Early stopping フラグを立てる\n early_stop_flag = True\n else:\n # Early stopping条件に\n # 達していない場合は\n # 学習率を減衰させて学習続行\n if lr_decay_factor < 1.0:\n for i, param_group \\\n in enumerate(\\\n optimizer.param_groups):\n if i == 0:\n lr = param_group['lr']\n dlr = lr_decay_factor \\\n * lr\n print(' (Decay '\\\n 'learning rate:'\\\n ' %f -> %f)' \\\n % (lr, dlr))\n log_file.write(\\\n '(Decay learning'\\\n ' rate: %f -> %f)'\\\n % (lr, dlr))\n param_group['lr'] = dlr\n # Early stopping判定用の\n # カウンタを増やす\n counter_for_early_stop += 1\n \n #\n # 全エポック終了\n # 学習済みモデルの保存とログの書き込みを行う\n #\n print('---------------Summary'\\\n '------------------')\n log_file.write('\\n---------------Summary'\\\n '------------------\\n')\n\n # 最終エポックのモデルを保存する\n torch.save(model.state_dict(), \n os.path.join(output_dir,'final_model.pt'))\n print('Final epoch model -> %s/final_model.pt' \\\n % (output_dir))\n log_file.write('Final epoch model ->'\\\n ' %s/final_model.pt\\n' \\\n % (output_dir))\n\n # 最終エポックの情報\n for phase in ['train', 'validation']:\n # 最終エポックの損失値を出力\n print(' %s loss: %f' \\\n % (phase, loss_history[phase][-1]))\n log_file.write(' %s loss: %f\\n' \\\n % (phase, loss_history[phase][-1]))\n # 最終エポックのエラー率を出力 \n print(' %s error rate: %f %%' \\\n % (phase, error_history[phase][-1]))\n log_file.write(' %s error rate: %f %%\\n' \\\n % (phase, error_history[phase][-1]))\n\n # ベストエポックの情報\n # (validationの損失が最小だったエポック)\n print('Best epoch model (%d-th epoch)'\\\n ' -> %s/best_model.pt' \\\n % (best_epoch+1, output_dir))\n log_file.write('Best epoch model (%d-th epoch)'\\\n ' -> %s/best_model.pt\\n' \\\n % (best_epoch+1, output_dir))\n for phase in ['train', 'validation']:\n # ベストエポックの損失値を出力\n print(' %s loss: %f' \\\n % (phase, loss_history[phase][best_epoch]))\n log_file.write(' %s loss: %f\\n' \\\n % (phase, loss_history[phase][best_epoch]))\n # ベストエポックのエラー率を出力\n print(' %s error rate: %f %%' \\\n % (phase, error_history[phase][best_epoch]))\n log_file.write(' %s error rate: %f %%\\n' \\\n % (phase, error_history[phase][best_epoch]))\n\n # 損失値の履歴(Learning 
Curve)グラフにして保存する\n fig1 = plt.figure()\n for phase in ['train', 'validation']:\n plt.plot(loss_history[phase],\n label=phase+' loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n fig1.legend()\n fig1.savefig(output_dir+'/loss.png')\n\n # 認識誤り率の履歴グラフにして保存する\n fig2 = plt.figure()\n for phase in ['train', 'validation']:\n plt.plot(error_history[phase],\n label=phase+' error')\n plt.xlabel('Epoch')\n plt.ylabel('Error [%]')\n fig2.legend()\n fig2.savefig(output_dir+'/error.png')\n\n # ログファイルを閉じる\n log_file.close()\n\n"
] | [
[
"torch.utils.data.DataLoader",
"matplotlib.pyplot.figure",
"torch.nn.CrossEntropyLoss",
"numpy.size",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"torch.max",
"numpy.array",
"matplotlib.pyplot.plot",
"torch.device",
"matplotlib.pyplot.xlabel"
]
] |
SilviaVec/Realtime-Action-Recognition | [
"330a64fc1b2158b1884a1ee86b9cc875925fc121"
] | [
"src/s2_put_skeleton_txts_to_a_single_txt.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n'''\nRead multiple skeletons txts and saved them into a single txt.\nIf an image doesn't have skeleton, discard it.\nIf an image label is not `CLASSES`, discard it.\nInput:\n `skeletons/00001.txt` ~ `skeletons/xxxxx.txt` from `SRC_DETECTED_SKELETONS_FOLDER`.\nOutput:\n `skeletons_info.txt`. The filepath is `DST_ALL_SKELETONS_TXT`.\n'''\n\nimport numpy as np\nimport simplejson\nimport collections\n\nif True: # Include project path\n import sys\n import os\n ROOT = os.path.dirname(os.path.abspath(__file__))+\"/../\"\n CURR_PATH = os.path.dirname(os.path.abspath(__file__))+\"/\"\n sys.path.append(ROOT)\n\n # import utils.lib_feature_proc # This is no needed,\n # because this script only transfer (part of) the data from many txts to a single txt,\n # without doing any data analsysis.\n\nimport utils.lib_commons as lib_commons\n\n\ndef par(path): # Pre-Append ROOT to the path if it's not absolute\n return ROOT + path if (path and path[0] != \"/\") else path\n\n# -- Settings\n\n\ncfg_all = lib_commons.read_yaml(ROOT + \"config/config.yaml\")\ncfg = cfg_all[\"s2_put_skeleton_txts_to_a_single_txt.py\"]\n\nCLASSES = np.array(cfg_all[\"classes\"])\n\nSKELETON_FILENAME_FORMAT = cfg_all[\"skeleton_filename_format\"]\n\nSRC_DETECTED_SKELETONS_FOLDER = par(cfg[\"input\"][\"detected_skeletons_folder\"])\nDST_ALL_SKELETONS_TXT = par(cfg[\"output\"][\"all_skeletons_txt\"])\n\nIDX_PERSON = 0 # Only use the skeleton of the 0th person in each image\nIDX_ACTION_LABEL = 3 # [1, 7, 54, \"jump\", \"jump_03-02-12-34-01-795/00240.jpg\"]\n\n# -- Helper function\n\n\ndef read_skeletons_from_ith_txt(i):\n ''' \n Arguments:\n i {int}: the ith skeleton txt. Zero-based index.\n If there are mutliple people, then there are multiple skeletons' data in this txt.\n Return:\n skeletons_in_ith_txt {list of list}:\n Length of each skeleton data is supposed to be 56 = 5 image info + 51 xyz positions. \n '''\n filename = SRC_DETECTED_SKELETONS_FOLDER + \\\n SKELETON_FILENAME_FORMAT.format(i)\n skeletons_in_ith_txt = lib_commons.read_listlist(filename)\n return skeletons_in_ith_txt\n\n\ndef get_length_of_one_skeleton_data(filepaths):\n ''' Find a non-empty txt file, and then get the length of one skeleton data.\n The data length should be 59, where:\n 59 = 5 + 54.\n 5: [cnt_action, cnt_clip, cnt_image, action_label, filepath]\n See utils.lib_io.get_training_imgs_info for more details\n 54: 18 joints * 3 xyz positions\n '''\n for i in range(len(filepaths)):\n skeletons = read_skeletons_from_ith_txt(i)\n if len(skeletons):\n skeleton = skeletons[IDX_PERSON]\n data_size = len(skeleton)\n assert(data_size == 59) #MODIFIED\n return data_size\n raise RuntimeError(f\"No valid txt under: {SRC_DETECTED_SKELETONS_FOLDER}.\")\n\n\n# -- Main\nif __name__ == \"__main__\":\n ''' Read multiple skeletons txts and saved them into a single txt. 
'''\n\n # -- Get skeleton filenames\n filepaths = lib_commons.get_filenames(SRC_DETECTED_SKELETONS_FOLDER,\n use_sort=True, with_folder_path=True)\n num_skeletons = len(filepaths)\n\n # -- Check data length of one skeleton\n data_length = get_length_of_one_skeleton_data(filepaths)\n print(\"Data length of one skeleton is {data_length}\")\n\n # -- Read in skeletons and push to all_skeletons\n all_skeletons = []\n labels_cnt = collections.defaultdict(int)\n for i in range(num_skeletons):\n\n # Read skeletons from a txt\n skeletons = read_skeletons_from_ith_txt(i)\n if not skeletons: # If empty, discard this image.\n continue\n skeleton = skeletons[IDX_PERSON]\n label = skeleton[IDX_ACTION_LABEL]\n if label not in CLASSES: # If invalid label, discard this image.\n continue\n labels_cnt[label] += 1 \n\n # Push to result\n all_skeletons.append(skeleton)\n\n # Print\n if i == 1 or i % 100 == 0: \n print(\"{}/{}\".format(i, num_skeletons))\n\n # -- Save to txt\n with open(DST_ALL_SKELETONS_TXT, 'w') as f:\n simplejson.dump(all_skeletons, f)\n\n print(f\"There are {len(all_skeletons)} skeleton data.\")\n print(f\"They are saved to {DST_ALL_SKELETONS_TXT}\")\n print(\"Number of each action: \")\n for label in CLASSES:\n print(f\" {label}: {labels_cnt[label]}\")\n"
] | [
[
"numpy.array"
]
] |
dlee0156/bilateral-connectome | [
"26fe165341bb79379fecdd8bc5d7b5bfe3983fdc"
] | [
"pkg/pkg/stats/fisher_exact_nonunity.py"
] | [
"from scipy.stats import nchypergeom_fisher\nimport numpy as np\n\n\ndef fisher_exact_nonunity(table, alternative=\"two-sided\", null_odds=1):\n \"\"\"Perform a Fisher exact test on a 2x2 contingency table.\n Parameters\n ----------\n table : array_like of ints\n A 2x2 contingency table. Elements must be non-negative integers.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n * 'two-sided'\n * 'less': one-sided\n * 'greater': one-sided\n See the Notes for more details.\n null_odds : float, optional (default=1)\n A (possibly non-unity) null odds ratio.\n Returns\n -------\n oddsratio : float\n This is prior odds ratio and not a posterior estimate.\n p_value : float\n P-value, the probability of obtaining a distribution at least as\n extreme as the one that was actually observed, assuming that the\n null hypothesis is true.\n See Also\n --------\n chi2_contingency : Chi-square test of independence of variables in a\n contingency table. This can be used as an alternative to\n `fisher_exact` when the numbers in the table are large.\n barnard_exact : Barnard's exact test, which is a more powerful alternative\n than Fisher's exact test for 2x2 contingency tables.\n boschloo_exact : Boschloo's exact test, which is a more powerful alternative\n than Fisher's exact test for 2x2 contingency tables.\n Notes\n -----\n *Null hypothesis and p-values*\n The null hypothesis is that the input table is from the hypergeometric\n distribution with parameters (as used in `hypergeom`)\n ``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the\n input table is ``[[a, b], [c, d]]``. This distribution has support\n ``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values\n in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x``\n can be interpreted as the upper-left element of a 2x2 table, so the\n tables in the distribution have form::\n [ x n - x ]\n [N - x M - (n + N) + x]\n For example, if::\n table = [6 2]\n [1 4]\n then the support is ``2 <= x <= 7``, and the tables in the distribution\n are::\n [2 6] [3 5] [4 4] [5 3] [6 2] [7 1]\n [5 0] [4 1] [3 2] [2 3] [1 4] [0 5]\n The probability of each table is given by the hypergeometric distribution\n ``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to\n three significant digits)::\n x 2 3 4 5 6 7\n p 0.0163 0.163 0.408 0.326 0.0816 0.00466\n These can be computed with::\n >>> from scipy.stats import hypergeom\n >>> table = np.array([[6, 2], [1, 4]])\n >>> M = table.sum()\n >>> n = table[0].sum()\n >>> N = table[:, 0].sum()\n >>> start, end = hypergeom.support(M, n, N)\n >>> hypergeom.pmf(np.arange(start, end+1), M, n, N)\n array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,\n 0.004662 ])\n The two-sided p-value is the probability that, under the null hypothesis,\n a random table would have a probability equal to or less than the\n probability of the input table. For our example, the probability of\n the input table (where ``x = 6``) is 0.0816. 
The x values where the\n probability does not exceed this are 2, 6 and 7, so the two-sided p-value\n is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::\n >>> from scipy.stats import fisher_exact\n >>> oddsr, p = fisher_exact(table, alternative='two-sided')\n >>> p\n 0.10256410256410257\n The one-sided p-value for ``alternative='greater'`` is the probability\n that a random table has ``x >= a``, which in our example is ``x >= 6``,\n or ``0.0816 + 0.00466 ~= 0.08626``::\n >>> oddsr, p = fisher_exact(table, alternative='greater')\n >>> p\n 0.08624708624708627\n This is equivalent to computing the survival function of the\n distribution at ``x = 5`` (one less than ``x`` from the input table,\n because we want to include the probability of ``x = 6`` in the sum)::\n >>> hypergeom.sf(5, M, n, N)\n 0.08624708624708627\n For ``alternative='less'``, the one-sided p-value is the probability\n that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),\n or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::\n >>> oddsr, p = fisher_exact(table, alternative='less')\n >>> p\n 0.9953379953379957\n This is equivalent to computing the cumulative distribution function\n of the distribution at ``x = 6``:\n >>> hypergeom.cdf(6, M, n, N)\n 0.9953379953379957\n *Odds ratio*\n The calculated odds ratio is different from the one R uses. This SciPy\n implementation returns the (more common) \"unconditional Maximum\n Likelihood Estimate\", while R uses the \"conditional Maximum Likelihood\n Estimate\".\n Examples\n --------\n Say we spend a few days counting whales and sharks in the Atlantic and\n Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the\n Indian ocean 2 whales and 5 sharks. Then our contingency table is::\n Atlantic Indian\n whales 8 2\n sharks 1 5\n We use this table to find the p-value:\n >>> from scipy.stats import fisher_exact\n >>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])\n >>> pvalue\n 0.0349...\n The probability that we would observe this or an even more imbalanced ratio\n by chance is about 3.5%. 
A commonly used significance level is 5%--if we\n adopt that, we can therefore conclude that our observed imbalance is\n statistically significant; whales prefer the Atlantic while sharks prefer\n the Indian ocean.\n \"\"\"\n dist = nchypergeom_fisher\n\n # int32 is not enough for the algorithm\n c = np.asarray(table, dtype=np.int64)\n if not c.shape == (2, 2):\n raise ValueError(\"The input `table` must be of shape (2, 2).\")\n\n if np.any(c < 0):\n raise ValueError(\"All values in `table` must be nonnegative.\")\n\n if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):\n # If both values in a row or column are zero, the p-value is 1 and\n # the odds ratio is NaN.\n return np.nan, 1.0\n\n if c[1, 0] > 0 and c[0, 1] > 0:\n oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])\n else:\n oddsratio = np.inf\n\n n1 = c[0, 0] + c[0, 1]\n n2 = c[1, 0] + c[1, 1]\n n = c[0, 0] + c[1, 0]\n\n rv = dist(n1 + n2, n1, n, null_odds)\n\n def binary_search(n, n1, n2, side):\n \"\"\"Binary search for where to begin halves in two-sided test.\"\"\"\n if side == \"upper\":\n minval = mode\n maxval = n\n else:\n minval = 0\n maxval = mode\n guess = -1\n while maxval - minval > 1:\n if maxval == minval + 1 and guess == minval:\n guess = maxval\n else:\n guess = (maxval + minval) // 2\n pguess = rv.pmf(guess)\n if side == \"upper\":\n ng = guess - 1\n else:\n ng = guess + 1\n if pguess <= pexact < rv.pmf(ng):\n break\n elif pguess < pexact:\n maxval = guess\n else:\n minval = guess\n if guess == -1:\n guess = minval\n if side == \"upper\":\n while guess > 0 and rv.pmf(guess) < pexact * epsilon:\n guess -= 1\n while rv.pmf(guess) > pexact / epsilon:\n guess += 1\n else:\n while rv.pmf(guess) < pexact * epsilon:\n guess += 1\n while guess > 0 and rv.pmf(guess) > pexact / epsilon:\n guess -= 1\n return guess\n\n if alternative == \"less\":\n pvalue = rv.cdf(c[0, 0])\n elif alternative == \"greater\":\n # Same formula as the 'less' case, but with the second column.\n pvalue = rv.sf(c[0, 0] - 1)\n elif alternative == \"two-sided\":\n mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))\n pexact = dist.pmf(c[0, 0], n1 + n2, n1, n, null_odds)\n pmode = dist.pmf(mode, n1 + n2, n1, n, null_odds)\n\n epsilon = 1 - 1e-4\n if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:\n return oddsratio, 1.0\n\n elif c[0, 0] < mode:\n plower = dist.cdf(c[0, 0], n1 + n2, n1, n, null_odds)\n if dist.pmf(n, n1 + n2, n1, n, null_odds) > pexact / epsilon:\n return oddsratio, plower\n\n guess = binary_search(n, n1, n2, \"upper\")\n pvalue = plower + dist.sf(guess - 1, n1 + n2, n1, n, null_odds)\n else:\n pupper = dist.sf(c[0, 0] - 1, n1 + n2, n1, n, null_odds)\n if dist.pmf(0, n1 + n2, n1, n, null_odds) > pexact / epsilon:\n return oddsratio, pupper\n\n guess = binary_search(n, n1, n2, \"lower\")\n pvalue = pupper + dist.cdf(guess, n1 + n2, n1, n, null_odds)\n else:\n msg = \"`alternative` should be one of {'two-sided', 'less', 'greater'}\"\n raise ValueError(msg)\n\n pvalue = min(pvalue, 1.0)\n\n return oddsratio, pvalue\n"
] | [
[
"numpy.maximum",
"numpy.any",
"numpy.abs",
"numpy.asarray"
]
] |
JunaidAkhter/vmc_jax | [
"4f0dcc9f32cb6885cad3c5d797d9f9e01247f737"
] | [
"sg_sr/sr_data/sr_cplx/svd/cpxrbm.py"
] | [
"import sys\n# Find jVMC package\n#sys.path.append(\"/Users/akhter/githesis-/jvmc/vmc_jax\")\nsys.path.append(\"/Users/akhter/thesis/vmc_jax\")\n\n\nimport jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\n\nimport jax.random as random\nimport jax.numpy as jnp\nimport numpy as np\nfrom jax.tree_util import tree_flatten, tree_unflatten\nimport jVMC\n\nimport tensornetwork as tn\ntn.set_default_backend(\"jax\")\n\nimport functools\nfrom typing import Any, Callable, Sequence, Optional\nimport flax\nfrom flax import linen as nn\nfrom flax import optim\nfrom jax import lax\nfrom functools import partial\n\nimport jVMC.nets.initializers as init\nimport jVMC.global_defs as global_defs\n\nimport time\n\n\n\n\n\n# DMRG energies produced with the TeNPy library https://github.com/tenpy/tenpy\n#DMRG_energies = {\"10\": -1.0545844370449059, \"20\": -1.0900383739, \"100\": -1.1194665474274852}\n\nL = 16 # system size\ng = -0.7 # strength of external field\n\n# Set up hamiltonian for open boundary conditions\nhamiltonian = jVMC.operator.BranchFreeOperator()\nfor l in range(L - 1):\n hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))\n hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))\nhamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))\n\ndef svd(dp,shape, rank=L):\n\n \"\"\"Takes in the concatenated matrix and spits out the copressed one\"\"\"\n \n #getting the real and the complex parts of the matrix\n real_matrix = jnp.reshape(dp[:L*h], (L,h)) \n complex_matrix = jnp.reshape(dp[L*h:], (L,h))\n print(\"real_matrix\", real_matrix, \"complex_matrix:\", complex_matrix)\n #creating the W matrix from the real and the complex parts \n matrix = jax.lax.complex(real_matrix, complex_matrix)\n print(\"matrix:\", matrix)\n #Now that we have the matrix we can svd it and reject some of the singular values. \n tensor1 = jnp.reshape(matrix, shape)\n print(\"tensor1_shape and atype:\", tensor1.shape, type(tensor1))\n #reshaping the matrix in a tensor of given shape e.g. 
a four legged tensor\n node = tn.Node(tensor1)\n #now we perform the svd of the node keeping the left two and the right two legs as they are \n u, vh, _ = tn.split_node(node, left_edges=[node[0], node[1]], right_edges=[node[2],node[3]], max_singular_values=r)\n print(\"shape of u:\", u.shape, \"shape of vh:\", vh.shape)\n node_contracted = (u @ vh).tensor\n matrix_returned = jnp.reshape(node_contracted, (matrix.shape))\n print(\"shape of matrix_returned:\", matrix_returned.shape)\n return matrix_returned\n \n\ndef simulate(rng, iterations, rank, t_step):\n net = net_init\n psi = jVMC.vqs.NQS(net, seed=rng) # Variational wave function\n\n\n # Set up sampler\n #tic = time.perf_counter()\n sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,\n numChains=100, sweepSteps=L,\n numSamples=30000, thermalizationSweeps=25)\n #toc = time.perf_counter()\n \n #print(\" == Total time for sampling step: %fs\\n\" % (toc - tic))\n\n # Set up TDVP\n tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,\n svdTol=1e-8, diagonalShift=10, makeReal='real')\n\n stepper = jVMC.util.stepper.Euler(timeStep=t_step) # ODE integrator\n\n\n res = []\n \n for n in range(iterations):\n dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)\n print(\"dp_inserted\", dp)\n dp = svd(dp, (4,4,2,2), rank = r)\n \n dp = jnp.concatenate([p.ravel() for p in tree_flatten(dp)[0]])\n dp = jnp.concatenate([dp.real, dp.imag])\n print(\"dp_returned\", dp)\n psi.set_parameters(dp)\n\n print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)\n\n res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])\n np.savetxt('dp', dp) \n return np.array(res)\n\n\n#iterations = 2500\n#rng_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\niterations = 2\nrng_list = [0, 1]\ntime_step = 12e-2 \nh = L\nnet_init = jVMC.nets.CpxRBM(numHidden = h, bias = False)\n\n#rank_list = jnp.arange(L/2, L+1)\nrank_list = [8,9]\nresults = []\nfor j,rng in enumerate(rng_list):\n \n E_0_aarray = np.zeros((iterations, len(rng_list)))#an empty two dimensional array corresponding to the D and \"rng\".\n\n for r in rank_list:\n \n #print(\"rng:\", rng)\n res = simulate(rng, iterations, rank=r, t_step = time_step)\n E_0 = res + 1.0660513358196495#this energy is for 16 spins\n #adding the energy values obtained to the first entry of the row\n #print(\"length\", len(E_0))\n E_0_aarray[:, j] = E_0[:, 0]\n #print(\"final_energy:\", E_0[-1])\n \n results.apend(E_0_aarray)\n\n#print(\"E_array\", E_0_aarray)\n\nnp.savetxt('cpxrbm_16_h16_sr_12t', np.array(results), header='Data for CpxRBM with h = 16 for 1 initializations')\n"
] | [
[
"numpy.array",
"numpy.savetxt"
]
] |
haidi-ustc/scikit-nano | [
"ef9b24165ba37918b3f520657f7311ba139b3e7d"
] | [
"sknano/structures/_nanotube_bundle.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n==============================================================================\nNanotube bundle base class (:mod:`sknano.structures._nanotube_bundle`)\n==============================================================================\n\n.. currentmodule:: sknano.structures._nanotube_bundle\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n__docformat__ = 'restructuredtext en'\n\nimport numbers\n\nimport numpy as np\n\nfrom sknano.core.atoms import Atom, vdw_radius_from_basis\nfrom sknano.core.refdata import aCC, grams_per_Da\nfrom sknano.core.math import Vector\nfrom ._extras import get_chiral_indices\n\n__all__ = ['compute_bundle_density', 'NanotubeBundleMixin',\n 'NanotubeBundleBase']\n\n\ndef compute_bundle_density(*Ch, r_vdw=None, bond=None,\n element1=None, element2=None):\n \"\"\"Compute nanotube bundle mass density \\\n :math:`\\\\rho_{\\\\mathrm{bundle}}(n, m)` in :math:`\\\\mathrm{g/cm^3}`.\n\n .. math::\n\n \\\\rho_{\\\\mathrm{bundle}}(n, m) = \\\\frac{8\\\\pi^2 m_{\\\\mathrm{C}}\n \\\\sqrt{n^2 + m^2 + nm}}{9\\\\sqrt{3}a_{\\\\mathrm{CC}}^3 \\\\times\n \\\\left(\\\\sqrt{n^2 + m^2 + nm} +\n \\\\frac{\\\\pi d_{\\\\mathrm{vdW}}}{\\\\sqrt{3}a_{\\\\mathrm{CC}}}\\\\right)^2}\n\n Parameters\n ----------\n *Ch : {:class:`python:tuple` or :class:`python:int`\\ s}\n Either a 2-tuple of ints or 2 integers giving the chiral indices\n of the nanotube chiral vector\n :math:`\\\\mathbf{C}_h = n\\\\mathbf{a}_1 + m\\\\mathbf{a}_2 = (n, m)`.\n r_vdw : int\n van der Waals radius of nanotube atoms\n bond : float, optional\n Bond length.\n\n Returns\n -------\n float\n :math:`\\\\rho_{\\\\mathrm{bundle}}` in units of\n :math:`\\\\mathrm{\\\\frac{g}{cm^3}}`\n\n \"\"\"\n n, m, _ = get_chiral_indices(*Ch)\n\n if bond is None:\n bond = aCC\n\n if element1 is None:\n element1 = 'C'\n if element2 is None:\n element2 = 'C'\n\n if r_vdw is None:\n r_vdw = vdw_radius_from_basis(element1, element2)\n\n if element1 == element2:\n bundle_density = 8 * np.pi ** 2 * Atom(element1).mass * \\\n np.sqrt(n ** 2 + m ** 2 + n * m) / \\\n (9 * np.sqrt(3) * bond ** 3 *\n (np.sqrt(n ** 2 + m ** 2 + n * m) +\n 2 * np.pi * r_vdw / (np.sqrt(3) * bond)) ** 2)\n else:\n bundle_density = 0\n\n # there are 1.6605e-24 grams / Da and 1e-8 cm / angstrom\n bundle_density *= grams_per_Da / (1e-8) ** 3\n return bundle_density\n\n\nclass NanotubeBundleMixin:\n \"\"\"Mixin class for nanotube bundles.\"\"\"\n\n @property\n def nx(self):\n \"\"\"Number of nanotubes along the :math:`x`-axis.\"\"\"\n return self._nx\n\n @nx.setter\n def nx(self, value):\n \"\"\"Set :math:`n_x`\"\"\"\n if not (isinstance(value, numbers.Number) or value > 0):\n raise TypeError('Expected a positive integer.')\n self._nx = int(value)\n\n @nx.deleter\n def nx(self):\n del self._nx\n\n @property\n def ny(self):\n \"\"\"Number of nanotubes along the :math:`y`-axis.\"\"\"\n return self._ny\n\n @ny.setter\n def ny(self, value):\n \"\"\"Set :math:`n_y`\"\"\"\n if not (isinstance(value, numbers.Number) or value > 0):\n raise TypeError('Expected a positive integer.')\n self._ny = int(value)\n\n @ny.deleter\n def ny(self):\n del self._ny\n\n @property\n def Lx(self):\n return self.nx * (self.dt + 2 * self.vdw_radius) / 10\n\n @property\n def Ly(self):\n return self.ny * (self.dt + 2 * self.vdw_radius) / 10\n\n @property\n def bundle_geometry(self):\n return self._bundle_geometry\n\n @bundle_geometry.setter\n def bundle_geometry(self, value):\n if value is not None and value 
not in self._bundle_geometries:\n print('Unrecognized `bundle_geometry`: {!r}'.format(value))\n value = None\n self._bundle_geometry = value\n\n @property\n def bundle_packing(self):\n return self._bundle_packing\n\n @bundle_packing.setter\n def bundle_packing(self, value):\n if value is None and \\\n self.bundle_geometry in ('square', 'rectangle'):\n value = 'ccp'\n elif value is None and \\\n self.bundle_geometry in ('triangle', 'hexagon'):\n value = 'hcp'\n\n if value is not None and value not in ('ccp', 'hcp'):\n raise ValueError('Expected value to be `hcp` or `ccp`')\n\n self._bundle_packing = value\n # self.generate_bundle_coords()\n\n @bundle_packing.deleter\n def bundle_packing(self):\n del self._bundle_packing\n\n @property\n def bundle_mass(self):\n return self.Ntubes * self.tube_mass\n\n @property\n def Natoms(self):\n \"\"\"Number of atoms in nanotube bundle.\n\n **Returns total number of atoms in nanotube bundle.**\n Use :attr:`~NanotubeBundleMixin.Natoms_per_tube` to\n get a list of the number of atoms in each nanotube in\n the bundle.\n\n \"\"\"\n return np.asarray(self.Natoms_list).sum()\n\n @property\n def Natoms_per_bundle(self):\n return self.Natoms\n\n @property\n def Natoms_list(self):\n return [nanotube.Natoms for nanotube in self.bundle_list]\n\n @property\n def Ntubes(self):\n return len(self.bundle_coords)\n\n @property\n def Natoms_per_tube(self):\n \"\"\"Alias for :attr:`~NanotubeBundleMixin.Natoms_list`.\"\"\"\n return self.Natoms_list\n\n def generate_bundle_coords(self):\n \"\"\"Generate coordinates of bundle tubes.\"\"\"\n self.r1 = Vector()\n self.r2 = Vector()\n self.bundle_coords = []\n\n self.r1.x = self.dt + 2 * self.vdw_radius\n if self.bundle_packing in ('cubic', 'ccp'):\n self.r2.y = self.r1.x\n else:\n self.r2.x = self.r1.x * np.cos(2 * np.pi / 3)\n self.r2.y = self.r1.x * np.sin(2 * np.pi / 3)\n if self.bundle_packing is None:\n self._bundle_packing = 'hcp'\n\n if self.bundle_geometry == 'hexagon':\n nrows = max(self.nx, self.ny, 3)\n if nrows % 2 != 1:\n nrows += 1\n\n ntubes_per_end_rows = int((nrows + 1) / 2)\n\n row = 0\n ntubes_per_row = nrows\n while ntubes_per_row >= ntubes_per_end_rows:\n if row == 0:\n for n in range(ntubes_per_row):\n dr = n * self.r1\n self.bundle_coords.append(dr)\n else:\n for nx in range(ntubes_per_row):\n for ny in (-row, row):\n dr = Vector()\n dr.x = abs(ny * self.r2.x)\n dr.y = ny * self.r2.y\n dr = nx * self.r1 + dr\n self.bundle_coords.append(dr)\n row += 1\n ntubes_per_row = nrows - row\n\n elif self.bundle_geometry == 'rectangle':\n Lx = 10 * self.Lx\n for nx in range(self.nx):\n for ny in range(self.ny):\n dr = nx * self.r1 + ny * self.r2\n while dr.x < 0:\n dr.x += Lx\n self.bundle_coords.append(dr)\n\n elif self.bundle_geometry == 'square':\n pass\n elif self.bundle_geometry == 'triangle':\n pass\n else:\n for nx in range(self.nx):\n for ny in range(self.ny):\n dr = nx * self.r1 + ny * self.r2\n self.bundle_coords.append(dr)\n\n\nclass NanotubeBundleBase(NanotubeBundleMixin):\n \"\"\"Nanotube bundle structure base class.\"\"\"\n\n _bundle_geometries = ['square', 'rectangle', 'hexagon']\n\n def __init__(self, *args, nx=1, ny=1, bundle_packing=None,\n bundle_geometry=None, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n self.nx = nx\n self.ny = ny\n self.bundle_geometry = bundle_geometry\n self.bundle_packing = bundle_packing\n self.bundle_list = []\n self.generate_bundle_coords()\n\n def todict(self):\n attrdict = super().todict()\n attrdict.update(dict(nx=self.nx, ny=self.ny,\n 
bundle_packing=self.bundle_packing,\n bundle_geometry=self.bundle_geometry))\n return attrdict\n"
] | [
[
"numpy.sqrt",
"numpy.sin",
"numpy.asarray",
"numpy.cos"
]
] |
nagapavan525/wtfml | [
"f2211addbe423a51b4dbbdec5a40d09649412452"
] | [
"wtfml/data_loaders/image/classification.py"
] | [
"\"\"\"\n__author__: Abhishek Thakur\n\"\"\"\n\nimport torch\n\nimport numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageFile\n\ntry:\n import torch_xla.core.xla_model as xm\n\n _xla_available = True\nexcept ImportError:\n _xla_available = False\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass ClassificationDataset:\n def __init__(self, image_paths, targets, resize, augmentations=None):\n \"\"\"\n :param image_paths: list of paths to images\n :param targets: numpy array\n :param resize: tuple or None\n :param augmentations: albumentations augmentations\n \"\"\"\n self.image_paths = image_paths\n self.targets = targets\n self.resize = resize\n self.augmentations = augmentations\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, item):\n image = Image.open(self.image_paths[item])\n targets = self.targets[item]\n if self.resize is not None:\n image = image.resize(\n (self.resize[1], self.resize[0]), resample=Image.BILINEAR\n )\n image = np.array(image)\n if self.augmentations is not None:\n augmented = self.augmentations(image=image)\n image = augmented[\"image\"]\n image = np.transpose(image, (2, 0, 1)).astype(np.float32)\n return {\n \"image\": torch.tensor(image),\n \"targets\": torch.tensor(targets),\n }\n\n\nclass ClassificationDataLoader:\n def __init__(self, image_paths, targets, resize, augmentations=None):\n \"\"\"\n :param image_paths: list of paths to images\n :param targets: numpy array\n :param resize: tuple or None\n :param augmentations: albumentations augmentations\n \"\"\"\n self.image_paths = image_paths\n self.targets = targets\n self.resize = resize\n self.augmentations = augmentations\n self.dataset = ClassificationDataset(\n image_paths=self.image_paths,\n targets=self.targets,\n resize=self.resize,\n augmentations=self.augmentations,\n )\n\n def fetch(self, batch_size, num_workers, drop_last=False, shuffle=True, tpu=False):\n \"\"\"\n :param batch_size: batch size\n :param num_workers: number of processes to use\n :param drop_last: drop the last batch?\n :param shuffle: True/False\n :param tpu: True/False, to use tpu or not\n \"\"\"\n sampler = None\n if tpu:\n sampler = torch.utils.data.distributed.DistributedSampler(\n self.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=shuffle,\n )\n\n data_loader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=batch_size,\n sampler=sampler,\n drop_last=drop_last,\n num_workers=num_workers,\n )\n return data_loader\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.tensor",
"numpy.transpose"
]
] |
jcrist/pyblis | [
"d9c67d40a15c656a4681ba1b9ca0c52eff40163c"
] | [
"pyblis/tests/utils.py"
] | [
"import pytest\n\nimport numpy as np\n\n\nall_dtypes = pytest.mark.parametrize('dtype', ['f4', 'f8', 'c8', 'c16'])\n\n\nclass Base(object):\n def rand(self, dtype, shape=()):\n a = np.random.normal(size=shape).astype(dtype)\n if np.issubdtype(dtype, np.complexfloating):\n a += np.random.normal(size=a.shape) * 1j\n return a if a.shape else a.reshape((1,))[0]\n\n def call_base(self, *args, **kwargs):\n return self.call(*args, **kwargs)\n\n\nclass NumbaMixin(object):\n @property\n def error_cls(self):\n import numba\n return numba.errors.TypingError\n\n @classmethod\n def setup_class(cls):\n base, full = cls.compile()\n cls.base = staticmethod(base)\n cls.full = staticmethod(full)\n\n def call(self, *args, **kwargs):\n return self.full(*args, **kwargs)\n\n def call_base(self, *args, **kwargs):\n return self.base(*args, **kwargs)\n"
] | [
[
"numpy.random.normal",
"numpy.issubdtype"
]
] |
FrancisDinh/Smart-Energy-Project | [
"16b021e127d9ac5c01653abc31d8cc5d0a7a05c6"
] | [
"application/DemandSideNew/Building/DemandProfile.py"
] | [
"import os, sys\nimport json\nimport os.path\nimport numpy\n\nclass DemandProfile:\n def __init__(self):\n cwd = os.getcwd()\n self.fname = cwd + '/demand-profile.json'\n \n def get_data(self):\n demand={}\n with open(self.fname) as demand_info:\n demand = json.load(demand_info)\n return demand\n\n def calculate_total_demand(self):\n data = self.get_data()\n total_energy_data=[]\n num=0\n total_demand = numpy.zeros(24)\n for i in data:\n value = i[str(1+num)][\"Circulation Pump\"]+i[str(1+num)][\"Dish Washer\"]+i[str(1+num)][\"Freezer\"]+i[str(1+num)][\"Washing Machine\"]\n total_demand[num] = value\n num+=1\n return total_demand\n\n#sample object\n#sample = DemandProfile()\n#print(sample.calculate_total_demand())"
] | [
[
"numpy.zeros"
]
] |
Koukyosyumei/Senjyu | [
"70faa45e13cb3b1ccdee8a40146a03d60abe11e5"
] | [
"src/senjyu/ml/clustering/kmeans.py"
] | [
"import numpy as np\nfrom mpi4py import MPI\n\n\nclass Kmeans:\n def __init__(self, k=3, num_iterations=100, seed=42):\n self.k = k\n self.num_iterations = num_iterations\n self.centorids = None\n self.dim = None\n self.n = None\n\n np.random.seed(seed)\n\n def train(self, X, parallel=False):\n if parallel:\n pass\n else:\n return self._train_standalone(X)\n\n def _init_distiution(self, args=None):\n self.args = args\n self.comm = MPI.COMM_WORLD\n self.rank = self.comm.Get_rank()\n self.size = self.comm.Get_size()\n\n def _em_standalone(self, X):\n # E-step\n distance = np.zeros((self.k, self.n))\n for cluster_id in range(self.k):\n distance[cluster_id, :] = np.linalg.norm(\n X - self.centorids[cluster_id, :], axis=1\n )\n pred = np.argmin(distance, axis=0)\n\n # M-step\n for cluster_id in range(self.k):\n self.centorids[cluster_id, :] = np.mean(X[pred == cluster_id, :], axis=0)\n\n return pred\n\n def _train_standalone(self, X):\n self.n = X.shape[0]\n self.dim = X.shape[1]\n self.centorids = np.random.normal(0, 1, (self.k, self.dim))\n\n for _ in range(self.num_iterations):\n pred = self._em_standalone(X)\n\n return pred\n"
] | [
[
"numpy.zeros",
"numpy.argmin",
"numpy.random.seed",
"numpy.random.normal",
"numpy.linalg.norm",
"numpy.mean"
]
] |
Aaron-YunZhao/xalpha | [
"76dc6390cb5714b1c004f7e79e4af832ad1e6fa5"
] | [
"xalpha/realtime.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodule for realtime watch and notfication\n\"\"\"\n\nimport datetime as dt\nimport smtplib\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr, parseaddr\nfrom re import match\n\nimport pandas as pd\n\nfrom xalpha.cons import today\nfrom xalpha.info import _download, fundinfo\nfrom xalpha.trade import trade\n\n\ndef _format_addr(s):\n \"\"\"\n parse the email sender and receiver, Chinese encode and support\n\n :param s: eg. 'name <[email protected]>, name2 <[email protected]>'\n \"\"\"\n name, addr = parseaddr(s)\n return formataddr((Header(name, \"utf-8\").encode(), addr))\n\n\ndef mail(\n title,\n content,\n sender=None,\n receiver=None,\n password=None,\n server=None,\n port=None,\n sender_name=\"sender\",\n receiver_name=None,\n):\n \"\"\"\n send email\n\n :param title: str, title of the email\n :param content: str, content of the email, plain text only\n :param conf: all other paramters can be import as a dictionay, eg.conf = {'sender': '[email protected]',\n 'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',\n 'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.\n The receiver_name and sender_name options can be omitted.\n \"\"\"\n ret = True\n try:\n if receiver_name is None:\n receiver_name = [\"receiver\" for _ in receiver]\n msg = MIMEText(content, \"plain\", \"utf-8\")\n msg[\"From\"] = _format_addr(\"%s <%s>\" % (sender_name, sender))\n # 括号里的对应发件人邮箱昵称、发件人邮箱账号\n receivestr = \"\"\n for i, s in enumerate(receiver):\n receivestr += receiver_name[i]\n receivestr += \" <\"\n receivestr += s\n receivestr += \">, \"\n msg[\"To\"] = _format_addr(receivestr) # 括号里的对应收件人邮箱昵称、收件人邮箱账号\n msg[\"Subject\"] = title # 邮件的主题,即标题\n\n server = smtplib.SMTP_SSL(server, port) # 发件人邮箱中的SMTP服务器和端口号\n server.login(sender, password) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(\n sender, receiver, msg.as_string()\n ) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit()\n except Exception:\n ret = False\n return ret\n\n\nclass rtdata:\n \"\"\"\n get real time data of specific funds\n\n :param code: string of six digitals for funds\n \"\"\"\n\n def __init__(self, code):\n url = \"http://fundgz.1234567.com.cn/js/\" + code + \".js\"\n page = _download(url)\n self.code = code\n self.rtvalue = float(match(r'.*\"gsz\":\"(\\d*\\.\\d*)\",.*', page.text)[1])\n self.name = match(r'.*\"name\":\"([^,]*)\",.*', page.text)[1]\n self.time = dt.datetime.strptime(\n match(r'.*\"gztime\":\"([\\d\\s\\-\\:]*)\".*', page.text)[1], \"%Y-%m-%d %H:%M\"\n )\n\n\ndef rfundinfo(\n code, round_label=0, dividend_label=0, fetch=False, save=False, path=\"\", form=\"csv\"\n):\n \"\"\"\n give a fundinfo object with todays estimate netvalue at running time\n\n :param code: string of six digitals for funds\n :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init\n :param save: boolean, when open the save option, info classes automatically save the class to files\n :param path: string, the file path prefix of IO\n :param form: string, the format of IO, options including: 'csv'\n :returns: the fundinfo object\n \"\"\"\n fundobj = fundinfo(\n code,\n round_label=round_label,\n dividend_label=dividend_label,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n )\n rt = rtdata(code)\n rtdate = dt.datetime.combine(rt.time, dt.time.min)\n rtvalue = rt.rtvalue\n if (rtdate - fundobj.price.iloc[-1].date).days > 0:\n fundobj.price = 
fundobj.price.append(\n pd.DataFrame(\n [[rtdate, rtvalue, fundobj.price.iloc[-1].totvalue, 0]],\n columns=[\"date\", \"netvalue\", \"totvalue\", \"comment\"],\n ),\n ignore_index=True,\n )\n return fundobj\n\n\nclass review:\n \"\"\"\n review policys and give the realtime purchase suggestions\n\n :param policylist: list of policy object\n :param namelist: list of names of corresponding policy, default as 0 to n-1\n :param date: object of datetime, check date, today is prefered, date other than is not guaranteed\n \"\"\"\n\n def __init__(self, policylist, namelist=None, date=today()):\n self.warn = []\n self.message = []\n self.policylist = policylist\n if namelist is None:\n self.namelist = [i for i in range(len(policylist))]\n else:\n self.namelist = namelist\n assert len(self.policylist) == len(self.namelist)\n for i, policy in enumerate(policylist):\n row = policy.status[policy.status[\"date\"] == date]\n if len(row) == 1:\n warn = (\n policy.aim.name,\n policy.aim.code,\n row.iloc[0].loc[policy.aim.code],\n self.namelist[i],\n )\n self.warn.append(warn)\n if warn[2] > 0:\n sug = \"买入%s元\" % warn[2]\n elif warn[2] < 0:\n ratio = -warn[2] / 0.005 * 100\n share = (\n trade(fundinfo(warn[1]), policy.status)\n .briefdailyreport()\n .get(\"currentshare\", 0)\n )\n share = -warn[2] / 0.005 * share\n sug = \"卖出%s%%的份额,也即%s份额\" % (ratio, share)\n self.message.append(\n \"根据%s计划,建议%s,%s(%s)\" % (warn[3], sug, warn[0], warn[1])\n )\n self.content = \"\\n\".join(map(str, self.message))\n\n def __str__(self):\n return self.content\n\n def notification(self, conf):\n \"\"\"\n send email of self.content, at least support for qq email sender\n\n :param conf: the configuration dictionary for email send settings, no ** before the dict in needed.\n eg.conf = {'sender': '[email protected]',\n 'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',\n 'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.\n The receiver_name and sender_name options can be omitted.\n \"\"\"\n if self.content:\n ret = mail(\"Notification\", self.content, **conf)\n if ret:\n print(\"邮件发送成功\")\n else:\n print(\"邮件发送失败\")\n else:\n print(\"没有提醒待发送\")\n"
] | [
[
"pandas.DataFrame"
]
] |
ngduyanhece/ConvLab | [
"a04582a77537c1a706fbf64715baa9ad0be1301a"
] | [
"convlab/modules/e2e/multiwoz/Mem2Seq/utils/utils_babi_mem2seq.py"
] | [
"# Modified by Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\n\nimport torch\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nfrom utils.config import *\nfrom utils.until_temp import entityList\n\n\ndef hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\nMEM_TOKEN_SIZE = 3\n\nclass Lang:\n def __init__(self):\n self.word2index = {}\n self.word2count = {}\n self.index2word = {UNK_token: 'UNK', PAD_token: \"PAD\", EOS_token: \"EOS\", SOS_token: \"SOS\"}\n self.n_words = 4 # Count default tokens\n \n def index_words(self, story, trg=False):\n if trg:\n for word in story.split(' '):\n self.index_word(word)\n else:\n for word_triple in story:\n for word in word_triple:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\nclass Dataset(data.Dataset):\n \"\"\"Custom data.Dataset compatible with data.DataLoader.\"\"\"\n def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):\n \"\"\"Reads source and target sequences from txt files.\"\"\"\n self.src_seqs = src_seq\n self.trg_seqs = trg_seq\n self.index_seqs = index_seq \n self.gate_seq = gate_seq \n self.num_total_seqs = len(self.src_seqs)\n self.src_word2id = src_word2id\n self.trg_word2id = trg_word2id\n self.max_len = max_len\n self.conv_seq = conv_seq\n self.ent = ent\n self.ID = ID\n self.kb_arr = kb_arr\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (source and target).\"\"\"\n src_seq = self.src_seqs[index]\n trg_seq = self.trg_seqs[index]\n index_s = self.index_seqs[index]\n gete_s = self.gate_seq[index]\n src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)\n trg_seq = self.preprocess(trg_seq, self.trg_word2id)\n index_s = self.preprocess_inde(index_s,src_seq)\n gete_s = self.preprocess_gate(gete_s)\n conv_seq = self.conv_seq[index]\n conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)\n ID = self.ID[index]\n kb_arr = self.kb_arr[index]\n \n return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr\n\n def __len__(self):\n return self.num_total_seqs\n \n def preprocess(self, sequence, word2id, trg=True):\n \"\"\"Converts words to ids.\"\"\"\n if trg:\n story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]\n else:\n story = []\n for i, word_triple in enumerate(sequence):\n story.append([])\n for ii, word in enumerate(word_triple):\n temp = word2id[word] if word in word2id else UNK_token\n story[i].append(temp)\n try:\n story = torch.Tensor(story)\n except:\n print(sequence)\n print(story)\n return story\n\n def preprocess_inde(self, sequence, src_seq):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [len(src_seq)-1]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_gate(self, sequence):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [0]\n sequence = torch.Tensor(sequence)\n return sequence\n\ndef collate_fn(data):\n def merge(sequences,max_len):\n lengths = [len(seq) for seq in sequences]\n if (max_len):\n padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i,:end,:] = seq[:end]\n else:\n 
padded_seqs = torch.ones(len(sequences), max(lengths)).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i, :end] = seq[:end]\n return padded_seqs, lengths\n\n # sort a list by sequence length (descending order) to use pack_padded_sequence\n data.sort(key=lambda x: len(x[0]), reverse=True)\n # seperate source and target sequences\n src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)\n # merge sequences (from tuple of 1D tensor to 2D tensor)\n src_seqs, src_lengths = merge(src_seqs,max_len)\n trg_seqs, trg_lengths = merge(trg_seqs,None)\n ind_seqs, _ = merge(ind_seqs,None)\n gete_s, _ = merge(gete_s,None)\n conv_seqs, conv_lengths = merge(conv_seq, max_len)\n \n src_seqs = Variable(src_seqs).transpose(0,1)\n trg_seqs = Variable(trg_seqs).transpose(0,1)\n ind_seqs = Variable(ind_seqs).transpose(0,1)\n gete_s = Variable(gete_s).transpose(0,1)\n conv_seqs = Variable(conv_seqs).transpose(0,1)\n\n if USE_CUDA:\n src_seqs = src_seqs.cuda()\n trg_seqs = trg_seqs.cuda()\n ind_seqs = ind_seqs.cuda()\n gete_s = gete_s.cuda()\n conv_seqs = conv_seqs.cuda()\n return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr\n\ndef read_langs(file_name, entity, max_line = None):\n logging.info((\"Reading lines from {}\".format(file_name)))\n data=[]\n contex_arr = []\n conversation_arr = []\n kb_arr = []\n u=None\n r=None\n user_counter = 0\n system_counter = 0\n system_res_counter = 0\n KB_counter = 0\n dialog_counter = 0\n with open(file_name) as fin:\n cnt_ptr = 0\n cnt_voc = 0\n max_r_len = 0\n cnt_lin = 1\n time_counter = 1 \n for line in fin:\n line=line.strip()\n if line:\n nid, line = line.split(' ', 1)\n if '\\t' in line:\n u, r = line.split('\\t')\n if u!='<SILENCE>': user_counter += 1\n system_counter += 1\n\n gen_u = generate_memory(u, \"$u\", str(time_counter)) \n contex_arr += gen_u\n conversation_arr += gen_u\n\n r_index = []\n gate = []\n for key in r.split(' '):\n if ENTPTR: \n if (key in entity):\n index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else:\n index = len(contex_arr) \n cnt_voc +=1 \n else: \n index = len(contex_arr) \n gate.append(0) \n cnt_voc +=1 \n else:\n index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else: \n index = len(contex_arr)\n gate.append(0) \n cnt_voc +=1 \n r_index.append(index)\n system_res_counter += 1 \n\n if len(r_index) > max_r_len: \n max_r_len = len(r_index)\n contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]\n \n ent = []\n for key in r.split(' '):\n if(key in entity):\n ent.append(key)\n\n data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])\n gen_r = generate_memory(r, \"$s\", str(time_counter)) \n contex_arr += gen_r\n conversation_arr += gen_r\n\n time_counter += 1\n else:\n KB_counter += 1\n r=line\n if USEKB:\n temp = generate_memory(r, \"\", \"\") \n contex_arr += temp\n kb_arr += temp\n else:\n cnt_lin+=1\n if(max_line and cnt_lin>=max_line):\n break\n contex_arr=[]\n conversation_arr = []\n kb_arr = []\n time_counter = 1\n dialog_counter += 1\n max_len = max([len(d[0]) for d in data])\n logging.info(\"Pointer percentace= {} \".format(cnt_ptr/(cnt_ptr+cnt_voc)))\n logging.info(\"Max responce Len: {}\".format(max_r_len))\n logging.info(\"Max Input Len: 
{}\".format(max_len))\n logging.info(\"Avg. User Utterances: {}\".format(user_counter*1.0/dialog_counter))\n logging.info(\"Avg. Bot Utterances: {}\".format(system_counter*1.0/dialog_counter))\n logging.info(\"Avg. KB results: {}\".format(KB_counter*1.0/dialog_counter))\n logging.info(\"Avg. responce Len: {}\".format(system_res_counter*1.0/system_counter))\n \n print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])\n return data, max_len, max_r_len\n\ndef generate_memory(sent, speaker, time):\n sent_new = []\n sent_token = sent.split(' ')\n if speaker==\"$u\" or speaker==\"$s\":\n for word in sent_token:\n temp = [word, speaker, 't'+str(time)] + [\"PAD\"]*(MEM_TOKEN_SIZE-3)\n sent_new.append(temp)\n else:\n if sent_token[1]==\"R_rating\":\n sent_token = sent_token + [\"PAD\"]*(MEM_TOKEN_SIZE-len(sent_token))\n else:\n sent_token = sent_token[::-1] + [\"PAD\"]*(MEM_TOKEN_SIZE-len(sent_token))\n sent_new.append(sent_token)\n return sent_new\n\ndef get_seq(pairs,lang,batch_size,type,max_len): \n x_seq = []\n y_seq = []\n ptr_seq = []\n gate_seq = []\n conv_seq = []\n ent = []\n ID = []\n kb_arr = []\n for pair in pairs:\n x_seq.append(pair[0])\n y_seq.append(pair[1])\n ptr_seq.append(pair[2])\n gate_seq.append(pair[3])\n conv_seq.append(pair[4])\n ent.append(pair[5])\n ID.append(pair[6])\n kb_arr.append(pair[7])\n if(type):\n lang.index_words(pair[0])\n lang.index_words(pair[1], trg=True)\n \n dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=type,\n collate_fn=collate_fn)\n return data_loader\n\ndef prepare_data_seq(task,batch_size=100,shuffle=True):\n file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)\n file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)\n file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)\n if (int(task) != 6):\n file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)\n\n if int(task)!=6:\n ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))\n else:\n ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))\n\n pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)\n pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)\n pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)\n\n max_r_test_OOV = 0\n max_len_test_OOV = 0\n if (int(task) != 6):\n pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)\n \n max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1\n max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1\n lang = Lang()\n \n train = get_seq(pair_train,lang,batch_size,True,max_len)\n dev = get_seq(pair_dev,lang,batch_size,False,max_len)\n test = get_seq(pair_test,lang,batch_size,False,max_len)\n if (int(task) != 6):\n testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)\n else:\n testOOV = []\n \n logging.info(\"Read %s sentence pairs train\" % len(pair_train))\n logging.info(\"Read %s sentence pairs dev\" % len(pair_dev))\n logging.info(\"Read %s sentence pairs test\" % len(pair_test))\n if (int(task) != 6):\n logging.info(\"Read %s sentence pairs test\" % len(pair_test_OOV)) \n logging.info(\"Max len Input %s \" % max_len)\n logging.info(\"Vocab_size %s \" % lang.n_words)\n 
logging.info(\"USE_CUDA={}\".format(USE_CUDA))\n\n return train, dev, test, testOOV, lang, max_len, max_r"
] | [
[
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.Tensor"
]
] |
Ethan-Yang0101/Mini-DeepText-Project | [
"6ed70fae7d00610b942fb9b2526d11ebfd1b48f7"
] | [
"Mini-DeepText-2.0/train.py"
] | [
"\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom TextDataset import TextDataset\nfrom Model.BasicModel.TextCLRModel import TextCLRModel\nfrom Model.BasicModel.TextSLBModel import TextSLBModel\nfrom Model.BasicModel.TextNMTModel import TextNMTModel\nfrom Model.BasicModel.TextDSMModel import TextDSMModel\nfrom Model.Transformer.Transformer import Transformer\nfrom Vectorizer.CLRVectorizer import CLRVectorizer\nfrom Vectorizer.SLBVectorizer import SLBVectorizer\nfrom Vectorizer.NMTVectorizer import NMTVectorizer\nfrom Vectorizer.DSMVectorizer import DSMVectorizer\nfrom Utils.Data import read_json_dataset\nfrom ModelTrainer import ModelTrainer\nfrom Utils.Config import Config\nimport json\nimport sys\nimport os\n\n\ndef get_data_loaders(args, dataset):\n '''通过数据集创建用于训练,验证和测试的数据批生成器'''\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n if os.path.exists(args.vectorizer_file):\n parameters = {'dataset': dataset,\n 'split_ratio': args.split_ratio,\n 'max_seq_length': args.max_seq_length,\n 'task': args.task,\n 'vectorizer_file': args.vectorizer_file}\n dataset = TextDataset.dataset_load_vectorizer(**parameters)\n else:\n parameters = {'dataset': dataset,\n 'split_ratio': args.split_ratio,\n 'max_seq_length': args.max_seq_length,\n 'task': args.task,\n 'cutoff': args.cutoff}\n dataset = TextDataset.dataset_make_vectorizer(**parameters)\n dataset.save_vectorizer(args.vectorizer_file)\n dataset.set_split('train')\n train_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n dataset.set_split('val')\n val_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n dataset.set_split('test')\n test_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n data_loaders = (train_data_loader, val_data_loader, test_data_loader)\n return data_loaders\n\n\ndef get_task_model(args, vectorizer):\n '''根据任务类型获取用于训练的模型类型'''\n model = None\n if args.task == 'classification':\n if args.model_name == 'TextCLRModel':\n model = TextCLRModel(\n num_embeddings=len(vectorizer.source_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n num_classes=len(vectorizer.label_vocab),\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'labeling':\n if args.model_name == 'TextSLBModel':\n model = TextSLBModel(\n num_embeddings=len(vectorizer.source_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'matching':\n if args.model_name == 'TextDSMModel':\n model = TextDSMModel(\n num_embeddings1=len(vectorizer.source_vocab),\n num_embeddings2=len(vectorizer.target_vocab),\n embedding_dim=args.embedding_size,\n rnn_hidden_size=args.rnn_hidden_size,\n padding_idx=vectorizer.source_vocab.mask_index,\n batch_first=True)\n if args.task == 'translation':\n if args.model_name == 'Transformer':\n model = Transformer(\n source_vocab_size=len(vectorizer.source_vocab),\n target_vocab_size=len(vectorizer.target_vocab),\n source_embed_dim=args.source_embed_dim,\n target_embed_dim=args.target_embed_dim,\n encoder_n_heads=args.encoder_n_heads,\n decoder_n_heads=args.decoder_n_heads,\n encoder_hid_dim=args.encoder_hid_dim,\n decoder_hid_dim=args.decoder_hid_dim,\n 
encoder_n_layers=args.encoder_n_layers,\n decoder_n_layers=args.decoder_n_layers,\n encoder_max_seq_len=args.max_seq_length,\n decoder_max_seq_len=args.max_seq_length\n )\n if args.model_name == 'TextNMTModel':\n model = TextNMTModel(\n source_num_embeddings=len(vectorizer.source_vocab),\n source_embedding_size=args.source_embedding_size,\n target_num_embeddings=len(vectorizer.target_vocab),\n target_embedding_size=args.target_embedding_size,\n encoding_size=args.encoding_size)\n return model\n\n\ndef get_optimizer(args, model):\n '''获取想要使用的优化器'''\n if args.optimizer == 'adam':\n return optim.Adam(model.parameters(), lr=args.learning_rate)\n\n\ndef get_loss_func(args):\n '''根据任务类型获取损失函数'''\n if args.task == 'classification':\n return nn.CrossEntropyLoss()\n if args.task == 'matching':\n return nn.CrossEntropyLoss()\n if args.task == 'labeling':\n return sequence_loss\n if args.task == 'translation':\n return sequence_loss\n\n\ndef sequence_loss(pred, target, mask_index):\n '''用于计算序列模型的损失函数'''\n pred = pred.contiguous().view(-1, pred.size(2))\n target = target.contiguous().view(-1)\n return F.cross_entropy(pred, target, ignore_index=mask_index)\n\n\ndef get_vectorizer(args):\n '''根据任务获取矢量化器'''\n with open(args.vectorizer_file, \"r\") as fp:\n if args.task == 'classification':\n return CLRVectorizer.from_serializable(json.load(fp))\n if args.task == 'matching':\n return DSMVectorizer.from_serializable(json.load(fp))\n if args.task == 'labeling':\n return GENVectorizer.from_serializable(json.load(fp))\n if args.task == 'translation':\n return NMTVectorizer.from_serializable(json.load(fp))\n\n\nif __name__ == '__main__':\n # 获取配置文件信息\n config_filename = sys.argv[1]\n config = Config.from_config_json(config_filename)\n args = config.args\n # 获取数据集\n dataset = read_json_dataset(args.data_filepath, args.max_seq_length)\n # 获取数据批生成器\n data_loaders = get_data_loaders(args, dataset)\n # 获取模型\n vectorizer = get_vectorizer(args)\n model = get_task_model(args, vectorizer)\n # 获取优化器\n optimizer = get_optimizer(args, model)\n # 获取损失函数\n loss_func = get_loss_func(args)\n # 获取训练器\n model_trainer = ModelTrainer(\n args, data_loaders, model, optimizer, loss_func)\n # 训练模型\n model_trainer.train_val_test_model()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.cross_entropy"
]
] |
Rikorose/DeepFilterNet | [
"afe6bfb53efae70207e18df7ed372c2cfe337fee"
] | [
"DeepFilterNet/df/utils.py"
] | [
"import collections\nimport math\nimport os\nimport random\nimport subprocess\nfrom socket import gethostname\nfrom typing import Any, Dict, Set, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom loguru import logger\nfrom torch import Tensor\nfrom torch._six import string_classes\nfrom torch.autograd import Function\nfrom torch.types import Number\n\nfrom df.config import config\nfrom df.model import ModelParams\n\ntry:\n from torchaudio.functional import resample as ta_resample\nexcept ImportError:\n from torchaudio.compliance.kaldi import resample_waveform as ta_resample # type: ignore\n\n\ndef get_resample_params(method: str) -> Dict[str, Any]:\n params = {\n \"sinc_fast\": {\"resampling_method\": \"sinc_interpolation\", \"lowpass_filter_width\": 16},\n \"sinc_best\": {\"resampling_method\": \"sinc_interpolation\", \"lowpass_filter_width\": 64},\n \"kaiser_fast\": {\n \"resampling_method\": \"kaiser_window\",\n \"lowpass_filter_width\": 16,\n \"rolloff\": 0.85,\n \"beta\": 8.555504641634386,\n },\n \"kaiser_best\": {\n \"resampling_method\": \"kaiser_window\",\n \"lowpass_filter_width\": 16,\n \"rolloff\": 0.9475937167399596,\n \"beta\": 14.769656459379492,\n },\n }\n assert method in params.keys(), f\"method must be one of {list(params.keys())}\"\n return params[method]\n\n\ndef resample(audio: Tensor, orig_sr: int, new_sr: int, method=\"sinc_fast\"):\n params = get_resample_params(method)\n return ta_resample(audio, orig_sr, new_sr, **params)\n\n\ndef get_device():\n s = config(\"DEVICE\", default=\"\", section=\"train\")\n if s == \"\":\n if torch.cuda.is_available():\n DEVICE = torch.device(\"cuda:0\")\n else:\n DEVICE = torch.device(\"cpu\")\n else:\n DEVICE = torch.device(s)\n return DEVICE\n\n\ndef as_complex(x: Tensor):\n if torch.is_complex(x):\n return x\n if x.shape[-1] != 2:\n raise ValueError(f\"Last dimension need to be of length 2 (re + im), but got {x.shape}\")\n if x.stride(-1) != 1:\n x = x.contiguous()\n return torch.view_as_complex(x)\n\n\ndef as_real(x: Tensor):\n if torch.is_complex(x):\n return torch.view_as_real(x)\n return x\n\n\nclass angle_re_im(Function):\n \"\"\"Similar to torch.angle but robustify the gradient for zero magnitude.\"\"\"\n\n @staticmethod\n def forward(ctx, re: Tensor, im: Tensor):\n ctx.save_for_backward(re, im)\n return torch.atan2(im, re)\n\n @staticmethod\n def backward(ctx, grad: Tensor) -> Tuple[Tensor, Tensor]:\n re, im = ctx.saved_tensors\n grad_inv = grad / (re.square() + im.square()).clamp_min_(1e-10)\n return -im * grad_inv, re * grad_inv\n\n\nclass angle(Function):\n \"\"\"Similar to torch.angle but robustify the gradient for zero magnitude.\"\"\"\n\n @staticmethod\n def forward(ctx, x: Tensor):\n ctx.save_for_backward(x)\n return torch.atan2(x.imag, x.real)\n\n @staticmethod\n def backward(ctx, grad: Tensor):\n (x,) = ctx.saved_tensors\n grad_inv = grad / (x.real.square() + x.imag.square()).clamp_min_(1e-10)\n return torch.view_as_complex(torch.stack((-x.imag * grad_inv, x.real * grad_inv), dim=-1))\n\n\ndef check_finite_module(obj, name=\"Module\", _raise=True) -> Set[str]:\n out: Set[str] = set()\n if isinstance(obj, torch.nn.Module):\n for name, child in obj.named_children():\n out = out | check_finite_module(child, name)\n for name, param in obj.named_parameters():\n out = out | check_finite_module(param, name)\n for name, buf in obj.named_buffers():\n out = out | check_finite_module(buf, name)\n if _raise and len(out) > 0:\n raise ValueError(f\"{name} not finite during checkpoint writing including: {out}\")\n return 
out\n\n\ndef make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:\n \"\"\"Transforms Tensor to numpy.\n Args:\n x: An instance of torch tensor or caffe blob name\n\n Returns:\n numpy.array: Numpy array\n \"\"\"\n if isinstance(x, np.ndarray):\n return x\n if np.isscalar(x):\n return np.array([x])\n if isinstance(x, Tensor):\n return x.detach().cpu().numpy()\n raise NotImplementedError(\n \"Got {}, but numpy array, scalar, or torch tensor are expected.\".format(type(x))\n )\n\n\ndef get_norm_alpha(log: bool = True) -> float:\n p = ModelParams()\n a_ = _calculate_norm_alpha(sr=p.sr, hop_size=p.hop_size, tau=p.norm_tau)\n precision = 3\n a = 1.0\n while a >= 1.0:\n a = round(a_, precision)\n precision += 1\n if log:\n logger.info(f\"Running with normalization window alpha = '{a}'\")\n return a\n\n\ndef _calculate_norm_alpha(sr: int, hop_size: int, tau: float):\n \"\"\"Exponential decay factor alpha for a given tau (decay window size [s]).\"\"\"\n dt = hop_size / sr\n return math.exp(-dt / tau)\n\n\ndef check_manual_seed(seed: int = None):\n \"\"\"If manual seed is not specified, choose a random one and communicate it to the user.\"\"\"\n seed = seed or random.randint(1, 10000)\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n return seed\n\n\ndef get_git_root():\n git_local_dir = os.path.dirname(os.path.abspath(__file__))\n args = [\"git\", \"-C\", git_local_dir, \"rev-parse\", \"--show-toplevel\"]\n return subprocess.check_output(args).strip().decode()\n\n\ndef get_commit_hash():\n \"\"\"Returns the current git commit.\"\"\"\n try:\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n commit = subprocess.check_output(args).strip().decode()\n except subprocess.CalledProcessError:\n # probably not in git repo\n commit = None\n return commit\n\n\ndef get_host() -> str:\n return gethostname()\n\n\ndef get_branch_name():\n try:\n git_dir = os.path.dirname(os.path.abspath(__file__))\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--abbrev-ref\", \"HEAD\"]\n branch = subprocess.check_output(args).strip().decode()\n except subprocess.CalledProcessError:\n # probably not in git repo\n branch = None\n return branch\n\n\n# from pytorch/ignite:\ndef apply_to_tensor(input_, func):\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\"\"\"\n if isinstance(input_, torch.nn.Module):\n return [apply_to_tensor(c, func) for c in input_.children()]\n elif isinstance(input_, torch.nn.Parameter):\n return func(input_.data)\n elif isinstance(input_, Tensor):\n return func(input_)\n elif isinstance(input_, string_classes):\n return input_\n elif isinstance(input_, collections.Mapping):\n return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}\n elif isinstance(input_, collections.Iterable):\n return [apply_to_tensor(sample, func) for sample in input_]\n elif input_ is None:\n return input_\n else:\n return input_\n\n\ndef detach_hidden(hidden: Any) -> Any:\n \"\"\"Cut backpropagation graph.\n Auxillary function to cut the backpropagation graph by detaching the hidden\n vector.\n \"\"\"\n return apply_to_tensor(hidden, Tensor.detach)\n"
] | [
[
"torch.stack",
"torch.view_as_complex",
"torch.view_as_real",
"torch.manual_seed",
"numpy.random.seed",
"torch.device",
"torch.cuda.is_available",
"numpy.array",
"numpy.isscalar",
"torch.is_complex",
"torch.atan2"
]
] |
iPieter/kiwi | [
"76b66872fce68873809a0dea112e2ed552ae5b63",
"76b66872fce68873809a0dea112e2ed552ae5b63"
] | [
"examples/sklearn_logistic_regression/train.py",
"examples/hyperparam/search_random.py"
] | [
"import numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\nimport kiwi\nimport kiwi.sklearn\n\nif __name__ == \"__main__\":\n X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1, 1, 0])\n lr = LogisticRegression()\n lr.fit(X, y)\n score = lr.score(X, y)\n print(\"Score: %s\" % score)\n kiwi.log_metric(\"score\", score)\n kiwi.sklearn.log_model(lr, \"model\")\n print(\"Model saved in run %s\" % kiwi.active_run().info.run_uuid)\n",
"\"\"\"\nExample of hyperparameter search in MLflow using simple random search.\n\nThe run method will evaluate random combinations of parameters in a new MLflow run.\n\nThe runs are evaluated based on validation set loss. Test set score is calculated to verify the\nresults.\n\nSeveral runs can be run in parallel.\n\"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport click\n\nimport numpy as np\n\nimport kiwi\nimport kiwi.sklearn\nimport kiwi.tracking\nimport kiwi.projects\nfrom kiwi.tracking.client import MlflowClient\n\n_inf = np.finfo(np.float64).max\n\n\[email protected](help=\"Perform grid search over train (main entry point).\")\[email protected](\"--max-runs\", type=click.INT, default=32,\n help=\"Maximum number of runs to evaluate.\")\[email protected](\"--max-p\", type=click.INT, default=1,\n help=\"Maximum number of parallel runs.\")\[email protected](\"--epochs\", type=click.INT, default=32,\n help=\"Number of epochs\")\[email protected](\"--metric\", type=click.STRING, default=\"rmse\",\n help=\"Metric to optimize on.\")\[email protected](\"--seed\", type=click.INT, default=97531,\n help=\"Seed for the random generator\")\[email protected](\"training_data\")\ndef run(training_data, max_runs, max_p, epochs, metric, seed):\n train_metric = \"train_{}\".format(metric)\n val_metric = \"val_{}\".format(metric)\n test_metric = \"test_{}\".format(metric)\n np.random.seed(seed)\n tracking_client = kiwi.tracking.MlflowClient()\n\n def new_eval(nepochs,\n experiment_id,\n null_train_loss=_inf,\n null_val_loss=_inf,\n null_test_loss=_inf):\n def eval(parms):\n lr, momentum = parms\n with kiwi.start_run(nested=True) as child_run:\n p = kiwi.projects.run(\n run_id=child_run.info.run_id,\n uri=\".\",\n entry_point=\"train\",\n parameters={\n \"training_data\": training_data,\n \"epochs\": str(nepochs),\n \"learning_rate\": str(lr),\n \"momentum\": str(momentum),\n \"seed\": str(seed)},\n experiment_id=experiment_id,\n synchronous=False)\n succeeded = p.wait()\n if succeeded:\n training_run = tracking_client.get_run(p.run_id)\n metrics = training_run.data.metrics\n # cap the loss at the loss of the null model\n train_loss = min(null_train_loss, metrics[train_metric])\n val_loss = min(null_val_loss, metrics[val_metric])\n test_loss = min(null_test_loss, metrics[test_metric])\n else:\n # run failed => return null loss\n tracking_client.set_terminated(p.run_id, \"FAILED\")\n train_loss = null_train_loss\n val_loss = null_val_loss\n test_loss = null_test_loss\n kiwi.log_metrics({\n \"train_{}\".format(metric): train_loss,\n \"val_{}\".format(metric): val_loss,\n \"test_{}\".format(metric): test_loss\n })\n return p.run_id, train_loss, val_loss, test_loss\n\n return eval\n\n with kiwi.start_run() as run:\n experiment_id = run.info.experiment_id\n _, null_train_loss, null_val_loss, null_test_loss = new_eval(0, experiment_id)((0, 0))\n runs = [(np.random.uniform(1e-5, 1e-1), np.random.uniform(0, 1.0)) for _ in range(max_runs)]\n with ThreadPoolExecutor(max_workers=max_p) as executor:\n _ = executor.map(new_eval(epochs,\n experiment_id,\n null_train_loss,\n null_val_loss,\n null_test_loss),\n runs)\n\n # find the best run, log its metrics as the final metrics of this run.\n client = MlflowClient()\n runs = client.search_runs([experiment_id],\n \"tags.mlflow.parentRunId = '{run_id}' \".format(\n run_id=run.info.run_id\n ))\n best_val_train = _inf\n best_val_valid = _inf\n best_val_test = _inf\n best_run = None\n for r in runs:\n if r.data.metrics[\"val_rmse\"] < best_val_valid:\n 
best_run = r\n best_val_train = r.data.metrics[\"train_rmse\"]\n best_val_valid = r.data.metrics[\"val_rmse\"]\n best_val_test = r.data.metrics[\"test_rmse\"]\n kiwi.set_tag(\"best_run\", best_run.info.run_id)\n kiwi.log_metrics({\n \"train_{}\".format(metric): best_val_train,\n \"val_{}\".format(metric): best_val_valid,\n \"test_{}\".format(metric): best_val_test\n })\n\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"numpy.array",
"sklearn.linear_model.LogisticRegression"
],
[
"numpy.random.uniform",
"numpy.finfo",
"numpy.random.seed"
]
] |
cf-vrgl/pandas | [
"0b68d87a4438a13f14a2ed5af2e432df02eb0b2c"
] | [
"pandas/core/computation/pytables.py"
] | [
"\"\"\" manage PyTables query interface via Expressions \"\"\"\nfrom __future__ import annotations\n\nimport ast\nfrom functools import partial\nfrom typing import Any\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n Timedelta,\n Timestamp,\n)\nfrom pandas.compat.chainmap import DeepChainMap\n\nfrom pandas.core.dtypes.common import is_list_like\n\nimport pandas.core.common as com\nfrom pandas.core.computation import (\n expr,\n ops,\n scope as _scope,\n)\nfrom pandas.core.computation.common import ensure_decoded\nfrom pandas.core.computation.expr import BaseExprVisitor\nfrom pandas.core.computation.ops import (\n UndefinedVariableError,\n is_term,\n)\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.base import Index\n\nfrom pandas.io.formats.printing import (\n pprint_thing,\n pprint_thing_encoded,\n)\n\n\nclass PyTablesScope(_scope.Scope):\n __slots__ = (\"queryables\",)\n\n queryables: dict[str, Any]\n\n def __init__(\n self,\n level: int,\n global_dict=None,\n local_dict=None,\n queryables: dict[str, Any] | None = None,\n ):\n super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)\n self.queryables = queryables or {}\n\n\nclass Term(ops.Term):\n env: PyTablesScope\n\n def __new__(cls, name, env, side=None, encoding=None):\n if isinstance(name, str):\n klass = cls\n else:\n klass = Constant\n return object.__new__(klass)\n\n def __init__(self, name, env: PyTablesScope, side=None, encoding=None):\n super().__init__(name, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n # must be a queryables\n if self.side == \"left\":\n # Note: The behavior of __new__ ensures that self.name is a str here\n if self.name not in self.env.queryables:\n raise NameError(f\"name {repr(self.name)} is not defined\")\n return self.name\n\n # resolve the rhs (and allow it to be None)\n try:\n return self.env.resolve(self.name, is_local=False)\n except UndefinedVariableError:\n return self.name\n\n # read-only property overwriting read/write property\n @property # type: ignore[misc]\n def value(self):\n return self._value\n\n\nclass Constant(Term):\n def __init__(self, value, env: PyTablesScope, side=None, encoding=None):\n assert isinstance(env, PyTablesScope), type(env)\n super().__init__(value, env, side=side, encoding=encoding)\n\n def _resolve_name(self):\n return self._name\n\n\nclass BinOp(ops.BinOp):\n\n _max_selectors = 31\n\n op: str\n queryables: dict[str, Any]\n condition: str | None\n\n def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):\n super().__init__(op, lhs, rhs)\n self.queryables = queryables\n self.encoding = encoding\n self.condition = None\n\n def _disallow_scalar_only_bool_ops(self):\n pass\n\n def prune(self, klass):\n def pr(left, right):\n \"\"\"create and return a new specialized BinOp from myself\"\"\"\n if left is None:\n return right\n elif right is None:\n return left\n\n k = klass\n if isinstance(left, ConditionBinOp):\n if isinstance(right, ConditionBinOp):\n k = JointConditionBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n elif isinstance(left, FilterBinOp):\n if isinstance(right, FilterBinOp):\n k = JointFilterBinOp\n elif isinstance(left, k):\n return left\n elif isinstance(right, k):\n return right\n\n return k(\n self.op, left, right, queryables=self.queryables, encoding=self.encoding\n ).evaluate()\n\n left, right = self.lhs, self.rhs\n\n if is_term(left) and is_term(right):\n res = pr(left.value, right.value)\n elif not 
is_term(left) and is_term(right):\n res = pr(left.prune(klass), right.value)\n elif is_term(left) and not is_term(right):\n res = pr(left.value, right.prune(klass))\n elif not (is_term(left) or is_term(right)):\n res = pr(left.prune(klass), right.prune(klass))\n\n return res\n\n def conform(self, rhs):\n \"\"\"inplace conform rhs\"\"\"\n if not is_list_like(rhs):\n rhs = [rhs]\n if isinstance(rhs, np.ndarray):\n rhs = rhs.ravel()\n return rhs\n\n @property\n def is_valid(self) -> bool:\n \"\"\"return True if this is a valid field\"\"\"\n return self.lhs in self.queryables\n\n @property\n def is_in_table(self) -> bool:\n \"\"\"\n return True if this is a valid column name for generation (e.g. an\n actual column in the table)\n \"\"\"\n return self.queryables.get(self.lhs) is not None\n\n @property\n def kind(self):\n \"\"\"the kind of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"kind\", None)\n\n @property\n def meta(self):\n \"\"\"the meta of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"meta\", None)\n\n @property\n def metadata(self):\n \"\"\"the metadata of my field\"\"\"\n return getattr(self.queryables.get(self.lhs), \"metadata\", None)\n\n def generate(self, v) -> str:\n \"\"\"create and return the op string for this TermValue\"\"\"\n val = v.tostring(self.encoding)\n return f\"({self.lhs} {self.op} {val})\"\n\n def convert_value(self, v) -> TermValue:\n \"\"\"\n convert the expression that is in the term to something that is\n accepted by pytables\n \"\"\"\n\n def stringify(value):\n if self.encoding is not None:\n return pprint_thing_encoded(value, encoding=self.encoding)\n return pprint_thing(value)\n\n kind = ensure_decoded(self.kind)\n meta = ensure_decoded(self.meta)\n if kind == \"datetime64\" or kind == \"datetime\":\n if isinstance(v, (int, float)):\n v = stringify(v)\n v = ensure_decoded(v)\n v = Timestamp(v)\n if v.tz is not None:\n v = v.tz_convert(\"UTC\")\n return TermValue(v, v.value, kind)\n elif kind == \"timedelta64\" or kind == \"timedelta\":\n if isinstance(v, str):\n v = Timedelta(v).value\n else:\n v = Timedelta(v, unit=\"s\").value\n return TermValue(int(v), v, kind)\n elif meta == \"category\":\n metadata = extract_array(self.metadata, extract_numpy=True)\n if v not in metadata:\n result = -1\n else:\n # error: Incompatible types in assignment (expression has type\n # \"Union[Any, ndarray]\", variable has type \"int\")\n result = metadata.searchsorted( # type: ignore[assignment]\n v, side=\"left\"\n )\n return TermValue(result, result, \"integer\")\n elif kind == \"integer\":\n v = int(float(v))\n return TermValue(v, v, kind)\n elif kind == \"float\":\n v = float(v)\n return TermValue(v, v, kind)\n elif kind == \"bool\":\n if isinstance(v, str):\n v = not v.strip().lower() in [\n \"false\",\n \"f\",\n \"no\",\n \"n\",\n \"none\",\n \"0\",\n \"[]\",\n \"{}\",\n \"\",\n ]\n else:\n v = bool(v)\n return TermValue(v, v, kind)\n elif isinstance(v, str):\n # string quoting\n return TermValue(v, stringify(v), \"string\")\n else:\n raise TypeError(f\"Cannot compare {v} of type {type(v)} to {kind} column\")\n\n def convert_values(self):\n pass\n\n\nclass FilterBinOp(BinOp):\n filter: tuple[Any, Any, Index] | None = None\n\n def __repr__(self) -> str:\n if self.filter is None:\n return \"Filter: Not Initialized\"\n return pprint_thing(f\"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]\")\n\n def invert(self):\n \"\"\"invert the filter\"\"\"\n if self.filter is not None:\n self.filter = (\n self.filter[0],\n 
self.generate_filter_op(invert=True),\n self.filter[2],\n )\n return self\n\n def format(self):\n \"\"\"return the actual filter format\"\"\"\n return [self.filter]\n\n def evaluate(self):\n\n if not self.is_valid:\n raise ValueError(f\"query term is not valid [{self}]\")\n\n rhs = self.conform(self.rhs)\n values = list(rhs)\n\n if self.is_in_table:\n\n # if too many values to create the expression, use a filter instead\n if self.op in [\"==\", \"!=\"] and len(values) > self._max_selectors:\n\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n return self\n return None\n\n # equality conditions\n if self.op in [\"==\", \"!=\"]:\n\n filter_op = self.generate_filter_op()\n self.filter = (self.lhs, filter_op, Index(values))\n\n else:\n raise TypeError(\n f\"passing a filterable condition to a non-table indexer [{self}]\"\n )\n\n return self\n\n def generate_filter_op(self, invert: bool = False):\n if (self.op == \"!=\" and not invert) or (self.op == \"==\" and invert):\n return lambda axis, vals: ~axis.isin(vals)\n else:\n return lambda axis, vals: axis.isin(vals)\n\n\nclass JointFilterBinOp(FilterBinOp):\n def format(self):\n raise NotImplementedError(\"unable to collapse Joint Filters\")\n\n def evaluate(self):\n return self\n\n\nclass ConditionBinOp(BinOp):\n def __repr__(self) -> str:\n return pprint_thing(f\"[Condition : [{self.condition}]]\")\n\n def invert(self):\n \"\"\"invert the condition\"\"\"\n # if self.condition is not None:\n # self.condition = \"~(%s)\" % self.condition\n # return self\n raise NotImplementedError(\n \"cannot use an invert condition when passing to numexpr\"\n )\n\n def format(self):\n \"\"\"return the actual ne format\"\"\"\n return self.condition\n\n def evaluate(self):\n\n if not self.is_valid:\n raise ValueError(f\"query term is not valid [{self}]\")\n\n # convert values if we are in the table\n if not self.is_in_table:\n return None\n\n rhs = self.conform(self.rhs)\n values = [self.convert_value(v) for v in rhs]\n\n # equality conditions\n if self.op in [\"==\", \"!=\"]:\n\n # too many values to create the expression?\n if len(values) <= self._max_selectors:\n vs = [self.generate(v) for v in values]\n self.condition = f\"({' | '.join(vs)})\"\n\n # use a filter after reading\n else:\n return None\n else:\n self.condition = self.generate(values[0])\n\n return self\n\n\nclass JointConditionBinOp(ConditionBinOp):\n def evaluate(self):\n self.condition = f\"({self.lhs.condition} {self.op} {self.rhs.condition})\"\n return self\n\n\nclass UnaryOp(ops.UnaryOp):\n def prune(self, klass):\n\n if self.op != \"~\":\n raise NotImplementedError(\"UnaryOp only support invert type ops\")\n\n operand = self.operand\n operand = operand.prune(klass)\n\n if operand is not None and (\n issubclass(klass, ConditionBinOp)\n and operand.condition is not None\n or not issubclass(klass, ConditionBinOp)\n and issubclass(klass, FilterBinOp)\n and operand.filter is not None\n ):\n return operand.invert()\n return None\n\n\nclass PyTablesExprVisitor(BaseExprVisitor):\n const_type = Constant\n term_type = Term\n\n def __init__(self, env, engine, parser, **kwargs):\n super().__init__(env, engine, parser)\n for bin_op in self.binary_ops:\n bin_node = self.binary_op_nodes_map[bin_op]\n setattr(\n self,\n f\"visit_{bin_node}\",\n lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),\n )\n\n def visit_UnaryOp(self, node, **kwargs):\n if isinstance(node.op, (ast.Not, ast.Invert)):\n return UnaryOp(\"~\", self.visit(node.operand))\n elif 
isinstance(node.op, ast.USub):\n return self.const_type(-self.visit(node.operand).value, self.env)\n elif isinstance(node.op, ast.UAdd):\n raise NotImplementedError(\"Unary addition not supported\")\n\n def visit_Index(self, node, **kwargs):\n return self.visit(node.value).value\n\n def visit_Assign(self, node, **kwargs):\n cmpr = ast.Compare(\n ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]\n )\n return self.visit(cmpr)\n\n def visit_Subscript(self, node, **kwargs):\n # only allow simple subscripts\n\n value = self.visit(node.value)\n slobj = self.visit(node.slice)\n try:\n value = value.value\n except AttributeError:\n pass\n\n if isinstance(slobj, Term):\n # In py39 np.ndarray lookups with Term containing int raise\n slobj = slobj.value\n\n try:\n return self.const_type(value[slobj], self.env)\n except TypeError as err:\n raise ValueError(\n f\"cannot subscript {repr(value)} with {repr(slobj)}\"\n ) from err\n\n def visit_Attribute(self, node, **kwargs):\n attr = node.attr\n value = node.value\n\n ctx = type(node.ctx)\n if ctx == ast.Load:\n # resolve the value\n resolved = self.visit(value)\n\n # try to get the value to see if we are another expression\n try:\n resolved = resolved.value\n except (AttributeError):\n pass\n\n try:\n return self.term_type(getattr(resolved, attr), self.env)\n except AttributeError:\n\n # something like datetime.datetime where scope is overridden\n if isinstance(value, ast.Name) and value.id == attr:\n return resolved\n\n raise ValueError(f\"Invalid Attribute context {ctx.__name__}\")\n\n def translate_In(self, op):\n return ast.Eq() if isinstance(op, ast.In) else op\n\n def _rewrite_membership_op(self, node, left, right):\n return self.visit(node.op), node.op, left, right\n\n\ndef _validate_where(w):\n \"\"\"\n Validate that the where statement is of the right type.\n\n The type may either be String, Expr, or list-like of Exprs.\n\n Parameters\n ----------\n w : String term expression, Expr, or list-like of Exprs.\n\n Returns\n -------\n where : The original where clause if the check was successful.\n\n Raises\n ------\n TypeError : An invalid data type was passed in for w (e.g. 
dict).\n \"\"\"\n if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):\n raise TypeError(\n \"where must be passed as a string, PyTablesExpr, \"\n \"or list-like of PyTablesExpr\"\n )\n\n return w\n\n\nclass PyTablesExpr(expr.Expr):\n \"\"\"\n Hold a pytables-like expression, comprised of possibly multiple 'terms'.\n\n Parameters\n ----------\n where : string term expression, PyTablesExpr, or list-like of PyTablesExprs\n queryables : a \"kinds\" map (dict of column name -> kind), or None if column\n is non-indexable\n encoding : an encoding that will encode the query terms\n\n Returns\n -------\n a PyTablesExpr object\n\n Examples\n --------\n 'index>=date'\n \"columns=['A', 'D']\"\n 'columns=A'\n 'columns==A'\n \"~(columns=['A','B'])\"\n 'index>df.index[3] & string=\"bar\"'\n '(index>df.index[3] & index<=df.index[6]) | string=\"bar\"'\n \"ts>=Timestamp('2012-02-01')\"\n \"major_axis>=20130101\"\n \"\"\"\n\n _visitor: PyTablesExprVisitor | None\n env: PyTablesScope\n expr: str\n\n def __init__(\n self,\n where,\n queryables: dict[str, Any] | None = None,\n encoding=None,\n scope_level: int = 0,\n ):\n\n where = _validate_where(where)\n\n self.encoding = encoding\n self.condition = None\n self.filter = None\n self.terms = None\n self._visitor = None\n\n # capture the environment if needed\n local_dict: DeepChainMap[Any, Any] = DeepChainMap()\n\n if isinstance(where, PyTablesExpr):\n local_dict = where.env.scope\n _where = where.expr\n\n elif is_list_like(where):\n where = list(where)\n for idx, w in enumerate(where):\n if isinstance(w, PyTablesExpr):\n local_dict = w.env.scope\n else:\n w = _validate_where(w)\n where[idx] = w\n _where = \" & \".join(f\"({w})\" for w in com.flatten(where))\n else:\n # _validate_where ensures we otherwise have a string\n _where = where\n\n self.expr = _where\n self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)\n\n if queryables is not None and isinstance(self.expr, str):\n self.env.queryables.update(queryables)\n self._visitor = PyTablesExprVisitor(\n self.env,\n queryables=queryables,\n parser=\"pytables\",\n engine=\"pytables\",\n encoding=encoding,\n )\n self.terms = self.parse()\n\n def __repr__(self) -> str:\n if self.terms is not None:\n return pprint_thing(self.terms)\n return pprint_thing(self.expr)\n\n def evaluate(self):\n \"\"\"create and return the numexpr condition and filter\"\"\"\n try:\n self.condition = self.terms.prune(ConditionBinOp)\n except AttributeError as err:\n raise ValueError(\n f\"cannot process expression [{self.expr}], [{self}] \"\n \"is not a valid condition\"\n ) from err\n try:\n self.filter = self.terms.prune(FilterBinOp)\n except AttributeError as err:\n raise ValueError(\n f\"cannot process expression [{self.expr}], [{self}] \"\n \"is not a valid filter\"\n ) from err\n\n return self.condition, self.filter\n\n\nclass TermValue:\n \"\"\"hold a term value the we use to construct a condition/filter\"\"\"\n\n def __init__(self, value, converted, kind: str):\n assert isinstance(kind, str), kind\n self.value = value\n self.converted = converted\n self.kind = kind\n\n def tostring(self, encoding) -> str:\n \"\"\"quote the string if not encoded else encode and return\"\"\"\n if self.kind == \"string\":\n if encoding is not None:\n return str(self.converted)\n return f'\"{self.converted}\"'\n elif self.kind == \"float\":\n # python 2 str(float) is not always\n # round-trippable so use repr()\n return repr(self.converted)\n return str(self.converted)\n\n\ndef maybe_expression(s) -> bool:\n \"\"\"loose 
checking if s is a pytables-acceptable expression\"\"\"\n if not isinstance(s, str):\n return False\n ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + (\"=\",)\n\n # make sure we have an op at least\n return any(op in s for op in ops)\n"
] | [
[
"pandas.core.indexes.base.Index",
"pandas.compat.chainmap.DeepChainMap",
"pandas.core.computation.common.ensure_decoded",
"pandas.io.formats.printing.pprint_thing_encoded",
"pandas.io.formats.printing.pprint_thing",
"pandas._libs.tslibs.Timestamp",
"pandas.core.computation.ops.is_term",
"pandas._libs.tslibs.Timedelta",
"pandas.core.common.flatten",
"pandas.core.construction.extract_array",
"pandas.core.dtypes.common.is_list_like"
]
] |
goodfree/ActorCloud | [
"9c34b371c23464981323ef9865d9913bde1fe09c"
] | [
"server/app/services/tasks_scheduler/async_tasks/app/excels/devices_import.py"
] | [
"import json\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import Dict, AnyStr\n\nimport pandas as pd\n\nfrom actor_libs.database.async_db import db\nfrom actor_libs.tasks.backend import update_task\nfrom actor_libs.tasks.exceptions import TaskException\nfrom actor_libs.utils import generate_uuid\nfrom ._utils import pg_to_excel\nfrom ._utils import read_excel\nfrom .multi_language import (\n ImportStatus, STATUS_MESSAGE, IMPORT_RENAME_ZH, IMPORT_ERROR_RENAME\n)\nfrom .sql_statements import (\n device_import_sql, dict_code_sql,\n query_tenant_devices_limit_sql,\n)\nfrom .validate import validates_schema\nfrom ..config import project_config\n\n\n__all__ = ['devices_import_task']\n\n\nlogger = logging.getLogger(__name__)\n\n\nasync def devices_import_task(request_dict):\n \"\"\"\n {'taskID', 'language', 'filePath', 'tenantID', 'userIntID'}\n \"\"\"\n\n task_id = request_dict['taskID']\n await _update_task_progress(\n task_id, status=2, progress=10,\n import_status=ImportStatus.UPLOADED\n )\n dict_code = await get_dict_code(request_dict['language'])\n import_records = await read_devices_excels(\n request_dict, dict_code\n )\n if not import_records:\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=15, import_status=ImportStatus.FAILED\n )\n raise TaskException(code=500, error_code='FAILED')\n correct_records, error_records = await handle_import_records(\n import_records, request_dict\n )\n correct_num, error_nums = len(correct_records), len(error_records)\n result_info = {\n 'success': correct_num,\n 'failed': error_nums\n }\n if correct_num > 0:\n await _import_correct_rows(correct_records, correct_num, request_dict)\n if error_records:\n try:\n export_path = await _export_error_rows(\n error_records, dict_code, request_dict\n )\n result_info['excelPath'] = export_path\n except Exception as e:\n logger.error(f\"error_records: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=3,\n progress=100, import_status=ImportStatus.COMPLETED,\n result=result_info,\n )\n\n\nasync def get_dict_code(language: AnyStr) -> Dict:\n dict_code = {}\n query_dict_code = await db.fetch_many(\n dict_code_sql.format(language=language)\n )\n for item in query_dict_code:\n # {code:{label:value}...}\n dict_code[item[0]] = dict(zip(item[2], item[1]))\n return dict_code\n\n\nasync def read_devices_excels(request_dict: Dict, dict_code):\n try:\n rename_dict = IMPORT_RENAME_ZH if request_dict['language'] != 'en' else None\n data_frame = await read_excel(\n request_dict['filePath'], rename_dict=rename_dict,\n replace_dict=dict_code\n )\n data_frame = await _handle_data_frame(data_frame)\n import_records = data_frame.to_dict('records')\n await _update_task_progress(\n request_dict['taskID'], status=2,\n progress=30, import_status=ImportStatus.READING\n )\n except Exception as e:\n logger.error(f\"read_devices_excels: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=35, import_status=ImportStatus.TEMPLATE_ERROR\n )\n raise TaskException(code=500, error_code='TEMPLATE_ERROR')\n return import_records\n\n\nasync def _handle_data_frame(data_frame):\n cover_float = ['longitude', 'latitude']\n data_frame[cover_float] = data_frame[cover_float].astype(float)\n # nan -> None\n data_frame = data_frame.where((pd.notnull(data_frame)), None)\n return data_frame\n\n\nasync def handle_import_records(import_records, request_dict):\n # use schema to validate imported data\n\n correct_records = []\n 
correct_record_append = correct_records.append\n error_records = []\n error_record_append = error_records.append\n try:\n validated_result = await validates_schema(\n import_records, request_dict\n )\n await _update_task_progress(\n request_dict['taskID'], status=2, progress=50,\n import_status=ImportStatus.VALIDATING\n )\n except Exception as e:\n logger.error(f\"validates_schema: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4, progress=55,\n import_status=ImportStatus.ABNORMAL\n )\n raise TaskException(code=500, error_code='ABNORMAL')\n rows_error_msg, devices_attr_info = validated_result\n products_info = devices_attr_info['products_info']\n gateways_info = devices_attr_info['gateways_info']\n\n for row, record in enumerate(import_records):\n if rows_error_msg.get(row):\n record.update(rows_error_msg[row])\n error_record_append(record)\n else:\n product_name = record['product']\n gateway_name = record['gateway']\n if products_info.get(product_name):\n record['productID'] = products_info[product_name]['productID']\n record['cloudProtocol'] = products_info[product_name]['cloudProtocol']\n if gateways_info.get(gateway_name):\n record['gateway'] = gateways_info[gateway_name]['id']\n record = await set_device_default_value(record)\n correct_record_append(record)\n return correct_records, error_records\n\n\nasync def _import_correct_rows(correct_records, correct_num, request_dict):\n is_exceed_limit = await _check_devices_limit(correct_num, request_dict)\n if is_exceed_limit:\n await _update_task_progress(\n request_dict['taskID'], status=4, progress=70,\n import_status=ImportStatus.LIMITED\n )\n raise TaskException(code=500, error_code='LIMITED')\n try:\n await _insert_correct_rows(correct_records, request_dict)\n await _update_task_progress(\n request_dict['taskID'], status=2,\n progress=80, import_status=ImportStatus.IMPORTING\n )\n except Exception as e:\n logger.error(f\"_import_correct_rows: {e}\")\n await _update_task_progress(\n request_dict['taskID'], status=4,\n progress=85, import_status=ImportStatus.FAILED\n )\n raise TaskException(code=500, error_code='FAILED')\n\n\nasync def _check_devices_limit(correct_num, request_dict) -> bool:\n \"\"\"\n Check if the device limit is exceeded\n :return True if exceed limit otherwise False\n \"\"\"\n\n check_status = False\n query_sql = query_tenant_devices_limit_sql.format(\n tenantID=request_dict['tenantID']\n )\n query_result = await db.fetch_row(query_sql)\n if query_result:\n device_sum, devices_limit = query_result\n if device_sum + correct_num > devices_limit:\n check_status = True\n return check_status\n\n\nasync def _insert_correct_rows(correct_records, request_dict):\n default_columns = [\n \"createAt\", \"deviceName\", \"deviceType\", \"productID\",\n \"authType\", \"upLinkNetwork\", \"deviceID\", \"deviceUsername\", \"token\",\n \"location\", \"latitude\", \"longitude\",\n \"manufacturer\", \"serialNumber\", \"softVersion\", \"hardwareVersion\",\n \"deviceConsoleIP\", \"deviceConsoleUsername\", \"deviceConsolePort\",\n \"mac\", \"upLinkSystem\", \"gateway\", \"parentDevice\",\n \"loraData\", \"lwm2mData\", \"userIntID\", \"tenantID\"\n ]\n create_at = datetime.now()\n async with db.pool.acquire() as conn:\n async with conn.transaction():\n for record in correct_records:\n record['createAt'] = create_at\n record['userIntID'] = request_dict['userIntID']\n record['tenantID'] = request_dict['tenantID']\n miss_columns = set(default_columns) - set(record.keys())\n record.update({c: None for c in miss_columns})\n 
execute_sql = device_import_sql.format(**record)\n execute_sql = execute_sql.replace(\"'None'\", \"NULL\")\n execute_sql = execute_sql.replace(\"'NULL'\", \"NULL\")\n await conn.execute(execute_sql)\n\n\nasync def _export_error_rows(errors_rows, dict_code, request_dict):\n \"\"\" Export processing failure data to excel \"\"\"\n\n column_sort = list(IMPORT_ERROR_RENAME.keys())\n error_dict_code = defaultdict(dict)\n for code, code_value in dict_code.items():\n for code_k, code_v in code_value.items():\n error_dict_code[code][code_v] = code_k\n data_frame = pd.DataFrame(errors_rows)\n data_frame = data_frame[column_sort].replace(error_dict_code)\n if request_dict['language'] != 'en':\n data_frame = data_frame.rename(columns=IMPORT_ERROR_RENAME)\n state_dict = await pg_to_excel(\n export_path=project_config.get('EXPORT_EXCEL_PATH'),\n table_name='ErrorImportDevicesW5',\n export_data=data_frame,\n tenant_uid=request_dict['tenantID'])\n export_path = state_dict.get('excelPath')\n return export_path\n\n\nasync def set_device_default_value(device_info):\n if device_info.get('upLinkSystem') != 3:\n device_info['gateway'] = None\n if device_info.get('upLinkSystem') == 3 and not device_info.get('gateway'):\n device_info['upLinkSystem'] = 1\n device_info['gateway'] = None\n if device_info.get('cloudProtocol') == 3:\n # lwm2m protocol\n if device_info.get('deviceID'):\n imei = device_info['deviceID']\n else:\n imei = generate_uuid(size=15)\n device_info['deviceID'] = imei\n lwm2m_data = {\n 'autoSub': 0,\n 'IMEI': imei,\n 'IMSI': imei\n }\n device_info['lwm2mData'] = json.dumps(lwm2m_data)\n if not device_info.get('deviceID'):\n device_info['deviceID'] = generate_uuid()\n if not device_info.get('deviceUsername'):\n device_info['deviceUsername'] = generate_uuid()\n if not device_info.get('token'):\n device_info['token'] = device_info['deviceUsername']\n if not device_info.get('token'):\n device_info['token'] = device_info['deviceUsername']\n device_info['upLinkNetwork'] = 1\n device_info['deviceType'] = 1 # end_devices\n return device_info\n\n\nasync def _update_task_progress(task_id,\n *,\n status=None,\n progress=None,\n import_status=None,\n result=None):\n if not result:\n result = {}\n result['message'] = STATUS_MESSAGE.get(import_status)\n result['code'] = import_status.value\n update_dict = {\n 'status': status,\n 'progress': progress,\n 'result': result,\n 'taskID': task_id\n }\n await update_task(task_id, update_dict)\n return result\n"
] | [
[
"pandas.notnull",
"pandas.DataFrame"
]
] |
micka59200/Python-Baseball | [
"dda463b1ba49e70dab676d1d3e57edc8238d0df6"
] | [
"stats/defense.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom frames import games, info, events\n\nplays = games.query(\"type == 'play' & event != 'NP'\")\nplays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']\n\npa = plays.loc[plays['player'].shift() != plays['player'], ['year', 'game_id', 'inning', 'team', 'player']]\npa = pa.groupby(['year', 'game_id', 'team']).size().reset_index(name='PA')\n\nevents = events.set_index(['year', 'game_id', 'team', 'event_type'])\nevents = events.unstack().fillna(0).reset_index()\nevents.columns = events.columns.droplevel()\nevents.columns = ['year', 'game_id', 'team', 'BB', 'E', 'H', 'HBP', 'HR', 'ROE', 'SO']\nevents = events.rename_axis(None, axis='columns')\nevents_plus_pa = pd.merge(events, pa, how='outer', left_on=['year', 'game_id', 'team'], right_on=['year', 'game_id', 'team'])\ndefense = pd.merge(events_plus_pa, info)\ndefense.loc[:, 'DER'] = 1 - ((defense['H'] + defense['ROE']) / (defense['PA'] - defense['BB'] -defense['SO'] - defense['HBP'] - defense['HR']))\ndefense.loc[:, 'year'] = pd.to_numeric(defense['year'])\nder = defense.loc[defense['year'] >= 1978, ['year', 'defense', 'DER']]\n\nder = der.pivot(index='year', columns='defense', values='DER')\nder.plot(x_compat=True, xticks=range(1978, 2018, 4), rot=45)\n\nplt.show()"
] | [
[
"pandas.to_numeric",
"matplotlib.pyplot.show",
"pandas.merge"
]
] |
rpachauri/connect4 | [
"6caf6965afaaff6883193ac295c6ac5b1f4e9c4a",
"6caf6965afaaff6883193ac295c6ac5b1f4e9c4a"
] | [
"connect_four/evaluation/incremental_victor/graph/graph_manager_add_solution_profile.py",
"connect_four/evaluation/victor/evaluator/evaluator_profile.py"
] | [
"import cProfile\n\nimport gym\n\nimport numpy as np\n\nfrom connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager\nfrom connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager\nfrom connect_four.problem import ConnectFourGroupManager\n\nenv = gym.make('connect_four-v0')\nenv.state = np.array([\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 0, 0, 0, ],\n ],\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 1, 1, 1, 0, 0, ],\n [0, 0, 0, 0, 1, 0, 0, ],\n ],\n])\n\n# noinspection SpellCheckingInspection\ncfgm = ConnectFourGroupManager(env_variables=env.env_variables)\nvsm = VictorSolutionManager(env_variables=env.env_variables)\n\nplayer, row, col = 0, 5, 0\n\ngm = GraphManager(player=player, problem_manager=cfgm, solution_manager=vsm)\n\n_, removed_problems = cfgm.move(player=player, row=row, col=col)\nfor problem in removed_problems:\n gm._remove_problem(problem)\n\nremoved_solutions, added_solutions = vsm.move(player=player, row=row, col=col)\nprint(\"len(removed_solutions) = \", len(removed_solutions))\nprint(\"len(added_solutions) = \", len(added_solutions))\n# print(\"number of useful solutions =\", len(self.solution_to_solutions))\nfor solution in removed_solutions:\n gm._remove_solution(solution)\nprint(\"number of solutions that remained =\", len(gm.solution_to_solutions))\n\n\ndef add_solutions():\n for solution in added_solutions:\n gm._add_solution(solution)\n\n print(\"number of solutions after adding =\", len(gm.solution_to_solutions))\n\n\ncProfile.run(\n 'add_solutions()',\n sort=\"cumtime\",\n)\n",
"import gym\nimport numpy as np\n\nimport cProfile\n\nfrom connect_four.evaluation.board import Board\nfrom connect_four.envs.connect_four_env import ConnectFourEnv\n\nenv = gym.make('connect_four-v0')\nConnectFourEnv.M = 6\nConnectFourEnv.N = 7\n\n# The empty 6x7 board has no solution set for Black because White is guaranteed to win.\nenv.state = np.array([\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n ],\n [\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n [0, 0, 0, 0, 0, 0, 0, ],\n ],\n])\nboard = Board(env.env_variables)\n\ncProfile.run('evaluator.evaluate(board=board)', sort=\"cumtime\")\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] |
thomasly/slgnn | [
"caa1e7814498da41ad025b4e62c569fe511848ff"
] | [
"slgnn/data_processing/jakfp_dataset.py"
] | [
"import os\n\nimport pandas as pd\nfrom chemreader.writers import GraphWriter\nfrom chemreader.readers import Smiles\nfrom rdkit.Chem import MolFromSmiles\nfrom slgnn.models.gcn.utils import get_filtered_fingerprint\nfrom tqdm import tqdm\n\n\ndef _is_active(value):\n if value < 1000:\n return 1\n elif value >= 10000:\n return -1\n else:\n return 0\n\n\ndef filter_(path):\n \"\"\" Filter JAK dataset\n \"\"\"\n jak = pd.read_csv(path)\n jak.dropna(subset=[\"Standard Relation\", \"Standard Value\"], inplace=True)\n not_eq = jak[\"Standard Relation\"] != \"'='\"\n lt_10um = jak[\"Standard Value\"] < 100000\n filtered = jak.drop(jak.loc[not_eq & lt_10um].index)\n gt = jak[\"Standard Relation\"] == \"'>'\"\n eq_1um = jak[\"Standard Value\"] >= 1000\n add_back = jak.loc[gt & eq_1um]\n filtered = filtered.append(add_back)\n filtered[\"Activity\"] = filtered[\"Standard Value\"].apply(_is_active)\n out_path = os.path.join(os.path.dirname(path), \"filtered_\" + os.path.basename(path))\n filtered[[\"Smiles\", \"Activity\"]].to_csv(out_path)\n\n\ndef write_graphs(inpath, outpath, prefix=None):\n \"\"\" Convert JAK dataset to graphs\n \"\"\"\n smiles = list()\n fps = list()\n pb = tqdm()\n with open(inpath, \"r\") as inf:\n line = inf.readline()\n while line:\n _, sm, _ = line.strip().split(\",\")\n if MolFromSmiles(sm) is None:\n line = inf.readline()\n continue\n smiles.append(Smiles(sm))\n fps.append(\",\".join(map(str, get_filtered_fingerprint(sm))))\n pb.update(1)\n line = inf.readline()\n writer = GraphWriter(smiles)\n writer.write(outpath, prefix=prefix, graph_labels=fps)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--path\", help=\"Path to the JAK file\")\n args = parser.parse_args()\n filter_(args.path)\n inpath = os.path.join(\n os.path.dirname(args.path), \"filtered_\" + os.path.basename(args.path)\n )\n pre = os.path.basename(args.path).split(\".\")[0] + \"FP\"\n write_graphs(inpath, os.path.join(os.path.dirname(args.path), \"graphs\"), prefix=pre)\n"
] | [
[
"pandas.read_csv"
]
] |
ryuwd/uproot4 | [
"20d8575e941c32559c7b5e62b0ed5f92bc4927d0"
] | [
"uproot/const.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines integer constants used by serialization and deserialization routines.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport numpy\n\n# used in unmarshaling\nkByteCountMask = numpy.int64(0x40000000)\nkByteCountVMask = numpy.int64(0x4000)\nkClassMask = numpy.int64(0x80000000)\nkNewClassTag = numpy.int64(0xFFFFFFFF)\n\nkIsOnHeap = numpy.uint32(0x01000000)\nkIsReferenced = numpy.uint32(1 << 4)\n\nkMapOffset = 2\n\n# not used?\nkNullTag = 0\nkNotDeleted = numpy.uint32(0x02000000)\nkZombie = numpy.uint32(0x04000000)\nkBitMask = numpy.uint32(0x00FFFFFF)\nkDisplacementMask = numpy.uint32(0xFF000000)\n\n############# core/zip/inc/Compression.h\n\nkZLIB = 1\nkLZMA = 2\nkOldCompressionAlgo = 3\nkLZ4 = 4\nkZSTD = 5\nkUndefinedCompressionAlgorithm = 6\n\n############# constants for streamers\n\nkBase = 0\nkChar = 1\nkShort = 2\nkInt = 3\nkLong = 4\nkFloat = 5\nkCounter = 6\nkCharStar = 7\nkDouble = 8\nkDouble32 = 9\nkLegacyChar = 10\nkUChar = 11\nkUShort = 12\nkUInt = 13\nkULong = 14\nkBits = 15\nkLong64 = 16\nkULong64 = 17\nkBool = 18\nkFloat16 = 19\nkOffsetL = 20\nkOffsetP = 40\nkObject = 61\nkAny = 62\nkObjectp = 63\nkObjectP = 64\nkTString = 65\nkTObject = 66\nkTNamed = 67\nkAnyp = 68\nkAnyP = 69\nkAnyPnoVT = 70\nkSTLp = 71\n\nkSkip = 100\nkSkipL = 120\nkSkipP = 140\n\nkConv = 200\nkConvL = 220\nkConvP = 240\n\nkSTL = 300\nkSTLstring = 365\n\nkStreamer = 500\nkStreamLoop = 501\n\n############# constants from core/foundation/inc/ESTLType.h\n\nkNotSTL = 0\nkSTLvector = 1\nkSTLlist = 2\nkSTLdeque = 3\nkSTLmap = 4\nkSTLmultimap = 5\nkSTLset = 6\nkSTLmultiset = 7\nkSTLbitset = 8\nkSTLforwardlist = 9\nkSTLunorderedset = 10\nkSTLunorderedmultiset = 11\nkSTLunorderedmap = 12\nkSTLunorderedmultimap = 13\nkSTLend = 14\nkSTLany = 300\n\n############# IOFeatures\n\nkGenerateOffsetMap = numpy.uint8(1)\n\n############# other\n\nkStreamedMemberWise = numpy.uint16(1 << 14)\n"
] | [
[
"numpy.uint8",
"numpy.uint16",
"numpy.uint32",
"numpy.int64"
]
] |
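The `uproot/const.py` record above only declares serialization constants and never shows how they are applied. As a hedged illustration (my own sketch, not code taken from uproot), a ROOT-style byte count is written with the `kByteCountMask` flag bit set, so a reader can test that bit and mask it back out to recover the plain count; the example value is invented:

```python
import numpy

# Constant copied from the record above; the raw word below is made up.
kByteCountMask = numpy.int64(0x40000000)

raw = numpy.int64(0x40000010)                 # 4-byte word with the flag bit set
is_byte_count = bool(raw & kByteCountMask)    # True -> the word encodes a byte count
count = int(raw & ~kByteCountMask)            # strip the flag bit -> 0x10
print(is_byte_count, count)
```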
piotrsobecki/PCa-CNNs2 | [
"01504db2037c67dc6832c2c8aaf4b3d5e4f2808f"
] | [
"src/prostatex/normalization.py"
] | [
"import numpy\n\n\n# Normalization functions\nclass NormalizationNo():\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n return img\n\n\nclass NormalizationMean(NormalizationNo):\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n if img.std() == 0:\n return img\n return (img - img.mean()) / img.std()\n\n\nclass NormalizationMedian(NormalizationNo):\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n denominator = numpy.median(img) + 2 * img.std()\n if denominator == 0.0:\n return img\n return img / denominator\n\nclass NormalizationFeatureScaling(NormalizationNo):\n\n def __init__(self, vmin=0, vmax=1):\n self.vmin=vmin\n self.vmax=vmax\n\n def normalize(self, img, settings=None):\n if settings is None:\n settings = {}\n OldValue = img\n OldMin = img.min()\n OldMax = img.max()\n NewMax = self.vmax\n NewMin = self.vmin\n OldRange = (OldMax - OldMin)\n NewRange = (NewMax - NewMin)\n if OldRange == 0.0:\n return img\n NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin\n return NewValue\n"
] | [
[
"numpy.median"
]
] |
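The normalization classes in the `src/prostatex/normalization.py` record above are plain NumPy and easy to exercise on their own. A minimal usage sketch, assuming the `src/` directory from that record is importable; the array shape and values are invented for illustration:

```python
# Assumes src/ from the record above is on PYTHONPATH.
from prostatex.normalization import NormalizationMean, NormalizationFeatureScaling
import numpy

img = numpy.random.rand(16, 16) * 255.0  # made-up image-like array

z_scored = NormalizationMean().normalize(img)                          # (img - mean) / std
rescaled = NormalizationFeatureScaling(vmin=0, vmax=1).normalize(img)  # min-max to [0, 1]
print(round(float(z_scored.std()), 3), float(rescaled.min()), float(rescaled.max()))
```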
jay90099/struct2tensor | [
"47d651757efa27586bf75f991b2174d8173a750b"
] | [
"struct2tensor/expression_impl/map_prensor.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Arbitrary operations from sparse and ragged tensors to a leaf field.\n\nThere are two public methods of note right now: map_sparse_tensor\nand map_ragged_tensor.\n\nAssume expr is:\n\n```\nsession: {\n event: {\n val_a: 10\n val_b: 1\n }\n event: {\n val_a: 20\n val_b: 2\n }\n event: {\n }\n event: {\n val_a: 40\n }\n event: {\n val_b: 5\n }\n}\n```\n\nEither of the following alternatives will add val_a and val_b\nto create val_sum.\n\nmap_sparse_tensor converts val_a and val_b to sparse tensors,\nand then add them to produce val_sum.\n\n```\nnew_root = map_prensor.map_sparse_tensor(\n expr,\n path.Path([\"event\"]),\n [path.Path([\"val_a\"]), path.Path([\"val_b\"])],\n lambda x,y: x + y,\n False,\n tf.int32,\n \"val_sum\")\n```\n\nmap_ragged_tensor converts val_a and val_b to ragged tensors,\nand then add them to produce val_sum.\n\n```\nnew_root = map_prensor.map_ragged_tensor(\n expr,\n path.Path([\"event\"]),\n [path.Path([\"val_a\"]), path.Path([\"val_b\"])],\n lambda x,y: x + y,\n False,\n tf.int32,\n \"val_sum\")\n```\n\nThe result of either is:\n\n```\nsession: {\n event: {\n val_a: 10\n val_b: 1\n val_sum: 11\n }\n event: {\n val_a: 20\n val_b: 2\n val_sum: 22\n }\n event: {\n }\n event: {\n val_a: 40\n val_sum: 40\n }\n event: {\n val_b: 5\n val_sum: 5\n }\n}\n```\n\n\"\"\"\n\nfrom typing import Callable, FrozenSet, Optional, Sequence, Tuple\n\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import expression_add\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nfrom struct2tensor.expression_impl import project\nimport tensorflow as tf\n\n\ndef map_sparse_tensor(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.SparseTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> expression.Expression:\n \"\"\"Maps a sparse tensor.\n\n Args:\n root: the root of the expression.\n root_path: the path relative to which the sparse tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of sparse tensors as input and\n returns a sparse tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n A new root expression containing the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n\n return _map_sparse_tensor_impl(root, root_path, paths, operation, is_repeated,\n dtype, new_field_name)[0]\n\n\ndef map_ragged_tensor(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.RaggedTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> expression.Expression:\n \"\"\"Map a ragged tensor.\n\n Args:\n root: 
the root of the expression.\n root_path: the path relative to which the ragged tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of ragged tensors as input and\n returns a ragged tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n A new root expression containing the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n return _map_ragged_tensor_impl(root, root_path, paths, operation, is_repeated,\n dtype, new_field_name)[0]\n\n\nclass _MapPrensorExpression(expression.Expression):\n \"\"\"Maps the values of the given expression.\n\n It maps the value of a sub-tree (i.e. a Prensor) to a single prensor\n LeafNodeTensor. Therefore its sources are all the (known) descendants of\n `origin`: it usually should follow a project(...) to make known descendants\n clear.\n\n _MapPrensorExpression is intended to be a child of the origin. See\n map_prensor_impl for example usage.\n\n \"\"\"\n\n def __init__(self, origin: expression.Expression,\n operation: Callable[[prensor.Prensor, calculate_options\n .Options], prensor.LeafNodeTensor],\n is_repeated: bool, dtype: tf.DType):\n super().__init__(is_repeated, dtype)\n self._origin = origin\n self._operation = operation\n\n def _get_source_paths(self) -> Sequence[path.Path]:\n \"\"\"Returns the source paths in a deterministic order.\"\"\"\n result = [k for k in self._origin.get_known_descendants().keys()]\n result.sort()\n return result\n\n def get_source_expressions(self) -> Sequence[expression.Expression]:\n subtree = self._origin.get_known_descendants()\n source_paths = self._get_source_paths()\n return [subtree[k] for k in source_paths]\n\n def calculate(\n self,\n sources: Sequence[prensor.NodeTensor],\n destinations: Sequence[expression.Expression],\n options: calculate_options.Options,\n side_info: Optional[prensor.Prensor] = None) -> prensor.LeafNodeTensor:\n source_tree = prensor.create_prensor_from_descendant_nodes(\n {k: v for k, v in zip(self._get_source_paths(), sources)})\n return self._operation(source_tree, options)\n\n def calculation_is_identity(self) -> bool:\n return False\n\n def calculation_equal(self, expr: expression.Expression) -> bool:\n return self is expr\n\n def _get_child_impl(self,\n field_name: path.Step) -> Optional[expression.Expression]:\n return None\n\n def known_field_names(self) -> FrozenSet[path.Step]:\n return frozenset()\n\n\ndef _as_leaf_node_no_checks(sparse_tensor: tf.SparseTensor,\n is_repeated: bool) -> prensor.LeafNodeTensor:\n \"\"\"Take a SparseTensor and create a LeafNodeTensor, no checks.\"\"\"\n if is_repeated:\n parent_index = tf.transpose(sparse_tensor.indices)[0]\n else:\n parent_index = tf.reshape(sparse_tensor.indices, [-1])\n return prensor.LeafNodeTensor(parent_index, sparse_tensor.values, is_repeated)\n\n\ndef _as_leaf_node_with_checks(sparse_tensor: tf.SparseTensor, is_repeated: bool,\n required_batch_size: tf.Tensor\n ) -> prensor.LeafNodeTensor:\n \"\"\"Take a SparseTensor and create a LeafNodeTensor, with checks.\"\"\"\n assertions = [\n tf.assert_equal(sparse_tensor.dense_shape[0], required_batch_size)\n ]\n if is_repeated:\n assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 2))\n else:\n assertions.append(tf.assert_equal(tf.shape(sparse_tensor.indices)[1], 1))\n\n with 
tf.control_dependencies(assertions):\n # TODO(b/72947444): Check that the resulting tensor is canonical, that the\n # indices are in lexicographical order, and that the indices fit in the\n # shape. Moreover, maybe we should check if it is repeated that it is a\n # \"ragged array\".\n return _as_leaf_node_no_checks(sparse_tensor, is_repeated)\n\n\ndef _as_leaf_node(sparse_tensor: tf.SparseTensor, is_repeated: bool,\n required_batch_size: tf.Tensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n if options.sparse_checks:\n return _as_leaf_node_with_checks(sparse_tensor, is_repeated,\n required_batch_size)\n else:\n return _as_leaf_node_no_checks(sparse_tensor, is_repeated)\n\n\ndef _map_prensor_impl(\n root: expression.Expression, root_path: path.Path,\n paths_needed: Sequence[path.Path],\n operation: Callable[[prensor.Prensor, calculate_options.Options], prensor\n .LeafNodeTensor], is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Map prensor implementation.\"\"\"\n child_expr = root.get_descendant_or_error(root_path)\n sibling_child_expr = project.project(child_expr, paths_needed)\n new_field_expr = _MapPrensorExpression(sibling_child_expr, operation,\n is_repeated, dtype)\n new_path = root_path.get_child(new_field_name)\n return expression_add.add_paths(root, {new_path: new_field_expr}), new_path\n\n\ndef _map_sparse_tensor_impl(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.SparseTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step\n ) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Helper method for map_sparse_tensor.\"\"\"\n\n def new_op(pren: prensor.Prensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n \"\"\"Op for mapping prensor using the operation.\"\"\"\n sparse_tensor_map = pren.get_sparse_tensors(options)\n sparse_tensors = [sparse_tensor_map[p] for p in paths]\n result_as_tensor = operation(*sparse_tensors)\n result = _as_leaf_node(result_as_tensor, is_repeated,\n sparse_tensors[0].dense_shape[0], options)\n if result.values.dtype != dtype:\n raise ValueError(\"Type unmatched: actual ({})!= expected ({})\".format(\n str(result.values.dtype), str(dtype)))\n return result\n\n return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,\n new_field_name)\n\n\ndef _ragged_as_leaf_node(ragged_tensor: tf.RaggedTensor, is_repeated: bool,\n reference_ragged_tensor: tf.RaggedTensor,\n options: calculate_options.Options\n ) -> prensor.LeafNodeTensor:\n \"\"\"Creates a ragged tensor as a leaf node.\"\"\"\n assertions = []\n size_dim = tf.compat.dimension_at_index(ragged_tensor.shape, 0).value\n reference_size_dim = tf.compat.dimension_at_index(\n reference_ragged_tensor.shape, 0).value\n if (size_dim is not None and reference_size_dim is not None):\n if size_dim != reference_size_dim:\n raise ValueError(\"Returned ragged tensor is not the right size.\")\n elif options.ragged_checks:\n assertions.append(\n tf.assert_equal(ragged_tensor.nrows(), reference_ragged_tensor.nrows()))\n\n if not is_repeated:\n rowids = ragged_tensor.value_rowids()\n if options.ragged_checks:\n assertions.append(tf.compat.v1.assert_positive(rowids[1:] - rowids[:-1]))\n if assertions:\n with tf.control_dependencies(assertions):\n parent_index = ragged_tensor.value_rowids()\n return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,\n is_repeated)\n else:\n parent_index = 
ragged_tensor.value_rowids()\n return prensor.LeafNodeTensor(parent_index, ragged_tensor.values,\n is_repeated)\n\n\ndef _map_ragged_tensor_impl(root: expression.Expression, root_path: path.Path,\n paths: Sequence[path.Path],\n operation: Callable[..., tf.RaggedTensor],\n is_repeated: bool, dtype: tf.DType,\n new_field_name: path.Step\n ) -> Tuple[expression.Expression, path.Path]:\n \"\"\"Maps a ragged tensor.\n\n Args:\n root: the root of the expression.\n root_path: the path relative to which the ragged tensors are calculated.\n paths: the input paths relative to the root_path\n operation: a method that takes the list of ragged tensors as input and\n returns a ragged tensor.\n is_repeated: true if the result of operation is repeated.\n dtype: dtype of the result of the operation.\n new_field_name: root_path.get_child(new_field_name) is the path of the\n result.\n\n Returns:\n An expression/path pair (expr,p) with a new root expression containing\n the old root expression plus the new path,\n root_path.get_child(new_field_name), with the result of the operation.\n \"\"\"\n\n def new_op(tree: prensor.Prensor,\n options: calculate_options.Options) -> prensor.LeafNodeTensor:\n \"\"\"Apply operation to tree.\"\"\"\n ragged_tensor_map = tree.get_ragged_tensors(options)\n ragged_tensors = [ragged_tensor_map[p] for p in paths]\n result_as_tensor = operation(*ragged_tensors)\n result = _ragged_as_leaf_node(result_as_tensor, is_repeated,\n ragged_tensors[0], options)\n if result.values.dtype != dtype:\n raise ValueError(\"Type unmatched: actual ({})!= expected ({})\".format(\n str(result.values.dtype), str(dtype)))\n return result\n\n return _map_prensor_impl(root, root_path, paths, new_op, is_repeated, dtype,\n new_field_name)\n"
] | [
[
"tensorflow.compat.v1.assert_positive",
"tensorflow.assert_equal",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.compat.dimension_at_index",
"tensorflow.transpose",
"tensorflow.control_dependencies"
]
] |
sunghern/Auto-Compression | [
"7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e"
] | [
"pruning/cifar10_fbnet/supernet_main_file.py"
] | [
"import numpy as np\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\nfrom scipy.special import softmax\nimport argparse\n\nfrom general_functions.dataloaders import get_loaders, get_test_loader\nfrom general_functions.utils import get_logger, weights_init, load, create_directories_from_list, \\\n check_tensor_in_list, writh_new_ARCH_to_fbnet_modeldef\nfrom supernet_functions.lookup_table_builder import LookUpTable_HIGH\nfrom supernet_functions.model_supernet import FBNet_Stochastic_SuperNet, SupernetLoss\nfrom supernet_functions.training_functions_supernet import TrainerSupernet\nfrom supernet_functions.config_for_supernet import CONFIG_SUPERNET\nfrom fbnet_building_blocks.fbnet_modeldef import MODEL_ARCH\nimport copy\nimport torch.nn.utils.prune as prune\n\nparser = argparse.ArgumentParser(\"action\")\nparser.add_argument('--train_or_sample', type=str, default='', \\\n help='train means training of the SuperNet, sample means sample from SuperNet\\'s results')\nparser.add_argument('--architecture_name', type=str, default='', \\\n help='Name of an architecture to be sampled')\nparser.add_argument('--hardsampling_bool_value', type=str, default='True', \\\n help='If not False or 0 -> do hardsampling, else - softmax sampling')\nparser.add_argument('--prune', type=str, default='channel', \\\n help='channel or group')\nargs = parser.parse_args()\n\ndef train_supernet():\n manual_seed = 1\n np.random.seed(manual_seed)\n torch.manual_seed(manual_seed)\n torch.cuda.manual_seed_all(manual_seed)\n torch.backends.cudnn.benchmark = True\n\n create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])\n \n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])\n #### DataLoading\n train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],\n CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'],\n logger)\n test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],\n CONFIG_SUPERNET['dataloading']['path_to_save_data'])\n lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], prune_type=args.prune)\n\n ###MODEL\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = model.apply(weights_init)\n model = nn.DataParallel(model, device_ids=[0])\n for m in model.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n prune.remove(m, 'weight')\n #### Loss, Optimizer and Scheduler\n criterion = SupernetLoss().cuda()\n\n\n thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]\n params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]\n\n w_optimizer = torch.optim.SGD(params=params_except_thetas,\n lr=CONFIG_SUPERNET['optimizer']['w_lr'], \n momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],\n weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])\n \n theta_optimizer = torch.optim.Adam(params=thetas_params,\n lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],\n weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])\n\n last_epoch = -1\n w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,\n T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],\n last_epoch=last_epoch)\n #### Training Loop\n trainer = TrainerSupernet(criterion, w_optimizer, 
theta_optimizer, w_scheduler, logger, writer, True)\n trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n '''\n for layer in model.module.stages_to_search:\n #layer.thetas = nn.Parameter(torch.Tensor([1.0 / 1 for i in range(1)]).cuda())\n print(layer.thetas)\n '''\n f = open(\"result.txt\", \"w\")\n for i, layer in enumerate(model.module.stages_to_search):\n print('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end=\" \")\n f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())]+'\\n')\n f.close()\n print()\n\n# Arguments:\n# hardsampling=True means get operations with the largest weights\n# =False means apply softmax to weights and sample from the distribution\n# unique_name_of_arch - name of architecture. will be written into fbnet_building_blocks/fbnet_modeldef.py\n# and can be used in the training by train_architecture_main_file.py\ndef sample_architecture_from_the_supernet(unique_name_of_arch, hardsampling=True):\n logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])\n \n lookup_table = LookUpTable()\n model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()\n model = nn.DataParallel(model)\n\n load(model, CONFIG_SUPERNET['train_settings']['path_to_save_model'])\n\n ops_names = [op_name for op_name in lookup_table.lookup_table_operations]\n cnt_ops = len(ops_names)\n\n arch_operations=[]\n if hardsampling:\n for layer in model.module.stages_to_search:\n arch_operations.append(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())])\n else:\n rng = np.linspace(0, cnt_ops - 1, cnt_ops, dtype=int)\n for layer in model.module.stages_to_search:\n distribution = softmax(layer.thetas.detach().cpu().numpy())\n arch_operations.append(ops_names[np.random.choice(rng, p=distribution)])\n \n logger.info(\"Sampled Architecture: \" + \" - \".join(arch_operations))\n writh_new_ARCH_to_fbnet_modeldef(arch_operations, my_unique_name_for_ARCH=unique_name_of_arch)\n logger.info(\"CONGRATULATIONS! New architecture \" + unique_name_of_arch \\\n + \" was written into fbnet_building_blocks/fbnet_modeldef.py\")\n \nif __name__ == \"__main__\":\n assert args.train_or_sample in ['train', 'sample']\n if args.train_or_sample == 'train':\n train_supernet()\n elif args.train_or_sample == 'sample':\n assert args.architecture_name != '' and args.architecture_name not in MODEL_ARCH\n hardsampling = False if args.hardsampling_bool_value in ['False', '0'] else True\n sample_architecture_from_the_supernet(unique_name_of_arch=args.architecture_name, hardsampling=hardsampling)\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.cuda.manual_seed_all",
"torch.optim.SGD",
"torch.nn.utils.prune.remove",
"torch.manual_seed",
"numpy.random.seed",
"numpy.random.choice",
"torch.optim.Adam",
"torch.nn.DataParallel",
"numpy.linspace"
]
] |
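`sample_architecture_from_the_supernet` in the record above picks one operation per layer either greedily (argmax over the layer's thetas) or by sampling from a softmax distribution over them. A standalone sketch of just that selection step, with invented operation names and theta values:

```python
import numpy as np
from scipy.special import softmax

ops_names = ["ir_k3_e3", "ir_k5_e6", "skip"]  # hypothetical operation names
thetas = np.array([0.2, 1.5, 0.3])            # hypothetical architecture weights

# hardsampling=True: take the operation with the largest theta
hard_choice = ops_names[int(np.argmax(thetas))]

# hardsampling=False: sample an index from the softmax distribution over thetas
rng = np.linspace(0, len(ops_names) - 1, len(ops_names), dtype=int)
soft_choice = ops_names[np.random.choice(rng, p=softmax(thetas))]
print(hard_choice, soft_choice)
```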
levishai/3DMPPE_POSENET_RELEASE | [
"e364053b5a4e51f4a84eb50abb26026094931d90"
] | [
"main/test.py"
] | [
"import argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\nfrom config import cfg\nimport torch\nfrom base import Tester\nfrom utils.vis import vis_keypoints\nfrom utils.pose_utils import flip\nimport torch.backends.cudnn as cudnn\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=str, dest='gpu_ids')\n parser.add_argument('--test_epoch', type=str, dest='test_epoch')\n args = parser.parse_args()\n\n # test gpus\n if not args.gpu_ids:\n assert 0, \"Please set proper gpu ids\"\n\n if '-' in args.gpu_ids:\n gpus = args.gpu_ids.split('-')\n gpus[0] = int(gpus[0])\n gpus[1] = int(gpus[1]) + 1\n args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))\n \n assert args.test_epoch, 'Test epoch is required.'\n return args\n\ndef main():\n\n args = parse_args()\n cfg.set_args(args.gpu_ids)\n cudnn.fastest = True\n cudnn.benchmark = True\n cudnn.deterministic = False\n cudnn.enabled = True\n\n tester = Tester(args.test_epoch)\n tester._make_batch_generator()\n tester._make_model()\n\n preds = []\n\n with torch.no_grad():\n for itr, input_img in enumerate(tqdm(tester.batch_generator)):\n \n # forward\n coord_out = tester.model(input_img)\n\n if cfg.flip_test:\n flipped_input_img = flip(input_img, dims=3)\n flipped_coord_out = tester.model(flipped_input_img)\n flipped_coord_out[:, :, 0] = cfg.output_shape[1] - flipped_coord_out[:, :, 0] - 1\n for pair in tester.flip_pairs:\n flipped_coord_out[:, pair[0], :], flipped_coord_out[:, pair[1], :] = flipped_coord_out[:, pair[1], :].clone(), flipped_coord_out[:, pair[0], :].clone()\n coord_out = (coord_out + flipped_coord_out)/2.\n\n vis = False\n if vis:\n filename = str(itr)\n tmpimg = input_img[0].cpu().numpy()\n tmpimg = tmpimg * np.array(cfg.pixel_std).reshape(3,1,1) + np.array(cfg.pixel_mean).reshape(3,1,1)\n tmpimg = tmpimg.astype(np.uint8)\n tmpimg = tmpimg[::-1, :, :]\n tmpimg = np.transpose(tmpimg,(1,2,0)).copy()\n tmpkps = np.zeros((3,tester.joint_num))\n tmpkps[:2,:] = coord_out[0,:,:2].cpu().numpy().transpose(1,0) / cfg.output_shape[0] * cfg.input_shape[0]\n tmpkps[2,:] = 1\n tmpimg = vis_keypoints(tmpimg, tmpkps, tester.skeleton)\n cv2.imwrite(filename + '_output.jpg', tmpimg)\n\n coord_out = coord_out.cpu().numpy()\n preds.append(coord_out)\n \n # evaluate\n preds = np.concatenate(preds, axis=0)\n tester._evaluate(preds, cfg.result_dir) \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.transpose",
"numpy.zeros",
"torch.no_grad",
"numpy.array",
"numpy.concatenate"
]
] |
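The test loop in the `main/test.py` record above averages predictions from the original and a horizontally flipped input ("flip test"): the x coordinate of the flipped output is mirrored back and left/right joint pairs are swapped before averaging. A self-contained sketch of that step alone, with made-up tensors, output width, and joint pairs standing in for the model outputs and the `cfg`/`tester` values:

```python
import torch

output_width = 64              # stand-in for cfg.output_shape[1]
flip_pairs = [(1, 2), (3, 4)]  # hypothetical left/right joint index pairs

coord_out = torch.rand(2, 5, 3)          # (batch, joints, xyz) from the normal pass
flipped_coord_out = torch.rand(2, 5, 3)  # pretend output of the flipped-input pass

# mirror x back, swap left/right joints, then average the two passes
flipped_coord_out[:, :, 0] = output_width - flipped_coord_out[:, :, 0] - 1
for a, b in flip_pairs:
    flipped_coord_out[:, a, :], flipped_coord_out[:, b, :] = (
        flipped_coord_out[:, b, :].clone(),
        flipped_coord_out[:, a, :].clone(),
    )
coord_out = (coord_out + flipped_coord_out) / 2.0
```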
sanjaymsh/silx | [
"3f9bcda88c074438fdb30cde29fec314d26f471c"
] | [
"silx/math/fit/fittheories.py"
] | [
"# coding: utf-8\n#/*##########################################################################\n#\n# Copyright (c) 2004-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n########################################################################### */\n\"\"\"This modules provides a set of fit functions and associated\nestimation functions in a format that can be imported into a\n:class:`silx.math.fit.FitManager` instance.\n\nThese functions are well suited for fitting multiple gaussian shaped peaks\ntypically found in spectroscopy data. The estimation functions are designed\nto detect how many peaks are present in the data, and provide an initial\nestimate for their height, their center location and their full-width\nat half maximum (fwhm).\n\nThe limitation of these estimation algorithms is that only gaussians having a\nsimilar fwhm can be detected by the peak search algorithm.\nThis *search fwhm* can be defined by the user, if\nhe knows the characteristics of his data, or can be automatically estimated\nbased on the fwhm of the largest peak in the data.\n\nThe source code of this module can serve as template for defining your own\nfit functions.\n\nThe functions to be imported by :meth:`FitManager.loadtheories` are defined by\na dictionary :const:`THEORY`: with the following structure::\n\n from silx.math.fit.fittheory import FitTheory\n\n THEORY = {\n 'theory_name_1': FitTheory(\n description='Description of theory 1',\n function=fitfunction1,\n parameters=('param name 1', 'param name 2', …),\n estimate=estimation_function1,\n configure=configuration_function1,\n derivative=derivative_function1),\n\n 'theory_name_2': FitTheory(…),\n }\n\n.. note::\n\n Consider using an OrderedDict instead of a regular dictionary, when\n defining your own theory dictionary, if the order matters to you.\n This will likely be the case if you intend to load a selection of\n functions in a GUI such as :class:`silx.gui.fit.FitManager`.\n\nTheory names can be customized (e.g. 
``gauss, lorentz, splitgauss``…).\n\nThe mandatory parameters for :class:`FitTheory` are ``function`` and\n``parameters``.\n\nYou can also define an ``INIT`` function that will be executed by\n:meth:`FitManager.loadtheories`.\n\nSee the documentation of :class:`silx.math.fit.fittheory.FitTheory`\nfor more information.\n\nModule members:\n---------------\n\"\"\"\nimport numpy\nfrom collections import OrderedDict\nimport logging\n\nfrom silx.math.fit import functions\nfrom silx.math.fit.peaks import peak_search, guess_fwhm\nfrom silx.math.fit.filters import strip, savitsky_golay\nfrom silx.math.fit.leastsq import leastsq\nfrom silx.math.fit.fittheory import FitTheory\n\n_logger = logging.getLogger(__name__)\n\n__authors__ = [\"V.A. Sole\", \"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"15/05/2017\"\n\n\nDEFAULT_CONFIG = {\n 'NoConstraintsFlag': False,\n 'PositiveFwhmFlag': True,\n 'PositiveHeightAreaFlag': True,\n 'SameFwhmFlag': False,\n 'QuotedPositionFlag': False, # peak not outside data range\n 'QuotedEtaFlag': False, # force 0 < eta < 1\n # Peak detection\n 'AutoScaling': False,\n 'Yscaling': 1.0,\n 'FwhmPoints': 8,\n 'AutoFwhm': True,\n 'Sensitivity': 2.5,\n 'ForcePeakPresence': True,\n # Hypermet\n 'HypermetTails': 15,\n 'QuotedFwhmFlag': 0,\n 'MaxFwhm2InputRatio': 1.5,\n 'MinFwhm2InputRatio': 0.4,\n # short tail parameters\n 'MinGaussArea4ShortTail': 50000.,\n 'InitialShortTailAreaRatio': 0.050,\n 'MaxShortTailAreaRatio': 0.100,\n 'MinShortTailAreaRatio': 0.0010,\n 'InitialShortTailSlopeRatio': 0.70,\n 'MaxShortTailSlopeRatio': 2.00,\n 'MinShortTailSlopeRatio': 0.50,\n # long tail parameters\n 'MinGaussArea4LongTail': 1000.0,\n 'InitialLongTailAreaRatio': 0.050,\n 'MaxLongTailAreaRatio': 0.300,\n 'MinLongTailAreaRatio': 0.010,\n 'InitialLongTailSlopeRatio': 20.0,\n 'MaxLongTailSlopeRatio': 50.0,\n 'MinLongTailSlopeRatio': 5.0,\n # step tail\n 'MinGaussHeight4StepTail': 5000.,\n 'InitialStepTailHeightRatio': 0.002,\n 'MaxStepTailHeightRatio': 0.0100,\n 'MinStepTailHeightRatio': 0.0001,\n # Hypermet constraints\n # position in range [estimated position +- estimated fwhm/2]\n 'HypermetQuotedPositionFlag': True,\n 'DeltaPositionFwhmUnits': 0.5,\n 'SameSlopeRatioFlag': 1,\n 'SameAreaRatioFlag': 1,\n # Strip bg removal\n 'StripBackgroundFlag': True,\n 'SmoothingFlag': True,\n 'SmoothingWidth': 5,\n 'StripWidth': 2,\n 'StripIterations': 5000,\n 'StripThresholdFactor': 1.0}\n\"\"\"This dictionary defines default configuration parameters that have effects\non fit functions and estimation functions, mainly on fit constraints.\nThis dictionary is accessible as attribute :attr:`FitTheories.config`,\nwhich can be modified by configuration functions defined in\n:const:`CONFIGURE`.\n\"\"\"\n\nCFREE = 0\nCPOSITIVE = 1\nCQUOTED = 2\nCFIXED = 3\nCFACTOR = 4\nCDELTA = 5\nCSUM = 6\nCIGNORED = 7\n\n\nclass FitTheories(object):\n \"\"\"Class wrapping functions from :class:`silx.math.fit.functions`\n and providing estimate functions for all of these fit functions.\"\"\"\n def __init__(self, config=None):\n if config is None:\n self.config = DEFAULT_CONFIG\n else:\n self.config = config\n\n def ahypermet(self, x, *pars):\n \"\"\"\n Wrapping of :func:`silx.math.fit.functions.sum_ahypermet` without\n the tail flags in the function signature.\n\n Depending on the value of `self.config['HypermetTails']`, one can\n activate or deactivate the various terms of the hypermet function.\n\n `self.config['HypermetTails']` must be an integer between 0 and 15.\n It is a set of 4 binary flags, one for activating each 
one of the\n hypermet terms: *gaussian function, short tail, long tail, step*.\n\n For example, 15 can be expressed as ``1111`` in base 2, so a flag of\n 15 means all terms are active.\n \"\"\"\n g_term = self.config['HypermetTails'] & 1\n st_term = (self.config['HypermetTails'] >> 1) & 1\n lt_term = (self.config['HypermetTails'] >> 2) & 1\n step_term = (self.config['HypermetTails'] >> 3) & 1\n return functions.sum_ahypermet(x, *pars,\n gaussian_term=g_term, st_term=st_term,\n lt_term=lt_term, step_term=step_term)\n\n def poly(self, x, *pars):\n \"\"\"Order n polynomial.\n The order of the polynomial is defined by the number of\n coefficients (``*pars``).\n\n \"\"\"\n p = numpy.poly1d(pars)\n return p(x)\n\n @staticmethod\n def estimate_poly(x, y, n=2):\n \"\"\"Estimate polynomial coefficients for a degree n polynomial.\n\n \"\"\"\n pcoeffs = numpy.polyfit(x, y, n)\n constraints = numpy.zeros((n + 1, 3), numpy.float)\n return pcoeffs, constraints\n\n def estimate_quadratic(self, x, y):\n \"\"\"Estimate quadratic coefficients\n\n \"\"\"\n return self.estimate_poly(x, y, n=2)\n\n def estimate_cubic(self, x, y):\n \"\"\"Estimate coefficients for a degree 3 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=3)\n\n def estimate_quartic(self, x, y):\n \"\"\"Estimate coefficients for a degree 4 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=4)\n\n def estimate_quintic(self, x, y):\n \"\"\"Estimate coefficients for a degree 5 polynomial\n\n \"\"\"\n return self.estimate_poly(x, y, n=5)\n\n def strip_bg(self, y):\n \"\"\"Return the strip background of y, using parameters from\n :attr:`config` dictionary (*StripBackgroundFlag, StripWidth,\n StripIterations, StripThresholdFactor*)\"\"\"\n remove_strip_bg = self.config.get('StripBackgroundFlag', False)\n if remove_strip_bg:\n if self.config['SmoothingFlag']:\n y = savitsky_golay(y, self.config['SmoothingWidth'])\n strip_width = self.config['StripWidth']\n strip_niterations = self.config['StripIterations']\n strip_thr_factor = self.config['StripThresholdFactor']\n return strip(y, w=strip_width,\n niterations=strip_niterations,\n factor=strip_thr_factor)\n else:\n return numpy.zeros_like(y)\n\n def guess_yscaling(self, y):\n \"\"\"Estimate scaling for y prior to peak search.\n A smoothing filter is applied to y to estimate the noise level\n (chi-squared)\n\n :param y: Data array\n :return: Scaling factor\n \"\"\"\n # ensure y is an array\n yy = numpy.array(y, copy=False)\n\n # smooth\n convolution_kernel = numpy.ones(shape=(3,)) / 3.\n ysmooth = numpy.convolve(y, convolution_kernel, mode=\"same\")\n\n # remove zeros\n idx_array = numpy.fabs(y) > 0.0\n yy = yy[idx_array]\n ysmooth = ysmooth[idx_array]\n\n # compute scaling factor\n chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))\n if chisq > 0:\n return 1. / chisq\n else:\n return 1.0\n\n def peak_search(self, y, fwhm, sensitivity):\n \"\"\"Search for peaks in y array, after padding the array and\n multiplying its value by a scaling factor.\n\n :param y: 1-D data array\n :param int fwhm: Typical full width at half maximum for peaks,\n in number of points. This parameter is used for to discriminate between\n true peaks and background fluctuations.\n :param float sensitivity: Sensitivity parameter. This is a threshold factor\n for peak detection. 
Only peaks larger than the standard deviation\n of the noise multiplied by this sensitivity parameter are detected.\n :return: List of peak indices\n \"\"\"\n # add padding\n ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float)\n ysearch[0:fwhm] = y[0]\n ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]\n ysearch[fwhm:fwhm + len(y)] = y[:]\n\n scaling = self.guess_yscaling(y) if self.config[\"AutoScaling\"] else self.config[\"Yscaling\"]\n\n if len(ysearch) > 1.5 * fwhm:\n peaks = peak_search(scaling * ysearch,\n fwhm=fwhm, sensitivity=sensitivity)\n return [peak_index - fwhm for peak_index in peaks\n if 0 <= peak_index - fwhm < len(y)]\n else:\n return []\n\n def estimate_height_position_fwhm(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM* of peaks, for gaussian-like\n curves.\n\n This functions finds how many parameters are needed, based on the\n number of peaks detected. Then it estimates the fit parameters\n with a few iterations of fitting gaussian functions.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar = []\n\n bg = self.strip_bg(y)\n\n if self.config['AutoFwhm']:\n search_fwhm = guess_fwhm(y)\n else:\n search_fwhm = int(float(self.config['FwhmPoints']))\n search_sens = float(self.config['Sensitivity'])\n\n if search_fwhm < 3:\n _logger.warning(\"Setting peak fwhm to 3 (lower limit)\")\n search_fwhm = 3\n self.config['FwhmPoints'] = 3\n\n if search_sens < 1:\n _logger.warning(\"Setting peak search sensitivity to 1. \" +\n \"(lower limit to filter out noise peaks)\")\n search_sens = 1\n self.config['Sensitivity'] = 1\n\n npoints = len(y)\n\n # Find indices of peaks in data array\n peaks = self.peak_search(y,\n fwhm=search_fwhm,\n sensitivity=search_sens)\n\n if not len(peaks):\n forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))\n if forcepeak:\n delta = y - bg\n # get index of global maximum\n # (first one if several samples are equal to this value)\n peaks = [numpy.nonzero(delta == delta.max())[0][0]]\n\n # Find index of largest peak in peaks array\n index_largest_peak = 0\n if len(peaks) > 0:\n # estimate fwhm as 5 * sampling interval\n sig = 5 * abs(x[npoints - 1] - x[0]) / npoints\n peakpos = x[int(peaks[0])]\n if abs(peakpos) < 1.0e-16:\n peakpos = 0.0\n param = numpy.array(\n [y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])\n height_largest_peak = param[0]\n peak_index = 1\n for i in peaks[1:]:\n param2 = numpy.array(\n [y[int(i)] - bg[int(i)], x[int(i)], sig])\n param = numpy.concatenate((param, param2))\n if param2[0] > height_largest_peak:\n height_largest_peak = param2[0]\n index_largest_peak = peak_index\n peak_index += 1\n\n # Subtract background\n xw = x\n yw = y - bg\n\n cons = numpy.zeros((len(param), 3), numpy.float)\n\n # peak height must be positive\n cons[0:len(param):3, 0] = CPOSITIVE\n # force peaks to stay around their position\n cons[1:len(param):3, 0] = CQUOTED\n\n # set possible peak range to estimated peak +- guessed fwhm\n if len(xw) > search_fwhm:\n fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])\n cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx\n cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx\n else:\n shape = [max(1, int(x)) for x in (param[1:len(param):3])]\n cons[1:len(param):3, 1] = min(xw) * numpy.ones(\n shape,\n numpy.float)\n cons[1:len(param):3, 
2] = max(xw) * numpy.ones(\n shape,\n numpy.float)\n\n # ensure fwhm is positive\n cons[2:len(param):3, 0] = CPOSITIVE\n\n # run a quick iterative fit (4 iterations) to improve\n # estimations\n fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,\n max_iter=4, constraints=cons.tolist(),\n full_output=True)\n\n # set final constraints based on config parameters\n cons = numpy.zeros((len(fittedpar), 3), numpy.float)\n peak_index = 0\n for i in range(len(peaks)):\n # Setup height area constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveHeightAreaFlag']:\n cons[peak_index, 0] = CPOSITIVE\n cons[peak_index, 1] = 0\n cons[peak_index, 2] = 0\n peak_index += 1\n\n # Setup position constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['QuotedPositionFlag']:\n cons[peak_index, 0] = CQUOTED\n cons[peak_index, 1] = min(x)\n cons[peak_index, 2] = max(x)\n peak_index += 1\n\n # Setup positive FWHM constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveFwhmFlag']:\n cons[peak_index, 0] = CPOSITIVE\n cons[peak_index, 1] = 0\n cons[peak_index, 2] = 0\n if self.config['SameFwhmFlag']:\n if i != index_largest_peak:\n cons[peak_index, 0] = CFACTOR\n cons[peak_index, 1] = 3 * index_largest_peak + 2\n cons[peak_index, 2] = 1.0\n peak_index += 1\n\n return fittedpar, cons\n\n def estimate_agauss(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM* of peaks, for gaussian-like\n curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n converts the height parameters to area under the curve with the\n formula ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2))``\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n for i in range(npeaks):\n height = fittedpar[3 * i]\n fwhm = fittedpar[3 * i + 2]\n # Replace height with area in fittedpar\n fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (\n 2.0 * numpy.sqrt(2 * numpy.log(2)))\n return fittedpar, cons\n\n def estimate_alorentz(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM* of peaks, for Lorentzian\n curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n converts the height parameters to area under the curve with the\n formula ``area = height * fwhm * 0.5 * pi``\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n for i in range(npeaks):\n height = fittedpar[3 * i]\n fwhm = fittedpar[3 * i + 2]\n # Replace height with area in fittedpar\n fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)\n return fittedpar, cons\n\n def estimate_splitgauss(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM1, FWHM2* of peaks, for\n asymmetric gaussian-like curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds a second (identical) estimation of FWHM to the fit parameters\n for each peak, 
and the corresponding constraint.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM1, FWHM2*.\n Fit constraints depend on :attr:`config`.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n # get the number of found peaks\n npeaks = len(fittedpar) // 3\n estimated_parameters = []\n estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float)\n for i in range(npeaks):\n for j in range(3):\n estimated_parameters.append(fittedpar[3 * i + j])\n # fwhm2 estimate = fwhm1\n estimated_parameters.append(fittedpar[3 * i + 2])\n # height\n estimated_constraints[4 * i, 0] = cons[3 * i, 0]\n estimated_constraints[4 * i, 1] = cons[3 * i, 1]\n estimated_constraints[4 * i, 2] = cons[3 * i, 2]\n # position\n estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]\n estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]\n estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]\n # fwhm1\n estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]\n estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]\n estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]\n # fwhm2\n estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]\n estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]\n estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]\n if cons[3 * i + 2, 0] == CFACTOR:\n # convert indices of related parameters\n # (this happens if SameFwhmFlag == True)\n estimated_constraints[4 * i + 2, 1] = \\\n int(cons[3 * i + 2, 1] / 3) * 4 + 2\n estimated_constraints[4 * i + 3, 1] = \\\n int(cons[3 * i + 2, 1] / 3) * 4 + 3\n return estimated_parameters, estimated_constraints\n\n def estimate_pvoigt(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM, eta* of peaks, for\n pseudo-Voigt curves.\n\n Pseudo-Voigt are a sum of a gaussian curve *G(x)* and a lorentzian\n curve *L(x)* with the same height, center, fwhm parameters:\n ``y(x) = eta * G(x) + (1-eta) * L(x)``\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds a constant estimation of *eta* (0.5) to the fit parameters\n for each peak, and the corresponding constraint.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM, eta*.\n Constraint for the eta parameter can be set to QUOTED (0.--1.)\n by setting :attr:`config`['QuotedEtaFlag'] to ``True``.\n If this is not the case, the constraint code is set to FREE.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((4 * npeaks, 3), numpy.float)\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 4 * j + 2\n for i in range(npeaks):\n newpar.append(fittedpar[3 * i])\n newpar.append(fittedpar[3 * i + 1])\n newpar.append(fittedpar[3 * i + 2])\n newpar.append(0.5)\n # height\n newcons[4 * i, 0] = cons[3 * i, 0]\n newcons[4 * i, 1] = cons[3 * i, 1]\n newcons[4 * i, 2] = cons[3 * i, 2]\n # position\n newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]\n newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]\n 
newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]\n # fwhm\n newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]\n newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]\n newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]\n # Eta constrains\n newcons[4 * i + 3, 0] = CFREE\n newcons[4 * i + 3, 1] = 0\n newcons[4 * i + 3, 2] = 0\n if self.config['QuotedEtaFlag']:\n newcons[4 * i + 3, 0] = CQUOTED\n newcons[4 * i + 3, 1] = 0.0\n newcons[4 * i + 3, 2] = 1.0\n return newpar, newcons\n\n def estimate_splitpvoigt(self, x, y):\n \"\"\"Estimation of *Height, Position, FWHM1, FWHM2, eta* of peaks, for\n asymmetric pseudo-Voigt curves.\n\n This functions uses :meth:`estimate_height_position_fwhm`, then\n adds an identical FWHM2 parameter and a constant estimation of\n *eta* (0.5) to the fit parameters for each peak, and the corresponding\n constraints.\n\n Constraint for the eta parameter can be set to QUOTED (0.--1.)\n by setting :attr:`config`['QuotedEtaFlag'] to ``True``.\n If this is not the case, the constraint code is set to FREE.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Height, Position, FWHM1, FWHM2, eta*.\n \"\"\"\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((5 * npeaks, 3), numpy.float)\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 4 * j + 2\n for i in range(npeaks):\n # height\n newpar.append(fittedpar[3 * i])\n # position\n newpar.append(fittedpar[3 * i + 1])\n # fwhm1\n newpar.append(fittedpar[3 * i + 2])\n # fwhm2 estimate equal to fwhm1\n newpar.append(fittedpar[3 * i + 2])\n # eta\n newpar.append(0.5)\n # constraint codes\n # ----------------\n # height\n newcons[5 * i, 0] = cons[3 * i, 0]\n # position\n newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]\n # fwhm1\n newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]\n # fwhm2\n newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]\n # cons 1\n # ------\n newcons[5 * i, 1] = cons[3 * i, 1]\n newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]\n newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]\n newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]\n # cons 2\n # ------\n newcons[5 * i, 2] = cons[3 * i, 2]\n newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]\n newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]\n newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]\n\n if cons[3 * i + 2, 0] == CFACTOR:\n # fwhm2 connstraint depends on fwhm1\n newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1\n # eta constraints\n newcons[5 * i + 4, 0] = CFREE\n newcons[5 * i + 4, 1] = 0\n newcons[5 * i + 4, 2] = 0\n if self.config['QuotedEtaFlag']:\n newcons[5 * i + 4, 0] = CQUOTED\n newcons[5 * i + 4, 1] = 0.0\n newcons[5 * i + 4, 2] = 1.0\n return newpar, newcons\n\n def estimate_apvoigt(self, x, y):\n \"\"\"Estimation of *Area, Position, FWHM1, eta* of peaks, for\n pseudo-Voigt curves.\n\n This functions uses :meth:`estimate_pvoigt`, then converts the height\n parameter to area.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *Area, Position, FWHM, eta*.\n \"\"\"\n fittedpar, cons = self.estimate_pvoigt(x, y)\n npeaks = 
len(fittedpar) // 4\n # Assume 50% of the area is determined by the gaussian and 50% by\n # the Lorentzian.\n for i in range(npeaks):\n height = fittedpar[4 * i]\n fwhm = fittedpar[4 * i + 2]\n fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\\\n 0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n ) * numpy.sqrt(2 * numpy.pi)\n return fittedpar, cons\n\n def estimate_ahypermet(self, x, y):\n \"\"\"Estimation of *area, position, fwhm, st_area_r, st_slope_r,\n lt_area_r, lt_slope_r, step_height_r* of peaks, for hypermet curves.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each peak are:\n *area, position, fwhm, st_area_r, st_slope_r,\n lt_area_r, lt_slope_r, step_height_r* .\n \"\"\"\n yscaling = self.config.get('Yscaling', 1.0)\n if yscaling == 0:\n yscaling = 1.0\n fittedpar, cons = self.estimate_height_position_fwhm(x, y)\n npeaks = len(fittedpar) // 3\n newpar = []\n newcons = numpy.zeros((8 * npeaks, 3), numpy.float)\n main_peak = 0\n # find out related parameters proper index\n if not self.config['NoConstraintsFlag']:\n if self.config['SameFwhmFlag']:\n j = 0\n # get the index of the free FWHM\n for i in range(npeaks):\n if cons[3 * i + 2, 0] != 4:\n j = i\n for i in range(npeaks):\n if i != j:\n cons[3 * i + 2, 1] = 8 * j + 2\n main_peak = j\n for i in range(npeaks):\n if fittedpar[3 * i] > fittedpar[3 * main_peak]:\n main_peak = i\n\n for i in range(npeaks):\n height = fittedpar[3 * i]\n position = fittedpar[3 * i + 1]\n fwhm = fittedpar[3 * i + 2]\n area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n ) * numpy.sqrt(2 * numpy.pi)\n # the gaussian parameters\n newpar.append(area)\n newpar.append(position)\n newpar.append(fwhm)\n # print \"area, pos , fwhm = \",area,position,fwhm\n # Avoid zero derivatives because of not calculating contribution\n g_term = 1\n st_term = 1\n lt_term = 1\n step_term = 1\n if self.config['HypermetTails'] != 0:\n g_term = self.config['HypermetTails'] & 1\n st_term = (self.config['HypermetTails'] >> 1) & 1\n lt_term = (self.config['HypermetTails'] >> 2) & 1\n step_term = (self.config['HypermetTails'] >> 3) & 1\n if g_term == 0:\n # fix the gaussian parameters\n newcons[8 * i, 0] = CFIXED\n newcons[8 * i + 1, 0] = CFIXED\n newcons[8 * i + 2, 0] = CFIXED\n # the short tail parameters\n if ((area * yscaling) <\n self.config['MinGaussArea4ShortTail']) | \\\n (st_term == 0):\n newpar.append(0.0)\n newpar.append(0.0)\n newcons[8 * i + 3, 0] = CFIXED\n newcons[8 * i + 3, 1] = 0.0\n newcons[8 * i + 3, 2] = 0.0\n newcons[8 * i + 4, 0] = CFIXED\n newcons[8 * i + 4, 1] = 0.0\n newcons[8 * i + 4, 2] = 0.0\n else:\n newpar.append(self.config['InitialShortTailAreaRatio'])\n newpar.append(self.config['InitialShortTailSlopeRatio'])\n newcons[8 * i + 3, 0] = CQUOTED\n newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']\n newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']\n newcons[8 * i + 4, 0] = CQUOTED\n newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']\n newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']\n # the long tail parameters\n if ((area * yscaling) <\n self.config['MinGaussArea4LongTail']) | \\\n (lt_term == 0):\n newpar.append(0.0)\n newpar.append(0.0)\n newcons[8 * i + 5, 0] = CFIXED\n newcons[8 * i + 5, 1] = 0.0\n newcons[8 * i + 5, 2] = 0.0\n newcons[8 * i + 6, 0] = CFIXED\n newcons[8 * i + 6, 1] = 0.0\n newcons[8 * i + 6, 2] = 0.0\n else:\n 
newpar.append(self.config['InitialLongTailAreaRatio'])\n newpar.append(self.config['InitialLongTailSlopeRatio'])\n newcons[8 * i + 5, 0] = CQUOTED\n newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']\n newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']\n newcons[8 * i + 6, 0] = CQUOTED\n newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']\n newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']\n # the step parameters\n if ((height * yscaling) <\n self.config['MinGaussHeight4StepTail']) | \\\n (step_term == 0):\n newpar.append(0.0)\n newcons[8 * i + 7, 0] = CFIXED\n newcons[8 * i + 7, 1] = 0.0\n newcons[8 * i + 7, 2] = 0.0\n else:\n newpar.append(self.config['InitialStepTailHeightRatio'])\n newcons[8 * i + 7, 0] = CQUOTED\n newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']\n newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']\n # if self.config['NoConstraintsFlag'] == 1:\n # newcons=numpy.zeros((8*npeaks, 3),numpy.float)\n if npeaks > 0:\n if g_term:\n if self.config['PositiveHeightAreaFlag']:\n for i in range(npeaks):\n newcons[8 * i, 0] = CPOSITIVE\n if self.config['PositiveFwhmFlag']:\n for i in range(npeaks):\n newcons[8 * i + 2, 0] = CPOSITIVE\n if self.config['SameFwhmFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 2, 0] = CFACTOR\n newcons[8 * i + 2, 1] = 8 * main_peak + 2\n newcons[8 * i + 2, 2] = 1.0\n if self.config['HypermetQuotedPositionFlag']:\n for i in range(npeaks):\n delta = self.config['DeltaPositionFwhmUnits'] * fwhm\n newcons[8 * i + 1, 0] = CQUOTED\n newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta\n newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta\n if self.config['SameSlopeRatioFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 4, 0] = CFACTOR\n newcons[8 * i + 4, 1] = 8 * main_peak + 4\n newcons[8 * i + 4, 2] = 1.0\n newcons[8 * i + 6, 0] = CFACTOR\n newcons[8 * i + 6, 1] = 8 * main_peak + 6\n newcons[8 * i + 6, 2] = 1.0\n if self.config['SameAreaRatioFlag']:\n for i in range(npeaks):\n if i != main_peak:\n newcons[8 * i + 3, 0] = CFACTOR\n newcons[8 * i + 3, 1] = 8 * main_peak + 3\n newcons[8 * i + 3, 2] = 1.0\n newcons[8 * i + 5, 0] = CFACTOR\n newcons[8 * i + 5, 1] = 8 * main_peak + 5\n newcons[8 * i + 5, 2] = 1.0\n return newpar, newcons\n\n def estimate_stepdown(self, x, y):\n \"\"\"Estimation of parameters for stepdown curves.\n\n The functions estimates gaussian parameters for the derivative of\n the data, takes the largest gaussian peak and uses its estimated\n parameters to define the center of the step and its fwhm. 
The\n estimated amplitude returned is simply ``max(y) - min(y)``.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit newconstraints.\n Parameters to be estimated for each stepdown are:\n *height, centroid, fwhm* .\n \"\"\"\n crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]\n cutoff = len(crappyfilter) // 2\n y_deriv = numpy.convolve(y,\n crappyfilter,\n mode=\"valid\")\n\n # make the derivative's peak have the same amplitude as the step\n if max(y_deriv) > 0:\n y_deriv = y_deriv * max(y) / max(y_deriv)\n\n fittedpar, newcons = self.estimate_height_position_fwhm(\n x[cutoff:-cutoff], y_deriv)\n\n data_amplitude = max(y) - min(y)\n\n # use parameters from largest gaussian found\n if len(fittedpar):\n npeaks = len(fittedpar) // 3\n largest_index = 0\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n for i in range(npeaks):\n if fittedpar[3 * i] > largest[0]:\n largest_index = i\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n else:\n # no peak was found\n largest = [data_amplitude, # height\n x[len(x)//2], # center: middle of x range\n self.config[\"FwhmPoints\"] * (x[1] - x[0])] # fwhm: default value\n\n # Setup constrains\n newcons = numpy.zeros((3, 3), numpy.float)\n if not self.config['NoConstraintsFlag']:\n # Setup height constrains\n if self.config['PositiveHeightAreaFlag']:\n newcons[0, 0] = CPOSITIVE\n newcons[0, 1] = 0\n newcons[0, 2] = 0\n\n # Setup position constrains\n if self.config['QuotedPositionFlag']:\n newcons[1, 0] = CQUOTED\n newcons[1, 1] = min(x)\n newcons[1, 2] = max(x)\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n newcons[2, 0] = CPOSITIVE\n newcons[2, 1] = 0\n newcons[2, 2] = 0\n\n return largest, newcons\n\n def estimate_slit(self, x, y):\n \"\"\"Estimation of parameters for slit curves.\n\n The functions estimates stepup and stepdown parameters for the largest\n steps, and uses them for calculating the center (middle between stepup\n and stepdown), the height (maximum amplitude in data), the fwhm\n (distance between the up- and down-step centers) and the beamfwhm\n (average of FWHM for up- and down-step).\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each slit are:\n *height, position, fwhm, beamfwhm* .\n \"\"\"\n largestup, cons = self.estimate_stepup(x, y)\n largestdown, cons = self.estimate_stepdown(x, y)\n fwhm = numpy.fabs(largestdown[1] - largestup[1])\n beamfwhm = 0.5 * (largestup[2] + largestdown[1])\n beamfwhm = min(beamfwhm, fwhm / 10.0)\n beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))\n\n y_minus_bg = y - self.strip_bg(y)\n height = max(y_minus_bg)\n\n i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]\n xx = numpy.take(x, i1)\n position = (xx[0] + xx[-1]) / 2.0\n fwhm = xx[-1] - xx[0]\n largest = [height, position, fwhm, beamfwhm]\n cons = numpy.zeros((4, 3), numpy.float)\n # Setup constrains\n if not self.config['NoConstraintsFlag']:\n # Setup height constrains\n if self.config['PositiveHeightAreaFlag']:\n cons[0, 0] = CPOSITIVE\n cons[0, 1] = 0\n cons[0, 2] = 0\n\n # Setup position constrains\n if self.config['QuotedPositionFlag']:\n cons[1, 0] = CQUOTED\n cons[1, 1] = min(x)\n cons[1, 2] = max(x)\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n 
cons[2, 0] = CPOSITIVE\n cons[2, 1] = 0\n cons[2, 2] = 0\n\n # Setup positive FWHM constrains\n if self.config['PositiveFwhmFlag']:\n cons[3, 0] = CPOSITIVE\n cons[3, 1] = 0\n cons[3, 2] = 0\n return largest, cons\n\n def estimate_stepup(self, x, y):\n \"\"\"Estimation of parameters for a single step up curve.\n\n The functions estimates gaussian parameters for the derivative of\n the data, takes the largest gaussian peak and uses its estimated\n parameters to define the center of the step and its fwhm. The\n estimated amplitude returned is simply ``max(y) - min(y)``.\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n Parameters to be estimated for each stepup are:\n *height, centroid, fwhm* .\n \"\"\"\n crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]\n cutoff = len(crappyfilter) // 2\n y_deriv = numpy.convolve(y, crappyfilter, mode=\"valid\")\n if max(y_deriv) > 0:\n y_deriv = y_deriv * max(y) / max(y_deriv)\n\n fittedpar, cons = self.estimate_height_position_fwhm(\n x[cutoff:-cutoff], y_deriv)\n\n # for height, use the data amplitude after removing the background\n data_amplitude = max(y) - min(y)\n\n # find params of the largest gaussian found\n if len(fittedpar):\n npeaks = len(fittedpar) // 3\n largest_index = 0\n largest = [data_amplitude,\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n for i in range(npeaks):\n if fittedpar[3 * i] > largest[0]:\n largest_index = i\n largest = [fittedpar[3 * largest_index],\n fittedpar[3 * largest_index + 1],\n fittedpar[3 * largest_index + 2]]\n else:\n # no peak was found\n largest = [data_amplitude, # height\n x[len(x)//2], # center: middle of x range\n self.config[\"FwhmPoints\"] * (x[1] - x[0])] # fwhm: default value\n\n newcons = numpy.zeros((3, 3), numpy.float)\n # Setup constrains\n if not self.config['NoConstraintsFlag']:\n # Setup height constraints\n if self.config['PositiveHeightAreaFlag']:\n newcons[0, 0] = CPOSITIVE\n newcons[0, 1] = 0\n newcons[0, 2] = 0\n\n # Setup position constraints\n if self.config['QuotedPositionFlag']:\n newcons[1, 0] = CQUOTED\n newcons[1, 1] = min(x)\n newcons[1, 2] = max(x)\n\n # Setup positive FWHM constraints\n if self.config['PositiveFwhmFlag']:\n newcons[2, 0] = CPOSITIVE\n newcons[2, 1] = 0\n newcons[2, 2] = 0\n\n return largest, newcons\n\n def estimate_periodic_gauss(self, x, y):\n \"\"\"Estimation of parameters for periodic gaussian curves:\n *number of peaks, distance between peaks, height, position of the\n first peak, fwhm*\n\n The functions detects all peaks, then computes the parameters the\n following way:\n\n - *distance*: average of distances between detected peaks\n - *height*: average height of detected peaks\n - *fwhm*: fwhm of the highest peak (in number of samples) if\n field ``'AutoFwhm'`` in :attr:`config` is ``True``, else take\n the default value (field ``'FwhmPoints'`` in :attr:`config`)\n\n :param x: Array of abscissa values\n :param y: Array of ordinate values (``y = f(x)``)\n :return: Tuple of estimated fit parameters and fit constraints.\n \"\"\"\n yscaling = self.config.get('Yscaling', 1.0)\n if yscaling == 0:\n yscaling = 1.0\n\n bg = self.strip_bg(y)\n\n if self.config['AutoFwhm']:\n search_fwhm = guess_fwhm(y)\n else:\n search_fwhm = int(float(self.config['FwhmPoints']))\n search_sens = float(self.config['Sensitivity'])\n\n if search_fwhm < 3:\n search_fwhm = 3\n\n if search_sens < 1:\n search_sens = 1\n\n if len(y) > 1.5 * search_fwhm:\n peaks 
= peak_search(yscaling * y, fwhm=search_fwhm,\n sensitivity=search_sens)\n else:\n peaks = []\n npeaks = len(peaks)\n if not npeaks:\n fittedpar = []\n cons = numpy.zeros((len(fittedpar), 3), numpy.float)\n return fittedpar, cons\n\n fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]\n\n # The number of peaks\n fittedpar[0] = npeaks\n\n # The separation between peaks in x units\n delta = 0.0\n height = 0.0\n for i in range(npeaks):\n height += y[int(peaks[i])] - bg[int(peaks[i])]\n if i != npeaks - 1:\n delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])\n\n # delta between peaks\n if npeaks > 1:\n fittedpar[1] = delta / (npeaks - 1)\n\n # starting height\n fittedpar[2] = height / npeaks\n\n # position of the first peak\n fittedpar[3] = x[int(peaks[0])]\n\n # Estimate the fwhm\n fittedpar[4] = search_fwhm\n\n # setup constraints\n cons = numpy.zeros((5, 3), numpy.float)\n cons[0, 0] = CFIXED # the number of gaussians\n if npeaks == 1:\n cons[1, 0] = CFIXED # the delta between peaks\n else:\n cons[1, 0] = CFREE\n j = 2\n # Setup height area constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveHeightAreaFlag']:\n # POSITIVE = 1\n cons[j, 0] = CPOSITIVE\n cons[j, 1] = 0\n cons[j, 2] = 0\n j += 1\n\n # Setup position constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['QuotedPositionFlag']:\n # QUOTED = 2\n cons[j, 0] = CQUOTED\n cons[j, 1] = min(x)\n cons[j, 2] = max(x)\n j += 1\n\n # Setup positive FWHM constrains\n if not self.config['NoConstraintsFlag']:\n if self.config['PositiveFwhmFlag']:\n # POSITIVE=1\n cons[j, 0] = CPOSITIVE\n cons[j, 1] = 0\n cons[j, 2] = 0\n j += 1\n return fittedpar, cons\n\n def configure(self, **kw):\n \"\"\"Add new / unknown keyword arguments to :attr:`config`,\n update entries in :attr:`config` if the parameter name is a existing\n key.\n\n :param kw: Dictionary of keyword arguments.\n :return: Configuration dictionary :attr:`config`\n \"\"\"\n if not kw.keys():\n return self.config\n for key in kw.keys():\n notdone = 1\n # take care of lower / upper case problems ...\n for config_key in self.config.keys():\n if config_key.lower() == key.lower():\n self.config[config_key] = kw[key]\n notdone = 0\n if notdone:\n self.config[key] = kw[key]\n return self.config\n\nfitfuns = FitTheories()\n\nTHEORY = OrderedDict((\n ('Gaussians',\n FitTheory(description='Gaussian functions',\n function=functions.sum_gauss,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_height_position_fwhm,\n configure=fitfuns.configure)),\n ('Lorentz',\n FitTheory(description='Lorentzian functions',\n function=functions.sum_lorentz,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_height_position_fwhm,\n configure=fitfuns.configure)),\n ('Area Gaussians',\n FitTheory(description='Gaussian functions (area)',\n function=functions.sum_agauss,\n parameters=('Area', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_agauss,\n configure=fitfuns.configure)),\n ('Area Lorentz',\n FitTheory(description='Lorentzian functions (area)',\n function=functions.sum_alorentz,\n parameters=('Area', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_alorentz,\n configure=fitfuns.configure)),\n ('Pseudo-Voigt Line',\n FitTheory(description='Pseudo-Voigt functions',\n function=functions.sum_pvoigt,\n parameters=('Height', 'Position', 'FWHM', 'Eta'),\n estimate=fitfuns.estimate_pvoigt,\n configure=fitfuns.configure)),\n ('Area Pseudo-Voigt',\n FitTheory(description='Pseudo-Voigt functions (area)',\n function=functions.sum_apvoigt,\n 
parameters=('Area', 'Position', 'FWHM', 'Eta'),\n estimate=fitfuns.estimate_apvoigt,\n configure=fitfuns.configure)),\n ('Split Gaussian',\n FitTheory(description='Asymmetric gaussian functions',\n function=functions.sum_splitgauss,\n parameters=('Height', 'Position', 'LowFWHM',\n 'HighFWHM'),\n estimate=fitfuns.estimate_splitgauss,\n configure=fitfuns.configure)),\n ('Split Lorentz',\n FitTheory(description='Asymmetric lorentzian functions',\n function=functions.sum_splitlorentz,\n parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),\n estimate=fitfuns.estimate_splitgauss,\n configure=fitfuns.configure)),\n ('Split Pseudo-Voigt',\n FitTheory(description='Asymmetric pseudo-Voigt functions',\n function=functions.sum_splitpvoigt,\n parameters=('Height', 'Position', 'LowFWHM',\n 'HighFWHM', 'Eta'),\n estimate=fitfuns.estimate_splitpvoigt,\n configure=fitfuns.configure)),\n ('Step Down',\n FitTheory(description='Step down function',\n function=functions.sum_stepdown,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_stepdown,\n configure=fitfuns.configure)),\n ('Step Up',\n FitTheory(description='Step up function',\n function=functions.sum_stepup,\n parameters=('Height', 'Position', 'FWHM'),\n estimate=fitfuns.estimate_stepup,\n configure=fitfuns.configure)),\n ('Slit',\n FitTheory(description='Slit function',\n function=functions.sum_slit,\n parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),\n estimate=fitfuns.estimate_slit,\n configure=fitfuns.configure)),\n ('Atan',\n FitTheory(description='Arctan step up function',\n function=functions.atan_stepup,\n parameters=('Height', 'Position', 'Width'),\n estimate=fitfuns.estimate_stepup,\n configure=fitfuns.configure)),\n ('Hypermet',\n FitTheory(description='Hypermet functions',\n function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet\n parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',\n 'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),\n estimate=fitfuns.estimate_ahypermet,\n configure=fitfuns.configure)),\n # ('Periodic Gaussians',\n # FitTheory(description='Periodic gaussian functions',\n # function=functions.periodic_gauss,\n # parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),\n # estimate=fitfuns.estimate_periodic_gauss,\n # configure=fitfuns.configure))\n ('Degree 2 Polynomial',\n FitTheory(description='Degree 2 polynomial'\n '\\ny = a*x^2 + b*x +c',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c'],\n estimate=fitfuns.estimate_quadratic)),\n ('Degree 3 Polynomial',\n FitTheory(description='Degree 3 polynomial'\n '\\ny = a*x^3 + b*x^2 + c*x + d',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd'],\n estimate=fitfuns.estimate_cubic)),\n ('Degree 4 Polynomial',\n FitTheory(description='Degree 4 polynomial'\n '\\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd', 'e'],\n estimate=fitfuns.estimate_quartic)),\n ('Degree 5 Polynomial',\n FitTheory(description='Degree 5 polynomial'\n '\\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',\n function=fitfuns.poly,\n parameters=['a', 'b', 'c', 'd', 'e', 'f'],\n estimate=fitfuns.estimate_quintic)),\n))\n\"\"\"Dictionary of fit theories: fit functions and their associated estimation\nfunction, parameters list, configuration function and description.\n\"\"\"\n\n\ndef test(a):\n from silx.math.fit import fitmanager\n x = numpy.arange(1000).astype(numpy.float)\n p = [1500, 100., 50.0,\n 1500, 700., 50.0]\n y_synthetic = functions.sum_gauss(x, *p) + 1\n\n fit = fitmanager.FitManager(x, 
y_synthetic)\n fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],\n a.estimate_height_position_fwhm)\n fit.settheory('Gaussians')\n fit.setbackground('Linear')\n\n fit.estimate()\n fit.runfit()\n\n y_fit = fit.gendata()\n\n print(\"Fit parameter names: %s\" % str(fit.get_names()))\n print(\"Theoretical parameters: %s\" % str(numpy.append([1, 0], p)))\n print(\"Fitted parameters: %s\" % str(fit.get_fitted_parameters()))\n\n try:\n from silx.gui import qt\n from silx.gui.plot import plot1D\n app = qt.QApplication([])\n\n # Offset of 1 to see the difference in log scale\n plot1D(x, (y_synthetic + 1, y_fit), \"Input data + 1, Fit\")\n\n app.exec_()\n except ImportError:\n _logger.warning(\"Unable to load qt binding, can't plot results.\")\n\n\nif __name__ == \"__main__\":\n test(fitfuns)\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like",
"numpy.ones",
"numpy.fabs",
"numpy.append",
"numpy.zeros",
"numpy.take",
"numpy.concatenate",
"numpy.arange",
"numpy.poly1d",
"numpy.log",
"numpy.array",
"numpy.polyfit",
"numpy.convolve",
"numpy.nonzero"
]
] |
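The `estimate_stepdown` / `estimate_stepup` docstrings in the row above describe the same trick: convolve the data with a short derivative kernel, rescale it to the data amplitude, and fit the largest resulting peak to locate the step centre and width. Below is a minimal numpy-only sketch of that idea; the `argmax`-based peak pick is a deliberate simplification of silx's `estimate_height_position_fwhm`, so treat the whole helper as an illustrative assumption rather than the library's behaviour.

```python
import numpy as np

def rough_step_estimate(x, y, kernel=(-0.25, -0.75, 0.0, 0.75, 0.25)):
    """Rough (height, centre, fwhm) estimate for a step-down edge (sketch only)."""
    kernel = np.asarray(kernel, dtype=float)
    cutoff = len(kernel) // 2
    y_deriv = np.convolve(y, kernel, mode="valid")        # derivative-like signal
    if y_deriv.max() > 0:                                  # rescale to the data amplitude
        y_deriv = y_deriv * np.max(y) / y_deriv.max()
    x_trim = np.asarray(x, dtype=float)[cutoff:-cutoff]
    i_max = int(np.argmax(y_deriv))                        # crude stand-in for a Gaussian peak search
    above = np.nonzero(y_deriv >= 0.5 * y_deriv[i_max])[0]
    if above.size >= 2:
        fwhm = x_trim[above[-1]] - x_trim[above[0]]        # half-maximum width of the derivative peak
    else:
        fwhm = abs(x_trim[1] - x_trim[0])
    return np.max(y) - np.min(y), x_trim[i_max], fwhm

x = np.linspace(0.0, 10.0, 200)
y = np.where(x < 5.0, 1.0, 0.0)                            # ideal step down at x = 5
print(rough_step_estimate(x, y))                           # ~ (1.0, 5.0, small fwhm of the derivative peak)
```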
jwallnoefer/multisat_qrepeater_sim_archive | [
"69b4c242fb760cf195871f38b3172d4dfd26c01a"
] | [
"verificator/Maps.py"
] | [
"\"\"\"\nThe maps that model the different processes in the QKD return for input that is diagonal in Bell-basis a diagonal output.\nTo reduce calculations I determined in the scipt \"How many numbers for state\" the effect of the maps on the diagonal elements\n\"\"\"\nimport numpy as np\nimport functools\n\n\"\"\"These are some helper functions. a-d represents the diagonal elements of the first state, e-h the ones of the second state\"\"\"\n\nz_rot = lambda a, b, c, d: np.array([b, a, d, c])\ny_rot = lambda a, b, c, d: np.array([d, c, b, a])\n\nperf_dist = lambda a, b, c, d, e, f, g, h: np.array(\n [a * e, d * h, a * g, d * f, d * e, a * h, d * g, a * f, c * g, b * f, c * e, b * h, b * g, c * f, b * e, c * h])\ndc0 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(\n [ae + af, be + bf, ce + cf, de + df])\ndc1 = lambda ae, af, ag, ah, be, bf, bg, bh, ce, cf, cg, ch, de, df, dg, dh: np.array(\n [ae + af + ag + ah, be + bf + bg + bh, ce + cf + cg + ch, de + df + dg + dh])\n\"\"\"p is the ideality of the map, q = 1-p\"\"\"\nmixnswap = lambda p, q, a, b, c, d, e, f, g, h: np.array([a * e * p + b * f * p + c * g * p + d * h * p + q / 4,\n a * f * p + b * e * p + c * h * p + d * g * p + q / 4,\n a * g * p + b * h * p + c * e * p + d * f * p + q / 4,\n a * h * p + b * g * p + c * f * p + d * e * p + q / 4])\n\n\ndef dp_sing(t, T, a, b, c, d):\n \"\"\" Calculate the state after dephasing for one memory for time t.\n Parameters\n ----------\n t : float \n time of dephasig\n T : float\n dephasing time of the memory\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal elements of the state after dephasing\n\n \"\"\"\n lam = (1 - np.exp(-t / (2 * T))) / 2\n return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()\n\n\ndef dp_doub(t, T, a, b, c, d):\n \"\"\" Calculate the state after dephasing for time t1 for one memory and t2 for the other memory.\n Parameters\n ----------\n t : float \n time of dephasig\n T : float\n dephasing time of the memories\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal elements of the state after dephasing\n\n \"\"\"\n lam = (1 - np.exp(- t / (2 * T))) / 2\n lam = lam + lam - 2 * lam**2\n return ((1 - lam) * np.array([a, b, c, d]) + lam * z_rot(a, b, c, d)).tolist()\n\n\ndef coupl(em, a, b, c, d):\n \"\"\" Calculate the state after imperfect coupling to the fibre.\n Parameters\n ----------\n em1, em2 : float \n misalignment errors of the stations (0-1)\n a-d: float\n diagonal elements of the state\n\n Returns\n -------\n list of diagonal element of the state after coupling\n\n \"\"\"\n p = 1 - em\n q = em\n return (p * np.array([a, b, c, d]) + q * y_rot(a, b, c, d)).tolist()\n\n\[email protected]_cache(maxsize=2048)\ndef distil(lam, pd1, pd2, a, b, c, d, e, f, g, h):\n \"\"\" Calculate the state after imperfect entanglement distillation and dephasing.\n Parameters\n ----------\n lam1, lam2 : float\n idealities of the distillation process of the stations\n pd1, pd2 : float\n probabilities for dark counts in the measurement for the stations\n a-d: float\n diagonal elements of the fist state\n e-h: float\n diagonal elements of the second state\n\n Returns\n -------\n list of diagonal element of the state after dephasing, probability for acceptance of the distillation result\n\n \"\"\"\n p0 = (1 - pd1) * (1 - pd2) # probability for zero dark counts\n # probability for one or two dark counts\n p1 = 0.5 * (pd1 + pd2 - pd1 * pd2)\n mixed = (lam * 
perf_dist(a, b, c, d, e, f, g, h) + (1 - lam) * np.ones((16)) /\n 16).tolist() # mixing the result of the perfect map with abs mixed state\n # state times the accapance probability\n unnormed = p0 * dc0(*mixed) + p1 * dc1(*mixed)\n trace = np.sum(unnormed) # acceptance probability\n normed = (unnormed / trace).tolist() # normalising the state\n return normed, trace\n\n\ndef swap(lam, a, b, c, d, e, f, g, h):\n \"\"\" Calculate the state after imperfect entanglement swapping and dephasing.\n Parameters\n ----------\n lam: float\n idealities of the swapping process of the middle station\n a-d: float\n diagonal elements of the fist state\n e-h: float\n diagonal elements of the second state\n\n Returns\n -------\n list of diagonal element of the state after swapping\n\n \"\"\"\n swapped = mixnswap(lam, 1 - lam, a, b, c, d, e, f, g, h)\n normed = swapped / np.sum(swapped) # normalising the state\n return np.array(normed).tolist()\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.ones",
"numpy.exp"
]
] |
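The docstrings in the row above define Z-dephasing of Bell-diagonal states through the flip probability `lam = (1 - exp(-t / (2 * T))) / 2` and the diagonal permutation `(a, b, c, d) -> (b, a, d, c)`; for two memories the flip probabilities combine as `2 * lam - 2 * lam**2`. A small self-contained restatement of those formulas, assuming (as the source does) that both memories share the same dephasing time `T`:

```python
import numpy as np

def dephase_bell_diagonal(diag, t, T, two_memories=False):
    """Return the Bell-diagonal elements (a, b, c, d) after Z-dephasing for time t."""
    lam = (1.0 - np.exp(-t / (2.0 * T))) / 2.0      # single-memory flip probability
    if two_memories:
        lam = 2.0 * lam - 2.0 * lam ** 2            # exactly one of the two memories flips
    a, b, c, d = diag
    flipped = np.array([b, a, d, c])                # a Z error permutes the Bell basis this way
    return (1.0 - lam) * np.array(diag, dtype=float) + lam * flipped

# starting from a perfect |phi+>-like diagonal, a small fraction of weight moves to the flipped element
print(dephase_bell_diagonal([1.0, 0.0, 0.0, 0.0], t=1.0, T=10.0, two_memories=True))
```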
aayushkafle/implicit_alignment | [
"4835a8a5acc4b30daf7e1c95195f160e76306cd1"
] | [
"ai/domain_adaptation/utils/vis.py"
] | [
"import numpy as np\nfrom ai.domain_adaptation.datasets import image_index\nfrom ai.domain_adaptation.utils import np_utils\nfrom IPython.display import display, Image\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n\ndef load_data_for_vis(prob_path, target_domain_file, dataset_dir):\n domain_info = image_index.parse_domain_file(target_domain_file, dataset_dir)\n yhat_info = np_utils.parse_predictions_from_pickle(prob_path)\n\n return domain_info, yhat_info\n\n\ndef visulize_confidence(prob_path, target_domain_file, dataset_dir, cls_id):\n domain_info, yhat_info = load_data_for_vis(prob_path, target_domain_file, dataset_dir)\n vis_confident_predictions(cls_id, None, domain_info, yhat_info)\n\n\ndef vis_confident_predictions(cls_id, top_k=20, domain_info=None, yhat_info=None):\n sorted_id_indices = np_utils.retrieve_sorted_indices_for_one_cls(cls_id, yhat_info)\n\n for ith, example_id in enumerate(sorted_id_indices):\n filename, label = domain_info.image_path_label_tuples[example_id]\n print(f'{domain_info.label_description_dict[label]}, P {yhat_info.prob[example_id, cls_id]:.3}')\n img = Image(filename=filename, width=150, height=150)\n display(img)\n if top_k is not None and ith > top_k:\n break\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # np.set_printoptions(precision=3)\n\n fig, ax = plt.subplots(figsize=(20, 20))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n # ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.savefig(f'./plots/confusion_matrix{title}.pdf')\n return ax\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"numpy.arange",
"matplotlib.pyplot.subplots"
]
] |
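`plot_confusion_matrix` in the row above row-normalizes the sklearn confusion matrix before drawing it. The normalization itself is just a per-row division, shown here in isolation; the toy labels are made up for illustration and the helper assumes every true class occurs at least once (otherwise a row sum is zero).

```python
from sklearn.metrics import confusion_matrix

def normalized_confusion(y_true, y_pred):
    """Row-normalized confusion matrix: each true-class row sums to 1."""
    cm = confusion_matrix(y_true, y_pred)
    return cm.astype("float") / cm.sum(axis=1, keepdims=True)

print(normalized_confusion([0, 1, 2, 2], [0, 1, 2, 1]))
# [[1.  0.  0. ]
#  [0.  1.  0. ]
#  [0.  0.5 0.5]]
```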
ibadkureshi/tnk-locationallocation | [
"b06abcb7bf8675b13e4c2e4fe419afb5ee11018f"
] | [
"pmedian/views.py"
] | [
"from django.shortcuts import render\nfrom pmedian.tasks import *\nfrom pandas import errors\nfrom pmedapp.common.utilities import *\nimport json\nimport pandas as pd\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.datastructures import MultiValueDictKeyError\nimport glob\nimport os.path\n\n\n@csrf_exempt\ndef extract_csv(request):\n \"\"\"\n Getting a (two-column) csv and returning it as a json\n **Expected a lat/lon csv with headers\n \"\"\"\n if request.method == 'POST' and request.FILES['myfile']:\n\n if not validate_upload(request, '.csv'):\n return HttpResponseBadRequest(\"Data error: Please provide a valid csv file\")\n\n try:\n # expecting csv with headers\n df = pd.read_csv(request.FILES['myfile'])\n if column_numeric(df[df.columns[0]]) and column_numeric(df[df.columns[1]]) and not df.isnull().values.any():\n df.columns = ['latitude', 'longitude']\n return HttpResponse(df.to_json(orient='records'))\n else:\n return HttpResponseBadRequest(\"Data input error: Ensure data is numeric and no missing values exist\")\n\n except errors.EmptyDataError:\n return HttpResponse('CSV file is empty')\n\n else:\n # In case of GET request, just show the form\n return render(request, 'file_upload.html', locals())\n\n\n@csrf_exempt\ndef create_task(request):\n if request.method == 'POST':\n try:\n args = json.loads(request.POST.get('data')) # error checking\n input_df = pd.read_csv(request.FILES['myfile'], header=0)\n task = p_median_calculation_task.delay(input_df.to_json(), args)\n response_data = {'task_id': str(task)}\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except MultiValueDictKeyError:\n return HttpResponseBadRequest(\"Please provide the correct input data\")\n else:\n return HttpResponse(status=405, reason=\"Method not allowed\")\n\n\n@csrf_exempt\ndef get_task(request):\n \"\"\"\n Return the status of a task given it's id\n \"\"\"\n try:\n task_id = request.GET['task-id']\n result = AsyncResult(task_id)\n result_dct = {result.task_id: {\n 'status': result.status, 'date_done': str(result.date_done)}}\n result_dct[result.task_id]['result'] = result.result\n\n try:\n file = glob.glob(\"output/*\"+str(result)+\".json\")[0]\n result_dct['result_location'] = \"http://localhost:8000/pmedian/get-file?filename=\" + file[7:]\n except IndexError:\n result_dct['result_location'] = 'Calculation ongoing'\n\n return HttpResponse(json.dumps(result_dct))\n\n except KeyError:\n return HttpResponseBadRequest(\"Please provide a valid task-id\")\n\n\n@csrf_exempt\ndef get_all_tasks(request):\n \"\"\"\n Get all celery tasks from and return id, status (json)\n \"\"\"\n\n path = \"/tmp/results/celery-task-meta-*\"\n results = (glob.glob(path))\n\n result_array = []\n for result in results:\n asyng_result = AsyncResult(result[len(path) - 1:])\n result_dct = {}\n result_dct['id'] = result[len(path) - 1:]\n result_dct['status'] = asyng_result.status\n result_dct['date_done'] = str(asyng_result.date_done)\n try:\n file = glob.glob(\"output/*\"+str(asyng_result)+\".json\")[0]\n result_dct['result'] = \"http://localhost:8000/pmedian/get-file?filename=\" + file[7:]\n with open(file) as f:\n result_dct['name'] = json.load(f)['name']\n except IndexError:\n result_dct['result'] = 'Calculation ongoing'\n\n result_array.append(result_dct)\n\n return HttpResponse(json.dumps(result_array))\n\n\n@csrf_exempt\ndef get_file(request):\n \"\"\"\n Download output file to disk.\n \"\"\"\n return download_output_file(request)\n"
] | [
[
"pandas.read_csv"
]
] |
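`extract_csv` in the row above accepts a headered two-column lat/lon upload, checks that both columns are numeric with no missing values, and returns the frame as JSON records. A framework-free sketch of that validation with plain pandas; `column_numeric` is a project helper not shown here, so the dtype check below is a stand-in assumption.

```python
import io
import pandas as pd

def latlon_csv_to_json(file_like):
    """Validate a headered two-column numeric CSV and return JSON records."""
    df = pd.read_csv(file_like)
    if df.shape[1] != 2 or df.isnull().values.any():
        raise ValueError("expected exactly two complete columns")
    if not all(pd.api.types.is_numeric_dtype(df[c]) for c in df.columns):
        raise ValueError("both columns must be numeric")
    df.columns = ["latitude", "longitude"]
    return df.to_json(orient="records")

print(latlon_csv_to_json(io.StringIO("lat,lon\n40.7,-74.0\n51.5,-0.1\n")))
```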
brentyi/multimodalfilter | [
"210b0e241120e0fbbeaef5e478bab36ffe1e159d"
] | [
"crossmodal/door_models/layers.py"
] | [
"import torch\nimport torch.nn as nn\nfrom fannypack.nn import resblocks\n\nstate_dim = 3\ncontrol_dim = 7\nobs_pos_dim = 3\nobs_sensors_dim = 7\n\n\ndef state_layers(units: int) -> nn.Module:\n \"\"\"Create a state encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(state_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef control_layers(units: int) -> nn.Module:\n \"\"\"Create a control command encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(control_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_image_layers(units: int) -> nn.Module:\n \"\"\"Create an image encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n resblocks.Conv2d(channels=32, kernel_size=3),\n nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, padding=1),\n nn.Flatten(), # 32 * 32 * 8\n nn.Linear(8 * 32 * 32, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_pos_layers(units: int) -> nn.Module:\n \"\"\"Create an end effector position encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(obs_pos_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n\n\ndef observation_sensors_layers(units: int) -> nn.Module:\n \"\"\"Create an F/T sensor encoder block.\n\n Args:\n units (int): # of hidden units in network layers.\n\n Returns:\n nn.Module: Encoder block.\n \"\"\"\n return nn.Sequential(\n nn.Linear(obs_sensors_dim, units),\n nn.ReLU(inplace=True),\n resblocks.Linear(units),\n )\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.Flatten"
]
] |
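The encoder helpers in the row above all follow the same Linear -> ReLU -> residual-Linear pattern built on fannypack's `resblocks`. A dependency-free approximation using plain `torch.nn`; the second `Linear` stands in for `resblocks.Linear(units)`, so this is a simplification, not the repo's exact block.

```python
import torch
from torch import nn

def mlp_encoder(in_dim: int, units: int) -> nn.Module:
    """Two-layer MLP encoder, a simplified stand-in for the blocks above."""
    return nn.Sequential(
        nn.Linear(in_dim, units),
        nn.ReLU(inplace=True),
        nn.Linear(units, units),   # plain Linear instead of resblocks.Linear(units)
    )

# e.g. encode a batch of 7-D F/T sensor readings into 64 features
print(mlp_encoder(7, 64)(torch.zeros(4, 7)).shape)   # torch.Size([4, 64])
```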
ojInc/google-research | [
"650580cbf928aa640bf39897c5758ddb71b68a51"
] | [
"kws_streaming/train/model_train_eval.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple speech recognition to spot a limited number of keywords.\n\nIt is based on tensorflow/examples/speech_commands\nThis is a self-contained example script that will train a very basic audio\nrecognition model in TensorFlow. It downloads the necessary training data and\nruns with reasonable defaults to train within a few hours even only using a CPU.\n\nIt is intended as an introduction to using neural networks for audio\nrecognition, and is not a full speech recognition system. This network uses a\nkeyword detection style to spot discrete words from a small vocabulary,\nconsisting of\n\"yes\", \"no\", \"up\", \"down\", \"left\", \"right\", \"on\", \"off\", \"stop\", and \"go\".\n\nTo run the training process, use:\n\nbazel run model_train_eval.py\n\nThis will write out checkpoints to /tmp/speech_commands_train/, and will\ndownload over 1GB of open source training data, so you'll need enough free space\nand a good internet connection. The default data is a collection of thousands of\none-second .wav files, each containing one spoken word. This data set is\ncollected from https://aiyprojects.withgoogle.com/open_speech_recording, please\nconsider contributing to help improve this and other models!\n\nAs training progresses, it will print out its accuracy metrics, which should\nrise above 90% by the end. Once it's complete, it will produce\nKeras, SavedModel, TFLite and graphdef representations.\n\nIf you want to train on your own data, you'll need to create .wavs with your\nrecordings, all at a consistent length, and then arrange them into subfolders\norganized by label. For example, here's a possible file structure:\n\ndata >\n up >\n audio_0.wav\n audio_1.wav\n down >\n audio_2.wav\n audio_3.wav\n other>\n audio_4.wav\n audio_5.wav\n\nYou'll also need to tell the script what labels to look for, using the\n`--wanted_words` argument. 
In this case, 'up,down' might be what you want, and\nthe audio in the 'other' folder would be used to train an 'unknown' category.\n\nTo pull this all together, you'd run:\n\nbazel run tensorflow/examples/speech_commands:train --\n--data_dir /data --wanted_words up,down\n\nAbove script will automatically split data into training/validation and testing.\n\nIf you prefer to split the data on your own, then you should set flag\n\"--split_data 0\" and prepare folders with structure:\n\ndata >\n training >\n up >\n audio_0.wav\n audio_1.wav\n down >\n audio_2.wav\n audio_3.wav\n validation >\n up >\n audio_6.wav\n audio_7.wav\n down >\n audio_8.wav\n audio_9.wav\n testing >\n up >\n audio_12.wav\n audio_13.wav\n down >\n audio_14.wav\n audio_15.wav\n _background_noise_ >\n audio_18.wav\n\nTo pull this all together, you'd run:\n\nbazel run tensorflow/examples/speech_commands:train --\n--data_dir /data --wanted_words up,down --split_data 0\n\n\"\"\"\nimport json\nimport os\nimport sys\nfrom absl import logging\nimport tensorflow.compat.v1 as tf\nfrom kws_streaming.layers import modes\nimport kws_streaming.models.att_mh_rnn as att_mh_rnn\nimport kws_streaming.models.att_rnn as att_rnn\nimport kws_streaming.models.cnn as cnn\nimport kws_streaming.models.crnn as crnn\nimport kws_streaming.models.dnn as dnn\nimport kws_streaming.models.dnn_raw as dnn_raw\nimport kws_streaming.models.ds_cnn as ds_cnn\nimport kws_streaming.models.ds_tc_resnet as ds_tc_resnet\nimport kws_streaming.models.gru as gru\nimport kws_streaming.models.inception as inception\nimport kws_streaming.models.inception_resnet as inception_resnet\nimport kws_streaming.models.lstm as lstm\nimport kws_streaming.models.mobilenet as mobilenet\nimport kws_streaming.models.mobilenet_v2 as mobilenet_v2\nimport kws_streaming.models.svdf as svdf\nimport kws_streaming.models.svdf_resnet as svdf_resnet\nimport kws_streaming.models.tc_resnet as tc_resnet\nfrom kws_streaming.models.utils import parse\nimport kws_streaming.models.xception as xception\nfrom kws_streaming.train import base_parser\nfrom kws_streaming.train import model_flags\nfrom kws_streaming.train import train\nimport kws_streaming.train.test as test\n\nFLAGS = None\n\n\ndef main(_):\n # Update flags\n flags = model_flags.update_flags(FLAGS)\n\n if flags.train:\n # Create model folders where logs and model will be stored\n os.makedirs(flags.train_dir)\n os.mkdir(flags.summaries_dir)\n\n # Model training\n train.train(flags)\n else:\n if not os.path.isdir(flags.train_dir):\n raise ValueError('model is not trained set \"--train 1\" and retrain it')\n\n # write all flags settings into json\n with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:\n json.dump(flags.__dict__, f)\n\n # convert to SavedModel\n test.convert_model_saved(flags, 'non_stream',\n modes.Modes.NON_STREAM_INFERENCE)\n try:\n test.convert_model_saved(flags, 'stream_state_internal',\n modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TF streaming: %s', e)\n\n logging.info('run TF non streaming model accuracy evaluation')\n # with TF\n folder_name = 'tf'\n test.tf_non_stream_model_accuracy(flags, folder_name)\n\n # with TF.\n # We can apply non stream model on stream data, by running inference\n # every 200ms (for example), so that total latency will be similar with\n # streaming model which is executed every 20ms.\n # To measure the impact of sampling on model accuracy,\n # we introduce time_shift_ms during accuracy evaluation.\n # Convert 
milliseconds to samples:\n time_shift_samples = int(\n (flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)\n test.tf_non_stream_model_accuracy(\n flags,\n folder_name,\n time_shift_samples,\n accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')\n\n name2opt = {\n '': None,\n 'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],\n }\n\n for opt_name, optimizations in name2opt.items():\n\n if (opt_name and flags.feature_type == 'mfcc_tf' and\n flags.preprocess == 'raw'):\n logging.info('feature type mfcc_tf needs quantization aware training '\n 'for quantization - it is not implemented')\n continue\n\n folder_name = opt_name + 'tflite_non_stream'\n file_name = 'non_stream.tflite'\n mode = modes.Modes.NON_STREAM_INFERENCE\n test.convert_model_tflite(flags, folder_name, mode, file_name,\n optimizations=optimizations)\n test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)\n\n # these models are using bi-rnn, so they are non streamable by default\n # also models using striding or pooling are not supported for streaming now\n non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}\n\n model_is_streamable = True\n if flags.model_name in non_streamable_models:\n model_is_streamable = False\n # below models can use striding in time dimension,\n # but this is currently unsupported\n elif flags.model_name == 'cnn':\n for strides in parse(flags.cnn_strides):\n if strides[0] > 1:\n model_is_streamable = False\n break\n elif flags.model_name == 'ds_cnn':\n if parse(flags.cnn1_strides)[0] > 1:\n model_is_streamable = False\n for strides in parse(flags.dw2_strides):\n if strides[0] > 1:\n model_is_streamable = False\n break\n\n # if model can be streamed, then run conversion/evaluation in streaming mode\n if model_is_streamable:\n # ---------------- TF streaming model accuracy evaluation ----------------\n # Streaming model with external state evaluation using TF with state reset\n if not opt_name:\n logging.info('run TF evalution only without optimization/quantization')\n try:\n folder_name = 'tf'\n test.tf_stream_state_external_model_accuracy(\n flags,\n folder_name,\n accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',\n reset_state=True) # with state reset between test sequences\n\n # Streaming (with external state) evaluation using TF no state reset\n test.tf_stream_state_external_model_accuracy(\n flags,\n folder_name,\n accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',\n reset_state=False) # without state reset\n\n # Streaming (with internal state) evaluation using TF no state reset\n test.tf_stream_state_internal_model_accuracy(flags, folder_name)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TF streaming: %s', e)\n\n logging.info('run TFlite streaming model accuracy evaluation')\n try:\n # convert model to TFlite\n folder_name = opt_name + 'tflite_stream_state_external'\n file_name = 'stream_state_external.tflite'\n mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE\n test.convert_model_tflite(flags, folder_name, mode, file_name,\n optimizations=optimizations)\n\n # Streaming model accuracy evaluation with TFLite with state reset\n test.tflite_stream_state_external_model_accuracy(\n flags,\n folder_name,\n file_name,\n accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',\n reset_state=True)\n\n # Streaming model accuracy evaluation with TFLite without state reset\n test.tflite_stream_state_external_model_accuracy(\n flags,\n folder_name,\n 
file_name,\n accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',\n reset_state=False)\n except (ValueError, IndexError) as e:\n logging.info('FAILED to run TFLite streaming: %s', e)\n\nif __name__ == '__main__':\n # parser for training/testing data and speach feature flags\n parser = base_parser.base_parser()\n\n # sub parser for model settings\n subparsers = parser.add_subparsers(dest='model_name', help='NN model name')\n\n # DNN model settings\n parser_dnn = subparsers.add_parser('dnn')\n dnn.model_parameters(parser_dnn)\n\n # DNN raw model settings\n parser_dnn_raw = subparsers.add_parser('dnn_raw')\n dnn_raw.model_parameters(parser_dnn_raw)\n\n # LSTM model settings\n parser_lstm = subparsers.add_parser('lstm')\n lstm.model_parameters(parser_lstm)\n\n # GRU model settings\n parser_gru = subparsers.add_parser('gru')\n gru.model_parameters(parser_gru)\n\n # SVDF model settings\n parser_svdf = subparsers.add_parser('svdf')\n svdf.model_parameters(parser_svdf)\n\n # CNN model settings\n parser_cnn = subparsers.add_parser('cnn')\n cnn.model_parameters(parser_cnn)\n\n # CRNN model settings\n parser_crnn = subparsers.add_parser('crnn')\n crnn.model_parameters(parser_crnn)\n\n # ATT MH RNN model settings\n parser_att_mh_rnn = subparsers.add_parser('att_mh_rnn')\n att_mh_rnn.model_parameters(parser_att_mh_rnn)\n\n # ATT RNN model settings\n parser_att_rnn = subparsers.add_parser('att_rnn')\n att_rnn.model_parameters(parser_att_rnn)\n\n # DS_CNN model settings\n parser_ds_cnn = subparsers.add_parser('ds_cnn')\n ds_cnn.model_parameters(parser_ds_cnn)\n\n # TC Resnet model settings\n parser_tc_resnet = subparsers.add_parser('tc_resnet')\n tc_resnet.model_parameters(parser_tc_resnet)\n\n # Mobilenet model settings\n parser_mobilenet = subparsers.add_parser('mobilenet')\n mobilenet.model_parameters(parser_mobilenet)\n\n # Mobilenet V2 model settings\n parser_mobilenet_v2 = subparsers.add_parser('mobilenet_v2')\n mobilenet_v2.model_parameters(parser_mobilenet_v2)\n\n # xception model settings\n parser_xception = subparsers.add_parser('xception')\n xception.model_parameters(parser_xception)\n\n # inception model settings\n parser_inception = subparsers.add_parser('inception')\n inception.model_parameters(parser_inception)\n\n # inception resnet model settings\n parser_inception_resnet = subparsers.add_parser('inception_resnet')\n inception_resnet.model_parameters(parser_inception_resnet)\n\n # svdf resnet model settings\n parser_svdf_resnet = subparsers.add_parser('svdf_resnet')\n svdf_resnet.model_parameters(parser_svdf_resnet)\n\n # ds_tc_resnet model settings\n parser_ds_tc_resnet = subparsers.add_parser('ds_tc_resnet')\n ds_tc_resnet.model_parameters(parser_ds_tc_resnet)\n\n FLAGS, unparsed = parser.parse_known_args()\n if unparsed and tuple(unparsed) != ('--alsologtostderr',):\n raise ValueError('Unknown argument: {}'.format(unparsed))\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.compat.v1.app.run"
]
] |
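The time-shift conversion in `main()` above is worth restating on its own: the sampling interval for applying the non-streaming model to streaming data is expressed in milliseconds and turned into a sample count via the audio sample rate. The numbers below are assumed typical KWS defaults, not values read from the flags module.

```python
# Assumed values (typical keyword-spotting defaults), not taken from model_flags itself
time_shift_ms = 200
sample_rate = 16000
MS_PER_SECOND = 1000

time_shift_samples = int((time_shift_ms * sample_rate) / MS_PER_SECOND)
print(time_shift_samples)   # 3200 samples, i.e. run non-streaming inference every 200 ms
```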
RyanXLi/OneshotDet | [
"77f629978d9d1739787b08de8cccea81341507bf"
] | [
"maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nfrom maskrcnn_benchmark.modeling import registry\nfrom torch import nn\n\n\[email protected]_BOX_PREDICTOR.register(\"FastRCNNPredictor\")\nclass FastRCNNPredictor(nn.Module):\n def __init__(self, config, in_channels):\n super(FastRCNNPredictor, self).__init__()\n assert in_channels is not None\n\n num_inputs = in_channels\n\n num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n if config.FEW_SHOT.SECOND_STAGE_METHOD == 'rn':\n num_classes = 2\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.cls_score = nn.Linear(num_inputs, num_classes)\n num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes\n self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)\n\n nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)\n nn.init.constant_(self.cls_score.bias, 0)\n\n nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)\n nn.init.constant_(self.bbox_pred.bias, 0)\n\n def forward(self, x):\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n cls_logit = self.cls_score(x)\n bbox_pred = self.bbox_pred(x)\n return cls_logit, bbox_pred\n\n\[email protected]_BOX_PREDICTOR.register(\"FPNPredictor\")\nclass FPNPredictor(nn.Module):\n def __init__(self, cfg, in_channels):\n super(FPNPredictor, self).__init__()\n num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n\n num_bbox_reg_classes = 2\n if cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':\n num_classes = 1 \n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'rn' and cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS != 'focal_loss':\n num_classes= 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \\\n not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 1\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss' and \\\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \\\n not cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss' and \\\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON:\n num_classes = 2 # originally 3, but 2 in new version neg support\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS =='cxe_loss' and cfg.FEW_SHOT.SOFT_LABELING:\n num_classes = 2\n elif cfg.FEW_SHOT.SECOND_STAGE_METHOD == 'concat' and \\\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['mse_loss','l1_loss']:\n num_classes = 1\n else:\n raise Exception('setting not compatible {} {} {}'.format(\n cfg.FEW_SHOT.SECOND_STAGE_METHOD,\n cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS,\n cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON\n ))\n\n if cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS in ['focal_loss', 'mse_loss', 'l1_loss']:\n num_bbox_reg_classes = num_classes+1\n else:\n num_bbox_reg_classes = num_classes\n\n representation_size = in_channels\n\n self.cls_score = nn.Linear(representation_size, num_classes)\n # num_bbox_reg_classes = 2 #if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes\n self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n def forward(self, x):\n if x.ndimension() == 4:\n assert 
list(x.shape[2:]) == [1, 1]\n x = x.view(x.size(0), -1)\n scores = self.cls_score(x)\n bbox_deltas = self.bbox_pred(x)\n\n return scores, bbox_deltas\n\n\ndef make_roi_box_predictor(cfg, in_channels):\n func = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]\n return func(cfg, in_channels)\n"
] | [
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.init.constant_"
]
] |
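Both predictor classes in the row above reduce to the same head: pooled or flattened ROI features feed one Linear layer for class scores and one for 4-value box deltas per class, initialized with small normal weights and zero biases. A stripped-down sketch of that head; the channel and class counts are placeholders, not the config-driven values the repo computes.

```python
import torch
from torch import nn

class MinimalBoxPredictor(nn.Module):
    """Bare-bones box head: class logits plus per-class box regression deltas."""

    def __init__(self, in_channels: int, num_classes: int):
        super().__init__()
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for layer in (self.cls_score, self.bbox_pred):
            nn.init.constant_(layer.bias, 0)

    def forward(self, x: torch.Tensor):
        x = x.flatten(start_dim=1)          # accepts (N, C) or (N, C, 1, 1) features
        return self.cls_score(x), self.bbox_pred(x)

scores, deltas = MinimalBoxPredictor(256, 2)(torch.zeros(8, 256))
print(scores.shape, deltas.shape)           # torch.Size([8, 2]) torch.Size([8, 8])
```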
jerryzhucs21/spinningup | [
"2992e6a8163d78c3f82a3d92c5235fda0527c398"
] | [
"spinup/rewards/cvar_utils.py"
] | [
"import numpy as np\n\ndef relu(x):\n if x > 0:\n return x\n else:\n return 0.0\n\n\ndef cvar_fn_val(sigma, exp_ret_rs, prob_rs, alpha):\n fn_val_relu_part = 0.0\n for i,ret in enumerate(exp_ret_rs):\n fn_val_relu_part += prob_rs[i] * relu(sigma - ret)\n \n fn_val = sigma - 1.0 / (1.0 - alpha) * fn_val_relu_part\n return fn_val\n\ndef cvar_line_search_pg(exp_ret_rs, prob_rs, alpha, num_discretize=1000):\n '''use a line search to approximate sigma'''\n assert(len(exp_ret_rs) == len(prob_rs))\n assert(alpha >= 0 and alpha <= 1)\n assert(np.abs(np.sum(prob_rs) - 1.0) < 0.000001)\n #run simple discrete line search to approximate sigma for now\n\n max_val = -np.inf\n max_sigma = None \n for x in np.linspace(min(exp_ret_rs), max(exp_ret_rs), num_discretize):\n cvar_val = cvar_fn_val(x, exp_ret_rs, prob_rs, alpha)\n #print(x, cvar_val)\n if cvar_val > max_val:\n max_val = cvar_val\n max_sigma = x\n #print(\"updating\")\n \n return max_sigma, max_val\n\n\n\n\n\ndef cvar_enumerate_pg(exp_ret_rs, prob_rs, alpha):\n '''cvar is piecewise linear/concave so the max must be at one of the endpoints!\n we can just iterate over them until we find the smallest one'''\n\n sorted_exp_ret_rs, sorted_prob_rs = zip(*sorted(zip(exp_ret_rs, prob_rs)))\n #print(\"sorted rets\", sorted_exp_ret_rs)\n #print(\"sorted probs\", sorted_prob_rs)\n cum_prob = 0.0\n \n \n max_val = -np.inf\n max_sigma = None \n for ret in sorted_exp_ret_rs:\n cvar_val = cvar_fn_val(ret, exp_ret_rs, prob_rs, alpha)\n #print(x, cvar_val)\n if cvar_val >= max_val:\n max_val = cvar_val\n max_sigma = ret\n #print(\"updating\")\n elif cvar_val < max_val:\n #this function is concave so once it starts decreasing we can stop since we are only interested in maximum\n break\n \n return max_sigma, max_val\n\n\n\n# if __name__ == \"__main__\":\n# #run test to make sure both give same answers.\n# #Note cvar_enumerate_pg is orders of magnitude faster and gives same answer as far as I can tell\n# for i in range(100):\n# seed = np.random.randint(1000)\n# print(seed)\n# np.random.seed(seed)\n# num_rewards = 50\n# exp_rets = 200*np.random.rand(num_rewards) - 100 #[10,40, 80]\n# probs = np.random.rand(num_rewards)#[0.3, 0.3, 0.4]\n# probs /= np.sum(probs)\n# #print(np.sum(probs))\n# alpha = 0.6\n# num_discretize = 10000\n# #print(\"exp rets\", exp_rets)\n# #print(\"probs\", probs)\n# sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)\n# print(\"sigma = \", sigma)\n# print(\"cvar = \", cvar)\n\n# sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)\n# print(\"enum sigma\", sigma_enumerate)\n# print(\"sort cvar\", cvar_enumerate)\n\n# if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - cvar_enumerate) > 0.001:\n# print(\"wrong\")\n# print(abs(sigma_enumerate - sigma))\n# input()\n\n\nif __name__ == \"__main__\":\n #run test to make sure both give same answers.\n #Note cvar_enumerate_pg is orders of magnitude faster and gives same answer as far as I can tell\n num_rewards = 2\n exp_rets = [10, 90]\n probs = [0.05, 0.95]\n probs /= np.sum(probs)\n #print(np.sum(probs))\n alpha = 0.95\n num_discretize = 10000\n #print(\"exp rets\", exp_rets)\n #print(\"probs\", probs)\n sigma, cvar = cvar_line_search_pg(exp_rets, probs, alpha, num_discretize)\n print(\"sigma = \", sigma)\n print(\"cvar = \", cvar)\n\n sigma_enumerate, cvar_enumerate = cvar_enumerate_pg(exp_rets, probs, alpha)\n print(\"enum sigma\", sigma_enumerate)\n print(\"sort cvar\", cvar_enumerate)\n\n if abs(sigma_enumerate - sigma) > 0.1 or abs(cvar - 
cvar_enumerate) > 0.001:\n print(\"wrong\")\n print(abs(sigma_enumerate - sigma))\n input()\n"
] | [
[
"numpy.sum"
]
] |
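`cvar_enumerate_pg` in the row above relies on the objective `sigma - E[max(sigma - R, 0)] / (1 - alpha)` being concave and piecewise linear in `sigma`, so its maximum must sit at one of the return values. A compact numpy restatement of that endpoint enumeration, reusing the two-outcome example from the file's `__main__` block:

```python
import numpy as np

def cvar_value(sigma, returns, probs, alpha):
    """CVaR objective: sigma - E[relu(sigma - R)] / (1 - alpha)."""
    returns = np.asarray(returns, dtype=float)
    probs = np.asarray(probs, dtype=float)
    return sigma - np.sum(probs * np.maximum(sigma - returns, 0.0)) / (1.0 - alpha)

def cvar_by_enumeration(returns, probs, alpha):
    """Evaluate the concave, piecewise-linear objective at each return value
    (its only possible kinks) and keep the maximum."""
    values = [cvar_value(r, returns, probs, alpha) for r in returns]
    best = int(np.argmax(values))
    return returns[best], values[best]

print(cvar_by_enumeration([10, 90], [0.05, 0.95], alpha=0.95))
# (10, 10.0): with alpha = 0.95 the CVaR is the average of the worst 5% of outcomes, here 10
```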
sbmalik/pytorchx | [
"938ba5855cfb72b0dbce91af8c0a6d0e3943f122"
] | [
"squeezenet/squeezenet.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\n\ndef main():\n print('cuda device count: ', torch.cuda.device_count())\n net = torchvision.models.squeezenet1_1(pretrained=True)\n #net.fc = nn.Linear(512, 2)\n net = net.eval()\n net = net.to('cuda:0')\n print(net)\n tmp = torch.ones(2, 3, 227, 227).to('cuda:0')\n out = net(tmp)\n print('squeezenet out:', out.shape)\n torch.save(net, \"squeezenet.pth\")\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.ones",
"torch.save",
"torch.cuda.device_count"
]
] |
Sage-Bionetworks/GENIE-Sponsored-Projects | [
"e34be3ece96144aa525c7281738736d3c5ef93cb"
] | [
"geniesp/sp_config.py"
] | [
"\"\"\"\nSponsored project configuration classes\n\nUSAGE:\ngit clone https://github.com/cBioPortal/cbioportal.git\npython runSP.py AKT1 ../cbioportal/ --staging\n\"\"\"\nimport os\nimport random\nimport string\n\nimport pandas as pd\nimport synapseclient\n\nfrom . import new_redcap_export_mapping\nfrom . import sp_redcap_export_mapping\n\n\nclass Akt1(sp_redcap_export_mapping.SponsoredProjectRunner):\n \"\"\"\n AKT1 PROCESSES\n - ONE TIMELINE FILE\n - CLINICAL FILE\n OS_MONTHS = death_date_int - mets_disease_date_int\n OS_MONTHS_PRIMARY = death_date_int - primary_dx_date_int \n All dates are converted from days to months (days/30.4)\n Add headers\n REMOVE PATIENTS/SAMPLES THAT DON'T HAVE GENIE SAMPLE IDS\n \"\"\"\n _SPONSORED_PROJECT = \"AKT1\"\n _DATES = [\"death_date_int\",\"follow_up_date_int\",\"primary_dx_date_int\",\"lrr_date_int\",\"mets_disease_date_int\",\"sample_date_int_1\",\n \"sequence_report_date_int_1\",\"sequence_report_date_int_1_static\",\"sample_date_int_2\",\"sample_date_int_2_static\",\n \"sequence_report_date_int_2\",\"sequence_report_date_int_2_static\",\"sequence_report_date_int_3_static\",\n \"OS_MONTHS\",\"OS_MONTHS_PRIMARY\"]\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = \"cancer_study_identifier: genie_akt1\\nstable_id: genie_akt1_sequenced\\ncase_list_category: all_cases_with_mutation_data\\ncase_list_name: Sequenced Tumors\\ncase_list_description: All sequenced samples (%s samples)\\ncase_list_ids: %s\"\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')\n _UNMAPPED_SYN_ID = \"syn11066652\"\n _MAPPED_SYN_ID = \"syn8404878\"\n _CASE_LIST_SYN_ID = \"syn10145838\"\n _SP_SYN_ID = \"syn8363325\"\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn8220815\"\n _SP_REDCAP_EXPORTS_SYNID = \"syn8404875\" #Storage of not found samples\n _NUM_SAMPLE_COLS = 3\n\n def addOSMonths(self, sponsoredProject_mapped_df):\n #Must add new date fields to the DATE variable along with add to the mapping table: syn8220815\n sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['mets_disease_date_int'] \n sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['death_date_int'] - sponsoredProject_mapped_df['primary_dx_date_int'] \n return(sponsoredProject_mapped_df)\n\n def createTemporaryGenieId(self, x, tempIdMapping):\n uniqId = x['record_id'] + x['redcap_data_access_group']\n tempIdMap = tempIdMapping['patientId'][tempIdMapping['uniqueId'] == uniqId]\n tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))\n if len(tempIdMap) == 0:\n return(tempId)\n else:\n return(tempIdMap.values[0])\n # if sum(tempIdMapping['uniqueId'] == uniqId) == 0:\n # #syn.store(synapseclient.Table(syn.get(\"syn10164044\"),[[uniqId, tempId, SPONSORED_PROJECT]]))\n # return(tempId)\n # elif pd.np.isnan(temp['tempPatientId'][tempIdMapping['uniqueId'] == uniqId].values[0]):\n\n # else:\n # return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['tempPatientId'].values[0])\n\n def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):\n print(\"RENAMING %s NULL PATIENTS\" % sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()))\n #Create temp patient Id\n allNullPatients = sponsoredProject_mapped_df[['record_id','redcap_data_access_group','genie_patient_id']][sponsoredProject_mapped_df['genie_patient_id'].isnull()]\n temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf), axis =1)\n if 
sponsoredProject_mapped_df['genie_patient_id'].isnull().any():\n sponsoredProject_mapped_df['genie_patient_id'][sponsoredProject_mapped_df['genie_patient_id'].isnull()] = temporaryIds\n assert sum(sponsoredProject_mapped_df['genie_patient_id'].isnull()) ==0, \"Make sure there are no null genie patient Ids\"\n\n sponsoredProject_mapped_df['genie_patient_id'] = sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','genie_patient_id'), axis=1)\n sponsoredProject_mapped_df.reset_index(inplace=True,drop=True)\n return(sponsoredProject_mapped_df, temporaryIds)\n\n def makeTimeLineDf(self, redCapExportDf, therapyRange = 18):\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_DRUG_CLINTRIAL = []\n THERAPY_DRUG_AZD5363 = []\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n THERAPY_COMBO_YN = []\n THERAPY_COMBO_NUM = []\n #THERAPY NUMBER\n for therapyNumber in range(1,therapyRange):\n therapyCols = [i for i in redCapExportDf if \"therapy%d_\" % therapyNumber in i]\n START_DATE.extend([i for i in therapyCols if \"start_int\" in i])\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([i for i in therapyCols if len(i.split(\"_\")) == 2])\n THERAPY_DRUG_CLINTRIAL.extend([i for i in therapyCols if \"clintrial\" in i])\n THERAPY_DRUG_AZD5363.extend([i for i in therapyCols if \"azd\" in i])\n THERAPY_DRUG_OTHER.extend([i for i in therapyCols if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if \"discontinue\" in i])\n THERAPY_DRUG_REASON.extend([i for i in therapyCols if \"reason\" in i])\n THERAPY_COMBO_YN.extend([i for i in therapyCols if \"combo_yn\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_COMBO_NUM.extend([i for i in therapyCols if \"combo_num\" in i]* len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_TYPE.extend([\"Medical Therapy %d\" % therapyNumber]* len([i for i in therapyCols if \"start_int\" in i]))\n SUBTYPE.extend([\"Chemo/Target/Immuno etc.\"] * len([i for i in therapyCols if \"start_int\" in i]))\n #OVARIAN\n ovarian = [i for i in redCapExportDf if \"ovariansup\" in i]\n ovarian_len = len([i for i in ovarian if \"start_int\" in i])\n START_DATE.extend([i for i in ovarian if \"start_int\" in i])\n STOP_DATE.extend([i for i in ovarian if \"end_int\" in i])\n TREATMENT_TYPE.extend([\"Ovarian Suppression At Primary\"] * ovarian_len)\n SUBTYPE.extend([\"Ovarian Suppression\"] * ovarian_len)\n AGENT.extend(['']*ovarian_len)\n THERAPY_DRUG_CLINTRIAL.extend(['']*ovarian_len)\n THERAPY_DRUG_AZD5363.extend(['']*ovarian_len)\n THERAPY_DRUG_OTHER.extend(['']*ovarian_len)\n THERAPY_DRUG_DISCONTINUE.extend(['']*ovarian_len)\n THERAPY_DRUG_REASON.extend(['']*ovarian_len)\n THERAPY_COMBO_YN.extend(['']*ovarian_len)\n THERAPY_COMBO_NUM.extend(['']*ovarian_len)\n #HORMONE\n hormo = [i for i in redCapExportDf if \"hormo\" in i]\n hormo_len = len([i for i in hormo if \"start_int\" in i])\n START_DATE.extend([i for i in hormo if \"start_int\" in i])\n STOP_DATE.extend([i for i in hormo if \"end_int\" in i])\n THERAPY_DRUG_CLINTRIAL.extend([i for i in hormo if \"clintrial\" in i])\n THERAPY_DRUG_AZD5363.extend(['']*hormo_len)\n THERAPY_DRUG_OTHER.extend([i for i in hormo if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in hormo if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([i for i in hormo if \"reason\" in i])\n AGENT.extend([i for i in hormo if \"reason\" not in i and 
\"discon\" not in i and \"other\" not in i and \"clintrial\" not in i and \"start_int\" not in i and \"end_int\" not in i and \"therapy\" not in i])\n THERAPY_COMBO_YN.extend(['']*hormo_len)\n THERAPY_COMBO_NUM.extend(['']*hormo_len)\n SUBTYPE.extend([\"Hormone Therapy\"] * hormo_len)\n TREATMENT_TYPE.extend([\"Medical Therapy 1\"] * hormo_len)\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n\n #METASTATIC DIAGNOSIS\n metaDiagnosis = pd.DataFrame()\n metaDiagnosis['PATIENT_ID'] = redCapExportDf['genie_patient_id']\n #MET DISEASE IS TIMEPOINT 0\n metaDiagnosis['START_DATE'] = 0\n #metaDiagnosis['START_DATE'] = redCapExportDf['mets_disease_date_int']\n metaDiagnosis['EVENT_TYPE'] = 'STATUS'\n metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'\n metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]\n\n removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_CLINTRIAL+THERAPY_DRUG_AZD5363+THERAPY_DRUG_OTHER+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM\n lengths = set([\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(SUBTYPE),\n len(AGENT),\n len(THERAPY_DRUG_CLINTRIAL),\n len(THERAPY_DRUG_AZD5363),\n len(THERAPY_DRUG_OTHER),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(THERAPY_COMBO_YN),\n len(THERAPY_COMBO_NUM),\n len(EVENT_TYPE)])\n assert len(lengths) == 1,\"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(redCapExportDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = [redCapExportDf['genie_patient_id'][i]]*len(START_DATE)\n #timelineDF['START_DATE'] = redCapExportDf.ix[i][START_DATE].reset_index(drop=True) - redCapExportDf.ix[i]['primary_dx_date_int']\n #timelineDF['STOP_DATE'] = redCapExportDf.ix[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.ix[i]['primary_dx_date_int']\n #MET DISEASE IS TIMEPOINT 0\n timelineDF['START_DATE'] = redCapExportDf.iloc[i][START_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']\n timelineDF['STOP_DATE'] = redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True) - redCapExportDf.iloc[i]['mets_disease_date_int']\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_CLINTRIAL'] = redCapExportDf.iloc[i][THERAPY_DRUG_CLINTRIAL].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_AZD5363'] = redCapExportDf.iloc[i][THERAPY_DRUG_AZD5363].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)\n total = total.append(timelineDF)\n total['STATUS'] = ''\n ordering = total.columns\n total = total.append(metaDiagnosis)\n total = total[ordering]\n return(total,removeCols)\n\n def getSpecimen(self, getTimelineSpecimen):\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']\n specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC - getTimelineSpecimen.METS_DISEASE_DATE_INT\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n 
specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']\n specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n\nclass Erbb2(sp_redcap_export_mapping.SponsoredProjectRunner):\n\n _SPONSORED_PROJECT = \"ERBB2\"\n _DATES = ['follow_up_date_int','date_death_int','primary_dx_date_int','lrr_date_int','date_first_met_int',\n 'sample_date_int_1','seq_report_date_int_1','sample_date_int_2','seq_report_date_int_2','sample_date_int_3',\n 'sequence_report_date_int_3','sample_date_int_4','sequence_report_date_int_4','sample_date_int_5','sequence_report_date_int_5',\n 'sample_date_int_6','seq_report_date_int_6','sample_date_int_7','seq_report_date_int_7','sample_date_int_8',\n 'sequence_report_date_int_8','sample_date_int_9','sequence_report_date_int_9','sample_date_int_10',\n 'sequence_report_date_int_10','date_bso_int','OS_MONTHS','OS_MONTHS_PRIMARY']\n\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = \"cancer_study_identifier: genie_erbb2\\nstable_id: genie_erbb2_sequenced\\ncase_list_category: all_cases_with_mutation_data\\ncase_list_name: Sequenced Tumors\\ncase_list_description: All sequenced samples (%s samples)\\ncase_list_ids: %s\"\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT,'case_lists')\n _UNMAPPED_SYN_ID = \"syn8356977\"\n _MAPPED_SYN_ID = \"syn8367692\"\n _CASE_LIST_SYN_ID = \"syn10145925\"\n _SP_SYN_ID = \"syn8363326\"\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn8363731\"\n _SP_REDCAP_EXPORTS_SYNID = \"syn8322425\" #Storage of not found samples\n _NUM_SAMPLE_COLS = 10\n\n def addOSMonths(self, sponsoredProject_mapped_df):\n #Must add new date fields to the DATE variable along with add to the mapping table: syn8220815\n sponsoredProject_mapped_df['OS_MONTHS'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['date_first_met_int'] \n sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = sponsoredProject_mapped_df['date_death_int'] - sponsoredProject_mapped_df['primary_dx_date_int'] \n return(sponsoredProject_mapped_df)\n\n def createTemporaryGenieId(self, x, tempIdMapping, patientIdCol):\n \"\"\"\n Create temporary genie id for those that don't have \n \"\"\"\n uniqId = x['record_id_patient_id'] + x['redcap_data_access_group']\n if sum(tempIdMapping['uniqueId'] == uniqId) == 0:\n tempId = 'GENIE-%s-%s' % (x['redcap_data_access_group'],''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)))\n self.syn.store(synapseclient.Table(self.syn.get(\"syn10164044\"),[[uniqId, tempId]]))\n return(tempId)\n else:\n return(tempIdMapping[tempIdMapping['uniqueId'] == uniqId]['temporaryId'].values[0])\n \n def createNullPatients(self, sponsoredProject_mapped_df, tempIdMappingDf):\n #### TIMELINE FILE\n sponsoredProject_mapped_df['redcap_data_access_group'] = [i.upper() for i in sponsoredProject_mapped_df['redcap_data_access_group']]\n allNullPatients = sponsoredProject_mapped_df[['record_id_patient_id','redcap_data_access_group']][sponsoredProject_mapped_df['record_id_patient_id'].isnull()]\n temporaryIds = allNullPatients.apply(lambda x: self.createTemporaryGenieId(x, tempIdMappingDf, 'record_id_patient_id'), axis =1)\n if not temporaryIds.empty:\n sponsoredProject_mapped_df['record_id_patient_id'][sponsoredProject_mapped_df['record_id_patient_id'].isnull()] = temporaryIds\n assert sum(sponsoredProject_mapped_df['record_id_patient_id'].isnull()) == 0, \"Make sure there are no null genie patient Ids\"\n sponsoredProject_mapped_df['record_id_patient_id'] = 
sponsoredProject_mapped_df.apply(lambda x: self.checkGenieId(x, 'redcap_data_access_group','record_id_patient_id'), axis=1)\n return(sponsoredProject_mapped_df, temporaryIds)\n\n def makeTimeLineDf(self, redCapExportDf, therapyRange = 16):\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_RESPONSE = []\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n THERAPY_COMBO_YN = []\n THERAPY_COMBO_NUM = []\n ADD_TREATMENT = []\n TREATMENT_SETTING = []\n for therapyNumber in range(1,therapyRange):\n therapyCols = [i for i in redCapExportDf if (\"therapy%d_\" % therapyNumber in i or \"combo_therapy_yn_%d\" %therapyNumber == i or \"add_treatment_%d\" % therapyNumber == i or \"treatment_setting_%d\" % therapyNumber == i)]\n START_DATE.extend([i for i in therapyCols if \"start_int\" in i])\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([i for i in therapyCols if len(i.split(\"_\")) == 2 and \"response\" not in i and \"ctdrug\" not in i])\n THERAPY_DRUG_OTHER.extend([i for i in therapyCols if \"other\" in i])\n THERAPY_DRUG_DISCONTINUE.extend([i for i in therapyCols if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([i for i in therapyCols if \"reason\" in i])\n THERAPY_COMBO_YN.extend([i for i in therapyCols if \"combo_therapy_yn\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_COMBO_NUM.extend([i for i in therapyCols if \"combo_num\" in i]* len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_TYPE.extend([\"Medical Therapy %d\" % therapyNumber]* len([i for i in therapyCols if \"start_int\" in i]))\n SUBTYPE.extend([\"Chemo/Target/Immuno etc.\"] * len([i for i in therapyCols if \"start_int\" in i]))\n THERAPY_RESPONSE.extend([i for i in therapyCols if \"response\" in i] *len([i for i in therapyCols if \"start_int\" in i]))\n ADD_TREATMENT.extend([i for i in therapyCols if \"add_treatment\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n TREATMENT_SETTING.extend([i for i in therapyCols if \"treatment_setting\" in i] * len([i for i in therapyCols if \"start_int\" in i]))\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n ADD_TREATMENT.extend(['']*4)\n\n #METASTATIC DIAGNOSIS\n metaDiagnosis = pd.DataFrame()\n #MET DISEASE IS TIMEPOINT 0\n metaDiagnosis['PATIENT_ID'] = redCapExportDf['record_id_patient_id']\n metaDiagnosis['START_DATE'] = 0\n #metaDiagnosis['START_DATE'] = redCapExportDf['date_first_met_int']\n metaDiagnosis['EVENT_TYPE'] = 'STATUS'\n metaDiagnosis['STATUS'] = 'Metastatic Diagnosis'\n metaDiagnosis = metaDiagnosis[~metaDiagnosis['START_DATE'].isnull()]\n\n removeCols = START_DATE+STOP_DATE+AGENT+THERAPY_DRUG_OTHER+THERAPY_RESPONSE+THERAPY_DRUG_DISCONTINUE+THERAPY_DRUG_REASON+THERAPY_COMBO_YN+THERAPY_COMBO_NUM+ADD_TREATMENT + TREATMENT_SETTING\n\n lengths = set([\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(SUBTYPE),\n len(AGENT),\n len(THERAPY_RESPONSE),\n len(THERAPY_DRUG_OTHER),\n len(TREATMENT_SETTING),\n len(ADD_TREATMENT),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(THERAPY_COMBO_YN),\n len(THERAPY_COMBO_NUM),\n len(EVENT_TYPE)])\n assert len(lengths) == 1,\"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(redCapExportDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = [redCapExportDf['record_id_patient_id'][i]]*len(START_DATE)\n if not pd.isnull(redCapExportDf.iloc[i]['date_first_met_int']):\n timelineDF['START_DATE'] = [start if 
pd.isnull(start) else int(start) - int(redCapExportDf.iloc[i]['date_first_met_int']) for start in redCapExportDf.iloc[i][START_DATE].reset_index(drop=True)]\n timelineDF['STOP_DATE'] = [end if pd.isnull(end) else int(end) - int(redCapExportDf.iloc[i]['date_first_met_int']) for end in redCapExportDf.iloc[i][STOP_DATE].reset_index(drop=True)]\n else:\n timelineDF['START_DATE'] = pd.np.nan\n timelineDF['STOP_DATE'] = pd.np.nan\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = redCapExportDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = redCapExportDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = redCapExportDf.iloc[i][THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = redCapExportDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_YN'] = redCapExportDf.iloc[i][THERAPY_COMBO_YN].reset_index(drop=True)\n timelineDF['THERAPY_COMBO_NUM'] = redCapExportDf.iloc[i][THERAPY_COMBO_NUM].reset_index(drop=True)\n total = total.append(timelineDF)\n total['STATUS'] = ''\n ordering = total.columns\n total = total.append(metaDiagnosis)\n total = total[ordering]\n return(total, removeCols)\n\n def getSpecimen(self, getTimelineSpecimen):\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = getTimelineSpecimen['PATIENT_ID']\n getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.isnull()]\n getTimelineSpecimen = getTimelineSpecimen[~getTimelineSpecimen.METS_DISEASE_DATE_INT.isnull()]\n specimen['START_DATE'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC.astype(int) - getTimelineSpecimen.METS_DISEASE_DATE_INT.astype(int)\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n specimen['SAMPLE_ID'] = getTimelineSpecimen['SAMPLE_ID']\n specimen['SAMPLE_NOTES'] = getTimelineSpecimen.SEQUENCE_REPORT_DATE_INT_STATIC\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n\nclass Fgfr4(new_redcap_export_mapping.SponsoredProjectRunner):\n\n _DATA_ELEMENT_SYN_ID = \"syn12032922\"\n _SPONSORED_PROJECT = 'FGFR4'\n # No need to define in class\n _CASE_LIST_PATH = os.path.join(_SPONSORED_PROJECT, 'case_lists')\n _NUM_COUNTS = 4\n _REDCAP_TO_CBIOMAPPING_SYNID = \"syn15572052\"\n _UNLABELLED_SYN_ID = \"syn15341849\"\n _LABELLED_SYN_ID = \"syn15341838\"\n # Storage of not found samples\n _SP_REDCAP_EXPORTS_SYNID = \"syn11812526\"\n _SP_SYN_ID = \"syn14721789\"\n _CASE_LIST_MAF_SAMPLES_TEMPLATE = (\n \"cancer_study_identifier: genie_fgfr4\\n\"\n \"stable_id: genie_fgfr4_sequenced\\n\"\n \"case_list_category: all_cases_with_mutation_data\\n\"\n \"case_list_name: Sequenced Tumors\\n\"\n \"case_list_description: All sequenced samples \"\n \"(%s samples)\\ncase_list_ids: %s\")\n _CASE_LIST_SYN_ID = \"syn14721794\"\n\n # def addOSMonths(self, sponsoredProject_mapped_df):\n # '''\n # Must add new date fields to the DATE variable along with add\n # to the mapping table: syn8220815\n # '''\n # sponsoredProject_mapped_df['OS_MONTHS'] = \\\n # sponsoredProject_mapped_df['death_date_int'] - \\\n # sponsoredProject_mapped_df['date_first_met_int']\n # sponsoredProject_mapped_df['OS_MONTHS_PRIMARY'] = \\\n # sponsoredProject_mapped_df['death_date_int'] - \\\n # sponsoredProject_mapped_df['primary_dx_date_int']\n # return(sponsoredProject_mapped_df)\n\n def makeTimeLineDf(\n self, treatmentDf, finalPatientDf, therapyRange=5):\n # These variables are 
capitalized to match with the column headers\n START_DATE = []\n STOP_DATE = []\n TREATMENT_TYPE = []\n SUBTYPE = []\n AGENT = []\n THERAPY_RESPONSE = []\n # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or\n # OTHER (NCIT ID)\n THERAPY_DRUG_OTHER = []\n THERAPY_DRUG_DISCONTINUE = []\n THERAPY_DRUG_REASON = []\n TREATMENT_SETTING = []\n RXNORM_ID = []\n # Name of Chemotherapeutic Agent or Hormone Therapy - Experimental or\n # OTHER\n THERAPY_DRUG_START_ESTIMATED = []\n THERAPY_DRUG_OTHER_NAME = []\n THERAPY_DRUG_END_ESTIMATED = []\n for therapyNumber in range(1, therapyRange):\n therapyCols = [\n i for i in treatmentDf\n if \"therapy_drug%d\" % therapyNumber in i]\n startCols = [i for i in therapyCols if \"start_int\" in i]\n START_DATE.extend(startCols)\n STOP_DATE.extend([i for i in therapyCols if \"end_int\" in i])\n AGENT.extend([\n i for i in therapyCols if \"name\" in i and \"other\" not in i])\n RXNORM_ID.extend([\n i for i in therapyCols\n if i == \"therapy_drug%d\" % therapyNumber])\n THERAPY_DRUG_OTHER.extend([\n i for i in therapyCols if \"other\" in i and 'name' not in i])\n THERAPY_DRUG_DISCONTINUE.extend([\n i for i in therapyCols if \"discon\" in i])\n THERAPY_DRUG_REASON.extend([\n i for i in therapyCols if \"reason\" in i])\n THERAPY_DRUG_OTHER_NAME.extend([\n i for i in therapyCols if \"other_name\" in i])\n THERAPY_DRUG_START_ESTIMATED.extend([\n i for i in therapyCols if \"start_estimated\" in i])\n THERAPY_DRUG_END_ESTIMATED.extend([\n i for i in therapyCols if \"end_estimated\" in i])\n # Value\n TREATMENT_TYPE.extend([\n \"Medical Therapy %d\" % therapyNumber] * len(startCols))\n # Value\n SUBTYPE = [\"Chemo/Target/Immuno etc.\"] * len(AGENT)\n TREATMENT_SETTING = ['treatment_setting'] * len(AGENT)\n THERAPY_RESPONSE = ['therapy_response'] * len(AGENT)\n # Value\n EVENT_TYPE = [\"TREATMENT\"]*len(AGENT)\n LINE_START = ['line_start_int'] * len(AGENT)\n REGIMEN_NAME = ['regimen_name'] * len(AGENT)\n CLINICAL_TRIAL = ['clinical_trial'] * len(AGENT)\n CENTER = ['redcap_data_access_group'] * len(AGENT)\n\n lengths = [\n len(START_DATE),\n len(STOP_DATE),\n len(TREATMENT_TYPE),\n len(AGENT),\n len(THERAPY_DRUG_OTHER),\n len(THERAPY_DRUG_DISCONTINUE),\n len(THERAPY_DRUG_REASON),\n len(RXNORM_ID),\n len(THERAPY_DRUG_OTHER_NAME),\n len(THERAPY_DRUG_START_ESTIMATED),\n len(THERAPY_DRUG_END_ESTIMATED),\n len(TREATMENT_TYPE)]\n assert len(set(lengths)) == 1, \"Lengths must all be the same\"\n\n total = pd.DataFrame()\n for i in range(len(treatmentDf)):\n timelineDF = pd.DataFrame()\n timelineDF['PATIENT_ID'] = \\\n [treatmentDf['patient_id'].iloc[i]]*len(START_DATE)\n timelineDF['START_DATE'] = \\\n treatmentDf.iloc[i][START_DATE].reset_index(drop=True)\n timelineDF['STOP_DATE'] = \\\n treatmentDf.iloc[i][STOP_DATE].reset_index(drop=True)\n\n timelineDF['EVENT_TYPE'] = EVENT_TYPE\n # has to be in this order of PATIENT_ID, START, STOP and EVENT_TYPE\n timelineDF['TREATMENT_TYPE'] = TREATMENT_TYPE\n timelineDF['SUBTYPE'] = SUBTYPE\n timelineDF['AGENT'] = \\\n treatmentDf.iloc[i][AGENT].reset_index(drop=True)\n timelineDF['RXNORM_ID'] = \\\n treatmentDf.iloc[i][RXNORM_ID].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER'] = \\\n treatmentDf.iloc[i][THERAPY_DRUG_OTHER].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_DISCONTINUE'] = treatmentDf.iloc[i][\n THERAPY_DRUG_DISCONTINUE].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_REASON'] = \\\n treatmentDf.iloc[i][THERAPY_DRUG_REASON].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_OTHER_NAME'] = 
treatmentDf.iloc[i][\n THERAPY_DRUG_OTHER_NAME].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_START_ESTIMATED'] = treatmentDf.iloc[i][\n THERAPY_DRUG_START_ESTIMATED].reset_index(drop=True)\n timelineDF['THERAPY_DRUG_END_ESTIMATED'] = treatmentDf.iloc[i][\n THERAPY_DRUG_END_ESTIMATED].reset_index(drop=True)\n timelineDF['TREATMENT_SETTING'] = \\\n treatmentDf.iloc[i][TREATMENT_SETTING].reset_index(drop=True)\n timelineDF['THERAPY_RESPONSE'] = \\\n treatmentDf.iloc[i][THERAPY_RESPONSE].reset_index(drop=True)\n timelineDF['LINE_START'] = \\\n treatmentDf.iloc[i][LINE_START].reset_index(drop=True)\n timelineDF['REGIMEN_NAME'] = \\\n treatmentDf.iloc[i][REGIMEN_NAME].reset_index(drop=True)\n timelineDF['CLINICAL_TRIAL'] = \\\n treatmentDf.iloc[i][CLINICAL_TRIAL].reset_index(drop=True)\n timelineDF['CENTER'] = \\\n treatmentDf.iloc[i][CENTER].reset_index(drop=True)\n total = total.append(timelineDF, sort=False)\n # remove all without START dates\n total = total[~total['START_DATE'].isnull()]\n total['SP'] = self._SPONSORED_PROJECT\n total['STATUS'] = ''\n total['START_DATE'] = total['START_DATE'].astype('float')\n total['STOP_DATE'] = total['STOP_DATE'].astype('float')\n total['RXNORM_ID'] = total['RXNORM_ID'].astype('float')\n total['LINE_START'] = total['LINE_START'].astype('float')\n total.drop_duplicates(inplace=True)\n # Anchor point is MET_DX_DATE_INT\n date_met_int = [\n float(finalPatientDf['MET_DX_DATE_INT'][\n finalPatientDf['PATIENT_ID'] == patient].values[0])\n for patient in total['PATIENT_ID']]\n total['START_DATE'] = total['START_DATE'] - date_met_int\n total['STOP_DATE'] = total['STOP_DATE'] - date_met_int\n total['LINE_START'] = total['LINE_START'] - date_met_int\n\n return(total)\n\n def createSpecimenDf(self, sampleDf, patientDf):\n clinicalDf = sampleDf.merge(patientDf, on=\"PATIENT_ID\", how=\"outer\")\n clinicalDf = clinicalDf[~clinicalDf.AGE_AT_SEQ_REPORT.isnull()]\n clinicalDf = \\\n clinicalDf[~clinicalDf.DATE_FIRST_DISTANT_MET_INT.isnull()]\n specimen = pd.DataFrame()\n specimen['PATIENT_ID'] = clinicalDf['PATIENT_ID']\n specimen['SAMPLE_ID'] = clinicalDf['SAMPLE_ID']\n specimen['START_DATE'] = \\\n clinicalDf.AGE_AT_SEQ_REPORT.astype(int) - \\\n clinicalDf.DATE_FIRST_DISTANT_MET_INT.astype(int)\n specimen['EVENT_TYPE'] = 'SPECIMEN'\n specimen['SAMPLE_NOTES'] = clinicalDf.AGE_AT_SEQ_REPORT\n specimen = specimen[~specimen['START_DATE'].isnull()]\n return(specimen)\n\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame"
]
] |
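The code column of the row above builds a cBioPortal-style timeline file in which every treatment START_DATE/STOP_DATE is re-anchored so that the metastatic diagnosis date becomes timepoint 0. The sketch below is a minimal, self-contained illustration of that anchoring step only; the frame, the column names (`therapy1_start_int`, `mets_disease_date_int`, ...) and the values are hypothetical and are not taken from the dataset row.

```python
import pandas as pd

# Hypothetical REDCap-style export: absolute day offsets per patient.
redcap = pd.DataFrame({
    "genie_patient_id": ["GENIE-SAGE-0001", "GENIE-SAGE-0002"],
    "mets_disease_date_int": [12000, 14500],   # anchor: metastatic diagnosis
    "therapy1_start_int": [12030, 14510],
    "therapy1_end_int": [12200, 14700],
})

rows = []
for _, patient in redcap.iterrows():
    anchor = patient["mets_disease_date_int"]
    rows.append({
        "PATIENT_ID": patient["genie_patient_id"],
        "EVENT_TYPE": "TREATMENT",
        # Timeline dates are expressed relative to the metastatic diagnosis,
        # so the diagnosis itself sits at START_DATE == 0.
        "START_DATE": patient["therapy1_start_int"] - anchor,
        "STOP_DATE": patient["therapy1_end_int"] - anchor,
    })

# The metastatic-diagnosis STATUS event is the timepoint-0 row per patient.
rows.extend(
    {"PATIENT_ID": pid, "EVENT_TYPE": "STATUS", "START_DATE": 0, "STOP_DATE": None}
    for pid in redcap["genie_patient_id"]
)

timeline = pd.DataFrame(rows)
print(timeline)
```

The same subtraction is applied to every date-valued timeline column (specimen sequencing dates, line starts, and so on), which is why the mapped classes above always drop rows whose anchor date is missing before doing the arithmetic.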
pragnesh-ai/driverlessai-recipes | [
"97371a2d2cd853cdeeb15037f462af96d81a7c0b"
] | [
"models/mli/model_skopes_rules.py"
] | [
"\"\"\"Skopes rules \"\"\"\n\nimport uuid\nimport os\nimport datatable as dt\nimport numpy as np\nfrom h2oaicore.models import CustomModel\nfrom sklearn.preprocessing import LabelEncoder\nfrom h2oaicore.systemutils import physical_cores_count\nfrom h2oaicore.systemutils import user_dir, remove, config\nfrom h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug\n\n\nclass SKOPE_RULES(CustomModel):\n _regression = False\n _binary = True\n _multiclass = False\n _display_name = \"SKOPE RULES\"\n _description = \"SKOPE RULES\"\n # using git master because pypi is very out of date (Jan 2020) but need Sept 1-ish master with fix for updated scikit-learn\n _modules_needed_by_name = ['git+https://github.com/scikit-learn-contrib/skope-rules.git']\n\n @staticmethod\n def do_acceptance_test():\n return True\n\n def set_default_params(self, accuracy=None, time_tolerance=None,\n interpretability=None, **kwargs):\n # Fill up parameters we care about\n self.params = dict(random_state=kwargs.get(\"random_state\", 1234),\n max_depth_duplication=None, n_estimators=10,\n precision_min=0.5, recall_min=0.01, max_samples=0.8,\n max_samples_features=1.0, max_depth=3,\n max_features=\"auto\", min_samples_split=2,\n bootstrap=False, bootstrap_features=False)\n\n def mutate_params(self, accuracy=10, **kwargs):\n if accuracy > 8:\n max_depth_duplication = [None, 2, 3]\n n_estimators = [10, 20, 40]\n precision_min = [0.1, 0.2, 0.3]\n recall_min = [0.01, 0.05]\n max_samples = [0.5, 0.8, 1.0]\n max_samples_features = [0.5, 0.8, 1.0]\n max_depth = [3, 4, 5]\n max_features = [\"sqrt\", \"log2\", \"auto\"]\n min_samples_split = [2, 11, 21]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n elif accuracy >= 5:\n max_depth_duplication = [None]\n n_estimators = [10, 20]\n precision_min = [0.1, 0.2, 0.3]\n recall_min = [0.01]\n max_samples = [0.8, 1.0]\n max_samples_features = [1.0]\n max_depth = [3, 4]\n max_features = [\"sqrt\", \"log2\", \"auto\"]\n min_samples_split = [2, 5, 11]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n else:\n max_depth_duplication = [None]\n n_estimators = [10]\n precision_min = [0.1, 0.2]\n recall_min = [0.01]\n max_samples = [0.8, 1.0]\n max_samples_features = [0.8, 1.0]\n max_depth = [3, 4]\n max_features = [\"auto\"]\n min_samples_split = [2]\n bootstrap = [True, False]\n bootstrap_features = [True, False]\n\n self.params[\"max_depth_duplication\"] = np.random.choice(max_depth_duplication)\n self.params[\"n_estimators\"] = np.random.choice(n_estimators)\n self.params[\"precision_min\"] = np.random.choice(precision_min)\n self.params[\"recall_min\"] = np.random.choice(recall_min)\n self.params[\"max_samples\"] = np.random.choice(max_samples)\n self.params[\"max_samples_features\"] = np.random.choice(max_samples_features)\n self.params[\"max_depth\"] = np.random.choice(max_depth)\n self.params[\"max_features\"] = np.random.choice(max_features)\n self.params[\"min_samples_split\"] = np.random.choice(min_samples_split)\n self.params[\"bootstrap\"] = np.random.choice(bootstrap)\n self.params[\"bootstrap_features\"] = np.random.choice(bootstrap_features)\n\n def _create_tmp_folder(self, logger):\n # Create a temp folder to store files \n # Set the default value without context available (required to pass acceptance test)\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n # Make a real tmp folder when experiment is available\n if self.context and self.context.experiment_id:\n tmp_folder = 
os.path.join(self.context.experiment_tmp_dir, \"%s_SKOPE_model_folder\" % uuid.uuid4())\n\n # Now let's try to create that folder\n try:\n os.mkdir(tmp_folder)\n except PermissionError:\n # This not occur so log a warning\n loggerwarning(logger, \"SKOPE was denied temp folder creation rights\")\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except FileExistsError:\n # We should never be here since temp dir name is expected to be unique\n loggerwarning(logger, \"SKOPE temp folder already exists\")\n tmp_folder = os.path.join(self.context.experiment_tmp_dir, \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n except:\n # Revert to temporary file path\n tmp_folder = os.path.join(user_dir(), \"%s_SKOPE_model_folder\" % uuid.uuid4())\n os.mkdir(tmp_folder)\n\n loggerinfo(logger, \"SKOPE temp folder {}\".format(tmp_folder))\n return tmp_folder\n\n def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):\n\n orig_cols = list(X.names)\n\n import pandas as pd\n import numpy as np\n from skrules import SkopeRules\n from sklearn.preprocessing import OneHotEncoder\n from collections import Counter\n\n # Get the logger if it exists\n logger = None\n if self.context and self.context.experiment_id:\n logger = make_experiment_logger(experiment_id=self.context.experiment_id,\n tmp_dir=self.context.tmp_dir,\n experiment_tmp_dir=self.context.experiment_tmp_dir)\n\n # Set up temp folder\n tmp_folder = self._create_tmp_folder(logger)\n\n # Set up model\n if self.num_classes >= 2:\n lb = LabelEncoder()\n lb.fit(self.labels)\n y = lb.transform(y)\n\n model = SkopeRules(max_depth_duplication=self.params[\"max_depth_duplication\"],\n n_estimators=self.params[\"n_estimators\"],\n precision_min=self.params[\"precision_min\"],\n recall_min=self.params[\"recall_min\"],\n max_samples=self.params[\"max_samples\"],\n max_samples_features=self.params[\"max_samples_features\"],\n max_depth=self.params[\"max_depth\"],\n max_features=self.params[\"max_features\"],\n min_samples_split=self.params[\"min_samples_split\"],\n bootstrap=self.params[\"bootstrap\"],\n bootstrap_features=self.params[\"bootstrap_features\"],\n random_state=self.params[\"random_state\"],\n feature_names=orig_cols)\n else:\n # Skopes doesn't work for regression\n loggerinfo(logger, \"PASS, no skopes model\")\n pass\n\n # Find the datatypes\n X = X.to_pandas()\n X.columns = orig_cols\n\n # Change continuous features to categorical\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # Change all float32 values to float64\n for ii in range(len(X_datatypes)):\n if X_datatypes[ii] == 'float32':\n X = X.astype({orig_cols[ii]: np.float64})\n\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # List the categorical and numerical features\n self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols)) if\n (X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]\n self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]\n\n # Find the levels and mode for each categorical feature\n # for use in the test set\n self.train_levels = {}\n for item in self.X_categorical:\n self.train_levels[item] = list(set(X[item]))\n self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]\n\n # One hot encode the categorical features\n # And replace missing values with a Missing category\n if len(self.X_categorical) > 0:\n loggerinfo(logger, \"PCategorical encode\")\n\n for colname in 
self.X_categorical:\n X[colname] = list(X[colname].fillna(\"Missing\"))\n self.enc = OneHotEncoder(handle_unknown='ignore')\n\n self.enc.fit(X[self.X_categorical])\n self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))\n\n X_enc = self.enc.transform(X[self.X_categorical]).toarray()\n\n X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)\n\n # Replace missing values with a missing value code\n if len(self.X_numeric) > 0:\n\n for colname in self.X_numeric:\n X[colname] = list(X[colname].fillna(-999))\n\n model.fit(np.array(X), np.array(y))\n\n # Find the rule list\n self.rule_list = model.rules_\n\n # Calculate feature importances\n var_imp = []\n for var in orig_cols:\n var_imp.append(sum(int(var in item[0]) for item in self.rule_list))\n\n if max(var_imp) != 0:\n importances = list(np.array(var_imp) / max(var_imp))\n else:\n importances = [1] * len(var_imp)\n\n pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(\n os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)\n\n self.mean_target = np.array(sum(y) / len(y))\n\n # Set model properties\n self.set_model_properties(model=model,\n features=list(X.columns),\n importances=importances,\n iterations=self.params['n_estimators'])\n\n def predict(self, X, **kwargs):\n orig_cols = list(X.names)\n import pandas as pd\n\n X = dt.Frame(X)\n\n # Find datatypes\n X = X.to_pandas()\n\n X_datatypes = [str(item) for item in list(X.dtypes)]\n\n # Change float 32 values to float 64\n for ii in range(len(X_datatypes)):\n if X_datatypes[ii] == 'float32':\n X = X.astype({orig_cols[ii]: np.float64})\n\n # Replace missing values with a missing category\n # Replace categories that weren't in the training set with the mode\n if len(self.X_categorical) > 0:\n\n for colname in self.X_categorical:\n X[colname] = list(X[colname].fillna(\"Missing\"))\n\n for label in self.X_categorical:\n # Replace anything not in the test set\n train_categories = self.train_levels[label]\n X_label = np.array(X[label])\n mmode = self.train_mode[label]\n X_label[~np.isin(X_label, train_categories)] = mmode\n X[label] = X_label\n\n # Replace missing values with a missing value code \n if len(self.X_numeric) > 0:\n for colname in self.X_numeric:\n X[colname] = list(X[colname].fillna(-999))\n\n # Get model \n model, _, _, _ = self.get_model_properties()\n\n # One hot encode categorical features\n if len(self.X_categorical) > 0:\n X_enc = self.enc.transform(X[self.X_categorical]).toarray()\n X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)\n\n # Make predictions on the test set\n preds = model.score_top_rules(X) / len(self.rule_list)\n preds = np.array(preds)\n epsilon = 10 ** (-3)\n preds[np.isnan(preds)] = self.mean_target\n preds[preds > 1 - epsilon] = 1.0 - epsilon\n preds[preds < 0 + epsilon] = 0.0 + epsilon\n\n return preds\n"
] | [
[
"pandas.DataFrame",
"numpy.random.choice",
"numpy.isin",
"sklearn.preprocessing.LabelEncoder",
"numpy.isnan",
"numpy.array",
"sklearn.preprocessing.OneHotEncoder"
]
] |
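The custom model in the row above derives per-feature importances by counting how often each original column name appears in the fitted skope-rules rule strings and normalising by the maximum count. The fragment below reproduces that idea on a hand-written `rule_list` so it runs without skope-rules installed; the rule strings and column names are made up for illustration, and the naive substring match mirrors the behaviour of the original code (a column name that is a substring of another would also be counted).

```python
import numpy as np

# Hypothetical SkopeRules.rules_ output: (rule_string, (precision, recall, n)) pairs.
rule_list = [
    ("age > 42.5 and income <= 30000.0", (0.81, 0.12, 3)),
    ("income <= 30000.0", (0.62, 0.30, 5)),
    ("tenure > 2.5 and age > 42.5", (0.74, 0.08, 2)),
]
orig_cols = ["age", "income", "tenure", "region"]

# A feature's raw importance is the number of rules whose text mentions it.
var_imp = [sum(int(col in rule) for rule, _ in rule_list) for col in orig_cols]

# Normalise to [0, 1]; fall back to uniform importances if no rule mentions any feature.
if max(var_imp) != 0:
    importances = list(np.array(var_imp) / max(var_imp))
else:
    importances = [1] * len(var_imp)

print(dict(zip(orig_cols, importances)))
# {'age': 1.0, 'income': 1.0, 'tenure': 0.5, 'region': 0.0}
```

The predictions in that row follow a similar rule-counting idea: `score_top_rules(X)` is divided by the total number of rules and then clipped into `(epsilon, 1 - epsilon)` so the score can be consumed as a probability-like value.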
AdrianMastronardi/pandas | [
"67045903306ac4a1cab108177e92df30d99912b4",
"67045903306ac4a1cab108177e92df30d99912b4"
] | [
"pandas/tests/arrays/integer/conftest.py",
"pandas/tests/frame/methods/test_replace.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas.core.arrays.integer import (\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n)\n\n\[email protected](\n params=[\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n ]\n)\ndef dtype(request):\n \"\"\"Parametrized fixture returning integer 'dtype'\"\"\"\n return request.param()\n\n\[email protected]\ndef data(dtype):\n \"\"\"\n Fixture returning 'data' array with valid and missing values according to\n parametrized integer 'dtype'.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n return pd.array(\n list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100],\n dtype=dtype,\n )\n\n\[email protected]\ndef data_missing(dtype):\n \"\"\"\n Fixture returning array with exactly one NaN and one valid integer,\n according to parametrized integer 'dtype'.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n return pd.array([np.nan, 1], dtype=dtype)\n\n\[email protected](params=[\"data\", \"data_missing\"])\ndef all_data(request, data, data_missing):\n \"\"\"Parametrized fixture returning 'data' or 'data_missing' integer arrays.\n\n Used to test dtype conversion with and without missing values.\n \"\"\"\n if request.param == \"data\":\n return data\n elif request.param == \"data_missing\":\n return data_missing\n",
"from __future__ import annotations\n\nfrom datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import np_version_under1p20\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\[email protected]\ndef mix_ab() -> dict[str, list[int | str]]:\n return {\"a\": list(range(4)), \"b\": list(\"ab..\")}\n\n\[email protected]\ndef mix_abc() -> dict[str, list[float | str]]:\n return {\"a\": list(range(4)), \"b\": list(\"ab..\"), \"c\": [\"a\", \"b\", np.nan, \"d\"]}\n\n\nclass TestDataFrameReplace:\n def test_replace_inplace(self, datetime_frame, float_string_frame):\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n\n tsframe = datetime_frame.copy()\n return_value = tsframe.replace(np.nan, 0, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))\n\n # mixed type\n mf = float_string_frame\n mf.iloc[5:20, mf.columns.get_loc(\"foo\")] = np.nan\n mf.iloc[-10:, mf.columns.get_loc(\"A\")] = np.nan\n\n result = float_string_frame.replace(np.nan, 0)\n expected = float_string_frame.fillna(value=0)\n tm.assert_frame_equal(result, expected)\n\n tsframe = datetime_frame.copy()\n return_value = tsframe.replace([np.nan], [0], inplace=True)\n assert return_value is None\n tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))\n\n @pytest.mark.parametrize(\n \"to_replace,values,expected\",\n [\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n (\n [r\"\\s*\\.\\s*\", r\"e|f|g\"],\n [np.nan, \"crap\"],\n {\n \"a\": [\"a\", \"b\", np.nan, np.nan],\n \"b\": [\"crap\"] * 3 + [\"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n },\n ),\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n (\n [r\"\\s*(\\.)\\s*\", r\"(e|f|g)\"],\n [r\"\\1\\1\", r\"\\1_crap\"],\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"e_crap\", \"f_crap\", \"g_crap\", \"h\"],\n \"c\": [\"h\", \"e_crap\", \"l\", \"o\"],\n },\n ),\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n (\n [r\"\\s*(\\.)\\s*\", r\"e\"],\n [r\"\\1\\1\", r\"crap\"],\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"crap\", \"f\", \"g\", \"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n },\n ),\n ],\n )\n @pytest.mark.parametrize(\"inplace\", [True, False])\n @pytest.mark.parametrize(\"use_value_regex_args\", [True, False])\n def test_regex_replace_list_obj(\n self, to_replace, values, expected, inplace, use_value_regex_args\n ):\n df = DataFrame({\"a\": list(\"ab..\"), \"b\": list(\"efgh\"), \"c\": list(\"helo\")})\n\n if use_value_regex_args:\n result = df.replace(value=values, regex=to_replace, inplace=inplace)\n else:\n result = df.replace(to_replace, values, regex=True, inplace=inplace)\n\n if inplace:\n assert result is None\n result = df\n\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_list_mixed(self, mix_ab):\n # mixed frame to make sure this doesn't break things\n dfmix = DataFrame(mix_ab)\n\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r\"\\s*\\.\\s*\", r\"a\"]\n values = [np.nan, \"crap\"]\n mix2 = {\"a\": list(range(4)), \"b\": list(\"ab..\"), \"c\": list(\"halo\")}\n dfmix2 = DataFrame(mix2)\n res = dfmix2.replace(to_replace_res, values, regex=True)\n expec = DataFrame(\n {\n \"a\": mix2[\"a\"],\n \"b\": [\"crap\", \"b\", np.nan, np.nan],\n \"c\": 
[\"h\", \"crap\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"(a|b)\"]\n values = [r\"\\1\\1\", r\"\\1_crap\"]\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"a_crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.replace(regex=to_replace_res, value=values)\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed_inplace(self, mix_ab):\n dfmix = DataFrame(mix_ab)\n # the same inplace\n # lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r\"\\s*\\.\\s*\", r\"a\"]\n values = [np.nan, \"crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b\", np.nan, np.nan]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"(a|b)\"]\n values = [r\"\\1\\1\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"a_crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(to_replace_res, values, inplace=True, regex=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n to_replace_res = [r\"\\s*(\\.)\\s*\", r\"a\", r\"(b)\"]\n values = [r\"\\1\\1\", r\"crap\", r\"\\1_crap\"]\n res = dfmix.copy()\n return_value = res.replace(regex=to_replace_res, value=values, inplace=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_ab[\"a\"], \"b\": [\"crap\", \"b_crap\", \"..\", \"..\"]})\n tm.assert_frame_equal(res, expec)\n\n def test_regex_replace_dict_mixed(self, mix_abc):\n dfmix = DataFrame(mix_abc)\n\n # dicts\n # single dict {re1: v1}, search the whole frame\n # need test for this...\n\n # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole\n # frame\n res = dfmix.replace({\"b\": r\"\\s*\\.\\s*\"}, {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": r\"\\s*\\.\\s*\"}, {\"b\": np.nan}, inplace=True, regex=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n # list of dicts {re1: re11, 
re2: re12, ..., reN: re1N}, search the\n # whole frame\n res = dfmix.replace({\"b\": r\"\\s*(\\.)\\s*\"}, {\"b\": r\"\\1ty\"}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": r\"\\s*(\\.)\\s*\"}, {\"b\": r\"\\1ty\"}, inplace=True, regex=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", \".ty\", \".ty\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n res = dfmix.replace(regex={\"b\": r\"\\s*(\\.)\\s*\"}, value={\"b\": r\"\\1ty\"})\n res2 = dfmix.copy()\n return_value = res2.replace(\n regex={\"b\": r\"\\s*(\\.)\\s*\"}, value={\"b\": r\"\\1ty\"}, inplace=True\n )\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", \".ty\", \".ty\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n # scalar -> dict\n # to_replace regex, {value: value}\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [np.nan, \"b\", \".\", \".\"], \"c\": mix_abc[\"c\"]}\n )\n res = dfmix.replace(\"a\", {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(\"a\", {\"b\": np.nan}, regex=True, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n res = dfmix.replace(\"a\", {\"b\": np.nan}, regex=True)\n res2 = dfmix.copy()\n return_value = res2.replace(regex=\"a\", value={\"b\": np.nan}, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [np.nan, \"b\", \".\", \".\"], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n\n def test_regex_replace_dict_nested(self, mix_abc):\n # nested dicts will not work until this is implemented for Series\n dfmix = DataFrame(mix_abc)\n res = dfmix.replace({\"b\": {r\"\\s*\\.\\s*\": np.nan}}, regex=True)\n res2 = dfmix.copy()\n res4 = dfmix.copy()\n return_value = res2.replace(\n {\"b\": {r\"\\s*\\.\\s*\": np.nan}}, inplace=True, regex=True\n )\n assert return_value is None\n res3 = dfmix.replace(regex={\"b\": {r\"\\s*\\.\\s*\": np.nan}})\n return_value = res4.replace(regex={\"b\": {r\"\\s*\\.\\s*\": np.nan}}, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n tm.assert_frame_equal(res4, expec)\n\n def test_regex_replace_dict_nested_non_first_character(self, any_string_dtype):\n # GH 25259\n dtype = any_string_dtype\n df = DataFrame({\"first\": [\"abc\", \"bca\", \"cab\"]}, dtype=dtype)\n expected = DataFrame({\"first\": [\".bc\", \"bc.\", \"c.b\"]}, dtype=dtype)\n result = df.replace({\"a\": \".\"}, regex=True)\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_dict_nested_gh4115(self):\n df = DataFrame({\"Type\": [\"Q\", \"T\", \"Q\", \"Q\", \"T\"], \"tmp\": 2})\n expected = DataFrame({\"Type\": [0, 1, 0, 0, 1], \"tmp\": 2})\n result = df.replace({\"Type\": {\"Q\": 0, \"T\": 1}})\n tm.assert_frame_equal(result, expected)\n\n def test_regex_replace_list_to_scalar(self, mix_abc):\n df = DataFrame(mix_abc)\n expec = DataFrame(\n {\n \"a\": mix_abc[\"a\"],\n \"b\": np.array([np.nan] * 4),\n \"c\": [np.nan, np.nan, np.nan, \"d\"],\n }\n )\n res = df.replace([r\"\\s*\\.\\s*\", \"a|b\"], np.nan, regex=True)\n res2 = df.copy()\n res3 = 
df.copy()\n return_value = res2.replace(\n [r\"\\s*\\.\\s*\", \"a|b\"], np.nan, regex=True, inplace=True\n )\n assert return_value is None\n return_value = res3.replace(\n regex=[r\"\\s*\\.\\s*\", \"a|b\"], value=np.nan, inplace=True\n )\n assert return_value is None\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_str_to_numeric(self, mix_abc):\n # what happens when you try to replace a numeric value with a regex?\n df = DataFrame(mix_abc)\n res = df.replace(r\"\\s*\\.\\s*\", 0, regex=True)\n res2 = df.copy()\n return_value = res2.replace(r\"\\s*\\.\\s*\", 0, inplace=True, regex=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=r\"\\s*\\.\\s*\", value=0, inplace=True)\n assert return_value is None\n expec = DataFrame({\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", 0, 0], \"c\": mix_abc[\"c\"]})\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_regex_list_to_numeric(self, mix_abc):\n df = DataFrame(mix_abc)\n res = df.replace([r\"\\s*\\.\\s*\", \"b\"], 0, regex=True)\n res2 = df.copy()\n return_value = res2.replace([r\"\\s*\\.\\s*\", \"b\"], 0, regex=True, inplace=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=[r\"\\s*\\.\\s*\", \"b\"], value=0, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", 0, 0, 0], \"c\": [\"a\", 0, np.nan, \"d\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_series_of_regexes(self, mix_abc):\n df = DataFrame(mix_abc)\n s1 = Series({\"b\": r\"\\s*\\.\\s*\"})\n s2 = Series({\"b\": np.nan})\n res = df.replace(s1, s2, regex=True)\n res2 = df.copy()\n return_value = res2.replace(s1, s2, inplace=True, regex=True)\n assert return_value is None\n res3 = df.copy()\n return_value = res3.replace(regex=s1, value=s2, inplace=True)\n assert return_value is None\n expec = DataFrame(\n {\"a\": mix_abc[\"a\"], \"b\": [\"a\", \"b\", np.nan, np.nan], \"c\": mix_abc[\"c\"]}\n )\n tm.assert_frame_equal(res, expec)\n tm.assert_frame_equal(res2, expec)\n tm.assert_frame_equal(res3, expec)\n\n def test_regex_replace_numeric_to_object_conversion(self, mix_abc):\n df = DataFrame(mix_abc)\n expec = DataFrame({\"a\": [\"a\", 1, 2, 3], \"b\": mix_abc[\"b\"], \"c\": mix_abc[\"c\"]})\n res = df.replace(0, \"a\")\n tm.assert_frame_equal(res, expec)\n assert res.a.dtype == np.object_\n\n @pytest.mark.parametrize(\n \"to_replace\", [{\"\": np.nan, \",\": \"\"}, {\",\": \"\", \"\": np.nan}]\n )\n def test_joint_simple_replace_and_regex_replace(self, to_replace):\n # GH-39338\n df = DataFrame(\n {\n \"col1\": [\"1,000\", \"a\", \"3\"],\n \"col2\": [\"a\", \"\", \"b\"],\n \"col3\": [\"a\", \"b\", \"c\"],\n }\n )\n result = df.replace(regex=to_replace)\n expected = DataFrame(\n {\n \"col1\": [\"1000\", \"a\", \"3\"],\n \"col2\": [\"a\", np.nan, \"b\"],\n \"col3\": [\"a\", \"b\", \"c\"],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"metachar\", [\"[]\", \"()\", r\"\\d\", r\"\\w\", r\"\\s\"])\n def test_replace_regex_metachar(self, metachar):\n df = DataFrame({\"a\": [metachar, \"else\"]})\n result = df.replace({\"a\": {metachar: \"paren\"}})\n expected = DataFrame({\"a\": [\"paren\", \"else\"]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n 
\"data,to_replace,expected\",\n [\n ([\"xax\", \"xbx\"], {\"a\": \"c\", \"b\": \"d\"}, [\"xcx\", \"xdx\"]),\n ([\"d\", \"\", \"\"], {r\"^\\s*$\": pd.NA}, [\"d\", pd.NA, pd.NA]),\n ],\n )\n def test_regex_replace_string_types(\n self, data, to_replace, expected, frame_or_series, any_string_dtype\n ):\n # GH-41333, GH-35977\n dtype = any_string_dtype\n obj = frame_or_series(data, dtype=dtype)\n result = obj.replace(to_replace, regex=True)\n expected = frame_or_series(expected, dtype=dtype)\n\n tm.assert_equal(result, expected)\n\n def test_replace(self, datetime_frame):\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n\n zero_filled = datetime_frame.replace(np.nan, -1e8)\n tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))\n tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame)\n\n datetime_frame[\"A\"][:5] = np.nan\n datetime_frame[\"A\"][-5:] = np.nan\n datetime_frame[\"B\"][:5] = -1e8\n\n # empty\n df = DataFrame(index=[\"a\", \"b\"])\n tm.assert_frame_equal(df, df.replace(5, 7))\n\n # GH 11698\n # test for mixed data types.\n df = DataFrame(\n [(\"-\", pd.to_datetime(\"20150101\")), (\"a\", pd.to_datetime(\"20150102\"))]\n )\n df1 = df.replace(\"-\", np.nan)\n expected_df = DataFrame(\n [(np.nan, pd.to_datetime(\"20150101\")), (\"a\", pd.to_datetime(\"20150102\"))]\n )\n tm.assert_frame_equal(df1, expected_df)\n\n def test_replace_list(self):\n obj = {\"a\": list(\"ab..\"), \"b\": list(\"efgh\"), \"c\": list(\"helo\")}\n dfobj = DataFrame(obj)\n\n # lists of regexes and values\n # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]\n to_replace_res = [r\".\", r\"e\"]\n values = [np.nan, \"crap\"]\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame(\n {\n \"a\": [\"a\", \"b\", np.nan, np.nan],\n \"b\": [\"crap\", \"f\", \"g\", \"h\"],\n \"c\": [\"h\", \"crap\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]\n to_replace_res = [r\".\", r\"f\"]\n values = [r\"..\", r\"crap\"]\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame(\n {\n \"a\": [\"a\", \"b\", \"..\", \"..\"],\n \"b\": [\"e\", \"crap\", \"g\", \"h\"],\n \"c\": [\"h\", \"e\", \"l\", \"o\"],\n }\n )\n tm.assert_frame_equal(res, expec)\n\n def test_replace_with_empty_list(self, frame_or_series):\n # GH 21977\n ser = Series([[\"a\", \"b\"], [], np.nan, [1]])\n obj = DataFrame({\"col\": ser})\n obj = tm.get_obj(obj, frame_or_series)\n expected = obj\n result = obj.replace([], np.nan)\n tm.assert_equal(result, expected)\n\n # GH 19266\n msg = (\n \"NumPy boolean array indexing assignment cannot assign {size} \"\n \"input values to the 1 output values where the mask is true\"\n )\n with pytest.raises(ValueError, match=msg.format(size=0)):\n obj.replace({np.nan: []})\n with pytest.raises(ValueError, match=msg.format(size=2)):\n obj.replace({np.nan: [\"dummy\", \"alt\"]})\n\n def test_replace_series_dict(self):\n # from GH 3064\n df = DataFrame({\"zero\": {\"a\": 0.0, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 0}})\n result = df.replace(0, {\"zero\": 0.5, \"one\": 1.0})\n expected = DataFrame({\"zero\": {\"a\": 0.5, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 1.0}})\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(0, df.mean())\n tm.assert_frame_equal(result, expected)\n\n # series to series/dict\n df = DataFrame({\"zero\": {\"a\": 0.0, \"b\": 1}, \"one\": {\"a\": 2.0, \"b\": 0}})\n s = Series({\"zero\": 0.0, \"one\": 2.0})\n result = df.replace(s, {\"zero\": 0.5, \"one\": 1.0})\n 
expected = DataFrame({\"zero\": {\"a\": 0.5, \"b\": 1}, \"one\": {\"a\": 1.0, \"b\": 0.0}})\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(s, df.mean())\n tm.assert_frame_equal(result, expected)\n\n def test_replace_convert(self):\n # gh 3907\n df = DataFrame([[\"foo\", \"bar\", \"bah\"], [\"bar\", \"foo\", \"bah\"]])\n m = {\"foo\": 1, \"bar\": 2, \"bah\": 3}\n rep = df.replace(m)\n expec = Series([np.int64] * 3)\n res = rep.dtypes\n tm.assert_series_equal(expec, res)\n\n def test_replace_mixed(self, float_string_frame):\n mf = float_string_frame\n mf.iloc[5:20, mf.columns.get_loc(\"foo\")] = np.nan\n mf.iloc[-10:, mf.columns.get_loc(\"A\")] = np.nan\n\n result = float_string_frame.replace(np.nan, -18)\n expected = float_string_frame.fillna(value=-18)\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame)\n\n result = float_string_frame.replace(np.nan, -1e8)\n expected = float_string_frame.fillna(value=-1e8)\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame)\n\n def test_replace_mixed_int_block_upcasting(self):\n\n # int block upcasting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0.5, 1], dtype=\"float64\"),\n }\n )\n result = df.replace(0, 0.5)\n tm.assert_frame_equal(result, expected)\n\n return_value = df.replace(0, 0.5, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(df, expected)\n\n def test_replace_mixed_int_block_splitting(self):\n\n # int block splitting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n \"C\": Series([1, 2], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0.5, 1], dtype=\"float64\"),\n \"C\": Series([1, 2], dtype=\"int64\"),\n }\n )\n result = df.replace(0, 0.5)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_mixed2(self):\n\n # to object block upcasting\n df = DataFrame(\n {\n \"A\": Series([1.0, 2.0], dtype=\"float64\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n expected = DataFrame(\n {\n \"A\": Series([1, \"foo\"], dtype=\"object\"),\n \"B\": Series([0, 1], dtype=\"int64\"),\n }\n )\n result = df.replace(2, \"foo\")\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n {\n \"A\": Series([\"foo\", \"bar\"], dtype=\"object\"),\n \"B\": Series([0, \"foo\"], dtype=\"object\"),\n }\n )\n result = df.replace([1, 2], [\"foo\", \"bar\"])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_mixed3(self):\n # test case from\n df = DataFrame(\n {\"A\": Series([3, 0], dtype=\"int64\"), \"B\": Series([0, 3], dtype=\"int64\")}\n )\n result = df.replace(3, df.mean().to_dict())\n expected = df.copy().astype(\"float64\")\n m = df.mean()\n expected.iloc[0, 0] = m[0]\n expected.iloc[1, 1] = m[1]\n tm.assert_frame_equal(result, expected)\n\n def test_replace_nullable_int_with_string_doesnt_cast(self):\n # GH#25438 don't cast df['a'] to float64\n df = DataFrame({\"a\": [1, 2, 3, np.nan], \"b\": [\"some\", \"strings\", \"here\", \"he\"]})\n df[\"a\"] = df[\"a\"].astype(\"Int64\")\n\n res = df.replace(\"\", np.nan)\n tm.assert_series_equal(res[\"a\"], df[\"a\"])\n\n @pytest.mark.parametrize(\"dtype\", [\"boolean\", \"Int64\", \"Float64\"])\n def 
test_replace_with_nullable_column(self, dtype):\n # GH-44499\n nullable_ser = Series([1, 0, 1], dtype=dtype)\n df = DataFrame({\"A\": [\"A\", \"B\", \"x\"], \"B\": nullable_ser})\n result = df.replace(\"x\", \"X\")\n expected = DataFrame({\"A\": [\"A\", \"B\", \"X\"], \"B\": nullable_ser})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_simple_nested_dict(self):\n df = DataFrame({\"col\": range(1, 5)})\n expected = DataFrame({\"col\": [\"a\", 2, 3, \"b\"]})\n\n result = df.replace({\"col\": {1: \"a\", 4: \"b\"}})\n tm.assert_frame_equal(expected, result)\n\n # in this case, should be the same as the not nested version\n result = df.replace({1: \"a\", 4: \"b\"})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_simple_nested_dict_with_nonexistent_value(self):\n df = DataFrame({\"col\": range(1, 5)})\n expected = DataFrame({\"col\": [\"a\", 2, 3, \"b\"]})\n\n result = df.replace({-1: \"-\", 1: \"a\", 4: \"b\"})\n tm.assert_frame_equal(expected, result)\n\n result = df.replace({\"col\": {-1: \"-\", 1: \"a\", 4: \"b\"}})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_NA_with_None(self):\n # gh-45601\n df = DataFrame({\"value\": [42, None]}).astype({\"value\": \"Int64\"})\n result = df.replace({pd.NA: None})\n expected = DataFrame({\"value\": [42, None]}, dtype=object)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_NAT_with_None(self):\n # gh-45836\n df = DataFrame([pd.NaT, pd.NaT])\n result = df.replace({pd.NaT: None, np.NaN: None})\n expected = DataFrame([None, None])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_None_keeps_categorical(self):\n # gh-46634\n cat_series = Series([\"b\", \"b\", \"b\", \"d\"], dtype=\"category\")\n df = DataFrame(\n {\n \"id\": Series([5, 4, 3, 2], dtype=\"float64\"),\n \"col\": cat_series,\n }\n )\n result = df.replace({3: None})\n\n expected = DataFrame(\n {\n \"id\": Series([5.0, 4.0, None, 2.0], dtype=\"object\"),\n \"col\": cat_series,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_replace_value_is_none(self, datetime_frame):\n orig_value = datetime_frame.iloc[0, 0]\n orig2 = datetime_frame.iloc[1, 0]\n\n datetime_frame.iloc[0, 0] = np.nan\n datetime_frame.iloc[1, 0] = 1\n\n result = datetime_frame.replace(to_replace={np.nan: 0})\n expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T\n tm.assert_frame_equal(result, expected)\n\n result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8})\n tsframe = datetime_frame.copy()\n tsframe.iloc[0, 0] = 0\n tsframe.iloc[1, 0] = -1e8\n expected = tsframe\n tm.assert_frame_equal(expected, result)\n datetime_frame.iloc[0, 0] = orig_value\n datetime_frame.iloc[1, 0] = orig2\n\n def test_replace_for_new_dtypes(self, datetime_frame):\n\n # dtypes\n tsframe = datetime_frame.copy().astype(np.float32)\n tsframe[\"A\"][:5] = np.nan\n tsframe[\"A\"][-5:] = np.nan\n\n zero_filled = tsframe.replace(np.nan, -1e8)\n tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8))\n tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe)\n\n tsframe[\"A\"][:5] = np.nan\n tsframe[\"A\"][-5:] = np.nan\n tsframe[\"B\"][:5] = -1e8\n\n b = tsframe[\"B\"]\n b[b == -1e8] = np.nan\n tsframe[\"B\"] = b\n result = tsframe.fillna(method=\"bfill\")\n tm.assert_frame_equal(result, tsframe.fillna(method=\"bfill\"))\n\n @pytest.mark.parametrize(\n \"frame, to_replace, value, expected\",\n [\n (DataFrame({\"ints\": [1, 2, 3]}), 1, 0, DataFrame({\"ints\": [0, 2, 3]})),\n (\n DataFrame({\"ints\": [1, 2, 3]}, dtype=np.int32),\n 1,\n 0,\n 
DataFrame({\"ints\": [0, 2, 3]}, dtype=np.int32),\n ),\n (\n DataFrame({\"ints\": [1, 2, 3]}, dtype=np.int16),\n 1,\n 0,\n DataFrame({\"ints\": [0, 2, 3]}, dtype=np.int16),\n ),\n (\n DataFrame({\"bools\": [True, False, True]}),\n False,\n True,\n DataFrame({\"bools\": [True, True, True]}),\n ),\n (\n DataFrame({\"complex\": [1j, 2j, 3j]}),\n 1j,\n 0,\n DataFrame({\"complex\": [0j, 2j, 3j]}),\n ),\n (\n DataFrame(\n {\n \"datetime64\": Index(\n [\n datetime(2018, 5, 28),\n datetime(2018, 7, 28),\n datetime(2018, 5, 28),\n ]\n )\n }\n ),\n datetime(2018, 5, 28),\n datetime(2018, 7, 28),\n DataFrame({\"datetime64\": Index([datetime(2018, 7, 28)] * 3)}),\n ),\n # GH 20380\n (\n DataFrame({\"dt\": [datetime(3017, 12, 20)], \"str\": [\"foo\"]}),\n \"foo\",\n \"bar\",\n DataFrame({\"dt\": [datetime(3017, 12, 20)], \"str\": [\"bar\"]}),\n ),\n # GH 36782\n (\n DataFrame({\"dt\": [datetime(2920, 10, 1)]}),\n datetime(2920, 10, 1),\n datetime(2020, 10, 1),\n DataFrame({\"dt\": [datetime(2020, 10, 1)]}),\n ),\n (\n DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [0, np.nan, 2],\n }\n ),\n Timestamp(\"20130102\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n ),\n ),\n # GH 35376\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1.0,\n 5,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1,\n 5,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1.0,\n 5.0,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n (\n DataFrame([[1, 1.0], [2, 2.0]]),\n 1,\n 5.0,\n DataFrame([[5, 5.0], [2, 2.0]]),\n ),\n ],\n )\n def test_replace_dtypes(self, frame, to_replace, value, expected):\n result = getattr(frame, \"replace\")(to_replace, value)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_input_formats_listlike(self):\n # both dicts\n to_rep = {\"A\": np.nan, \"B\": 0, \"C\": \"\"}\n values = {\"A\": 0, \"B\": -1, \"C\": \"missing\"}\n df = DataFrame(\n {\"A\": [np.nan, 0, np.inf], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n filled = df.replace(to_rep, values)\n expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n result = df.replace([0, 2, 5], [5, 2, 0])\n expected = DataFrame(\n {\"A\": [np.nan, 5, np.inf], \"B\": [5, 2, 0], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n tm.assert_frame_equal(result, expected)\n\n # scalar to dict\n values = {\"A\": 0, \"B\": -1, \"C\": \"missing\"}\n df = DataFrame(\n {\"A\": [np.nan, 0, np.nan], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n filled = df.replace(np.nan, values)\n expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n # list to list\n to_rep = [np.nan, 0, \"\"]\n values = [-2, -1, \"missing\"]\n result = df.replace(to_rep, values)\n expected = df.copy()\n for i in range(len(to_rep)):\n return_value = expected.replace(to_rep[i], values[i], inplace=True)\n assert return_value is None\n tm.assert_frame_equal(result, expected)\n\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n df.replace(to_rep, values[1:])\n\n def test_replace_input_formats_scalar(self):\n df = DataFrame(\n {\"A\": [np.nan, 0, np.inf], \"B\": [0, 2, 5], \"C\": [\"\", \"asdf\", \"fd\"]}\n )\n\n # dict to scalar\n to_rep = {\"A\": np.nan, \"B\": 0, \"C\": \"\"}\n filled = df.replace(to_rep, 0)\n expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}\n tm.assert_frame_equal(filled, DataFrame(expected))\n\n msg = \"value argument must be scalar, dict, or Series\"\n with pytest.raises(TypeError, match=msg):\n df.replace(to_rep, [np.nan, 0, \"\"])\n\n # list to scalar\n to_rep = [np.nan, 0, \"\"]\n result = df.replace(to_rep, -1)\n expected = df.copy()\n for i in range(len(to_rep)):\n return_value = expected.replace(to_rep[i], -1, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(result, expected)\n\n def test_replace_limit(self):\n # TODO\n pass\n\n def test_replace_dict_no_regex(self):\n answer = Series(\n {\n 0: \"Strongly Agree\",\n 1: \"Agree\",\n 2: \"Neutral\",\n 3: \"Disagree\",\n 4: \"Strongly Disagree\",\n }\n )\n weights = {\n \"Agree\": 4,\n \"Disagree\": 2,\n \"Neutral\": 3,\n \"Strongly Agree\": 5,\n \"Strongly Disagree\": 1,\n }\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_series_no_regex(self):\n answer = Series(\n {\n 0: \"Strongly Agree\",\n 1: \"Agree\",\n 2: \"Neutral\",\n 3: \"Disagree\",\n 4: \"Strongly Disagree\",\n }\n )\n weights = Series(\n {\n \"Agree\": 4,\n \"Disagree\": 2,\n \"Neutral\": 3,\n \"Strongly Agree\": 5,\n \"Strongly Disagree\": 1,\n }\n )\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_dict_tuple_list_ordering_remains_the_same(self):\n df = DataFrame({\"A\": [np.nan, 1]})\n res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})\n res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])\n res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])\n\n expected = DataFrame({\"A\": [0, -1e8]})\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res2, res3)\n tm.assert_frame_equal(res3, expected)\n\n def test_replace_doesnt_replace_without_regex(self):\n df = DataFrame(\n {\n \"fol\": [1, 2, 2, 3],\n \"T_opp\": [\"0\", \"vr\", \"0\", \"0\"],\n \"T_Dir\": [\"0\", \"0\", \"0\", \"bt\"],\n \"T_Enh\": [\"vo\", \"0\", \"0\", \"0\"],\n }\n )\n res = df.replace({r\"\\D\": 1})\n tm.assert_frame_equal(df, res)\n\n def test_replace_bool_with_string(self):\n df = DataFrame({\"a\": [True, False], \"b\": list(\"ab\")})\n result = df.replace(True, \"a\")\n expected = DataFrame({\"a\": [\"a\", False], \"b\": df.b})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_pure_bool_with_string_no_op(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(\"asdf\", \"fdsa\")\n tm.assert_frame_equal(df, result)\n\n def test_replace_bool_with_bool(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(False, True)\n expected = DataFrame(np.ones((2, 2), dtype=bool))\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_dict_with_bool_keys(self):\n df = DataFrame({0: [True, False], 1: [False, True]})\n result = df.replace({\"asdf\": \"asdb\", True: \"yes\"})\n expected = DataFrame({0: [\"yes\", False], 1: [False, \"yes\"]})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_dict_strings_vs_ints(self):\n # 
GH#34789\n df = DataFrame({\"Y0\": [1, 2], \"Y1\": [3, 4]})\n result = df.replace({\"replace_string\": \"test\"})\n\n tm.assert_frame_equal(result, df)\n\n result = df[\"Y0\"].replace({\"replace_string\": \"test\"})\n tm.assert_series_equal(result, df[\"Y0\"])\n\n def test_replace_truthy(self):\n df = DataFrame({\"a\": [True, True]})\n r = df.replace([np.inf, -np.inf], np.nan)\n e = df\n tm.assert_frame_equal(r, e)\n\n def test_nested_dict_overlapping_keys_replace_int(self):\n # GH 27660 keep behaviour consistent for simple dictionary and\n # nested dictionary replacement\n df = DataFrame({\"a\": list(range(1, 5))})\n\n result = df.replace({\"a\": dict(zip(range(1, 5), range(2, 6)))})\n expected = df.replace(dict(zip(range(1, 5), range(2, 6))))\n tm.assert_frame_equal(result, expected)\n\n def test_nested_dict_overlapping_keys_replace_str(self):\n # GH 27660\n a = np.arange(1, 5)\n astr = a.astype(str)\n bstr = np.arange(2, 6).astype(str)\n df = DataFrame({\"a\": astr})\n result = df.replace(dict(zip(astr, bstr)))\n expected = df.replace({\"a\": dict(zip(astr, bstr))})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_swapping_bug(self):\n df = DataFrame({\"a\": [True, False, True]})\n res = df.replace({\"a\": {True: \"Y\", False: \"N\"}})\n expect = DataFrame({\"a\": [\"Y\", \"N\", \"Y\"]})\n tm.assert_frame_equal(res, expect)\n\n df = DataFrame({\"a\": [0, 1, 0]})\n res = df.replace({\"a\": {0: \"Y\", 1: \"N\"}})\n expect = DataFrame({\"a\": [\"Y\", \"N\", \"Y\"]})\n tm.assert_frame_equal(res, expect)\n\n def test_replace_period(self):\n d = {\n \"fname\": {\n \"out_augmented_AUG_2011.json\": pd.Period(year=2011, month=8, freq=\"M\"),\n \"out_augmented_JAN_2011.json\": pd.Period(year=2011, month=1, freq=\"M\"),\n \"out_augmented_MAY_2012.json\": pd.Period(year=2012, month=5, freq=\"M\"),\n \"out_augmented_SUBSIDY_WEEK.json\": pd.Period(\n year=2011, month=4, freq=\"M\"\n ),\n \"out_augmented_AUG_2012.json\": pd.Period(year=2012, month=8, freq=\"M\"),\n \"out_augmented_MAY_2011.json\": pd.Period(year=2011, month=5, freq=\"M\"),\n \"out_augmented_SEP_2013.json\": pd.Period(year=2013, month=9, freq=\"M\"),\n }\n }\n\n df = DataFrame(\n [\n \"out_augmented_AUG_2012.json\",\n \"out_augmented_SEP_2013.json\",\n \"out_augmented_SUBSIDY_WEEK.json\",\n \"out_augmented_MAY_2012.json\",\n \"out_augmented_MAY_2011.json\",\n \"out_augmented_AUG_2011.json\",\n \"out_augmented_JAN_2011.json\",\n ],\n columns=[\"fname\"],\n )\n assert set(df.fname.values) == set(d[\"fname\"].keys())\n\n expected = DataFrame({\"fname\": [d[\"fname\"][k] for k in df.fname.values]})\n assert expected.dtypes[0] == \"Period[M]\"\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetime(self):\n d = {\n \"fname\": {\n \"out_augmented_AUG_2011.json\": Timestamp(\"2011-08\"),\n \"out_augmented_JAN_2011.json\": Timestamp(\"2011-01\"),\n \"out_augmented_MAY_2012.json\": Timestamp(\"2012-05\"),\n \"out_augmented_SUBSIDY_WEEK.json\": Timestamp(\"2011-04\"),\n \"out_augmented_AUG_2012.json\": Timestamp(\"2012-08\"),\n \"out_augmented_MAY_2011.json\": Timestamp(\"2011-05\"),\n \"out_augmented_SEP_2013.json\": Timestamp(\"2013-09\"),\n }\n }\n\n df = DataFrame(\n [\n \"out_augmented_AUG_2012.json\",\n \"out_augmented_SEP_2013.json\",\n \"out_augmented_SUBSIDY_WEEK.json\",\n \"out_augmented_MAY_2012.json\",\n \"out_augmented_MAY_2011.json\",\n \"out_augmented_AUG_2011.json\",\n \"out_augmented_JAN_2011.json\",\n ],\n columns=[\"fname\"],\n )\n assert set(df.fname.values) == 
set(d[\"fname\"].keys())\n expected = DataFrame({\"fname\": [d[\"fname\"][k] for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetimetz(self):\n\n # GH 11326\n # behaving poorly when presented with a datetime64[ns, tz]\n df = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [0, np.nan, 2],\n }\n )\n result = df.replace(np.nan, 1)\n expected = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": Series([0, 1, 2], dtype=\"float64\"),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.fillna(1)\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(0, np.nan)\n expected = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"B\": [np.nan, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.replace(\n Timestamp(\"20130102\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n )\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1, 0] = np.nan\n result = result.replace({\"A\": pd.NaT}, Timestamp(\"20130104\", tz=\"US/Eastern\"))\n tm.assert_frame_equal(result, expected)\n\n # coerce to object\n result = df.copy()\n result.iloc[1, 0] = np.nan\n with tm.assert_produces_warning(FutureWarning, match=\"mismatched timezone\"):\n result = result.replace(\n {\"A\": pd.NaT}, Timestamp(\"20130104\", tz=\"US/Pacific\")\n )\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\", tz=\"US/Pacific\"),\n # once deprecation is enforced\n # Timestamp(\"20130104\", tz=\"US/Pacific\").tz_convert(\"US/Eastern\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.iloc[1, 0] = np.nan\n result = result.replace({\"A\": np.nan}, Timestamp(\"20130104\"))\n expected = DataFrame(\n {\n \"A\": [\n Timestamp(\"20130101\", tz=\"US/Eastern\"),\n Timestamp(\"20130104\"),\n Timestamp(\"20130103\", tz=\"US/Eastern\"),\n ],\n \"B\": [0, np.nan, 2],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_empty_dictlike(self, mix_abc):\n # GH 15289\n df = DataFrame(mix_abc)\n tm.assert_frame_equal(df, df.replace({}))\n tm.assert_frame_equal(df, df.replace(Series([], dtype=object)))\n\n tm.assert_frame_equal(df, df.replace({\"b\": {}}))\n tm.assert_frame_equal(df, df.replace(Series({\"b\": {}})))\n\n @pytest.mark.parametrize(\n \"to_replace, method, expected\",\n [\n (0, \"bfill\", {\"A\": [1, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]}),\n (\n np.nan,\n \"bfill\",\n {\"A\": [0, 1, 2], \"B\": [5.0, 7.0, 7.0], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\"d\", \"ffill\", {\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]}),\n (\n [0, 2],\n \"bfill\",\n {\"A\": [1, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n [1, 2],\n \"pad\",\n {\"A\": [0, 0, 0], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n (1, 2),\n \"bfill\",\n {\"A\": [0, 2, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]},\n ),\n (\n [\"b\", \"c\"],\n \"ffill\",\n {\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"a\", \"a\"]},\n 
),\n ],\n )\n def test_replace_method(self, to_replace, method, expected):\n # GH 19632\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [5, np.nan, 7], \"C\": [\"a\", \"b\", \"c\"]})\n\n result = df.replace(to_replace=to_replace, value=None, method=method)\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"replace_dict, final_data\",\n [({\"a\": 1, \"b\": 1}, [[3, 3], [2, 2]]), ({\"a\": 1, \"b\": 2}, [[3, 1], [2, 3]])],\n )\n def test_categorical_replace_with_dict(self, replace_dict, final_data):\n # GH 26988\n df = DataFrame([[1, 1], [2, 2]], columns=[\"a\", \"b\"], dtype=\"category\")\n\n final_data = np.array(final_data)\n\n a = pd.Categorical(final_data[:, 0], categories=[3, 2])\n\n ex_cat = [3, 2] if replace_dict[\"b\"] == 1 else [1, 3]\n b = pd.Categorical(final_data[:, 1], categories=ex_cat)\n\n expected = DataFrame({\"a\": a, \"b\": b})\n result = df.replace(replace_dict, 3)\n tm.assert_frame_equal(result, expected)\n msg = (\n r\"Attributes of DataFrame.iloc\\[:, 0\\] \\(column name=\\\"a\\\"\\) are \"\n \"different\"\n )\n with pytest.raises(AssertionError, match=msg):\n # ensure non-inplace call does not affect original\n tm.assert_frame_equal(df, expected)\n return_value = df.replace(replace_dict, 3, inplace=True)\n assert return_value is None\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\n \"df, to_replace, exp\",\n [\n (\n {\"col1\": [1, 2, 3], \"col2\": [4, 5, 6]},\n {4: 5, 5: 6, 6: 7},\n {\"col1\": [1, 2, 3], \"col2\": [5, 6, 7]},\n ),\n (\n {\"col1\": [1, 2, 3], \"col2\": [\"4\", \"5\", \"6\"]},\n {\"4\": \"5\", \"5\": \"6\", \"6\": \"7\"},\n {\"col1\": [1, 2, 3], \"col2\": [\"5\", \"6\", \"7\"]},\n ),\n ],\n )\n def test_replace_commutative(self, df, to_replace, exp):\n # GH 16051\n # DataFrame.replace() overwrites when values are non-numeric\n # also added to data frame whilst issue was for series\n\n df = DataFrame(df)\n\n expected = DataFrame(exp)\n result = df.replace(to_replace)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"replacer\",\n [\n Timestamp(\"20170827\"),\n np.int8(1),\n np.int16(1),\n np.float32(1),\n np.float64(1),\n ],\n )\n def test_replace_replacer_dtype(self, request, replacer):\n # GH26632\n if np.isscalar(replacer) and replacer.dtype.itemsize < 8:\n request.node.add_marker(\n pytest.mark.xfail(\n np_version_under1p20, reason=\"np.putmask doesn't coerce dtype\"\n )\n )\n df = DataFrame([\"a\"])\n result = df.replace({\"a\": replacer, \"b\": replacer})\n expected = DataFrame([replacer])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_after_convert_dtypes(self):\n # GH31517\n df = DataFrame({\"grp\": [1, 2, 3, 4, 5]}, dtype=\"Int64\")\n result = df.replace(1, 10)\n expected = DataFrame({\"grp\": [10, 2, 3, 4, 5]}, dtype=\"Int64\")\n tm.assert_frame_equal(result, expected)\n\n def test_replace_invalid_to_replace(self):\n # GH 18634\n # API: replace() should raise an exception if invalid argument is given\n df = DataFrame({\"one\": [\"a\", \"b \", \"c\"], \"two\": [\"d \", \"e \", \"f \"]})\n msg = (\n r\"Expecting 'to_replace' to be either a scalar, array-like, \"\n r\"dict or None, got invalid type.*\"\n )\n with pytest.raises(TypeError, match=msg):\n df.replace(lambda x: x.strip())\n\n @pytest.mark.parametrize(\"dtype\", [\"float\", \"float64\", \"int64\", \"Int64\", \"boolean\"])\n @pytest.mark.parametrize(\"value\", [np.nan, pd.NA])\n def test_replace_no_replacement_dtypes(self, dtype, value):\n # 
https://github.com/pandas-dev/pandas/issues/32988\n df = DataFrame(np.eye(2), dtype=dtype)\n result = df.replace(to_replace=[None, -np.inf, np.inf], value=value)\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize(\"replacement\", [np.nan, 5])\n def test_replace_with_duplicate_columns(self, replacement):\n # GH 24798\n result = DataFrame({\"A\": [1, 2, 3], \"A1\": [4, 5, 6], \"B\": [7, 8, 9]})\n result.columns = list(\"AAB\")\n\n expected = DataFrame(\n {\"A\": [1, 2, 3], \"A1\": [4, 5, 6], \"B\": [replacement, 8, 9]}\n )\n expected.columns = list(\"AAB\")\n\n result[\"B\"] = result[\"B\"].replace(7, replacement)\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"value\", [pd.Period(\"2020-01\"), pd.Interval(0, 5)])\n def test_replace_ea_ignore_float(self, frame_or_series, value):\n # GH#34871\n obj = DataFrame({\"Per\": [value] * 3})\n obj = tm.get_obj(obj, frame_or_series)\n\n expected = obj.copy()\n result = obj.replace(1.0, 0.0)\n tm.assert_equal(expected, result)\n\n def test_replace_value_category_type(self):\n \"\"\"\n Test for #23305: to ensure category dtypes are maintained\n after replace with direct values\n \"\"\"\n\n # create input data\n input_dict = {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [\"a\", \"b\", \"c\", \"d\"],\n \"col3\": [1.5, 2.5, 3.5, 4.5],\n \"col4\": [\"cat1\", \"cat2\", \"cat3\", \"cat4\"],\n \"col5\": [\"obj1\", \"obj2\", \"obj3\", \"obj4\"],\n }\n # explicitly cast columns as category and order them\n input_df = DataFrame(data=input_dict).astype(\n {\"col2\": \"category\", \"col4\": \"category\"}\n )\n input_df[\"col2\"] = input_df[\"col2\"].cat.reorder_categories(\n [\"a\", \"b\", \"c\", \"d\"], ordered=True\n )\n input_df[\"col4\"] = input_df[\"col4\"].cat.reorder_categories(\n [\"cat1\", \"cat2\", \"cat3\", \"cat4\"], ordered=True\n )\n\n # create expected dataframe\n expected_dict = {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [\"a\", \"b\", \"c\", \"z\"],\n \"col3\": [1.5, 2.5, 3.5, 4.5],\n \"col4\": [\"cat1\", \"catX\", \"cat3\", \"cat4\"],\n \"col5\": [\"obj9\", \"obj2\", \"obj3\", \"obj4\"],\n }\n # explicitly cast columns as category and order them\n expected = DataFrame(data=expected_dict).astype(\n {\"col2\": \"category\", \"col4\": \"category\"}\n )\n expected[\"col2\"] = expected[\"col2\"].cat.reorder_categories(\n [\"a\", \"b\", \"c\", \"z\"], ordered=True\n )\n expected[\"col4\"] = expected[\"col4\"].cat.reorder_categories(\n [\"cat1\", \"catX\", \"cat3\", \"cat4\"], ordered=True\n )\n\n # replace values in input dataframe\n input_df = input_df.replace(\"d\", \"z\")\n input_df = input_df.replace(\"obj1\", \"obj9\")\n result = input_df.replace(\"cat2\", \"catX\")\n\n tm.assert_frame_equal(result, expected)\n\n def test_replace_dict_category_type(self):\n \"\"\"\n Test to ensure category dtypes are maintained\n after replace with dict values\n \"\"\"\n # GH#35268, GH#44940\n\n # create input dataframe\n input_dict = {\"col1\": [\"a\"], \"col2\": [\"obj1\"], \"col3\": [\"cat1\"]}\n # explicitly cast columns as category\n input_df = DataFrame(data=input_dict).astype(\n {\"col1\": \"category\", \"col2\": \"category\", \"col3\": \"category\"}\n )\n\n # create expected dataframe\n expected_dict = {\"col1\": [\"z\"], \"col2\": [\"obj9\"], \"col3\": [\"catX\"]}\n # explicitly cast columns as category\n expected = DataFrame(data=expected_dict).astype(\n {\"col1\": \"category\", \"col2\": \"category\", \"col3\": \"category\"}\n )\n\n # replace values in input dataframe using a dict\n result = input_df.replace({\"a\": \"z\", 
\"obj1\": \"obj9\", \"cat1\": \"catX\"})\n\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_compiled_regex(self):\n # https://github.com/pandas-dev/pandas/issues/35680\n df = DataFrame([\"a\", \"b\", \"c\"])\n regex = re.compile(\"^a$\")\n result = df.replace({regex: \"z\"}, regex=True)\n expected = DataFrame([\"z\", \"b\", \"c\"])\n tm.assert_frame_equal(result, expected)\n\n def test_replace_intervals(self):\n # https://github.com/pandas-dev/pandas/issues/35931\n df = DataFrame({\"a\": [pd.Interval(0, 1), pd.Interval(0, 1)]})\n result = df.replace({\"a\": {pd.Interval(0, 1): \"x\"}})\n expected = DataFrame({\"a\": [\"x\", \"x\"]})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_unicode(self):\n # GH: 16784\n columns_values_map = {\"positive\": {\"正面\": 1, \"中立\": 1, \"负面\": 0}}\n df1 = DataFrame({\"positive\": np.ones(3)})\n result = df1.replace(columns_values_map)\n expected = DataFrame({\"positive\": np.ones(3)})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_bytes(self, frame_or_series):\n # GH#38900\n obj = frame_or_series([\"o\"]).astype(\"|S\")\n expected = obj.copy()\n obj = obj.replace({None: np.nan})\n tm.assert_equal(obj, expected)\n\n @pytest.mark.parametrize(\n \"data, to_replace, value, expected\",\n [\n ([1], [1.0], [0], [0]),\n ([1], [1], [0], [0]),\n ([1.0], [1.0], [0], [0.0]),\n ([1.0], [1], [0], [0.0]),\n ],\n )\n @pytest.mark.parametrize(\"box\", [list, tuple, np.array])\n def test_replace_list_with_mixed_type(\n self, data, to_replace, value, expected, box, frame_or_series\n ):\n # GH#40371\n obj = frame_or_series(data)\n expected = frame_or_series(expected)\n result = obj.replace(box(to_replace), value)\n tm.assert_equal(result, expected)\n\n\nclass TestDataFrameReplaceRegex:\n @pytest.mark.parametrize(\n \"data\",\n [\n {\"a\": list(\"ab..\"), \"b\": list(\"efgh\")},\n {\"a\": list(\"ab..\"), \"b\": list(range(4))},\n ],\n )\n @pytest.mark.parametrize(\n \"to_replace,value\", [(r\"\\s*\\.\\s*\", np.nan), (r\"\\s*(\\.)\\s*\", r\"\\1\\1\\1\")]\n )\n @pytest.mark.parametrize(\"compile_regex\", [True, False])\n @pytest.mark.parametrize(\"regex_kwarg\", [True, False])\n @pytest.mark.parametrize(\"inplace\", [True, False])\n def test_regex_replace_scalar(\n self, data, to_replace, value, compile_regex, regex_kwarg, inplace\n ):\n df = DataFrame(data)\n expected = df.copy()\n\n if compile_regex:\n to_replace = re.compile(to_replace)\n\n if regex_kwarg:\n regex = to_replace\n to_replace = None\n else:\n regex = True\n\n result = df.replace(to_replace, value, inplace=inplace, regex=regex)\n\n if inplace:\n assert result is None\n result = df\n\n if value is np.nan:\n expected_replace_val = np.nan\n else:\n expected_replace_val = \"...\"\n\n expected.loc[expected[\"a\"] == \".\", \"a\"] = expected_replace_val\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"regex\", [False, True])\n def test_replace_regex_dtype_frame(self, regex):\n # GH-48644\n df1 = DataFrame({\"A\": [\"0\"], \"B\": [\"0\"]})\n expected_df1 = DataFrame({\"A\": [1], \"B\": [1]})\n result_df1 = df1.replace(to_replace=\"0\", value=1, regex=regex)\n tm.assert_frame_equal(result_df1, expected_df1)\n\n df2 = DataFrame({\"A\": [\"0\"], \"B\": [\"1\"]})\n expected_df2 = DataFrame({\"A\": [1], \"B\": [\"1\"]})\n result_df2 = df2.replace(to_replace=\"0\", value=1, regex=regex)\n tm.assert_frame_equal(result_df2, expected_df2)\n\n def test_replace_with_value_also_being_replaced(self):\n # GH46306\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [1, 0, 
2]})\n result = df.replace({0: 1, 1: np.nan})\n expected = DataFrame({\"A\": [1, np.nan, 2], \"B\": [np.nan, 1, 2]})\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.array"
],
[
"numpy.ones",
"pandas.Series",
"pandas.Period",
"pandas._testing.assert_frame_equal",
"pandas.Categorical",
"pandas._testing.assert_series_equal",
"numpy.isscalar",
"numpy.float64",
"pandas._testing.assert_produces_warning",
"pandas._testing.assert_equal",
"pandas.to_datetime",
"numpy.random.rand",
"pandas.Timestamp",
"pandas._testing.get_obj",
"numpy.int8",
"numpy.eye",
"pandas.date_range",
"numpy.float32",
"numpy.arange",
"numpy.int16",
"pandas.DataFrame",
"numpy.array",
"pandas.Interval"
]
] |
gaoxuesong/sonnet | [
"40995a58744bbadc2e875c5c87e744896bdc4249"
] | [
"sonnet/python/modules/layer_norm.py"
] | [
"# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Layer normalization module for Sonnet.\n\nThis contains the module LayerNorm, which performs layer normalization on\nits inputs.\n\nOriginal paper: https://arxiv.org/abs/1607.06450.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom sonnet.python.modules import base\nfrom sonnet.python.modules import util\n\nimport tensorflow as tf\n\n\nclass LayerNorm(base.AbstractModule):\n \"\"\"Layer normalization module.\n\n Implementation based on:\n https://arxiv.org/abs/1607.06450\n\n This module transforms input x into:\n\n outputs = gamma * (x - mu) / sigma + beta\n\n where mu and sigma are respectively the mean and standard deviation of x.\n Gamma and beta are trainable parameters for scaling and shifting respectively.\n\n \"\"\"\n\n GAMMA = \"gamma\" # Layer norm scaling.\n BETA = \"beta\" # Layer norm bias.\n\n POSSIBLE_KEYS = {GAMMA, BETA}\n\n def __init__(self,\n eps=1e-5,\n initializers=None,\n partitioners=None,\n regularizers=None,\n name=\"layer_norm\"):\n \"\"\"Constructs a LayerNorm module.\n\n Args:\n eps: small epsilon to avoid division by zero variance. Defaults to\n 1e-5 as used in the paper.\n initializers: Dict containing ops to initialize the scale and bias.\n This dictionary may contain any of the keys in POSSIBLE_KEYS.\n partitioners: Optional dict containing partitioners to partition\n the scale and bias. As a default, no partitioners are used. This\n dict may contain any of the keys in POSSIBLE_KEYS.\n regularizers: Optional dict containing regularizers for the scale and\n bias. As a default, no regularizers are used. 
This dict may contain\n any of the keys in POSSIBLE_KEYS.\n name: name of the module.\n\n Raises:\n KeyError: If `initializers`, `partitioners` or `regularizers` contain\n any keys other than `gamma`, `beta`.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n \"\"\"\n super(LayerNorm, self).__init__(name=name)\n\n self._eps = eps\n\n self._initializers = util.check_initializers(initializers,\n self.POSSIBLE_KEYS)\n self._partitioners = util.check_partitioners(partitioners,\n self.POSSIBLE_KEYS)\n self._regularizers = util.check_regularizers(regularizers,\n self.POSSIBLE_KEYS)\n\n def _build(self, inputs):\n \"\"\"Connects the LayerNorm module into the graph.\n\n Args:\n inputs: a Tensor of shape `[batch_size, layer_dim]`.\n\n Returns:\n normalized: layer normalized outputs with same shape as inputs.\n\n Raises:\n base.NotSupportedError: If `inputs` has data type of `tf.float16`.\n \"\"\"\n\n if inputs.dtype == tf.float16:\n raise base.NotSupportedError(\n \"LayerNorm does not support `tf.float16`, insufficient \"\n \"precision for calculating sufficient statistics.\")\n\n if inputs.get_shape().ndims != 2:\n raise base.NotSupportedError(\n \"Layer normalization expects inputs of rank 2.\"\n \" Got inputs of rank {}.\".format(inputs.get_shape().ndims))\n\n hidden_size = inputs.get_shape()[1].value\n\n if self.GAMMA not in self._initializers:\n self._initializers[self.GAMMA] = create_gamma_initializer()\n self._gamma = tf.get_variable(\n self.GAMMA,\n shape=[hidden_size],\n dtype=inputs.dtype,\n initializer=self._initializers[self.GAMMA],\n partitioner=self._partitioners.get(self.GAMMA),\n regularizer=self._regularizers.get(self.GAMMA))\n\n if self.BETA not in self._initializers:\n self._initializers[self.BETA] = create_beta_initializer()\n self._beta = tf.get_variable(\n self.BETA,\n shape=[hidden_size],\n dtype=inputs.dtype,\n initializer=self._initializers[self.BETA],\n partitioner=self._partitioners.get(self.BETA),\n regularizer=self._regularizers.get(self.BETA))\n\n mean, var = tf.nn.moments(inputs, [1], keep_dims=True)\n\n normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,\n self._gamma, self._eps)\n return normalized\n\n @property\n def initializers(self):\n return self._initializers\n\n @property\n def partitioners(self):\n return self._partitioners\n\n @property\n def regularizers(self):\n return self._regularizers\n\n @property\n def beta(self):\n self._ensure_is_connected()\n return self._beta\n\n @property\n def gamma(self):\n self._ensure_is_connected()\n return self._gamma\n\n\ndef create_beta_initializer():\n \"\"\"Returns a default initializer for the `beta` in layer norm.\"\"\"\n return tf.zeros_initializer()\n\n\ndef create_gamma_initializer():\n \"\"\"Returns a default initializer for the `gamma` in layer norm.\"\"\"\n return tf.ones_initializer()\n"
] | [
[
"tensorflow.nn.batch_normalization",
"tensorflow.nn.moments",
"tensorflow.zeros_initializer",
"tensorflow.ones_initializer"
]
] |
Ezra-H/autodist | [
"b5ab28d0d867c22742daa3c1d324fe20c1852bd7"
] | [
"examples/benchmark/utils/recommendation/movielens.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Download and extract the MovieLens dataset from GroupLens website.\n\nDownload the dataset, and perform basic preprocessing.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tempfile\nimport zipfile\n\n# pylint: disable=g-bad-import-order\nimport numpy as np\nimport pandas as pd\nimport six\nfrom six.moves import urllib # pylint: disable=redefined-builtin\nfrom absl import app as absl_app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nfrom utils.flags import core as flags_core\n\n\nML_1M = \"ml-1m\"\nML_20M = \"ml-20m\"\nDATASETS = [ML_1M, ML_20M]\n\nRATINGS_FILE = \"ratings.csv\"\nMOVIES_FILE = \"movies.csv\"\n\n# URL to download dataset\n_DATA_URL = \"http://files.grouplens.org/datasets/movielens/\"\n\nGENRE_COLUMN = \"genres\"\nITEM_COLUMN = \"item_id\" # movies\nRATING_COLUMN = \"rating\"\nTIMESTAMP_COLUMN = \"timestamp\"\nTITLE_COLUMN = \"titles\"\nUSER_COLUMN = \"user_id\"\n\nGENRES = [\n 'Action',\n 'Adventure',\n 'Animation',\n \"Children\",\n 'Comedy',\n 'Crime',\n 'Documentary',\n 'Drama',\n 'Fantasy',\n 'Film-Noir',\n 'Horror',\n \"IMAX\",\n 'Musical',\n 'Mystery',\n 'Romance',\n 'Sci-Fi',\n 'Thriller',\n 'War',\n 'Western']\nN_GENRE = len(GENRES)\n\nRATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]\nMOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]\n\n# Note: Users are indexed [1, k], not [0, k-1]\nNUM_USER_IDS = {\n ML_1M: 6040,\n ML_20M: 138493,\n}\n\n# Note: Movies are indexed [1, k], not [0, k-1]\n# Both the 1m and 20m datasets use the same movie set.\nNUM_ITEM_IDS = 3952\n\nMAX_RATING = 5\n\nNUM_RATINGS = {\n ML_1M: 1000209,\n ML_20M: 20000263\n}\n\n\ndef _download_and_clean(dataset, data_dir):\n \"\"\"Download MovieLens dataset in a standard format.\n\n This function downloads the specified MovieLens format and coerces it into a\n standard format. 
The only difference between the ml-1m and ml-20m datasets\n after this point (other than size, of course) is that the 1m dataset uses\n whole number ratings while the 20m dataset allows half integer ratings.\n \"\"\"\n if dataset not in DATASETS:\n raise ValueError(\"dataset {} is not in {{{}}}\".format(\n dataset, \",\".join(DATASETS)))\n\n data_subdir = os.path.join(data_dir, dataset)\n\n expected_files = [\"{}.zip\".format(dataset), RATINGS_FILE, MOVIES_FILE]\n\n tf.io.gfile.makedirs(data_subdir)\n if set(expected_files).intersection(\n tf.io.gfile.listdir(data_subdir)) == set(expected_files):\n logging.info(\"Dataset {} has already been downloaded\".format(dataset))\n return\n\n url = \"{}{}.zip\".format(_DATA_URL, dataset)\n\n temp_dir = tempfile.mkdtemp()\n try:\n zip_path = os.path.join(temp_dir, \"{}.zip\".format(dataset))\n zip_path, _ = urllib.request.urlretrieve(url, zip_path)\n statinfo = os.stat(zip_path)\n # A new line to clear the carriage return from download progress\n # logging.info is not applicable here\n print()\n logging.info(\n \"Successfully downloaded {} {} bytes\".format(\n zip_path, statinfo.st_size))\n\n zipfile.ZipFile(zip_path, \"r\").extractall(temp_dir)\n\n if dataset == ML_1M:\n _regularize_1m_dataset(temp_dir)\n else:\n _regularize_20m_dataset(temp_dir)\n\n for fname in tf.io.gfile.listdir(temp_dir):\n if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):\n tf.io.gfile.copy(os.path.join(temp_dir, fname),\n os.path.join(data_subdir, fname))\n else:\n logging.info(\n \"Skipping copy of {}, as it already exists in the \"\n \"destination folder.\".format(fname))\n\n finally:\n tf.io.gfile.rmtree(temp_dir)\n\n\ndef _transform_csv(input_path, output_path, names, skip_first, separator=\",\"):\n \"\"\"Transform csv to a regularized format.\n\n Args:\n input_path: The path of the raw csv.\n output_path: The path of the cleaned csv.\n names: The csv column names.\n skip_first: Boolean of whether to skip the first line of the raw csv.\n separator: Character used to separate fields in the raw csv.\n \"\"\"\n if six.PY2:\n names = [six.ensure_text(n, \"utf-8\") for n in names]\n\n with tf.io.gfile.GFile(output_path, \"wb\") as f_out, \\\n tf.io.gfile.GFile(input_path, \"rb\") as f_in:\n\n # Write column names to the csv.\n f_out.write(\",\".join(names).encode(\"utf-8\"))\n f_out.write(b\"\\n\")\n for i, line in enumerate(f_in):\n if i == 0 and skip_first:\n continue # ignore existing labels in the csv\n\n line = six.ensure_text(line, \"utf-8\", errors=\"ignore\")\n fields = line.split(separator)\n if separator != \",\":\n fields = ['\"{}\"'.format(field) if \",\" in field else field\n for field in fields]\n f_out.write(\",\".join(fields).encode(\"utf-8\"))\n\n\ndef _regularize_1m_dataset(temp_dir):\n \"\"\"\n ratings.dat\n The file has no header row, and each line is in the following format:\n UserID::MovieID::Rating::Timestamp\n - UserIDs range from 1 and 6040\n - MovieIDs range from 1 and 3952\n - Ratings are made on a 5-star scale (whole-star ratings only)\n - Timestamp is represented in seconds since midnight Coordinated Universal\n Time (UTC) of January 1, 1970.\n - Each user has at least 20 ratings\n\n movies.dat\n Each line has the following format:\n MovieID::Title::Genres\n - MovieIDs range from 1 and 3952\n \"\"\"\n working_dir = os.path.join(temp_dir, ML_1M)\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"ratings.dat\"),\n output_path=os.path.join(temp_dir, RATINGS_FILE),\n names=RATING_COLUMNS, skip_first=False, separator=\"::\")\n\n 
_transform_csv(\n input_path=os.path.join(working_dir, \"movies.dat\"),\n output_path=os.path.join(temp_dir, MOVIES_FILE),\n names=MOVIE_COLUMNS, skip_first=False, separator=\"::\")\n\n tf.io.gfile.rmtree(working_dir)\n\n\ndef _regularize_20m_dataset(temp_dir):\n \"\"\"\n ratings.csv\n Each line of this file after the header row represents one rating of one\n movie by one user, and has the following format:\n userId,movieId,rating,timestamp\n - The lines within this file are ordered first by userId, then, within user,\n by movieId.\n - Ratings are made on a 5-star scale, with half-star increments\n (0.5 stars - 5.0 stars).\n - Timestamps represent seconds since midnight Coordinated Universal Time\n (UTC) of January 1, 1970.\n - All the users had rated at least 20 movies.\n\n movies.csv\n Each line has the following format:\n MovieID,Title,Genres\n - MovieIDs range from 1 and 3952\n \"\"\"\n working_dir = os.path.join(temp_dir, ML_20M)\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"ratings.csv\"),\n output_path=os.path.join(temp_dir, RATINGS_FILE),\n names=RATING_COLUMNS, skip_first=True, separator=\",\")\n\n _transform_csv(\n input_path=os.path.join(working_dir, \"movies.csv\"),\n output_path=os.path.join(temp_dir, MOVIES_FILE),\n names=MOVIE_COLUMNS, skip_first=True, separator=\",\")\n\n tf.io.gfile.rmtree(working_dir)\n\n\ndef download(dataset, data_dir):\n if dataset:\n _download_and_clean(dataset, data_dir)\n else:\n _ = [_download_and_clean(d, data_dir) for d in DATASETS]\n\n\ndef ratings_csv_to_dataframe(data_dir, dataset):\n with tf.io.gfile.GFile(os.path.join(data_dir, dataset, RATINGS_FILE)) as f:\n return pd.read_csv(f, encoding=\"utf-8\")\n\n\ndef csv_to_joint_dataframe(data_dir, dataset):\n ratings = ratings_csv_to_dataframe(data_dir, dataset)\n\n with tf.io.gfile.GFile(os.path.join(data_dir, dataset, MOVIES_FILE)) as f:\n movies = pd.read_csv(f, encoding=\"utf-8\")\n\n df = ratings.merge(movies, on=ITEM_COLUMN)\n df[RATING_COLUMN] = df[RATING_COLUMN].astype(np.float32)\n\n return df\n\n\ndef integerize_genres(dataframe):\n \"\"\"Replace genre string with a binary vector.\n\n Args:\n dataframe: a pandas dataframe of movie data.\n\n Returns:\n The transformed dataframe.\n \"\"\"\n def _map_fn(entry):\n entry.replace(\"Children's\", \"Children\") # naming difference.\n movie_genres = entry.split(\"|\")\n output = np.zeros((len(GENRES),), dtype=np.int64)\n for i, genre in enumerate(GENRES):\n if genre in movie_genres:\n output[i] = 1\n return output\n\n dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)\n\n return dataframe\n\n\ndef define_data_download_flags():\n \"\"\"Add flags specifying data download arguments.\"\"\"\n flags.DEFINE_string(\n name=\"data_dir\", default=\"/tmp/movielens-data/\",\n help=flags_core.help_wrap(\n \"Directory to download and extract data.\"))\n\n flags.DEFINE_enum(\n name=\"dataset\", default=None,\n enum_values=DATASETS, case_sensitive=False,\n help=flags_core.help_wrap(\"Dataset to be trained and evaluated.\"))\n\n\ndef main(_):\n \"\"\"Download and extract the data from GroupLens website.\"\"\"\n download(flags.FLAGS.dataset, flags.FLAGS.data_dir)\n\n\nif __name__ == \"__main__\":\n define_data_download_flags()\n FLAGS = flags.FLAGS\n absl_app.run(main)\n"
] | [
[
"tensorflow.io.gfile.rmtree",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.GFile",
"pandas.read_csv",
"tensorflow.io.gfile.listdir"
]
] |
transcendentsky/py_tutorials | [
"fed8e6c8d79f854a1cebcfd5c37297a163846208"
] | [
"earlier-2020/graphs-paper1/print_line_chart.py"
] | [
"import csv\n# import matplotlib.pyplot as plt\nimport pylab as plt\nimport numpy as np\n\ndef show_plot(times, epochs, data):\n # line chart Or Scatter chart\n plt.figure(figsize=(8, 5))\n \"\"\"\n args:\n marker='o' ,'x',\n color=\n \"\"\"\n\n plt.plot(epochs, data, color='red', label='0')\n # plt.plot(epochs, data[:, 1], color='green', marker='x', label='1')\n # plt.legend() # 显示图例\n # plt.grid(True)\n # plt.xlabel('epo chs').set_visible(False)\n # plt.ylabel('data')\n plt.title('Test')\n # plt.gca().xaxis.set_major_locator(plt.MultipleLocator(100))\n # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.2))\n # plt.xticks(np.arange(0,400,100), [1,2,3,4])\n # plt.yticks(np.arange(0,10,4), [1,2,3,4])\n\n plt.show()\n\n# with open('run_nomix_cifar100_mute_with_xavier_logs-tag-Test_1001_val_acc.csv') as f:\n# f_csv = csv.reader(f)\n# headers = next(f_csv)\n# # print(headers)\n# for row in f_csv:\n# print(row)\n\ny = plt.linspace(0, 399, 400)\ny2 = plt.linspace(0, 350, 351)\n\nvconf1 = plt.linspace(0, 399, 400)\nvconf2 = plt.linspace(0, 399, 400)\nvconf3 = plt.linspace(0, 399, 400)\nvconf4 = plt.linspace(0, 350, 351)\n\nlconf1 = plt.linspace(0, 399, 400)\nlconf2 = plt.linspace(0, 399, 400)\nlconf3 = plt.linspace(0, 399, 400)\n\n\n# print(y)\n\nconf1 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_linearmix-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf1)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf1[i] = row[2]\n vconf3[i] *= 1.8\n\nconf2 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_scratch-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf2)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf2[i] = row[2]\n\nconf3 = open(\"paper-1-compare-schedules/run_ssd_vgg16_voc_sigmoid-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf3)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf3[i] = row[2]\n vconf3[i] *= 0.97\n\nrandr = (np.random.rand(400)-0.5) * 0.01 + 1\nrandr2 = (np.random.rand(400)-0.5) * 0.01 + 1\nline = np.linspace(1,1.12,400)\nlconf1 = vconf2.copy() * randr * 1.06\nlconf2 = vconf2.copy() * randr2 * 1.08\nlconf2 = line * lconf2\n\nconf4 = open(\"paper-1-compare-schedules/run_exp2-tag-Train_conf_loss.csv\")\nf_csv = csv.reader(conf4)\nheaders = next(f_csv)\nfor i, row in enumerate(f_csv):\n vconf4[i] = row[2]\n vconf4[i] *= 1.035\n # print(row)\n\n\n# plt.figure(figsize=(8, 5))\nfig, ax = plt.subplots(figsize=(8, 5))\n\n# plt.plot(y[:351], vconf1[:351], color='red', label='linear')\nplt.plot(y[:351], lconf2[:351], color='red', label='fixed ratio(0.1)')\nplt.plot(y[:351], lconf1[:351], color='green', label='fixed ratio(0.05)')\nplt.plot(y[:351], vconf2[:351], color='orange', label='fixed ratio(0.02)')\nplt.plot(y[:351], vconf3[:351], color='blue', label='sigmoid')\n# plt.plot(y2, vconf4, color=\"green\", label=\"exp\")\nplt.ylim(1.5,4)\nplt.xlabel('epochs')\nplt.ylabel('conf loss')\nplt.legend()\nplt.title('Conf Loss')\nplt.show()\nfig.savefig('./conf-loss.eps', dpi=600, format='eps')"
] | [
[
"numpy.linspace",
"numpy.random.rand"
]
] |
xcnick/oneflow | [
"7b786b27069dec35d2493256011e773988c91f56",
"7b786b27069dec35d2493256011e773988c91f56"
] | [
"oneflow/compatible_single_client_python/test/xrt/test_softmax_grad.py",
"oneflow/compatible_single_client_python/test/ops/test_assign.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\n\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\n\nconfig = flow.function_config()\n\n\ndef make_job(shape, axis, dtype=flow.float32):\n config.use_xla_jit(False)\n config.use_tensorrt(False)\n\n @flow.global_function(config)\n def softmax_grad_job(\n y=flow.FixedTensorDef(shape, dtype=dtype),\n dy=flow.FixedTensorDef(shape, dtype=dtype),\n ):\n return flow.nn.softmax_grad(y, dy, axis=axis)\n\n return softmax_grad_job\n\n\ndef make_xla_job(shape, axis, dtype=flow.float32):\n config.use_xla_jit(True)\n config.use_tensorrt(False)\n\n @flow.global_function(config)\n def xla_softmax_grad_job(\n y=flow.FixedTensorDef(shape, dtype=dtype),\n dy=flow.FixedTensorDef(shape, dtype=dtype),\n ):\n return flow.nn.softmax_grad(y, dy, axis=axis)\n\n return xla_softmax_grad_job\n\n\nclass TestSoftmaxGrad(unittest.TestCase):\n def _test_body(self, y, dy, axis, dtype=np.float32):\n f1 = make_job(y.shape, axis, dtype=flow.float32)\n f2 = make_xla_job(y.shape, axis, dtype=flow.float32)\n a = f1(y, dy).get()\n b = f2(y, dy).get()\n print(\"without xla: \", a)\n print(\"with xla\", b)\n self.assertTrue(a.shape == b.shape)\n self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=1e-03, atol=1e-05))\n flow.clear_default_session()\n\n def _test_ones_body(self, shape, axis, dtype=np.float32):\n y = np.ones(shape, dtype=dtype)\n dy = np.ones(shape, dtype=dtype)\n self._test_body(y, dy, axis, dtype=dtype)\n\n def _test_random_body(self, shape, axis, dtype=np.float32):\n y = np.random.random(shape).astype(dtype)\n dy = np.random.random(shape).astype(dtype)\n self._test_body(y, dy, axis, dtype=dtype)\n\n def test_ones_input(self):\n self._test_ones_body((2, 5), axis=1)\n self._test_ones_body((2, 5), axis=-1)\n self._test_ones_body((1, 5, 2), axis=1)\n self._test_ones_body((1, 5, 2), axis=2)\n\n def test_random_input(self):\n self._test_random_body((2, 5), axis=1)\n self._test_random_body((2, 5), axis=-1)\n self._test_random_body((1, 5, 2), axis=1)\n self._test_random_body((1, 5, 2), axis=2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom oneflow.compatible import single_client as flow\nfrom test_util import GenArgDict\nfrom oneflow.compatible.single_client import typing as oft\nimport os\n\nflow_to_np_dtype_dict = {\n flow.int32: np.int32,\n flow.float: np.single,\n flow.double: np.float,\n}\n\n\ndef _random_input(shape, dtype):\n if np.issubdtype(dtype, np.integer):\n return np.random.random_integers(low=-10, high=10, size=shape)\n elif np.issubdtype(dtype, np.floating):\n rng = np.random.default_rng()\n return rng.standard_normal(size=shape, dtype=dtype)\n else:\n raise NotImplementedError\n\n\ndef _of_assign_and_relu(value, dtype, device_type, assign=flow.assign):\n flow.clear_default_session()\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\") is None:\n flow.config.gpu_device_num(1)\n flow.config.cpu_device_num(1)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(dtype)\n func_config.default_placement_scope(flow.scope.placement(device_type, \"0:0\"))\n\n @flow.global_function(function_config=func_config)\n def assign_fn(value_def: oft.Numpy.Placeholder(value.shape, dtype=dtype)):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n assign(var, value_def)\n\n @flow.global_function(function_config=func_config)\n def relu_fn():\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n return flow.nn.relu(var)\n\n assign_fn(value)\n return relu_fn().get().numpy()\n\n\ndef _np_relu(x):\n return np.maximum(x, 0)\n\n\ndef _compare_with_np(test_case, shape, dtype, device_type, assign):\n x = _random_input(shape, flow_to_np_dtype_dict[dtype])\n of_y = _of_assign_and_relu(x, dtype, device_type, assign=assign)\n test_case.assertTrue(np.allclose(_np_relu(x), of_y))\n\n\[email protected]_unless_2n1d()\nclass TestTwoNodeAssign(flow.unittest.TestCase):\n def test_2node_assign(test_case):\n if flow.eager_execution_enabled():\n assign = flow.experimental.eager_assign_121\n else:\n assign = flow.assign\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\"]\n arg_dict[\"assign\"] = [assign]\n for arg in GenArgDict(arg_dict):\n _2node_compare_with_np(test_case, **arg)\n\n\ndef _2node_compare_with_np(test_case, shape, dtype, device_type, assign):\n x = _random_input(shape, flow_to_np_dtype_dict[dtype])\n of_y = _2node_of_assign_and_relu(x, dtype, device_type, assign=assign)\n np_y = _np_relu(x)\n test_case.assertTrue(np.allclose(np_y, of_y))\n\n\ndef _2node_of_assign_and_relu(value, dtype, device_type, assign=flow.assign):\n flow.clear_default_session()\n flow.config.machine_num(2)\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\") is None:\n flow.config.gpu_device_num(1)\n 
flow.config.cpu_device_num(1)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(dtype)\n func_config.default_placement_scope(flow.scope.placement(device_type, \"0:0\"))\n\n @flow.global_function(function_config=func_config)\n def assign_fn(value_def: oft.Numpy.Placeholder(value.shape, dtype=dtype)):\n with flow.scope.placement(device_type, \"1:0\"):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n assign(var, value_def)\n\n @flow.global_function(function_config=func_config)\n def relu_fn():\n with flow.scope.placement(device_type, \"1:0\"):\n var = flow.get_variable(\n name=\"var\",\n shape=value.shape,\n dtype=dtype,\n initializer=flow.constant_initializer(0),\n )\n ret = flow.nn.relu(var)\n return ret\n\n assign_fn(value)\n relu_ret = relu_fn().get()\n return relu_ret.numpy()\n\n\[email protected]_unless_1n1d()\nclass TestAssign(flow.unittest.TestCase):\n def test_assign(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"assign\"] = [flow.assign]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_eager_assign_121(test_case):\n if not flow.eager_execution_enabled():\n return\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10), (30, 4), (8, 256, 20)]\n arg_dict[\"dtype\"] = [flow.float, flow.double]\n arg_dict[\"device_type\"] = [\"cpu\"]\n arg_dict[\"assign\"] = [flow.experimental.eager_assign_121]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.ones"
],
[
"numpy.allclose",
"numpy.random.default_rng",
"numpy.issubdtype",
"numpy.maximum",
"numpy.random.random_integers"
]
] |
SilanHe/hierarchical-dnn-interpretations | [
"d6f96d0ab6fec48ee53ab930b2660e80525993b9"
] | [
"acd/scores/cd.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom copy import deepcopy\nimport numpy as np\nfrom scipy.special import expit as sigmoid\nfrom .cd_propagate import *\nfrom .cd_architecture_specific import *\n\ndef cd(im_torch: torch.Tensor, model, mask=None, model_type=None, device='cuda', transform=None):\n '''Get contextual decomposition scores for blob\n \n Params\n ------\n im_torch: torch.Tensor\n example to interpret - usually has shape (batch_size, num_channels, height, width)\n model: pytorch model \n mask: array_like (values in {0, 1})\n required unless transform is supplied\n array with 1s marking the locations of relevant pixels, 0s marking the background\n shape should match the shape of im_torch or just H x W \n model_type: str, optional\n usually should just leave this blank\n if this is == 'mnist', uses CD for a specific mnist model\n if this is == 'resnet18', uses resnet18 model\n device: str, optional\n transform: function\n transform should be a function which transforms the original image to specify rel\n only used if mask is not passed\n \n Returns\n -------\n relevant: torch.Tensor\n class-wise scores for relevant mask\n irrelevant: torch.Tensor\n class-wise scores for everything but the relevant mask \n '''\n # set up model\n model.eval()\n model = model.to(device)\n im_torch = im_torch.to(device)\n \n # set up masks\n if not mask is None:\n mask = torch.FloatTensor(mask).to(device)\n relevant = mask * im_torch\n irrelevant = (1 - mask) * im_torch\n elif not transform is None:\n relevant = transform(im_torch).to(device)\n if len(relevant.shape) < 4:\n relevant = relevant.reshape(1, 1, relevant.shape[0], relevant.shape[1])\n irrelevant = im_torch - relevant\n else:\n print('invalid arguments')\n relevant = relevant.to(device)\n irrelevant = irrelevant.to(device)\n\n # deal with specific architectures which have problems\n if model_type == 'mnist':\n return cd_propagate_mnist(relevant, irrelevant, model)\n elif model_type == 'resnet18':\n return cd_propagate_resnet(relevant, irrelevant, model)\n \n # try the generic case\n else:\n mods = list(model.modules())\n relevant, irrelevant = cd_generic(mods, relevant, irrelevant)\n return relevant, irrelevant\n\ndef cd_generic(mods, relevant, irrelevant):\n '''Helper function for cd which loops over modules and propagates them \n based on the layer name\n '''\n for i, mod in enumerate(mods):\n t = str(type(mod))\n if 'Conv2d' in t:\n relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)\n elif 'Linear' in t:\n relevant = relevant.reshape(relevant.shape[0], -1)\n irrelevant = irrelevant.reshape(irrelevant.shape[0], -1)\n relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)\n elif 'ReLU' in t:\n relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)\n elif 'AvgPool' in t or 'NormLayer' in t or 'Dropout' in t \\\n or 'ReshapeLayer' in t or ('modularize' in t and 'Transform' in t): # custom layers\n relevant, irrelevant = propagate_independent(relevant, irrelevant, mod)\n elif 'Pool' in t and not 'AvgPool' in t:\n relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod)\n elif 'BatchNorm2d' in t:\n relevant, irrelevant = propagate_batchnorm2d(relevant, irrelevant, mod)\n return relevant, irrelevant\n\n\ndef cd_text(batch, model, start, stop, return_irrel_scores=False):\n '''Get contextual decomposition scores for substring of a text sequence\n \n Params\n ------\n batch: torchtext batch\n really only requires that batch.text is the string input to be interpreted\n start: 
int\n beginning index of substring to be interpreted (inclusive)\n stop: int\n ending index of substring to be interpreted (inclusive)\n\n Returns\n -------\n scores: torch.Tensor\n class-wise scores for relevant substring\n '''\n weights = model.lstm.state_dict()\n\n # Index one = word vector (i) or hidden state (h), index two = gate\n W_ii, W_if, W_ig, W_io = np.split(weights['weight_ih_l0'], 4, 0)\n W_hi, W_hf, W_hg, W_ho = np.split(weights['weight_hh_l0'], 4, 0)\n b_i, b_f, b_g, b_o = np.split(weights['bias_ih_l0'].cpu().numpy() + weights['bias_hh_l0'].cpu().numpy(), 4)\n word_vecs = model.embed(batch.text)[:, 0].data\n T = word_vecs.size(0)\n relevant = np.zeros((T, model.hidden_dim))\n irrelevant = np.zeros((T, model.hidden_dim))\n relevant_h = np.zeros((T, model.hidden_dim))\n irrelevant_h = np.zeros((T, model.hidden_dim))\n for i in range(T):\n if i > 0:\n prev_rel_h = relevant_h[i - 1]\n prev_irrel_h = irrelevant_h[i - 1]\n else:\n prev_rel_h = np.zeros(model.hidden_dim)\n prev_irrel_h = np.zeros(model.hidden_dim)\n\n rel_i = np.dot(W_hi, prev_rel_h)\n rel_g = np.dot(W_hg, prev_rel_h)\n rel_f = np.dot(W_hf, prev_rel_h)\n rel_o = np.dot(W_ho, prev_rel_h)\n irrel_i = np.dot(W_hi, prev_irrel_h)\n irrel_g = np.dot(W_hg, prev_irrel_h)\n irrel_f = np.dot(W_hf, prev_irrel_h)\n irrel_o = np.dot(W_ho, prev_irrel_h)\n\n if i >= start and i <= stop:\n rel_i = rel_i + np.dot(W_ii, word_vecs[i])\n rel_g = rel_g + np.dot(W_ig, word_vecs[i])\n rel_f = rel_f + np.dot(W_if, word_vecs[i])\n rel_o = rel_o + np.dot(W_io, word_vecs[i])\n else:\n irrel_i = irrel_i + np.dot(W_ii, word_vecs[i])\n irrel_g = irrel_g + np.dot(W_ig, word_vecs[i])\n irrel_f = irrel_f + np.dot(W_if, word_vecs[i])\n irrel_o = irrel_o + np.dot(W_io, word_vecs[i])\n\n rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)\n rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, np.tanh)\n\n relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g\n irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g\n\n if i >= start and i <= stop:\n relevant[i] += bias_contrib_i * bias_contrib_g\n else:\n irrelevant[i] += bias_contrib_i * bias_contrib_g\n\n if i > 0:\n rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)\n relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]\n irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \\\n relevant[i - 1]\n\n o = sigmoid(np.dot(W_io, word_vecs[i]) + np.dot(W_ho, prev_rel_h + prev_irrel_h) + b_o)\n rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)\n new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])\n # relevant_h[i] = new_rel_h * (rel_contrib_o + bias_contrib_o)\n # irrelevant_h[i] = new_rel_h * (irrel_contrib_o) + new_irrel_h * (rel_contrib_o + irrel_contrib_o + bias_contrib_o)\n relevant_h[i] = o * new_rel_h\n irrelevant_h[i] = o * new_irrel_h\n\n W_out = model.hidden_to_label.weight.data\n\n # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias\n scores = np.dot(W_out, relevant_h[T - 1])\n irrel_scores = np.dot(W_out, irrelevant_h[T - 1])\n\n if return_irrel_scores:\n return scores, irrel_scores\n \n return scores\n"
] | [
[
"numpy.dot",
"torch.FloatTensor",
"numpy.zeros",
"numpy.split"
]
] |
krishpop/pddm | [
"b1452554a4e318966b8ca3da53978458ac635c5d"
] | [
"pddm/regressors/feedforward_network.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\ndef feedforward_network(inputStates, inputSize, outputSize, num_fc_layers,\n depth_fc_layers, tf_datatype, scope):\n\n with tf.variable_scope(str(scope)):\n\n #concat K entries together [bs x K x sa] --> [bs x ksa]\n inputState = tf.layers.flatten(inputStates)\n\n #vars\n intermediate_size = depth_fc_layers\n reuse = False\n initializer = tf.glorot_normal_initializer(\n seed=None, dtype=tf_datatype)\n fc = tf.layers.dense\n\n # make hidden layers\n for i in range(num_fc_layers):\n if i==0:\n fc_i = fc(\n inputState,\n units=intermediate_size,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n else:\n fc_i = fc(\n h_i,\n units=intermediate_size,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n h_i = tf.nn.relu(fc_i)\n\n # make output layer\n z = fc(\n h_i,\n units=outputSize,\n activation=None,\n kernel_initializer=initializer,\n bias_initializer=initializer,\n reuse=reuse,\n trainable=True)\n\n return z\n"
] | [
[
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.glorot_normal_initializer",
"tensorflow.compat.v1.layers.flatten"
]
] |
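A minimal graph-mode usage sketch for `feedforward_network` follows. The batch size, the K=3 / 20-dimensional input shape, the scope name, and the import path are assumptions for illustration only; the function flattens the `[batch, K, state+action]` input itself, and its `inputSize` argument is not used inside the body, so only the call pattern is meant to carry over.

```python
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Hypothetical import mirroring the file path listed above.
from pddm.regressors.feedforward_network import feedforward_network

# Hypothetical shapes: batches of K=3 stacked (state, action) vectors of size 20 each.
inputStates = tf.placeholder(tf.float32, shape=[None, 3, 20])
predictions = feedforward_network(
    inputStates, inputSize=3 * 20, outputSize=7,
    num_fc_layers=2, depth_fc_layers=64,
    tf_datatype=tf.float32, scope='dyn_model')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(
        predictions,
        feed_dict={inputStates: np.random.randn(32, 3, 20).astype(np.float32)})
    print(out.shape)  # (32, 7)
```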
AI-sandbox/hyperLAI | [
"49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689"
] | [
"hyperLAI/features/hyperLAIdataset.py"
] | [
"import numpy as np\nimport torch\nfrom torch.utils import data\nimport sys\nfrom utils.generate_dataset import *\nfrom HypHC.datasets.triples import samples_triples\n\nclass HyperLoader(data.Dataset):\n def __init__(self, data_dir, split_indices, restrict_labels=[0,1,2,3,4,5,6], chromosome=\"all\"):\n '''\n Takes in all the relevant arguments to produce the dataset.\n Arguments:\n `data_dir`: directory in which data (either text files or numpy arrays) are located\n `similarity_func`: function to calculate pairwise similarities\n `split_indices`: indices for the data split (train/test/valid)\n `restrict_labels`: list of super-populations to include in analysis. Indices correspond to 'EUR', 'EAS', 'AMR', 'SAS', 'AFR', 'OCE', 'WAS'\n '''\n\n self.data_dir = data_dir \n self.restrict_labels = restrict_labels\n self.chromosome = chromosome\n self.split_indices = split_indices\n self.snps, self.pop_labels, self.suppop_labels, self.pop_label_index, self.suppop_label_index = self.load_data()\n def load_data(self):\n '''\n Loads SNP and label data from the necessary file locations \n '''\n #If we want all chromosomes, then we have the arrays already pre-created\n if self.chromosome ==\"all\":\n file_order = [\"all_snps.npy\", \"labels_suppop.npy\", \"labels_pop.npy\", \n \"coords.npy\", \"pop_index.npy\", \"pop_code_index.npy\", \"suppop_code_index.npy\"]\n test_data = tuple([np.load(self.data_dir + x) for x in file_order])\n ind_data = test_data[0]\n else:\n #The data for individual chromosomes is in a slightly different format\n test_data = load_dataset(self.data_dir + \"ref_final_beagle_phased_1kg_hgdp_sgdp_chr%s_hg19.vcf.gz\"%(self.chromosome), \n self.data_dir + \"reference_panel_metadata.tsv\", \"./\", chromosome=self.chromosome, \n verbose=True, filter_admixed=True, filter_missing_coord=True)\n ind_data = test_data[0].reshape([test_data[0].shape[0], test_data[0].shape[1] * test_data[0].shape[2]]).T \n #We've unfolded each set of 23 chromosomes as a \"different\" individual \n #So we must do the same for the labels by doubling them\n ind_pop_labels = np.repeat(test_data[2], 2).astype(int)\n ind_suppop_labels = np.repeat(test_data[1], 2).astype(int)\n #Restrict to only the super-populations we've specified\n pop_indices = np.argwhere(np.isin(ind_suppop_labels, self.restrict_labels)).T[0]\n indices = np.intersect1d(pop_indices, self.split_indices)\n #Return everything\n return ind_data[indices], ind_pop_labels[indices], ind_suppop_labels[indices], test_data[4], test_data[6]\n def __len__(self):\n return len(self.snps)\n def __getitem__(self, index):\n '''\n Returns data and labels for the current index\n '''\n return torch.tensor(self.snps[index]), torch.tensor(self.suppop_labels[index]), torch.tensor(self.pop_labels[index])\n"
] | [
[
"numpy.load",
"numpy.intersect1d",
"torch.tensor",
"numpy.repeat",
"numpy.isin"
]
] |
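A short, hypothetical usage sketch for `HyperLoader`: wrap it in a `DataLoader` and draw one batch. The data directory, the split sizes, and the assumption that the pre-built `all_snps.npy` / label arrays exist are all illustrative. (Note, as an aside, that the constructor's docstring mentions a `similarity_func` argument that the signature does not actually take.)

```python
import numpy as np
from torch.utils.data import DataLoader

# Hypothetical import mirroring the file path listed above.
from hyperLAI.features.hyperLAIdataset import HyperLoader

# Illustrative 80/20 split over 2000 haplotype rows; real index files would come from the project.
all_indices = np.arange(2000)
train_idx, valid_idx = all_indices[:1600], all_indices[1600:]

train_set = HyperLoader(
    "/path/to/data/",              # directory expected to contain all_snps.npy, labels_*.npy, etc.
    train_idx,
    restrict_labels=[0, 1, 2, 3, 4, 5, 6],
    chromosome="all",
)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)

# __getitem__ returns (SNP tensor, super-population label, population label).
snps, suppop_labels, pop_labels = next(iter(train_loader))
print(snps.shape, suppop_labels.shape, pop_labels.shape)
```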
qbetterk/user-simulator | [
"77caca30ff67b9112b1fe5e65e191c6b5e25532c"
] | [
"sequicity/tsd_net.py"
] | [
"import torch\r\n\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nimport numpy as np\r\nimport math\r\nfrom sequicity.config import global_config as cfg\r\nimport copy, random, time, logging\r\n\r\nfrom torch.distributions import Categorical\r\nfrom sequicity.reader import pad_sequences\r\nimport pdb\r\nimport simulator.dialog_config as dialog_config\r\nimport pdb\r\n\r\n\r\ndef cuda_(var):\r\n return var.cuda() if cfg.cuda else var\r\n\r\n\r\ndef toss_(p):\r\n return random.randint(0, 99) <= p\r\n\r\n\r\ndef nan(v):\r\n if type(v) is float:\r\n return v == float('nan')\r\n return np.isnan(np.sum(v.data.cpu().numpy()))\r\n\r\n\r\ndef get_sparse_input_aug(x_input_np):\r\n \"\"\"\r\n sparse input of\r\n :param x_input_np: [T,B]\r\n :return: Numpy array: [B,T,aug_V]\r\n \"\"\"\r\n ignore_index = [0]\r\n unk = 2\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n for t in range(x_input_np.shape[0]):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n if w not in ignore_index:\r\n if w != unk:\r\n result[t][b][x_input_np[t][b]] = 1.0\r\n else:\r\n result[t][b][cfg.vocab_size + t] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n\r\ndef init_gru(gru):\r\n gru.reset_parameters()\r\n for _, hh, _, _ in gru.all_weights:\r\n for i in range(0, hh.size(0), gru.hidden_size):\r\n torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=1)\r\n\r\n\r\nclass Attn(nn.Module):\r\n def __init__(self, hidden_size):\r\n super(Attn, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\r\n self.v = nn.Parameter(torch.zeros(hidden_size))\r\n stdv = 1. / math.sqrt(self.v.size(0))\r\n self.v.data.normal_(mean=0, std=stdv)\r\n\r\n def forward(self, hidden, encoder_outputs, mask=False, inp_seqs=None, stop_tok=None, normalize=True):\r\n encoder_outputs = encoder_outputs.transpose(0, 1) # [B,T,H]\r\n attn_energies = self.score(hidden, encoder_outputs)\r\n if True or not mask:\r\n normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]\r\n else:\r\n mask_idx = []\r\n # inp_seqs: ndarray of [T,B]\r\n # inp_seqs = inp_seqs.cpu().numpy()\r\n for b in range(inp_seqs.shape[1]):\r\n for t in range(inp_seqs.shape[0] + 1):\r\n if t == inp_seqs.shape[0] or inp_seqs[t, b] in stop_tok:\r\n mask_idx.append(t)\r\n break\r\n mask = []\r\n for mask_len in mask_idx:\r\n mask.append([1.] * mask_len + [0.] 
* (inp_seqs.shape[0] - mask_len))\r\n mask = cuda_(Variable(torch.FloatTensor(mask))) # [B,T]\r\n attn_energies = attn_energies * mask.unsqueeze(1)\r\n normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]\r\n\r\n context = torch.bmm(normalized_energy, encoder_outputs) # [B,1,H]\r\n return context.transpose(0, 1) # [1,B,H]\r\n\r\n def score(self, hidden, encoder_outputs):\r\n max_len = encoder_outputs.size(1)\r\n H = hidden.repeat(max_len, 1, 1).transpose(0, 1)\r\n # pdb.set_trace()\r\n energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2))) # [B,T,2H]->[B,T,H]\r\n energy = energy.transpose(2, 1) # [B,H,T]\r\n v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]\r\n energy = torch.bmm(v, energy) # [B,1,T]\r\n return energy\r\n\r\n\r\nclass SimpleDynamicEncoder(nn.Module):\r\n def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.embed_size = embed_size\r\n self.n_layers = n_layers\r\n self.dropout = dropout\r\n self.embedding = nn.Embedding(input_size, embed_size)\r\n self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)\r\n init_gru(self.gru)\r\n\r\n def forward(self, input_seqs, input_lens, hidden=None):\r\n \"\"\"\r\n forward procedure. No need for inputs to be sorted\r\n :param input_seqs: Variable of [T,B]\r\n :param hidden:\r\n :param input_lens: *numpy array* of len for each input sequence\r\n :return:\r\n \"\"\"\r\n # print(\"in encoder\")\r\n # print(\"input_seqs\", input_seqs)\r\n # print(\"hidden\", hidden)\r\n # print(\"input_lens\", input_lens)\r\n batch_size = input_seqs.size(1)\r\n embedded = self.embedding(input_seqs)\r\n import pdb\r\n if torch.isnan(embedded).sum() > 0:\r\n pdb.set_trace()\r\n # pass\r\n # print(\"embedded\", embedded)\r\n embedded = embedded.transpose(0, 1) # [B,T,E]\r\n sort_idx = np.argsort(-input_lens)\r\n unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))\r\n input_lens = input_lens[sort_idx]\r\n sort_idx = cuda_(torch.LongTensor(sort_idx))\r\n embedded = embedded[sort_idx].transpose(0, 1) # [T,B,E]\r\n # print(\"embedded\", embedded)\r\n packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)\r\n outputs, hidden = self.gru(packed, hidden)\r\n # print('outputs', outputs)\r\n\r\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\r\n outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]\r\n outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\r\n hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\r\n return outputs, hidden, embedded\r\n\r\n\r\nclass BSpanDecoder(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate, vocab):\r\n super().__init__()\r\n self.emb = nn.Embedding(vocab_size, embed_size)\r\n if cfg.use_positional_embedding:\r\n self.positional_embedding = nn.Embedding(cfg.max_ts + 1, embed_size)\r\n init_pos_emb = self.position_encoding_init(cfg.max_ts + 1, embed_size)\r\n self.positional_embedding.weight.data = init_pos_emb\r\n self.gru = nn.GRU(hidden_size + embed_size, hidden_size, dropout=dropout_rate)\r\n self.proj = nn.Linear(hidden_size * 2, vocab_size)\r\n\r\n self.attn_u = Attn(hidden_size)\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.inp_dropout = nn.Dropout(self.dropout_rate)\r\n\r\n 
init_gru(self.gru)\r\n self.vocab = vocab\r\n\r\n def position_encoding_init(self, n_position, d_pos_vec):\r\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]\r\n if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])\r\n\r\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\r\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\r\n return torch.from_numpy(position_enc).type(torch.FloatTensor)\r\n\r\n def forward(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n position):\r\n # print(\"in bSpanDecoder\")\r\n # print(u_input_np)\r\n # print(u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n # position)\r\n # print(\"prev_z_input_np\", prev_z_input_np)\r\n sparse_u_input = Variable(get_sparse_input_aug(u_input_np), requires_grad=False)\r\n\r\n if pv_z_enc_out is not None:\r\n context = self.attn_u(last_hidden, torch.cat([pv_z_enc_out, u_enc_out], dim=0), mask=True,\r\n inp_seqs=np.concatenate([prev_z_input_np, u_input_np], 0),\r\n stop_tok=[self.vocab.encode('EOS_M')])\r\n else:\r\n context = self.attn_u(last_hidden, u_enc_out, mask=True, inp_seqs=u_input_np,\r\n stop_tok=[self.vocab.encode('EOS_M')])\r\n embed_z = self.emb(z_tm1)\r\n # embed_z = self.inp_dropout(embed_z)\r\n\r\n if cfg.use_positional_embedding: # defaulty not used\r\n position_label = [position] * u_enc_out.size(1) # [B]\r\n position_label = cuda_(Variable(torch.LongTensor(position_label))).view(1, -1) # [1,B]\r\n pos_emb = self.positional_embedding(position_label)\r\n embed_z = embed_z + pos_emb\r\n\r\n gru_in = torch.cat([embed_z, context], 2)\r\n gru_out, last_hidden = self.gru(gru_in, last_hidden)\r\n # gru_out = self.inp_dropout(gru_out)\r\n gen_score = self.proj(torch.cat([gru_out, context], 2)).squeeze(0)\r\n # gen_score = self.inp_dropout(gen_score)\r\n u_copy_score = torch.tanh(self.proj_copy1(u_enc_out.transpose(0, 1))) # [B,T,H]\r\n # stable version of copynet\r\n u_copy_score = torch.matmul(u_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n u_copy_score = u_copy_score.cpu()\r\n u_copy_score_max = torch.max(u_copy_score, dim=1, keepdim=True)[0]\r\n u_copy_score = torch.exp(u_copy_score - u_copy_score_max) # [B,T]\r\n u_copy_score = torch.log(torch.bmm(u_copy_score.unsqueeze(1), sparse_u_input)).squeeze(\r\n 1) + u_copy_score_max # [B,V]\r\n u_copy_score = cuda_(u_copy_score)\r\n if pv_z_enc_out is None:\r\n # u_copy_score = self.inp_dropout(u_copy_score)\r\n scores = F.softmax(torch.cat([gen_score, u_copy_score], dim=1), dim=1)\r\n gen_score, u_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + u_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, u_copy_score[:, cfg.vocab_size:]], 1)\r\n else:\r\n sparse_pv_z_input = Variable(get_sparse_input_aug(prev_z_input_np), requires_grad=False)\r\n pv_z_copy_score = torch.tanh(self.proj_copy2(pv_z_enc_out.transpose(0, 1))) # [B,T,H]\r\n pv_z_copy_score = torch.matmul(pv_z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n pv_z_copy_score = pv_z_copy_score.cpu()\r\n pv_z_copy_score_max = torch.max(pv_z_copy_score, dim=1, keepdim=True)[0]\r\n pv_z_copy_score = torch.exp(pv_z_copy_score - pv_z_copy_score_max) # [B,T]\r\n pv_z_copy_score = torch.log(torch.bmm(pv_z_copy_score.unsqueeze(1), sparse_pv_z_input)).squeeze(\r\n 1) + pv_z_copy_score_max # [B,V]\r\n pv_z_copy_score = cuda_(pv_z_copy_score)\r\n 
scores = F.softmax(torch.cat([gen_score, u_copy_score, pv_z_copy_score], dim=1), dim=1)\r\n gen_score, u_copy_score, pv_z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:,\r\n cfg.vocab_size:2 * cfg.vocab_size + u_input_np.shape[0]], \\\r\n scores[:, 2 * cfg.vocab_size + u_input_np.shape[0]:]\r\n proba = gen_score + u_copy_score[:, :cfg.vocab_size] + pv_z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, pv_z_copy_score[:, cfg.vocab_size:], u_copy_score[:, cfg.vocab_size:]], 1)\r\n return gru_out, last_hidden, proba\r\n\r\n\r\nclass ResponseDecoder(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):\r\n super().__init__()\r\n self.emb = emb\r\n self.attn_z = Attn(hidden_size)\r\n self.attn_u = Attn(hidden_size)\r\n self.gru = gru\r\n init_gru(self.gru)\r\n self.proj = proj\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.vocab = vocab\r\n\r\n def get_sparse_selective_input(self, x_input_np):\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n for t in range(x_input_np.shape[0] - 1):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n word = self.vocab.decode(w)\r\n if word in reqs:\r\n slot = self.vocab.encode(word + '_SLOT')\r\n result[t + 1][b][slot] = 1.0\r\n else:\r\n if w == 2 or w >= cfg.vocab_size:\r\n result[t + 1][b][cfg.vocab_size + t] = 5.0\r\n else:\r\n result[t + 1][b][w] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):\r\n sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)\r\n\r\n m_embed = self.emb(m_t_input)\r\n z_context = self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],\r\n inp_seqs=z_input_np)\r\n u_context = self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],\r\n inp_seqs=u_input_np)\r\n gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)\r\n gru_out, last_hidden = self.gru(gru_in, last_hidden)\r\n gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)\r\n z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))\r\n z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n z_copy_score = z_copy_score.cpu()\r\n z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]\r\n z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]\r\n z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(\r\n 1) + z_copy_score_max # [B,V]\r\n z_copy_score = cuda_(z_copy_score)\r\n\r\n scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)\r\n gen_score, z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)\r\n return proba, last_hidden, gru_out\r\n\r\n\r\nclass ResponseDecoder_discrete(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, 
proj, emb, vocab):\r\n super().__init__()\r\n self.emb = emb\r\n self.attn_z = Attn(hidden_size)\r\n self.attn_u = Attn(hidden_size)\r\n self.gru = gru\r\n init_gru(self.gru)\r\n self.proj_0 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj_1 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj_2 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)\r\n self.proj = proj\r\n self.proj_copy1 = nn.Linear(hidden_size, hidden_size)\r\n self.proj_copy2 = nn.Linear(hidden_size, hidden_size)\r\n self.dropout_rate = dropout_rate\r\n\r\n self.vocab = vocab\r\n\r\n def get_sparse_selective_input(self, x_input_np):\r\n result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),\r\n dtype=np.float32)\r\n result.fill(1e-10)\r\n reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n for t in range(x_input_np.shape[0] - 1):\r\n for b in range(x_input_np.shape[1]):\r\n w = x_input_np[t][b]\r\n word = self.vocab.decode(w)\r\n if word in reqs:\r\n slot = self.vocab.encode(word + '_SLOT')\r\n result[t + 1][b][slot] = 1.0\r\n else:\r\n if w == 2 or w >= cfg.vocab_size:\r\n result[t + 1][b][cfg.vocab_size + t] = 5.0\r\n else:\r\n result[t + 1][b][w] = 1.0\r\n result_np = result.transpose((1, 0, 2))\r\n result = torch.from_numpy(result_np).float()\r\n return result\r\n\r\n def forward(self, z_enc_out, u_enc_out, np_state):\r\n # sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)\r\n\r\n # m_embed = self.emb(m_t_input)\r\n # z_context = torch.mean(z_enc_out, 0)#= self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],\r\n # inp_seqs=z_input_np)\r\n # pdb.set_trace()\r\n u_context = u_enc_out[-1, :, :]#= self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],\r\n # inp_seqs=u_input_np)\r\n state_from_np = torch.from_numpy(np_state).float().unsqueeze(0)\r\n\r\n output0 = F.tanh(self.proj_0(torch.cat([u_context, state_from_np], 1)))\r\n output1 = F.sigmoid(self.proj_1(output0))\r\n output2 = F.sigmoid(self.proj_2(output1))\r\n # gru_in = torch.cat([u_context, z_context], dim=2)\r\n # gru_out, last_hidden = self.gru(gru_in)\r\n # print(z_context)\r\n # print(z_context.shape)\r\n # print(u_context)\r\n # print(u_context.shape)\r\n gen_score = self.proj(output2)#.squeeze(0)# self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)\r\n\r\n return gen_score\r\n \"\"\"\r\n z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))\r\n z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)\r\n z_copy_score = z_copy_score.cpu()\r\n z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]\r\n z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]\r\n z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(\r\n 1) + z_copy_score_max # [B,V]\r\n z_copy_score = cuda_(z_copy_score)\r\n\r\n scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)\r\n gen_score, z_copy_score = scores[:, :cfg.vocab_size], \\\r\n scores[:, cfg.vocab_size:]\r\n proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]\r\n proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)\r\n \"\"\"\r\n return proba, last_hidden, gru_out\r\n\r\n\r\nclass TSD(nn.Module):\r\n def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, 
dropout_rate, z_length,\r\n max_ts, action_size=dialog_config.SYS_ACTION_CARDINALITY, discrete_act=False, beam_search=False, teacher_force=100, **kwargs):\r\n super().__init__()\r\n self.vocab = kwargs['vocab']\r\n self.reader = kwargs['reader']\r\n self.emb = nn.Embedding(vocab_size, embed_size)\r\n self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)\r\n self.proj = nn.Linear(hidden_size * 3, vocab_size)\r\n self.proj_discrete = nn.Linear(hidden_size + dialog_config.STATE_DIM, action_size)\r\n self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)\r\n self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate, self.vocab)\r\n self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,\r\n self.dec_gru, self.proj, self.emb, self.vocab)\r\n self.m_decoder_discrete = ResponseDecoder_discrete(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,\r\n self.dec_gru, self.proj_discrete, self.emb, self.vocab)\r\n self.embed_size = embed_size\r\n\r\n self.z_length = z_length\r\n self.max_ts = max_ts\r\n self.discrete_act = discrete_act\r\n self.beam_search = beam_search\r\n self.teacher_force = teacher_force\r\n\r\n self.pr_loss = nn.NLLLoss(ignore_index=0)\r\n self.dec_loss = nn.NLLLoss(ignore_index=0)\r\n\r\n self.saved_log_policy = []\r\n\r\n if self.beam_search:\r\n self.beam_size = kwargs['beam_size']\r\n self.eos_token_idx = kwargs['eos_token_idx']\r\n\r\n def forward(self, u_input, u_input_np, m_input, m_input_np, z_input, u_len, m_len, turn_states,\r\n degree_input, mode, np_state, **kwargs):\r\n if mode == 'train' or mode == 'valid':\r\n pz_proba, pm_dec_proba, turn_states = \\\r\n self.forward_turn(u_input, u_len, m_input=m_input, m_len=m_len, z_input=z_input, mode='train',\r\n turn_states=turn_states, degree_input=degree_input, u_input_np=u_input_np,\r\n m_input_np=m_input_np, **kwargs)\r\n loss, pr_loss, m_loss = self.supervised_loss(torch.log(pz_proba), torch.log(pm_dec_proba),\r\n z_input, m_input)\r\n return loss, pr_loss, m_loss, turn_states\r\n\r\n elif mode == 'test':\r\n if self.discrete_act:\r\n m_output_index, pz_index, turn_states, pz_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,\r\n mode='test',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np,\r\n m_input_np=m_input_np,\r\n np_state=np_state,\r\n **kwargs\r\n )\r\n return m_output_index, pz_index, turn_states, pz_proba\r\n else:\r\n m_output_index, pz_index, turn_states, pz_proba, mt_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,\r\n mode='test',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np, m_input_np=m_input_np,\r\n **kwargs\r\n )\r\n return m_output_index, pz_index, turn_states, pz_proba, mt_proba\r\n\r\n elif mode == 'rl':\r\n loss = self.forward_turn(u_input, u_len=u_len, is_train=False, mode='rl',\r\n turn_states=turn_states,\r\n degree_input=degree_input,\r\n u_input_np=u_input_np, m_input_np=m_input_np,\r\n **kwargs\r\n )\r\n return loss\r\n\r\n def forward_turn(self, u_input, u_len, turn_states, mode, degree_input, u_input_np, m_input_np=None,\r\n m_input=None, np_state=None, m_len=None, z_input=None, **kwargs):\r\n \"\"\"\r\n compute required outputs for a single dialogue turn. 
Turn state{Dict} will be updated in each call.\r\n :param u_input_np:\r\n :param m_input_np:\r\n :param u_len:\r\n :param turn_states:\r\n :param is_train:\r\n :param u_input: [T,B]\r\n :param m_input: [T,B]\r\n :param z_input: [T,B]\r\n :return:\r\n \"\"\"\r\n prev_z_input = kwargs.get('prev_z_input', None)\r\n prev_z_input_np = kwargs.get('prev_z_input_np', None)\r\n prev_z_len = kwargs.get('prev_z_len', None)\r\n pv_z_emb = None\r\n batch_size = u_input.size(1)\r\n pv_z_enc_out = None\r\n\r\n if prev_z_input is not None:\r\n pv_z_enc_out, _, pv_z_emb = self.u_encoder(prev_z_input, prev_z_len)\r\n\r\n u_enc_out, u_enc_hidden, u_emb = self.u_encoder(u_input, u_len)\r\n last_hidden = u_enc_hidden[:-1]\r\n z_tm1 = cuda_(Variable(torch.ones(1, batch_size).long() * 3)) # GO_2 token\r\n m_tm1 = cuda_(Variable(torch.ones(1, batch_size).long())) # GO token\r\n if mode == 'train':\r\n pz_dec_outs = []\r\n pz_proba = []\r\n z_length = z_input.size(0) if z_input is not None else self.z_length # GO token\r\n hiddens = [None] * batch_size\r\n for t in range(z_length):\r\n pz_dec_out, last_hidden, proba = \\\r\n self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,\r\n z_tm1=z_tm1, last_hidden=last_hidden,\r\n pv_z_enc_out=pv_z_enc_out, prev_z_input_np=prev_z_input_np,\r\n u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)\r\n pz_proba.append(proba)\r\n pz_dec_outs.append(pz_dec_out)\r\n z_np = z_tm1.view(-1).cpu().data.numpy()\r\n for i in range(batch_size):\r\n if z_np[i] == self.vocab.encode('EOS_Z2'):\r\n hiddens[i] = last_hidden[:, i, :]\r\n z_tm1 = z_input[t].view(1, -1)\r\n for i in range(batch_size):\r\n if hiddens[i] is None:\r\n hiddens[i] = last_hidden[:, i, :]\r\n last_hidden = torch.stack(hiddens, dim=1)\r\n\r\n z_input_np = z_input.cpu().data.numpy()\r\n\r\n pz_dec_outs = torch.cat(pz_dec_outs, dim=0) # [Tz,B,H]\r\n pz_proba = torch.stack(pz_proba, dim=0)\r\n # P(m|z,u)\r\n pm_dec_proba, m_dec_outs = [], []\r\n m_length = m_input.size(0) # Tm\r\n # last_hidden = u_enc_hidden[:-1]\r\n for t in range(m_length):\r\n teacher_forcing = toss_(self.teacher_force)\r\n proba, last_hidden, dec_out = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n degree_input, last_hidden, z_input_np)\r\n if teacher_forcing:\r\n m_tm1 = m_input[t].view(1, -1)\r\n else:\r\n _, m_tm1 = torch.topk(proba, 1)\r\n m_tm1 = m_tm1.view(1, -1)\r\n pm_dec_proba.append(proba)\r\n m_dec_outs.append(dec_out)\r\n\r\n pm_dec_proba = torch.stack(pm_dec_proba, dim=0) # [T,B,V]\r\n return pz_proba, pm_dec_proba, None\r\n else:\r\n # assert z_input is not None\r\n z_length = z_input.size(0) if z_input is not None else None # GO token\r\n # print(\"z_input\", z_input)\r\n if z_input is None:\r\n use_predicted_zt = True\r\n else:\r\n use_predicted_zt = False\r\n pz_dec_outs, bspan_index, last_hidden, pz_proba = self.bspan_decoder(u_enc_out, z_tm1, last_hidden, u_input_np,\r\n pv_z_enc_out=pv_z_enc_out,\r\n prev_z_input_np=prev_z_input_np,\r\n u_emb=u_emb, pv_z_emb=pv_z_emb,\r\n z_length=z_length,\r\n use_predicted_zt=use_predicted_zt,\r\n z_input=z_input)\r\n pz_proba = torch.stack(pz_proba, dim=0)\r\n pz_dec_outs = torch.cat(pz_dec_outs, dim=0)\r\n degree_input = self.reader.db_degree_handler(bspan_index, kwargs['dial_id'])\r\n degree_input = cuda_(Variable(torch.from_numpy(degree_input).float()))\r\n if mode == 'test':\r\n if not self.discrete_act:\r\n if not self.beam_search:\r\n m_output_index, m_probas = self.greedy_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n degree_input, bspan_index)\r\n\r\n # 
else:\r\n # m_output_index = self.beam_search_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n # degree_input, bspan_index)\r\n#\r\n return m_output_index, bspan_index, None, pz_proba, m_probas\r\n else:\r\n act_logits = self.action_decode(pz_dec_outs, u_enc_out, np_state)\r\n\r\n return act_logits, bspan_index, None, pz_proba\r\n\r\n elif mode == 'rl':\r\n return self.sampling_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,\r\n degree_input, bspan_index)\r\n\r\n def action_decode(self, pz_dec_outs, u_enc_out, np_state):\r\n\r\n logits = self.m_decoder_discrete(pz_dec_outs, u_enc_out, np_state)\r\n\r\n return logits\r\n\r\n def bspan_decoder(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,\r\n z_length=None, use_predicted_zt=True, z_input=None):\r\n if not use_predicted_zt:\r\n assert z_input is not None\r\n assert z_length is not None\r\n pz_dec_outs = []\r\n pz_proba = []\r\n decoded = []\r\n batch_size = u_enc_out.size(1)\r\n hiddens = [None] * batch_size\r\n z_length = z_length if z_length is not None else cfg.z_length\r\n # print(z_length)\r\n\r\n # import pdb\r\n # pdb.set_trace()\r\n for t in range(z_length):\r\n\r\n pz_dec_out, last_hidden, proba = \\\r\n self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,\r\n z_tm1=z_tm1, last_hidden=last_hidden, pv_z_enc_out=pv_z_enc_out,\r\n prev_z_input_np=prev_z_input_np, u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)\r\n # print(\"--\"*20)\r\n # print(\"in bspan decoder\")\r\n # print(\"proba \", proba)\r\n # print(\"z_tm1\", z_tm1)\r\n # print(\"t\", t)\r\n # print(\"--\"*20)\r\n pz_proba.append(proba)\r\n pz_dec_outs.append(pz_dec_out)\r\n # print(\"proba_size\", proba.shape)\r\n z_proba, z_index = torch.topk(proba, 1) # [B,1]\r\n # print('z_index', z_index)\r\n z_index = z_index.data.view(-1)\r\n\r\n #####################################################\r\n if prev_z_input_np is None:\r\n tmp = u_input_np # [,B]\r\n else:\r\n # pdb.set_trace()\r\n tmp = np.concatenate((u_input_np, prev_z_input_np), axis=0)\r\n\r\n for i in range(z_index.size(0)):\r\n if z_index[i] >= cfg.vocab_size:\r\n # print(z_index)\r\n z_index[i] = torch.tensor(int(tmp[z_index[i] - cfg.vocab_size, i]))\r\n del tmp\r\n decoded.append(z_index.clone())\r\n\r\n # print(decoded)\r\n #####################################################\r\n\r\n for i in range(z_index.size(0)):\r\n if z_index[i] >= cfg.vocab_size:\r\n z_index[i] = 2 # unk\r\n # print('z_index', z_index)\r\n\r\n z_np = z_tm1.view(-1).cpu().data.numpy()\r\n\r\n for i in range(batch_size):\r\n if z_np[i] == self.vocab.encode('EOS_Z2'):\r\n hiddens[i] = last_hidden[:, i, :]\r\n if use_predicted_zt:\r\n z_tm1 = cuda_(Variable(z_index).view(1, -1))\r\n else:\r\n z_tm1 = z_input[t].view(1, -1)\r\n for i in range(batch_size):\r\n if hiddens[i] is None:\r\n hiddens[i] = last_hidden[:, i, :]\r\n last_hidden = torch.stack(hiddens, dim=1)\r\n\r\n if not use_predicted_zt:\r\n z_input_np = z_input.cpu().data.numpy()\r\n decoded = torch.stack(decoded, dim=0).transpose(0, 1)\r\n decoded = list(decoded)\r\n decoded = [list(_) for _ in decoded]\r\n return pz_dec_outs, decoded, last_hidden, pz_proba\r\n\r\n\r\n\r\n def greedy_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n decoded = []\r\n probas = []\r\n bspan_index_np = pad_sequences(bspan_index).transpose((1, 0))\r\n for t in range(self.max_ts):\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n 
degree_input, last_hidden, bspan_index_np)\r\n probas.append(proba)\r\n mt_proba, mt_index = torch.topk(proba, 1) # [B,1]\r\n mt_index = mt_index.data.view(-1)\r\n decoded.append(mt_index.clone())\r\n for i in range(mt_index.size(0)):\r\n if mt_index[i] >= cfg.vocab_size:\r\n mt_index[i] = 2 # unk\r\n m_tm1 = cuda_(Variable(mt_index).view(1, -1))\r\n decoded = torch.stack(decoded, dim=0).transpose(0, 1)\r\n decoded = list(decoded)\r\n return [list(_) for _ in decoded], probas\r\n\r\n def beam_search_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input,\r\n bspan_index):\r\n eos_token_id = self.vocab.encode(cfg.eos_m_token)\r\n batch_size = pz_dec_outs.size(1)\r\n if batch_size != 1:\r\n raise ValueError('\"Beam search single\" requires batch size to be 1')\r\n\r\n class BeamState:\r\n def __init__(self, score, last_hidden, decoded, length):\r\n \"\"\"\r\n Beam state in beam decoding\r\n :param score: sum of log-probabilities\r\n :param last_hidden: last hidden\r\n :param decoded: list of *Variable[1*1]* of all decoded words\r\n :param length: current decoded sentence length\r\n \"\"\"\r\n self.score = score\r\n self.last_hidden = last_hidden\r\n self.decoded = decoded\r\n self.length = length\r\n\r\n def update_clone(self, score_incre, last_hidden, decoded_t):\r\n decoded = copy.copy(self.decoded)\r\n decoded.append(decoded_t)\r\n clone = BeamState(self.score + score_incre, last_hidden, decoded, self.length + 1)\r\n return clone\r\n\r\n def beam_result_valid(decoded_t, bspan_index):\r\n decoded_t = [_.view(-1).data[0] for _ in decoded_t]\r\n req_slots = self.get_req_slots(bspan_index)\r\n decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)\r\n for req in req_slots:\r\n if req not in decoded_sentence:\r\n return False\r\n return True\r\n\r\n def score_bonus(state, decoded, bspan_index):\r\n bonus = cfg.beam_len_bonus\r\n return bonus\r\n\r\n def soft_score_incre(score, turn):\r\n return score\r\n\r\n finished, failed = [], []\r\n states = [] # sorted by score decreasingly\r\n dead_k = 0\r\n states.append(BeamState(0, last_hidden, [m_tm1], 0))\r\n bspan_index_np = np.array(bspan_index).reshape(-1, 1)\r\n for t in range(self.max_ts):\r\n new_states = []\r\n k = 0\r\n while k < len(states) and k < self.beam_size - dead_k:\r\n state = states[k]\r\n last_hidden, m_tm1 = state.last_hidden, state.decoded[-1]\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1, degree_input,\r\n last_hidden, bspan_index_np)\r\n\r\n proba = torch.log(proba)\r\n mt_proba, mt_index = torch.topk(proba, self.beam_size - dead_k) # [1,K]\r\n for new_k in range(self.beam_size - dead_k):\r\n score_incre = soft_score_incre(mt_proba[0][new_k].data[0], t) + score_bonus(state,\r\n mt_index[0][new_k].data[\r\n 0], bspan_index)\r\n if len(new_states) >= self.beam_size - dead_k and state.score + score_incre < new_states[-1].score:\r\n break\r\n decoded_t = mt_index[0][new_k]\r\n if decoded_t.data[0] >= cfg.vocab_size:\r\n decoded_t.data[0] = 2 # unk\r\n if self.vocab.decode(decoded_t.data[0]) == cfg.eos_m_token:\r\n if beam_result_valid(state.decoded, bspan_index):\r\n finished.append(state)\r\n dead_k += 1\r\n else:\r\n failed.append(state)\r\n else:\r\n decoded_t = decoded_t.view(1, -1)\r\n new_state = state.update_clone(score_incre, last_hidden, decoded_t)\r\n new_states.append(new_state)\r\n\r\n k += 1\r\n if self.beam_size - dead_k < 0:\r\n break\r\n new_states = new_states[:self.beam_size - dead_k]\r\n new_states.sort(key=lambda x: 
-x.score)\r\n states = new_states\r\n\r\n if t == self.max_ts - 1 and not finished:\r\n finished = failed\r\n print('FAIL')\r\n if not finished:\r\n finished.append(states[0])\r\n\r\n finished.sort(key=lambda x: -x.score)\r\n decoded_t = finished[0].decoded\r\n decoded_t = [_.view(-1).data[0] for _ in decoded_t]\r\n decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)\r\n # print(decoded_sentence)\r\n generated = torch.cat(finished[0].decoded, dim=1).data # [B=1, T]\r\n return generated\r\n\r\n def beam_search_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(\r\n m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)\r\n decoded = []\r\n for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):\r\n decoded_s = self.beam_search_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,\r\n u_input_np[:, i].reshape((-1, 1)),\r\n last_hidden_s, degree_input_s, bspan_index[i])\r\n decoded.append(decoded_s)\r\n return [list(_.view(-1)) for _ in decoded]\r\n\r\n def supervised_loss(self, pz_proba, pm_dec_proba, z_input, m_input):\r\n pz_proba, pm_dec_proba = pz_proba[:, :, :cfg.vocab_size].contiguous(), pm_dec_proba[:, :,\r\n :cfg.vocab_size].contiguous()\r\n pr_loss = self.pr_loss(pz_proba.view(-1, pz_proba.size(2)), z_input.view(-1))\r\n m_loss = self.dec_loss(pm_dec_proba.view(-1, pm_dec_proba.size(2)), m_input.view(-1))\r\n\r\n loss = pr_loss + m_loss\r\n return loss, pr_loss, m_loss\r\n\r\n def self_adjust(self, epoch):\r\n pass\r\n\r\n # REINFORCEMENT fine-tuning with MC\r\n\r\n def possible_reqs(self):\r\n if cfg.dataset == 'camrest':\r\n return ['address', 'phone', 'postcode', 'pricerange', 'area']\r\n elif cfg.dataset == 'kvret':\r\n req_by_intent = {\r\n 'weather': ['weather_attribute'],\r\n 'navigate': ['poi', 'traffic_info', 'address', 'distance'],\r\n 'schedule': ['event', 'date', 'time', 'party', 'agenda', 'room']\r\n }\r\n reqs = []\r\n for value in req_by_intent.values():\r\n reqs.extend(value)\r\n return reqs\r\n else:\r\n raise ValueError('unknown dataset')\r\n\r\n def get_req_slots(self, bspan_index):\r\n reqs = self.possible_reqs()\r\n reqs = set(self.vocab.sentence_decode(bspan_index).split()).intersection(reqs)\r\n return [_ + '_SLOT' for _ in reqs]\r\n\r\n def reward(self, m_tm1, decoded, bspan_index):\r\n \"\"\"\r\n The setting of the reward function is heuristic. 
It can be better optimized.\r\n :param m_tm1:\r\n :param decoded:\r\n :param bspan_index:\r\n :return:\r\n \"\"\"\r\n req_slots = self.get_req_slots(bspan_index)\r\n\r\n m_tm1 = self.vocab.decode(m_tm1[0])\r\n finished = m_tm1 == 'EOS_M'\r\n decoded = [_.view(-1)[0] for _ in decoded]\r\n decoded_sentence = self.vocab.sentence_decode(decoded, cfg.eos_m_token).split()\r\n reward = -0.01 if cfg.dataset == 'camrest' else 0\r\n '''\r\n if not finished:\r\n if m_tm1 in req_slots:\r\n if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:\r\n reward = 1.0\r\n '''\r\n # some modification for reward function.\r\n if m_tm1 in req_slots:\r\n if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:\r\n reward += 1.0\r\n else:\r\n reward -= 1.0 if cfg.dataset == 'camrest' else 0 # repeat\r\n return reward, finished\r\n\r\n def sampling_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(\r\n m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)\r\n batch_loss = []\r\n\r\n sample_num = 1\r\n\r\n for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):\r\n if not self.get_req_slots(bspan_index[i]):\r\n continue\r\n for j in range(sample_num):\r\n loss = self.sampling_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,\r\n u_input_np[:, i].reshape((-1, 1)),\r\n last_hidden_s, degree_input_s, bspan_index[i])\r\n batch_loss.append(loss)\r\n if not batch_loss:\r\n return None\r\n else:\r\n return sum(batch_loss) / len(batch_loss)\r\n\r\n def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):\r\n decoded = []\r\n reward_sum = 0\r\n log_probs = []\r\n rewards = []\r\n bspan_index_np = np.array(bspan_index).reshape(-1, 1)\r\n for t in range(self.max_ts):\r\n # reward\r\n reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)\r\n reward_sum += reward\r\n rewards.append(reward)\r\n if t == self.max_ts - 1:\r\n finished = True\r\n if finished:\r\n loss = self.finish_episode(log_probs, rewards)\r\n return loss\r\n # action\r\n proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,\r\n degree_input, last_hidden, bspan_index_np)\r\n proba = proba.squeeze(0) # [B,V]\r\n dis = Categorical(proba)\r\n action = dis.sample()\r\n log_probs.append(dis.log_prob(action))\r\n mt_index = action.data.view(-1)\r\n decoded.append(mt_index.clone())\r\n\r\n for i in range(mt_index.size(0)):\r\n if mt_index[i] >= cfg.vocab_size:\r\n mt_index[i] = 2 # unk\r\n\r\n m_tm1 = cuda_(Variable(mt_index).view(1, -1))\r\n\r\n def finish_episode(self, log_probas, saved_rewards):\r\n R = 0\r\n policy_loss = []\r\n rewards = []\r\n for r in saved_rewards:\r\n R = r + 0.8 * R\r\n rewards.insert(0, R)\r\n\r\n rewards = torch.Tensor(rewards)\r\n # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\r\n\r\n for log_prob, reward in zip(log_probas, rewards):\r\n policy_loss.append(-log_prob * reward)\r\n l = len(policy_loss)\r\n policy_loss = torch.cat(policy_loss).sum()\r\n return policy_loss / l\r\n"
] | [
[
"torch.stack",
"torch.nn.functional.softmax",
"numpy.argsort",
"torch.nn.GRU",
"torch.log",
"torch.max",
"torch.cat",
"torch.nn.Dropout",
"torch.bmm",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.autograd.Variable",
"numpy.cos",
"torch.from_numpy",
"torch.Tensor",
"torch.ones",
"numpy.zeros",
"torch.distributions.Categorical",
"numpy.power",
"torch.isnan",
"numpy.array",
"torch.nn.NLLLoss",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.split",
"torch.nn.Embedding",
"torch.exp",
"torch.topk",
"torch.zeros",
"numpy.sin",
"numpy.concatenate",
"torch.LongTensor",
"torch.nn.init.orthogonal_"
]
] |
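One detail of `sequicity/tsd_net.py` worth isolating is the numerically stabilised copy score that both `BSpanDecoder` and `ResponseDecoder` compute: raw copy logits are shifted by their per-row maximum before `exp`, multiplied through the sparse position-to-vocabulary map, and the maximum is added back after the `log`. The self-contained sketch below reproduces only that pattern with toy tensors; the shapes and the toy one-hot mapping are assumptions, not the model's real inputs.

```python
import torch

def stable_copy_score(copy_logits, sparse_input):
    """log( exp(copy_logits) @ sparse_input ) computed without overflowing exp().

    copy_logits : [B, T]        raw copy scores per source position
    sparse_input: [B, T, V_aug] near-one-hot map from positions to (augmented) vocab ids
    returns     : [B, V_aug]    log-domain copy scores, as in BSpanDecoder/ResponseDecoder
    """
    score_max = torch.max(copy_logits, dim=1, keepdim=True)[0]         # [B, 1]
    shifted = torch.exp(copy_logits - score_max)                       # [B, T], values <= 1
    pooled = torch.log(torch.bmm(shifted.unsqueeze(1), sparse_input))  # [B, 1, V_aug]
    return pooled.squeeze(1) + score_max                               # [B, V_aug]

B, T, V = 2, 5, 11
logits = torch.randn(B, T)
sparse = torch.full((B, T, V), 1e-10)
sparse[:, torch.arange(T), torch.arange(T)] = 1.0   # toy mapping: position t copies vocab id t
print(stable_copy_score(logits, sparse).shape)      # torch.Size([2, 11])
```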
jggautier/dataverse-automating-downloads | [
"40cf127e7771049165b21b732635cd35848eda5e"
] | [
"dataverse_repository_curation_assistant/dataverse_repository_curation_assistant_functions.py"
] | [
"# Functions for the curation app\nimport csv\nfrom dateutil.parser import parse\nfrom functools import reduce\nimport json\nimport glob\nimport os\nfrom os import listdir\nimport pandas as pd\nfrom pathlib import Path\nimport re\nimport requests\nimport time\nfrom tkinter import Tk, ttk, Frame, Label, IntVar, Checkbutton, filedialog, NORMAL, DISABLED\nfrom tkinter import Listbox, MULTIPLE, StringVar, END, INSERT, N, E, S, W\nfrom tkinter.ttk import Entry, Progressbar, OptionMenu, Combobox\nfrom urllib.parse import urlparse\n\n\n# Class for custom collapsiblePanel frame using tkinter widgets\nclass collapsiblePanel(Frame):\n\n def __init__(self, parent, text='', default='closed', padx=0, pady=0, *args, **options):\n Frame.__init__(self, parent, *args, **options, padx=padx, pady=pady)\n\n self.show = IntVar()\n\n self.titleFrame = ttk.Frame(self, relief='raised', borderwidth=1)\n self.titleFrame.pack(fill='x', expand=1)\n\n Label(self.titleFrame, text=text, width=40, anchor='w').pack(side='left', fill='x', expand=1)\n\n self.toggleButton = ttk.Checkbutton(\n \tself.titleFrame, width=5, command=self.toggle,\n\t\t\tvariable=self.show, style='Toolbutton')\n self.toggleButton.pack(side='right')\n\n self.subFrame = Frame(self, borderwidth=1, relief='groove', bg='white', padx=10)\n\n if default == 'open':\n \tself.show.set(1)\n \tself.subFrame.pack(fill='x', expand=1)\n \tself.toggleButton.configure(text='▼')\n elif default == 'closed':\n \tself.show.set(0)\n \tself.toggleButton.configure(text='▲')\n\n def toggle(self):\n if bool(self.show.get()):\n self.subFrame.pack(fill='x', expand=1)\n self.toggleButton.configure(text='▼')\n else:\n self.subFrame.forget()\n self.toggleButton.configure(text='▲')\n\n\ndef forget_widget(widget):\n exists = widget.winfo_exists()\n if exists == 1:\n widget.grid_forget()\n else:\n pass\n\n\n# Function for getting value of nested key, truncating the value to 10,000 characters if it's a string\n# (character limit for many spreadsheet applications), and returning nothing if key doesn't exist\ndef improved_get(_dict, path, default=None):\n for key in path.split('.'):\n try:\n _dict = _dict[key]\n except KeyError:\n return default\n if isinstance(_dict, int) or isinstance(_dict, dict):\n return _dict\n elif isinstance(_dict, str):\n return _dict[:10000].replace('\\r', ' - ')\n\n\ndef list_to_string(lst): \n string = ', '.join(lst)\n return string\n\n\ndef convert_to_local_tz(timestamp, shortDate=False):\n # Save local timezone to localTimezone variable\n localTimezone = tz.tzlocal()\n # Convert string to datetime object\n timestamp = parse(timestamp)\n # Convert timestamp to local timezone\n timestamp = timestamp.astimezone(localTimezone)\n if shortDate is True:\n # Return timestamp in YYYY-MM-DD format\n timestamp = timestamp.strftime('%Y-%m-%d')\n return timestamp\n\n\ndef select_all(listbox):\n listbox.select_set(0, END)\n\n\ndef clear_selections(listbox):\n listbox.selection_clear(0, END)\n\n\n# Function for getting the server URL from a collection URL\n# or what's entered in the Installatio URL field\ndef get_installation_url(string):\n if string.startswith('http'):\n parsed = urlparse(string)\n installationUrl = parsed.scheme + '://' + parsed.netloc\n return installationUrl\n elif '(' in string:\n installationUrl = re.search(r'\\(.*\\)', string).group()\n installationUrl = re.sub('\\(|\\)', '', installationUrl)\n return installationUrl\n\n\n# Gets list of URLs from Dataverse map JSON data and add Demo Dataverse url\ndef get_installation_list():\n installationsList 
= []\n dataverseInstallationsJsonUrl = 'https://raw.githubusercontent.com/IQSS/dataverse-installations/master/data/data.json'\n response = requests.get(dataverseInstallationsJsonUrl)\n data = response.json()\n\n for installation in data['installations']:\n name = installation['name']\n hostname = installation['hostname']\n installationUrl = 'https://' + hostname\n nameAndUrl = '%s (%s)' % (name, installationUrl)\n installationsList.append(nameAndUrl)\n\n installationsList.insert(0, 'Demo Dataverse (https://demo.dataverse.org)')\n\n return installationsList\n\n\n# Function for getting name of installation's root collection \n# (assumming root dataverse's ID is 1, which isn't the case with UVA Dataverse)\ndef get_root_alias_name(url):\n\n # If it's the UVA homepage URL, it's root alias is uva (whose database ID is not 1)\n if 'dataverse.lib.virginia.edu' in url:\n rootAlias = 'uva'\n\n # If's it's not the UVA homepage URL, get the alias of the collection whose database is 1\n elif '/dataverse/' in url:\n parsed = urlparse(url)\n url = parsed.scheme + '://' + parsed.netloc + '/api/dataverses/1'\n response = requests.get(url)\n dataverseData = response.json()\n rootAlias = dataverseData['data']['alias']\n elif '/dataverse/' not in url:\n url = '%s/api/dataverses/1' % (url)\n response = requests.get(url)\n dataverseData = response.json()\n rootAlias = dataverseData['data']['alias']\n\n return rootAlias\n\n\n# Function for getting collection alias name of a given Dataverse Collection URL,\n# including the \"Root\" collection\ndef get_alias_from_collection_url(url):\n\n # If /dataverse/ is not in the URL, assume it's the installation's server url...\n if '/dataverse/' not in url:\n # If it's the UVA homepage URL, get it's root alias, whose database ID is not 1\n if 'dataverse.lib.virginia.edu' in url:\n alias = 'uva'\n\n # If's it's not the UVA homepage URL, get the alias of the collection whose database is 1\n elif 'dataverse.lib.virginia.edu' not in url:\n installationUrl = get_installation_url(url)\n url = '%s/api/dataverses/1' % (installationUrl)\n response = requests.get(url)\n dataverseData = response.json()\n alias = dataverseData['data']['alias']\n\n # If /dataverse/ is in the url, assume it's a collection URL and parse string to get its alias...\n elif '/dataverse/' in url:\n parsed = urlparse(url)\n try:\n alias = parsed.path.split('/')[2]\n # Or return an empty string\n except IndexError:\n alias = ''\n\n return alias\n\n\n# Returns True if collection alias is the installation's root collection or\n# False if not (doesn't work with UVA)\ndef is_root_collection(url):\n if get_alias_from_collection_url(url) == get_root_alias_name(url):\n return True\n elif get_alias_from_collection_url(url) != get_root_alias_name(url):\n return False\n\n\n# Function that turns Dataverse installation URL, instalation URL or search URL into a Search API URL\ndef get_search_api_url(url, apiKey=None):\n\n # If URL is not a search url (doesn't contain 'q=') and contains /dataverse/, it's a Dataverse collection URL\n if 'q=' not in url and '/dataverse/' in url:\n # Remove the jsessionidString that sometimes appears in the URL\n try:\n jsessionidString = re.search(r';jsessionid=.*', url).group()\n url = url.replace(jsessionidString, '?')\n except AttributeError:\n pass\n # Get the Dataverse Collection name in the search URL\n dataversePart = re.search(r'\\/dataverse\\/.*', url).group()\n dataverseName = dataversePart.replace('/dataverse/', '')\n # Repalce '/dataverse/' and the dataverse name with 
'/api/search?q=*' and add subtree parameter with dataverse name\n apiSearchURL = url.replace(dataversePart, '/api/search?q=*') + '&subtree=%s' % (dataverseName)\n\n # If URL is not a search URL (doesn't contain 'q=') and doesn't have /dataverse/, assume it's the URL of the installation\n if 'q=' not in url and '/dataverse/' not in url:\n apiSearchURL = url.replace('/dataverse.xhtml', '')\n apiSearchURL = apiSearchURL + '/api/search'\n # If entered installation URL ends with a forward slash, replace resulting double slash with a single slash\n apiSearchURL = apiSearchURL.replace('//api', '/api') + '?q=*'\n\n # If URL has 'q=', then assume it's a Search URL\n elif 'q=' in url:\n\n # Sometimes there's a slash before the ?q. If so, remove it\n url = url.replace('/?q', '?q')\n\n # If there's a jsessionid string, remove it\n try:\n jsessionidString = re.search(r';jsessionid=.*\\?', url).group()\n url = url.replace(jsessionidString, '?')\n except AttributeError:\n pass\n \n # Get the Dataverse Collection name in the search URL\n # dataverseName = re.search(r'\\/dataverse\\/\\w*\\?q', url)\n dataverseName = re.search(r'\\/dataverse\\/.*\\?q', url)\n dataverseName = dataverseName.group()\n\n subtree = dataverseName.replace('/dataverse/', '&subtree=').replace('?q', '')\n\n apiSearchURL = (\n url\n .replace(dataverseName, '/api/search?q')\n .replace('?q=&', '?q=*&')\n .replace('%3A', ':')\n .replace('%22', '\"')\n .replace('%28', '(')\n .replace('%29', ')')\n + '&show_entity_ids=true'\n + subtree\n )\n\n # Remove any digits after any fq parameters\n apiSearchURL = re.sub('fq\\d', 'fq', apiSearchURL)\n apiSearchURL = apiSearchURL + '&per_page=10&start=0'\n\n # Replace values of any \"types\" parameters into the Search API's \"type\" paramater\n try:\n dTypes = re.search(r'types=.*?&', apiSearchURL).group()\n dTypesList = dTypes.replace('types=', '').replace('&', '').split(':')\n dTypesString = ''\n for dType in dTypesList:\n dType = '&type=%s' %(re.sub('s$', '', dType))\n dTypesString = dTypesString + dType\n apiSearchURL = apiSearchURL + dTypesString\n except AttributeError:\n pass\n\n # Remove dvObjectType and types parameters, which I think the Search API is ignoring\n apiSearchURL = re.sub('fq=dvObjectType:\\(.*\\)&', '', apiSearchURL)\n apiSearchURL = re.sub('types=.*?&', '', apiSearchURL)\n\n return apiSearchURL\n\n\n# Function that converts as many common html codes as I could find into their human-readable strings\ndef convert_common_html_encoding(string):\n string = (\n string\n .replace('%20', ' ').replace('%21', '!').replace('%22', '\\\"').replace('%23', '#')\n .replace('%24', '$').replace('%25', '%').replace('%26', '&').replace('%27', '\\'')\n .replace('%28', '(').replace('%29', ')').replace('%2A', '*').replace('%2B', '+')\n .replace('%2C', ',').replace('%2D', '-').replace('%2E', '.').replace('%2F', '/')\n .replace('%30', '0').replace('%31', '1').replace('%32', '2').replace('%33', '3')\n .replace('%34', '4').replace('%35', '5').replace('%36', '6').replace('%37', '7')\n .replace('%38', '8').replace('%39', '9').replace('%3A', ':').replace('%3B', ';')\n .replace('%3C', '<').replace('%3D', '=').replace('%3E', '>').replace('%3F', '?')\n .replace('%40', '@').replace('%41', 'A').replace('%42', 'B').replace('%43', 'C')\n .replace('%44', 'D').replace('%45', 'E').replace('%46', 'F').replace('%47', 'G')\n .replace('%48', 'H').replace('%49', 'I').replace('%4A', 'J').replace('%4B', 'K')\n .replace('%4C', 'L').replace('%4D', 'M').replace('%4E', 'N').replace('%4F', 'O')\n .replace('%50', 
'P').replace('%51', 'Q').replace('%52', 'R').replace('%53', 'S')\n .replace('%54', 'T').replace('%55', 'U').replace('%56', 'V').replace('%57', 'W')\n .replace('%58', 'X').replace('%59', 'Y').replace('%5A', 'Z').replace('%5B', '[')\n .replace('%5C', '\\\\').replace('%5D', ']').replace('%5E', '^').replace('%5F', '_')\n .replace('%60', '`').replace('%61', 'a').replace('%62', 'b').replace('%63', 'c')\n .replace('%64', 'd').replace('%65', 'e').replace('%66', 'f').replace('%67', 'g')\n .replace('%68', 'h').replace('%69', 'i').replace('%6A', 'j').replace('%6B', 'k')\n .replace('%6C', 'l').replace('%6D', 'm').replace('%6E', 'n').replace('%6F', 'o')\n .replace('%70', 'p').replace('%71', 'q').replace('%72', 'r').replace('%73', 's')\n .replace('%74', 't').replace('%75', 'u').replace('%76', 'v').replace('%77', 'w')\n .replace('%78', 'x').replace('%79', 'y').replace('%7A', 'z').replace('%7B', '{')\n .replace('%7C', '|').replace('%7D', '}').replace('%7E', '~').replace('%80', '€')\n .replace('%82', '‚').replace('%83', 'ƒ').replace('%84', '„').replace('%85', '…')\n .replace('%86', '†').replace('%87', '‡').replace('%88', 'ˆ').replace('%89', '‰')\n .replace('%8A', 'Š').replace('%8B', '‹').replace('%8C', 'Œ').replace('%8E', 'Ž')\n .replace('%91', '‘').replace('%92', '’').replace('%93', '“').replace('%94', '”')\n .replace('%95', '•').replace('%96', '–').replace('%97', '—').replace('%98', '˜')\n .replace('%99', '™').replace('%9A', 'š').replace('%9B', '›').replace('%9C', 'œ')\n .replace('%9E', 'ž').replace('%9F', 'Ÿ').replace('%A1', '¡').replace('%A2', '¢')\n .replace('%A3', '£').replace('%A4', '¤').replace('%A5', '¥').replace('%A6', '¦')\n .replace('%A7', '§').replace('%A8', '¨').replace('%A9', '©').replace('%AA', 'ª')\n .replace('%AB', '«').replace('%AC', '¬').replace('%AE', '®').replace('%AF', '¯')\n .replace('%B0', '°').replace('%B1', '±').replace('%B2', '²').replace('%B3', '³')\n .replace('%B4', '´').replace('%B5', 'µ').replace('%B6', '¶').replace('%B7', '·')\n .replace('%B8', '¸').replace('%B9', '¹').replace('%BA', 'º').replace('%BB', '»')\n .replace('%BC', '¼').replace('%BD', '½').replace('%BE', '¾').replace('%BF', '¿')\n .replace('%C0', 'À').replace('%C1', 'Á').replace('%C2', 'Â').replace('%C3', 'Ã')\n .replace('%C4', 'Ä').replace('%C5', 'Å').replace('%C6', 'Æ').replace('%C7', 'Ç')\n .replace('%C8', 'È').replace('%C9', 'É').replace('%CA', 'Ê').replace('%CB', 'Ë')\n .replace('%CC', 'Ì').replace('%CD', 'Í').replace('%CE', 'Î').replace('%CF', 'Ï')\n .replace('%D0', 'Ð').replace('%D1', 'Ñ').replace('%D2', 'Ò').replace('%D3', 'Ó')\n .replace('%D4', 'Ô').replace('%D5', 'Õ').replace('%D6', 'Ö').replace('%D7', '×')\n .replace('%D8', 'Ø').replace('%D9', 'Ù').replace('%DA', 'Ú').replace('%DB', 'Û')\n .replace('%DC', 'Ü').replace('%DD', 'Ý').replace('%DE', 'Þ').replace('%DF', 'ß')\n .replace('%E0', 'à').replace('%E1', 'á').replace('%E2', 'â').replace('%E3', 'ã')\n .replace('%E4', 'ä').replace('%E5', 'å').replace('%E6', 'æ').replace('%E7', 'ç')\n .replace('%E8', 'è').replace('%E9', 'é').replace('%EA', 'ê').replace('%EB', 'ë')\n .replace('%EC', 'ì').replace('%ED', 'í').replace('%EE', 'î').replace('%EF', 'ï')\n .replace('%F0', 'ð').replace('%F1', 'ñ').replace('%F2', 'ò').replace('%F3', 'ó')\n .replace('%F4', 'ô').replace('%F5', 'õ').replace('%F6', 'ö').replace('%F7', '÷')\n .replace('%F8', 'ø').replace('%F9', 'ù').replace('%FA', 'ú').replace('%FB', 'û')\n .replace('%FC', 'ü').replace('%FD', 'ý').replace('%FE', 'þ').replace('%FF', 'ÿ')\n )\n return string\n\n\ndef convert_utf8bytes_to_characters(string):\n string = 
(\n string\n .replace('%E2%82%AC', '€').replace('%E2%80%9A', '‚').replace('%C6%92', 'ƒ')\n .replace('%E2%80%A6', '…').replace('%E2%80%A0', '†').replace('%E2%80%A1', '‡')\n .replace('%E2%80%B0', '‰').replace('%C5%A0', 'Š').replace('%E2%80%B9', '‹')\n .replace('%C5%BD', 'Ž').replace('%E2%80%98', '‘').replace('%E2%80%99', '’')\n .replace('%E2%80%9D', '”').replace('%E2%80%A2', '•').replace('%E2%80%93', '–')\n .replace('%CB%9C', '˜').replace('%E2%84%A2', '™').replace('%C5%A1', 'š')\n .replace('%C5%93', 'œ').replace('%C5%BE', 'ž').replace('%C5%B8', 'Ÿ')\n .replace('%C2%A2', '¢').replace('%C2%A3', '£').replace('%C2%A4', '¤')\n .replace('%C2%A6', '¦').replace('%C2%A7', '§').replace('%C2%A8', '¨')\n .replace('%C2%AA', 'ª').replace('%C2%AB', '«').replace('%C2%AC', '¬')\n .replace('%C2%AE', '®').replace('%C2%AF', '¯').replace('%C2%B0', '°')\n .replace('%C2%B2', '²').replace('%C2%B3', '³').replace('%C2%B4', '´')\n .replace('%C2%B6', '¶').replace('%C2%B7', '·').replace('%C2%B8', '¸')\n .replace('%C2%BA', 'º').replace('%C2%BB', '»').replace('%C2%BC', '¼')\n .replace('%C2%BE', '¾').replace('%C2%BF', '¿').replace('%C3%80', 'À')\n .replace('%C3%82', 'Â').replace('%C3%83', 'Ã').replace('%C3%84', 'Ä')\n .replace('%C3%86', 'Æ').replace('%C3%87', 'Ç').replace('%C3%88', 'È')\n .replace('%C3%8A', 'Ê').replace('%C3%8B', 'Ë').replace('%C3%8C', 'Ì')\n .replace('%C3%8E', 'Î').replace('%C3%8F', 'Ï').replace('%C3%90', 'Ð')\n .replace('%C3%92', 'Ò').replace('%C3%93', 'Ó').replace('%C3%94', 'Ô')\n .replace('%C3%96', 'Ö').replace('%C3%97', '×').replace('%C3%98', 'Ø')\n .replace('%C3%9A', 'Ú').replace('%C3%9B', 'Û').replace('%C3%9C', 'Ü')\n .replace('%C3%9E', 'Þ').replace('%C3%9F', 'ß').replace('%C3%A0', 'à')\n .replace('%C3%A2', 'â').replace('%C3%A3', 'ã').replace('%C3%A4', 'ä')\n .replace('%C3%A6', 'æ').replace('%C3%A7', 'ç').replace('%C3%A8', 'è')\n .replace('%C3%AA', 'ê').replace('%C3%AB', 'ë').replace('%C3%AC', 'ì')\n .replace('%C3%8D', 'Í').replace('%C3%AE', 'î').replace('%C3%AF', 'ï')\n .replace('%C3%B0', 'ð').replace('%C3%B2', 'ò').replace('%C3%B3', 'ó')\n .replace('%C3%B4', 'ô').replace('%C3%B6', 'ö').replace('%C3%B7', '÷')\n .replace('%C3%B8', 'ø').replace('%C3%BA', 'ú').replace('%C3%BB', 'û')\n .replace('%C3%BC', 'ü').replace('%C3%BE', 'þ').replace('%C3%BF', 'ÿ')\n )\n return string\n\n# Function that returns the params of a given Search API URL, to be used in requests calls\ndef get_params(apiSearchURL):\n params = {\n 'baseUrl': '',\n 'params': {}\n }\n fq = []\n\n # Split apiSearchURL to create list of params\n splitSearchURLList = re.split('\\?|&fq|&', apiSearchURL)\n\n # Remove base search API URL from list\n params['baseUrl'] = splitSearchURLList[0]\n splitSearchURLList.pop(0)\n\n # Remove any empty items from the splitSearchURLList\n splitSearchURLList = list(filter(None, splitSearchURLList))\n\n typeParamList = []\n\n for paramValue in splitSearchURLList:\n\n # Add query to params dict\n if paramValue.startswith('q='):\n paramValue = convert_utf8bytes_to_characters(paramValue)\n paramValue = convert_common_html_encoding(paramValue)\n paramValue = paramValue.replace('+', ' ')\n params['params']['q'] = paramValue.replace('q=', '')\n\n # Add non-fq queries to params dict\n if not paramValue.startswith('=') and not paramValue.startswith('q='):\n key = paramValue.split('=')[0]\n if paramValue.split('=')[1] != '':\n params['params'][key] = paramValue.split('=')[1]\n\n # Add values of each type param to typeParamList\n if paramValue.startswith('type'):\n valueString = paramValue.split('=')[1]\n 
typeParamList.append(valueString)\n\n # Add fq queries to fq dict if paramValue.startswith('='):\n if paramValue.startswith('='):\n key = paramValue.replace('=', '').split(':')[0]\n value = paramValue.split(':')[1]\n value = convert_utf8bytes_to_characters(value)\n value = convert_common_html_encoding(value)\n value = value.replace('+', ' ')\n paramString = key + ':' + value\n fq.append(paramString)\n\n # If there are type param values in typeParamList, add as value to new \"type\" param\n if typeParamList:\n params['params']['type'] = typeParamList\n\n # If there are any fq params, add fq keys and values\n if len(fq) > 0:\n params['params']['fq'] = fq\n\n return params\n\n\n# Gets info from Search API about a given dataverse, dataset or file\ndef get_value_row_from_search_api_object(item, installationUrl):\n if item['type'] == 'dataset':\n datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + item['global_id']\n dataverseUrl = installationUrl + '/dataverse/' + item['identifier_of_dataverse']\n newRow = {\n 'dataset_pid': item['global_id'],\n 'version_state': item['versionState'],\n 'dataverse_alias': item['identifier_of_dataverse']\n # 'dataverse_url': dataverseUrl\n }\n\n if item['type'] == 'dataverse':\n newRow = {\n 'dataverse_database_id': item['entity_id'],\n 'dataverse_alias': item['identifier'],\n 'dataverse_url': item['url'],\n 'dataverse_name': item['name']\n }\n\n if item['type'] == 'file':\n if item.get('file_persistent_id'):\n filePersistentId = item['file_persistent_id']\n else:\n filePersistentId = ''\n newRow = {\n 'file_database_id': item['file_id'],\n 'file persistent_id': filePersistentId,\n 'file_name': item['name'],\n 'dataset_pid': item['dataset_persistent_id']\n }\n return newRow\n\n\n# Uses Search API to return dataframe containing info about datasets in a Dataverse installation\n# Write progress and results to the tkinter window\ndef get_object_dataframe_from_search_api(\n url, params, objectType, rootWindow=None, progressText=None, progressLabel=None, apiKey=None):\n\n installationUrl = get_installation_url(url)\n\n if apiKey:\n header = {'X-Dataverse-key': apiKey}\n else:\n header = {}\n\n params['type'] = objectType\n\n # Add param to show database IDs of each item\n params['show_entity_ids'] = 'true'\n\n # Get total count of objects\n params['per_page'] = 1\n\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n total = data['data']['total_count']\n\n misindexedObjectCount = 0\n objectInfoDict = []\n\n # Initialization for paginating through results of Search API calls\n condition = True\n params['start'] = 0\n\n if None not in [rootWindow, progressText, progressLabel]:\n text = 'Looking for datasets...'\n progressText.set(text)\n progressLabel.config(fg='green')\n progressLabel = progressLabel.grid(sticky='w', row=0)\n rootWindow.update_idletasks()\n \n while condition:\n try:\n params['per_page'] = 10\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n\n for item in data['data']['items']:\n newRow = get_value_row_from_search_api_object(item, installationUrl)\n objectInfoDict.append(dict(newRow))\n datasetCount = len(objectInfoDict)\n \n # Update variables to paginate through the search results\n params['start'] = params['start'] + params['per_page']\n\n # If misindexed datasets break the Search API call where per_page=10,\n # try calls where per_page=1 then per_page=10 again\n # (See https://github.com/IQSS/dataverse/issues/4225)\n except Exception:\n try:\n 
params['per_page'] = 1\n response = requests.get(\n url,\n params=params,\n headers=header\n )\n data = response.json()\n\n for item in data['data']['items']:\n newRow = get_value_row_from_search_api_object(item, installationUrl)\n objectInfoDict.append(dict(newRow))\n\n # Update variables to paginate through the search results\n params['start'] = params['start'] + params['per_page']\n\n # If page fails to load, count a misindexed object and continue to the next page\n except Exception:\n misindexedObjectCount += 1\n params['start'] = params['start'] + params['per_page']\n\n condition = params['start'] < total\n\n objectInfoDF = pd.DataFrame(objectInfoDict)\n\n return objectInfoDF\n\n\n# Uses \"Get Contents\" endpoint to return list of dataverse aliases of all subcollections in a given collection\ndef get_all_subcollection_aliases(collectionUrl, apiKey=''):\n\n parsed = urlparse(collectionUrl)\n installationUrl = parsed.scheme + '://' + parsed.netloc\n alias = parsed.path.split('/')[2]\n\n if apiKey:\n header = {'X-Dataverse-key': apiKey}\n else:\n header = {}\n\n # Get ID of given dataverse alias\n dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, alias)\n\n response = requests.get(\n dataverseInfoEndpoint,\n headers=header)\n data = response.json()\n parentDataverseId = data['data']['id']\n\n # Create list and add ID of given dataverse\n dataverseIds = [parentDataverseId]\n\n # Get each subdataverse in the given dataverse\n for dataverseId in dataverseIds:\n dataverseGetContentsEndpoint = '%s/api/dataverses/%s/contents' % (installationUrl, dataverseId)\n response = requests.get(\n dataverseGetContentsEndpoint,\n headers=header)\n data = response.json()\n\n for item in data['data']:\n if item['type'] == 'dataverse':\n dataverseId = item['id']\n dataverseIds.extend([dataverseId])\n\n # Get the alias for each dataverse ID\n dataverseAliases = []\n for dataverseId in dataverseIds:\n dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, dataverseId)\n response = requests.get(\n dataverseInfoEndpoint,\n headers=header)\n data = response.json()\n alias = data['data']['alias']\n dataverseAliases.append(alias)\n\n return dataverseAliases\n\n\ndef get_canonical_pid(pidOrUrl):\n\n # If entered dataset PID is the dataset page URL, get canonical PID\n if pidOrUrl.startswith('http') and 'persistentId=' in pidOrUrl:\n canonicalPid = pidOrUrl.split('persistentId=')[1]\n canonicalPid = canonicalPid.split('&version')[0]\n canonicalPid = canonicalPid.replace('%3A', ':').replace('%2F', ('/'))\n\n # If entered dataset PID is a DOI URL, get canonical PID\n elif pidOrUrl.startswith('http') and 'doi.' in pidOrUrl:\n canonicalPid = re.sub('http.*org\\/', 'doi:', pidOrUrl)\n\n elif pidOrUrl.startswith('doi:') and '/' in pidOrUrl:\n canonicalPid = pidOrUrl\n\n # If entered dataset PID is a Handle URL, get canonical PID\n elif pidOrUrl.startswith('http') and 'hdl.' 
in pidOrUrl:\n canonicalPid = re.sub('http.*net\\/', 'hdl:', pidOrUrl)\n\n elif pidOrUrl.startswith('hdl:') and '/' in pidOrUrl:\n canonicalPid = pidOrUrl\n\n return canonicalPid\n\n\ndef get_datasets_from_collection_or_search_url(\n url, rootWindow=None, progressLabel=None, progressText=None, textBoxCollectionDatasetPIDs=None, \n apiKey='', ignoreDeaccessionedDatasets=False, subdataverses=False):\n\n\n if textBoxCollectionDatasetPIDs is not None:\n # if None not in [rootWindow, progressLabel, progressText, textBoxCollectionDatasetPIDs]:\n # Hide the textBoxCollectionDatasetPIDs scrollbox if it exists\n forget_widget(textBoxCollectionDatasetPIDs)\n \n # Use the Search API to get dataset info from the given search url or Dataverse collection URL\n searchApiUrl = get_search_api_url(url)\n requestsGetProperties = get_params(searchApiUrl)\n baseUrl = requestsGetProperties['baseUrl']\n params = requestsGetProperties['params']\n datasetInfoDF = get_object_dataframe_from_search_api(\n url=baseUrl, rootWindow=rootWindow, progressLabel=progressLabel, progressText=progressText,\n params=params, objectType='dataset', apiKey=apiKey)\n datasetCount = len(datasetInfoDF.index)\n\n if datasetCount == 0:\n text = 'Datasets found: 0'\n\n if progressText is not None:\n progressText.set(text)\n else:\n print(text)\n \n elif datasetCount > 0:\n\n deaccessionedDatasetCount = 0\n \n # To ignore deaccessioned datasets, remove from the dataframe all datasets where version_state is DEACCESSIONED \n if ignoreDeaccessionedDatasets == True:\n datasetInfoDF = datasetInfoDF[datasetInfoDF['version_state'].str.contains('DEACCESSIONED') == False]\n deaccessionedDatasetCount = datasetCount - len(datasetInfoDF.index)\n\n # Remove version_state column so that I can remove the dataframe's duplicate rows and there's only one row per dataset\n datasetInfoDF = datasetInfoDF.drop('version_state', axis=1)\n\n # Drop duplicate rows, which happens when Search API results lists a dataset's published and draft versions\n datasetInfoDF = datasetInfoDF.drop_duplicates()\n\n # Recount datasets\n uniqueDatasetCount = len(datasetInfoDF.index)\n\n # Check if url is collection url. If so:\n if 'q=' not in url:\n # If the user wants datasets in all subdataverses and the url\n # is the root collection, don't filter the dataframe\n if subdataverses == True and is_root_collection(url) == True:\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the user wants datasets in all subdataverses and the url\n # is not the root collection...\n elif subdataverses == True and is_root_collection(url) == False:\n # Get the aliases of all subdataverses...\n dataverseAliases = get_all_subcollection_aliases(url, apiKey=apiKey)\n\n # Remove any datasets that aren't owned by any of the \n # subdataverses. 
This will exclude linked datasets\n datasetInfoDF = datasetInfoDF[\n datasetInfoDF['dataverse_alias'].isin(dataverseAliases)]\n\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the user wants only datasets in the collection,\n # and not in collections within the collection...\n elif subdataverses == False:\n # Get the alias of the collection (including the alias of the root collection)\n alias = get_alias_from_collection_url(url)\n # Retain only datasets owned by that collection\n datasetInfoDF = datasetInfoDF[datasetInfoDF['dataverse_alias'].isin([alias])]\n\n uniqueDatasetCount = len(datasetInfoDF)\n\n # If the url is a search URL, get all datasetPids from datasetInfoDF \n elif 'q=' in url:\n uniqueDatasetCount = len(datasetInfoDF)\n\n if textBoxCollectionDatasetPIDs is not None:\n # Place textbox with list of dataset PIDs and set state to read/write (normal) \n textBoxCollectionDatasetPIDs.grid(sticky='w', row=2, pady=5)\n textBoxCollectionDatasetPIDs.configure(state ='normal')\n \n # Clear whatever's in the textBoxCollectionDatasetPIDs textbox\n textBoxCollectionDatasetPIDs.delete('1.0', END)\n\n # Insert the dataset PIDs into the textBoxCollectionDatasetPIDs scrollbox\n for dfIndex, dfRow in datasetInfoDF.iterrows():\n datasetPid = dfRow['dataset_pid'] + '\\n'\n textBoxCollectionDatasetPIDs.insert('end', datasetPid)\n\n # Create and place result text with uniqueDatasetCount\n if deaccessionedDatasetCount == 0:\n text = 'Datasets found: %s' % (str(uniqueDatasetCount))\n if deaccessionedDatasetCount > 0:\n text = 'Datasets found: %s\\rDeaccessioned datasets ignored: %s' % (str(uniqueDatasetCount), str(deaccessionedDatasetCount))\n\n if progressText is not None:\n progressText.set(text)\n else:\n print(text)\n\n\ndef get_directory_path():\n directoryPath = filedialog.askdirectory()\n return directoryPath\n\n\ndef get_dataset_metadata_export(installationUrl, datasetPid, exportFormat, header={}, apiKey=''):\n if apiKey:\n header['X-Dataverse-key'] = apiKey\n\n if exportFormat == 'dataverse_json':\n getJsonRepresentationOfADatasetEndpoint = '%s/api/datasets/:persistentId/?persistentId=%s' % (installationUrl, datasetPid)\n getJsonRepresentationOfADatasetEndpoint = getJsonRepresentationOfADatasetEndpoint.replace('//api', '/api')\n response = requests.get(\n getJsonRepresentationOfADatasetEndpoint,\n headers=header)\n if response.status_code in (200, 401): # 401 is the unauthorized code. 
Valid API key is needed\n data = response.json()\n else:\n data = 'ERROR'\n\n return data\n\n # For getting metadata from other exports, which are available only for each dataset's latest published\n # versions (whereas Dataverse JSON export is available for unpublished versions)\n if exportFormat != 'dataverse_json':\n datasetMetadataExportEndpoint = '%s/api/datasets/export?exporter=%s&persistentId=%s' % (installationUrl, exportFormat, datasetPid)\n datasetMetadataExportEndpoint = datasetMetadataExportEndpoint.replace('//api', '/api')\n \n response = requests.get(\n datasetMetadataExportEndpoint,\n headers=header)\n\n if response.status_code == 200:\n \n if exportFormat in ('schema.org' , 'OAI_ORE'):\n data = response.json()\n\n if exportFormat in ('ddi' , 'oai_ddi', 'dcterms', 'oai_dc', 'Datacite', 'oai_datacite'):\n string = response.text\n data = BeautifulSoup(string, 'xml').prettify()\n else:\n data = 'ERROR'\n\n return data\n\n\ndef get_metadatablock_data(installationUrl, metadatablockName):\n metadatablocksApiEndpoint = '%s/api/v1/metadatablocks/%s' % (installationUrl, metadatablockName)\n\n response = requests.get(metadatablocksApiEndpoint)\n if response.status_code == 200:\n data = response.json()\n return data\n\n\ndef get_metadatablock_db_field_name_and_title(metadatablockData):\n # Get the database names of all fields\n allFieldsDBNamesList = []\n childFieldsDBNamesList = []\n\n for parentfield in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentfield]\n field = properties['name']\n allFieldsDBNamesList.append(field)\n if 'childFields' in properties:\n for childField in properties['childFields']:\n childFieldsDBNamesList.append(childField)\n\n parentFieldsDBNamesList = list(set(allFieldsDBNamesList) - set(childFieldsDBNamesList))\n\n\n parentFieldDBNameAndTitleDict = {}\n for dbName in parentFieldsDBNamesList:\n dbNameProperties = metadatablockData['data']['fields'][dbName]\n parentFieldDBNameAndTitleDict[dbNameProperties['title']] = dbName\n\n return parentFieldDBNameAndTitleDict#, compoundFieldsDBNamesList\n\n\n# Get list of parent field names and add to a tkinter listbox for user to choose fields\ndef get_parent_field_names(metadatablockData, listbox):\n \n # Clear any names already in the listbox\n listbox.delete(0, END)\n\n allFieldsDBNamesDict = {}\n childFieldsDBNamesList = []\n compoundFieldsDBNamesList = []\n\n for parentField in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentField]\n field = properties['name']\n allFieldsDBNamesDict[field] = properties['title']\n\n if 'childFields' in properties:\n compoundFieldsDBNamesList.append(properties['title'])\n for childField in properties['childFields']:\n childFieldsDBNamesList.append(childField)\n\n options = []\n fieldWithChildFieldList = []\n for parentField in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentField]\n if 'childFields' not in properties and properties['name'] not in childFieldsDBNamesList:\n fieldTitle = properties['title']\n options.append(' ' + fieldTitle)\n elif 'childFields' in properties:\n title = properties['title']\n childFieldDict = properties['childFields']\n childFieldsList = []\n for childField in childFieldDict:\n childFieldsList.append(childField)\n childFieldsString = list_to_string(childFieldsList)\n fieldWithChildField = '%s: %s' % (title, childFieldsString)\n if len(fieldWithChildField) > 50:\n fieldWithChildField = fieldWithChildField[0:50] + '...'\n 
fieldWithChildFieldList.append(fieldWithChildField)\n options.append(' ' + fieldWithChildField)\n\n for option in options:\n listbox.insert('end', option)\n\n\ndef get_listbox_values(listbox):\n selectedFields = []\n selections = listbox.curselection()\n for selection in selections:\n fieldName = listbox.get(selection).strip().split(':')[0]\n selectedFields.append(fieldName)\n return selectedFields\n\n\n# Get the chiild field database names of compound fields or the database name of primitive fields\ndef get_column_names(\n metadatablockData, parentFieldTitle, parentFieldDBNameAndTitleDict):\n \n compoundFieldsDBNamesList = []\n for parentfield in metadatablockData['data']['fields']:\n properties = metadatablockData['data']['fields'][parentfield]\n if 'childFields' in properties:\n compoundFieldsDBNamesList.append(properties['name'])\n\n if parentFieldTitle in parentFieldDBNameAndTitleDict.keys():\n\n chosenDBName = parentFieldDBNameAndTitleDict[parentFieldTitle]\n columns = []\n\n # If the field is a compound field:\n if chosenDBName in compoundFieldsDBNamesList:\n\n # Get the child fields of the compound field\n dbNameProperties = metadatablockData['data']['fields'][chosenDBName]\n for field in dbNameProperties['childFields']:\n columns.append(field)\n\n # # Other the field is a primitive field. Use its names as the column\n else:\n columns.append(chosenDBName)\n\n return columns\n\n\ndef get_metadata_values_lists(\n installationUrl, datasetMetadata, metadatablockName,\n chosenTitleDBName, chosenFields=None, versions='latestVersion'):\n\n if versions == 'allVersions':\n versions = 'datasetVersion'\n rowVariablesList = []\n\n if (datasetMetadata['status'] == 'OK') and\\\n (metadatablockName in datasetMetadata['data'][versions]['metadataBlocks']):\n\n datasetPersistentUrl = datasetMetadata['data']['persistentUrl']\n datasetPid = get_canonical_pid(datasetPersistentUrl)\n datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + datasetPid\n if 'versionNumber' in datasetMetadata['data'][versions]:\n\n majorVersionNumber = datasetMetadata['data'][versions]['versionNumber']\n minorVersionNumber = datasetMetadata['data'][versions]['versionMinorNumber']\n datasetVersionNumber = f'{majorVersionNumber}.{minorVersionNumber}'\n else:\n datasetVersionNumber = 'DRAFT'\n\n for fields in datasetMetadata['data'][versions]['metadataBlocks'][metadatablockName]['fields']:\n if fields['typeName'] == chosenTitleDBName:\n\n # Save the field's typeClass and if it allows multiple values \n typeClass = fields['typeClass']\n allowsMultiple = fields['multiple']\n\n if typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is True:\n for value in fields['value']:\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl,\n datasetVersionNumber, value[:10000].replace('\\r', ' - ')]\n rowVariablesList.append(rowVariables)\n\n elif typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is False:\n value = fields['value'][:10000].replace('\\r', ' - ')\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl, \n datasetVersionNumber, value]\n\n rowVariablesList.append(rowVariables)\n\n elif typeClass == 'compound' and allowsMultiple is True: \n \n index = 0\n condition = True\n\n while condition:\n rowVariables = [\n datasetPid, datasetPersistentUrl, datasetUrl, \n datasetVersionNumber]\n\n # Get number of multiples\n total = len(fields['value'])\n\n # For each child field...\n for chosenField in chosenFields:\n # Try getting the value of that child field\n try:\n 
value = fields['value'][index][chosenField]['value'][:10000].replace('\\r', ' - ')\n # Otherwise, save an empty string as the value\n except KeyError:\n value = ''\n # Append value to the rowVariables list to add to the CSV file\n rowVariables.append(value)\n\n rowVariablesList.append(rowVariables)\n\n index += 1\n condition = index < total\n\n elif typeClass == 'compound' and allowsMultiple is False:\n rowVariables = [datasetPid, datasetPersistentUrl, datasetUrl, datasetVersionNumber]\n\n for chosenField in chosenFields:\n try:\n # Get value from compound field\n value = fields['value'][chosenField]['value'][:10000].replace('\\r', ' - ')\n except KeyError:\n value = ''\n rowVariables.append(value)\n rowVariablesList.append(rowVariables)\n\n return rowVariablesList\n\n\n# Delete empty CSV files in a given directory. If file has fewer than 2 rows, delete it.\ndef delete_empty_csv_files(csvDirectory):\n fieldsWithNoMetadata = []\n for file in glob.glob(str(Path(csvDirectory)) + '/' + '*.csv'):\n with open(file, mode='r', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=',')\n data = list(reader)\n rowCount = len(data)\n if rowCount == 1:\n fieldName = Path(file).name.replace('.csv', '')\n fieldsWithNoMetadata.append(fieldName)\n f.close()\n os.remove(file)\n return fieldsWithNoMetadata\n\n\n# Full outer join of CSV files in a given directory\ndef join_metadata_csv_files(csvDirectory):\n\n # Create CSV file in the directory that the user selected\n allMetadataFileName = os.path.join(csvDirectory, 'all_fields.csv')\n\n # Create list of common columns in CSV files to join on\n indexList = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']\n\n # Get list of CSV files in the csvDirectory\n filesList = listdir(csvDirectory)\n if len(filesList) > 1:\n filesDirectoryPathsList = []\n for file in filesList:\n fileDirectoryPath = os.path.join(csvDirectory, file)\n filesDirectoryPathsList.append(fileDirectoryPath)\n\n # Create a dataframe of each CSV file in the 'filesList' list\n dataframes = [pd.read_csv(table, sep=',', na_filter = False) for table in filesDirectoryPathsList]\n\n # For each dataframe, set the indexes (or the common columns across the dataframes to join on)\n for dataframe in dataframes:\n dataframe.set_index(indexList, inplace=True)\n\n # Full outer join all dataframes and save to the 'joined' variable\n joined = reduce(lambda left, right: left.join(right, how='outer'), dataframes)\n\n # Export joined dataframe to a CSV file\n joined.to_csv(allMetadataFileName)\n\n\n# Get the metadata of datasets. 
Function passed to tkinter button\ndef get_dataset_metadata(\n rootWindow, progressLabel, progressText, noMetadataText, noMetadataLabel,\n installationUrl='', datasetPidString='', \n parentFieldTitleList='', directoryPath='', apiKey=''):\n\n # Use metadatablock API endpoint to get metadatablock data\n metadatablockData = get_metadatablock_data(installationUrl, 'citation')\n\n # From metadatablockData, get the database and display names of each parent field\n allFieldsDBNamesDict = get_metadatablock_db_field_name_and_title(metadatablockData)\n\n # Create directory in the directory that the user chose\n currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')\n\n installationRootName = get_root_alias_name(installationUrl)\n\n mainDirectoryName = '%s_dataset_metadata_%s' % (installationRootName, currentTime)\n mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))\n os.mkdir(mainDirectoryPath)\n\n # For each field the user chose:\n for parentFieldTitle in parentFieldTitleList:\n\n # Create CSV file\n\n # Create file name and path\n csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')\n csvFileName = csvFileName + '(citation)'\n mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))\n csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'\n \n # Create header row for the CSV file\n headerRow = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']\n\n childFieldsList = get_column_names(\n metadatablockData, parentFieldTitle, allFieldsDBNamesDict)\n # Add childFields list to header row\n headerRow = headerRow + childFieldsList\n\n # Create CSV file and add headerrow\n with open(csvFilePath, mode='w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(headerRow) \n\n # Change passed datasetPidString to a list. 
Make sure the last newline doesn't mess up the list\n datasetPidList = [x.strip() for x in datasetPidString.splitlines()][:-1]\n\n # Delete any message in the tkinter window about no metadata being found\n # the last time the \"Get metadata\" button was pressed\n noMetadataLabel.grid_forget()\n\n count = 0\n datasetTotalCount = len(datasetPidList)\n\n text = 'Dataset metadata retrieved: 0 of %s' % (datasetTotalCount)\n progressText.set(text)\n progressLabel.grid(sticky='w', row=1, columnspan=2)\n rootWindow.update_idletasks()\n\n for datasetPid in datasetPidList:\n\n # Get the JSON metadata export of the latest version of the dataset\n datasetMetadata = get_dataset_metadata_export(\n installationUrl=installationUrl,\n datasetPid=datasetPid, \n exportFormat='dataverse_json',\n apiKey=apiKey)\n\n if datasetMetadata['status'] == 'OK':\n\n for parentFieldTitle in parentFieldTitleList:\n # Get database name of parentFieldTitle\n dbName = allFieldsDBNamesDict[parentFieldTitle]\n\n valueLists = get_metadata_values_lists(\n installationUrl=installationUrl,\n datasetMetadata=datasetMetadata,\n metadatablockName='citation',\n chosenTitleDBName=dbName, \n chosenFields=get_column_names(\n metadatablockData, parentFieldTitle, allFieldsDBNamesDict)) \n csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')\n csvFileName = csvFileName + '(citation)'\n csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'\n\n for valueList in valueLists:\n\n with open(csvFilePath, mode='a', newline='', encoding='utf-8') as f:\n writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(valueList) \n\n count += 1\n text = 'Dataset metadata retrieved: %s of %s' % (count, datasetTotalCount)\n progressText.set(text)\n rootWindow.update_idletasks()\n\n \n\n fieldsWithNoMetadata = delete_empty_csv_files(mainDirectoryPath)\n\n if count > 0 and len(fieldsWithNoMetadata) > 0:\n\n # noMetadataLabel.grid(sticky='w', row=2)\n fieldsWithNoMetadataString = list_to_string(fieldsWithNoMetadata)\n fieldsWithNoMetadataString = (\n 'No metadata found for the following fields:\\r' + fieldsWithNoMetadataString)\n noMetadataText.set(fieldsWithNoMetadataString)\n noMetadataLabel.grid(sticky='w', row=2)\n rootWindow.update_idletasks()\n\n # Full outer join all CSV files to create a CSV with all metadata\n join_metadata_csv_files(mainDirectoryPath)\n\n\ndef delete_published_dataset(installationUrl, datasetPid, apiKey):\n destroyDatasetApiEndpointUrl = '%s/api/datasets/:persistentId/destroy/?persistentId=%s' % (installationUrl, datasetPid)\n req = requests.delete(\n destroyDatasetApiEndpointUrl,\n headers={'X-Dataverse-key': apiKey})\n data = req.json()\n\n status = data.get('status')\n\n if status:\n message = data.get('message', '')\n statusMessage = '%s: %s' % (status, message)\n return statusMessage\n\n\ndef delete_published_datasets(\n rootWindow, progressLabel, progressText, notDeletedText, notDeletedLabel,\n installationUrl, datasetPidString, apiKey):\n\n installationUrl = get_installation_url(installationUrl)\n \n # Change passed datasetPidString to a list. 
Make sure the last newline doesn't mess up the list\n datasetPidList = [x.strip() for x in datasetPidString.splitlines()]\n\n # Remove any empty items from the list of dataset PIDs\n datasetPidList = [datasetPid for datasetPid in datasetPidList if datasetPid]\n\n canonicalPidList = []\n for datasetPid in datasetPidList:\n canonicalPid = get_canonical_pid(datasetPid)\n canonicalPidList.append(canonicalPid)\n\n # Delete any message in the tkinter window about datasets not being deleted\n # the last time the \"Delete datasets\" button was pressed\n notDeletedLabel.grid_forget()\n\n deletedDatasetCount = 0\n datasetTotalCount = len(canonicalPidList)\n\n deletedText = 'Datasets deleted: 0 of %s' % (datasetTotalCount)\n progressText.set(deletedText)\n progressLabel.config(fg='green')\n progressLabel.grid(sticky='w', row=1)\n notDeletedLabel.config(fg='white')\n notDeletedLabel.grid(sticky='w', row=2)\n rootWindow.update_idletasks()\n\n destroyedDatasets = []\n notDestroyedDatasets = []\n\n for canonicalPid in canonicalPidList:\n \n statusMessage = delete_published_dataset(installationUrl, canonicalPid, apiKey)\n \n if 'OK' in statusMessage:\n destroyedDatasets.append(canonicalPid)\n deletedDatasetCount += 1\n deletedText = 'Datasets deleted: %s of %s' % (deletedDatasetCount, datasetTotalCount)\n progressText.set(deletedText)\n rootWindow.update_idletasks()\n\n elif 'ERROR' in statusMessage:\n notDeletedLabel.config(fg='red')\n notDestroyedDatasets.append(canonicalPid)\n notDeletedMessage = 'Datasets not deleted: %s' % (len(notDestroyedDatasets))\n notDeletedText.set(notDeletedMessage)\n rootWindow.update_idletasks()\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
jw03070/Advanced-DeepSleepNet | [
"b58d71971be28c8517f61731b8ee933a5bbf3f0a"
] | [
"slicing.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: BSW\n\"\"\"\n\nimport numpy as np\nimport os\n\ndef slicing(filename,data):\n wc=1\n n1c=1\n n2c=1\n n3c=1\n n4c=1\n t=0\n \n npz = np.load(data)\n x = npz['x']\n y = npz['y']\n \n os.makedirs(\"./data/\"+filename[:-3], exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_Wake\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N1\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N2\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_N3\", exist_ok=True)\n os.makedirs(\"./data/\"+filename[:-3]+\"/1D_Rem\", exist_ok=True)\n \n for i in y:\n if(i==0):\n if(wc<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"0000\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=10 and wc<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"000\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=100 and wc<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"00\"+str(wc)+\".npz\",x=x[t,:,0])\n elif(wc>=1000 and wc<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+\"0\"+str(wc)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Wake/\"+str(wc)+\".npz\",x=x[t,:,0])\n wc+=1\n t+=1\n \n if(i==1):\n if(n1c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"0000\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=10 and n1c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"000\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=100 and n1c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"00\"+str(n1c)+\".npz\",x=x[t,:,0])\n elif(n1c>=1000 and n1c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+\"0\"+str(n1c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N1/\"+str(n1c)+\".npz\",x=x[t,:,0])\n n1c+=1\n t+=1\n \n if(i==2):\n if(n2c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"0000\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=10 and n2c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"000\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=100 and n2c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"00\"+str(n2c)+\".npz\",x=x[t,:,0])\n elif(n2c>=1000 and n2c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+\"0\"+str(n2c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N2/\"+str(n2c)+\".npz\",x=x[t,:,0])\n n2c+=1\n t+=1\n \n if(i==3):\n if(n3c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"0000\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=10 and n3c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"000\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=100 and n3c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"00\"+str(n3c)+\".npz\",x=x[t,:,0])\n elif(n3c>=1000 and n3c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+\"0\"+str(n3c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_N3/\"+str(n3c)+\".npz\",x=x[t,:,0])\n n3c+=1\n t+=1\n \n if(i==4):\n if(n4c<10):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"0000\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=10 and n4c<100):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"000\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=100 and n4c<1000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"00\"+str(n4c)+\".npz\",x=x[t,:,0])\n elif(n4c>=1000 and n4c<10000):\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+\"0\"+str(n4c)+\".npz\",x=x[t,:,0])\n else:\n np.savez(\"./data/\"+filename[:-3]+\"/1D_Rem/\"+str(n4c)+\".npz\",x=x[t,:,0])\n n4c+=1\n t+=1\n\ndef search(dirname):\n filenames = os.listdir(dirname)\n for 
filename in filenames:\n full_filename = os.path.join(dirname, filename)\n ext = os.path.splitext(full_filename)[-1]\n if ext == '.npz': \n slicing(filename,full_filename)\n pass\n\n \nif __name__ == '__main__':\n name = os.path.dirname( os.path.abspath( __file__ ) )\n Dataset_dir = \"npzdata\"\n Dataset_dir = name + '\\\\' + Dataset_dir + '\\\\'\n os.makedirs('data', exist_ok=True)\n search(Dataset_dir)\n\n"
] | [
[
"numpy.load"
]
] |
aghoshpub/LikelihoodFreeInterference | [
"fd6267104c29e935fa41dc92004dae98ded30626"
] | [
"examples/tutorial_h4l/3b_score.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # MadMiner particle physics tutorial\n# \n# # Part 3b: Training a score estimator\n# \n# Johann Brehmer, Felix Kling, Irina Espejo, and Kyle Cranmer 2018-2019\n\n# In part 3a of this tutorial we will finally train a neural network to estimate likelihood ratios. We assume that you have run part 1 and 2a of this tutorial. If, instead of 2a, you have run part 2b, you just have to load a different filename later.\n\n# ## Preparations\n\n# Make sure you've run the first tutorial before executing this notebook!\n\n# In[1]:\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n# get_ipython().magic(u'matplotlib inline')\n\nfrom madminer.sampling import SampleAugmenter\nfrom madminer import sampling\nfrom madminer.ml import ScoreEstimator\n\n\n# In[2]:\n\n\n# MadMiner output\nlogging.basicConfig(\n format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',\n datefmt='%H:%M',\n level=logging.INFO\n)\n\n# Output of all other modules (e.g. matplotlib)\nfor key in logging.Logger.manager.loggerDict:\n if \"madminer\" not in key:\n logging.getLogger(key).setLevel(logging.WARNING)\n\n\n# ## 1. Make (unweighted) training and test samples with augmented data\n\n# At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators:\n# \n# First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear.\n# \n# Second, `SampleAugmenter` calculates all the augmented data (\"gold\") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework.\n\n# In[3]:\n\n\nsampler = SampleAugmenter('data/delphes_data_shuffled.h5')\n\n\n# The relevant `SampleAugmenter` function for local score estimators is `extract_samples_train_local()`. As in part 3a of the tutorial, for the argument `theta` you can use the helper functions `sampling.benchmark()`, `sampling.benchmarks()`, `sampling.morphing_point()`, `sampling.morphing_points()`, and `sampling.random_morphing_points()`.\n\n# In[4]:\n\n\nx, theta, t_xz, _ = sampler.sample_train_local(\n theta=sampling.benchmark('sm'),\n #n_samples=4 * 10**5, #100000,\n n_samples= 2*10**6, # fewer than others\n folder='./data/samples',\n filename='train_score'\n)\n\n\n# We can use the same data as in part 3a, so you only have to execute this if you haven't gone through tutorial 3a:\n\n# In[5]:\n\n\n# _ = sampler.sample_test(\n# theta=sampling.benchmark('sm'),\n# n_samples=1*10**6,\n# folder='./data/samples',\n# filename='test'\n# )\n\n\n# ## 2. Train score estimator\n\n# It's now time to build a neural network. Only this time, instead of the likelihood ratio itself, we will estimate the gradient of the log likelihood with respect to the theory parameters -- the score. 
To be precise, the output of the neural network is an estimate of the score at some reference parameter point, for instance the Standard Model. A neural network that estimates this \"local\" score can be used to calculate the Fisher information at that point. The estimated score can also be used as a machine learning version of Optimal Observables, and likelihoods can be estimated based on density estimation in the estimated score space. This method for likelihood ratio estimation is called SALLY, and there is a closely related version called SALLINO. Both are explained in [\"Constraining Effective Field Theories With Machine Learning\"](https://arxiv.org/abs/1805.00013) and [\"A Guide to Constraining Effective Field Theories With Machine Learning\"](https://arxiv.org/abs/1805.00020).\n# \n# The central object for this is the `madminer.ml.ScoreEstimator` class:\n\n# In[6]:\n\n\nestimator = ScoreEstimator(n_hidden=(100,))\n\n\n# In[ ]:\n\n\nestimator.train(\n method='sally',\n x='data/samples/x_train_score.npy',\n t_xz='data/samples/t_xz_train_score.npy',\n)\n\nestimator.save('models/sally')\n\n\n# # ## 3. Evaluate score estimator\n\n# # Let's evaluate the SM score on the test data\n\n# # In[ ]:\n\n\n# estimator = ScoreEstimator(n_hidden=(50,))\n\n\n# # In[ ]:\n\n\n# estimator.load('models/sally')\n\n# t_hat = estimator.evaluate_score(\n# x='data/samples/x_test.npy'\n# )\n\n\n# # Let's have a look at the estimated score and how it is related to the observables:\n\n# # In[ ]:\n\n\n# x = np.load('data/samples/x_test.npy')\n\n# fig = plt.figure(figsize=(10,4))\n\n# #for i in range(2):\n# for i in range(1):\n \n# ax = plt.subplot(1,2,i+1)\n\n# sc = plt.scatter(x[:,0], x[:,1], c=t_hat[:,i], s=25., cmap='viridis', vmin=-1., vmax=1.)\n# cbar = plt.colorbar(sc)\n\n# cbar.set_label(r'$\\hat{t}_' + str(i) + r'(x | \\theta_{ref})$')\n# plt.xlabel(r'$p_{T,j1}$ [GeV]')\n# plt.ylabel(r'$\\Delta \\phi_{jj}$ Sally')\n# plt.xlim(10.,300.)\n# plt.ylim(-3.15,3.15)\n \n# plt.tight_layout()\n# #plt.show()\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.use"
]
] |
jqueguiner/training_results_v1.0 | [
"8200377f425ae24b6ed6c2816b9273aab0996d43",
"8200377f425ae24b6ed6c2816b9273aab0996d43"
] | [
"Graphcore/benchmarks/bert/implementations/popart/pack_pretraining_data.py",
"Supermicro/benchmarks/unet3d/implementations/mxnet_j2/runtime/distributed.py"
] | [
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport time\nimport glob\nimport struct\nimport random\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom scipy import optimize\nfrom itertools import repeat, chain\nfrom functools import lru_cache, reduce\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\nfrom concurrent.futures import ProcessPoolExecutor\nfrom bert_data.pretraining_dataset import CachedDataLoader, data_file_format\n\n\n@lru_cache(maxsize=None)\ndef packing_strategies(start, previous, target, depth):\n gap = target - start\n\n # The collection of possible strategies given the\n # starting sum, the target sum, and the available depth\n # strategy search is limited to increments greater or equal to previous\n strategies = []\n # Complete the packing with exactly 1 number\n if depth == 1:\n if gap >= previous:\n strategies.append([gap])\n\n # Complete the sample in \"depth\" steps, recursively\n else:\n for new in range(previous, gap + 1):\n\n new_gap = target - start - new\n if new_gap == 0:\n strategies.append([new])\n else:\n options = packing_strategies(start + new, new, target, depth - 1)\n\n for option in options:\n if len(option) > 0:\n strategies.append([new] + option)\n return strategies\n\n\ndef get_packing_recipe(sequence_lengths, max_sequence_length, max_sequences_per_pack=3):\n # Histogram of sequence lengths\n histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))\n print(\"Begin packing pass\".center(80, \"_\"))\n print(f\"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}\")\n\n # Make sure all strategies are recipes to pack to the correct sequence length\n strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)\n for strategy in strategy_set:\n assert(sum(strategy) == max_sequence_length)\n num_strategies = len(strategy_set)\n print(f\"Found {num_strategies} unique packing strategies.\")\n\n # Solve the packing equation A@mixture = histogram\n A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)\n for i in range(num_strategies):\n strategy = strategy_set[i]\n for seq_len in strategy:\n A[seq_len - 1, i] += 1\n\n # short sequences are inexpensive to add, so should have low residual weights\n # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)\n # in practice the difference is negligible, but this converges faster\n padding_cutoff = 8\n w0 = np.ones([max_sequence_length])\n # w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight\n w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)\n w0 = np.sqrt(w0)\n\n # Starting values for the padding and the mixture\n padding = np.zeros([max_sequence_length], dtype=np.int32)\n mixture = np.zeros([num_strategies], dtype=np.int32)\n b = histogram + padding\n\n # Pack sequences as best as possible, then increase padding accordingly 
and repeat\n for i in range(0, 20):\n print(f\"\\nIteration: {i}: sequences still to pack: \", b.sum())\n start = time.time()\n partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)\n print(f\"Solving nnls took {time.time() - start:3.2f} seconds.\")\n print(f\"Residual norm: {rnorm:3.5e}\")\n\n # Update mixture (round the floating point solution to integers)\n partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))\n\n # If partial mixture is empty (due to rounding) we follow the gradient\n # this usually happens when the number of examples is small i.e. ~100\n if partial_mixture.max() == 0:\n grad = A.T @ (b * np.arange(1, max_sequence_length + 1))\n k = int(b.sum() // 2) + 1\n topk = np.argsort(-grad)[:k]\n partial_mixture[topk] += 1\n\n # Update mixture\n mixture = mixture + partial_mixture\n\n # Compute the residuals\n residual = b - A @ partial_mixture\n print(f\"Max residual: {abs(residual).max()}\")\n print(f\"Residual on first 8 categories: {np.around(residual[:8], 4)}\")\n print(f\"Residual on last 8 categories: {np.around(residual[-8:], 4)}\")\n\n # Add padding based on deficit (negative residual)\n partial_padding = np.where(residual < 0, -residual, 0)\n print(f\"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.\")\n padding = padding + partial_padding\n\n # Update the rhs vector (remaining surplus sequences)\n b = histogram + padding - A @ mixture\n assert np.all(b >= 0), b\n\n # Done iterating\n if b.sum() < 100:\n break\n\n # Make sure there is no remainder\n unpacked_seqlen = np.arange(1, args.max_sequence_length + 1)[b > 0]\n # Update the mixture to also covered the unpacked sequences\n for l in unpacked_seqlen:\n # Get the depth 1 strategy\n strategy = sorted([l, args.max_sequence_length - l])\n strategy_index = strategy_set.index(strategy)\n mixture[strategy_index] += b[l-1]\n b = histogram - A @ mixture\n padding = np.where(b < 0, -b, 0)\n b = histogram + padding - A @ mixture\n assert b.sum() == 0\n\n # Analyze result\n print(\"Done solving for packing order\".center(80, \"_\"))\n num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()\n num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()\n print(f\"Number of sequences dropped: {b.sum()}\")\n print(f\"Number of strategies utilized: {np.count_nonzero(mixture)}\")\n new_number_of_samples = int(mixture.sum())\n compression = 1 - new_number_of_samples / len(sequence_lengths)\n print(f\"New number of samples: {new_number_of_samples:3.2f}, original {len(sequence_lengths)}. A compression ratio of {compression:3.3f}\")\n print(f\"The expected speed-up from packing: {1/(1-compression):3.3f}\")\n upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))\n print(f\"Theoretical upper bound on speed-up: {upper_bound:3.3f}\")\n avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples\n print(f\"Average sequences/sample {avg_sequences_per_sample:3.5f}\")\n print(f\"Added {num_padding_tokens:3.2e} padding tokens. 
Original dataset used {num_padding_tokens_original:3.2e} padding tokens\")\n efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)\n print(f\"Packing efficiency (fraction of real tokens): {efficiency:3.4f}\")\n\n print(f\"Top 8 strategies\")\n topK = np.argsort(-mixture)[:8]\n for i in topK:\n print(f\"Strategy {strategy_set[i]} which is used {int(mixture[i])} times\")\n print(\"\".center(80, \"_\"))\n\n # Figure out the slicing that each strategy should use\n slicing = np.zeros_like(A)\n slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]\n slicing = slicing.T\n\n mixture = mixture.astype(np.int64)\n return strategy_set, mixture, padding, slicing\n\n\ndef slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):\n # Divide the work, firstly between the strategies and then into chunks of 50k\n slices = []\n strategies = []\n part_idx = []\n for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):\n if repeat_count == 0:\n continue\n # Slice out the sequences allocated to this strategy in increments of 50k\n num_parts = repeat_count // 50000\n num_parts = num_parts + int(repeat_count != num_parts * 50000)\n subcounts = (min(50000, repeat_count - 50000 * (i - 1)) for i in range(1, num_parts + 1))\n for part_id, part_count in enumerate(subcounts):\n examples = []\n for k, seq_len in enumerate(strategy):\n slice_start = int(slice_offsets[seq_len - 1])\n slice_end = slice_start + int(part_count)\n slice_offsets[seq_len - 1] = slice_end\n examples.append(examples_by_length[seq_len][slice_start:slice_end])\n\n slices.append(examples)\n strategies.append(strategy)\n part_idx.append(part_id)\n\n return slices, strategies, part_idx\n\n\ndef parallel_pack_according_to_strategy(args, part_idx, strategy, examples):\n # Pack the sequences according to the strategy and write them to disk\n base_filename = os.path.join(args.output_dir, \"strategy_\" + \"_\".join(map(str, strategy)))\n filename = base_filename + f\"_part_{part_idx}\"\n lines = []\n for i, multi_sequence in enumerate(zip(*examples)):\n lines.append(create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,\n args.max_sequence_length, args.max_sequences_per_pack))\n # Write to file\n with open(filename, \"wb\") as f:\n f.writelines(lines)\n\n\ndef create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):\n # SEQ\n packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)\n packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)\n packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)\n packed_positions = np.zeros(max_sequence_length, dtype=np.int32)\n\n # MLM\n # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens\n # in case that percentege is rounded up for all sequences in the pack, need to add an extra token for\n # each sequence in the pack\n packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)\n\n # NSP\n packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)\n packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)\n packed_next_sentence_weights = 
np.zeros(max_sequences_per_pack, dtype=np.int32)\n\n offset = 0\n mlm_offset = 0\n sequence_index = 1 # used in the input mask\n for sequence in multi_sequence:\n # Padding sequences are donoted with None\n if sequence is not None:\n input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence\n seq_len = input_mask.sum()\n\n # SEQ\n packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]\n packed_input_mask[offset:offset + seq_len] = sequence_index\n packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]\n packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)\n\n # MLM\n mlm_len = int(masked_lm_weights.sum())\n assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, \"Too many LM predictions per sequences\"\n max_mlm = mlm_offset + mlm_len\n packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]\n packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]\n packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index\n\n # NSP\n packed_next_sentence_positions[sequence_index - 1] = offset\n packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels\n packed_next_sentence_weights[sequence_index - 1] = 1\n\n # Update offsets\n sequence_index += 1\n offset += seq_len\n mlm_offset = max_mlm\n\n # Pack into binary format and write it\n line = reduce(lambda accl, i: accl + struct.pack('<I', i),\n chain(packed_input_ids,\n packed_input_mask,\n packed_segment_ids,\n packed_positions,\n packed_masked_lm_positions,\n packed_masked_lm_ids,\n packed_masked_lm_weights,\n packed_next_sentence_positions,\n packed_next_sentence_labels,\n packed_next_sentence_weights), b'')\n return line\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-glob\", help=\"A glob expression for the input files to read in and pack\", required=True, type=str)\n parser.add_argument(\"--output-dir\", help=\"The destination folder for the output files\", required=True)\n parser.add_argument(\"--random-seed\", help=\"For shuffling the data\", default=12345)\n parser.add_argument(\"--max-files\", help=\"At most how many files to process (limited by RAM)\", default=100)\n parser.add_argument(\"--duplication-factor\", help=\"Same as the one passed to create input data\", default=1, type=int)\n parser.add_argument(\"--max-sequence-length\", help=\"The maximum number of tokens in an example\", default=512, type=int)\n parser.add_argument(\"--max-predictions-per-sequence\", help=\"The maximum number of masked tokens in an un-packed example\", default=76, type=int)\n parser.add_argument(\"--max-sequences-per-pack\", help=\"The maximum number of sequences per packed example.\", choices=[2, 3], default=3, type=int)\n args = parser.parse_args()\n random.seed(args.random_seed)\n\n # Input files\n input_files = glob.glob(args.input_glob)\n if len(input_files) > args.max_files:\n input_files = np.random.choice(input_files, size=args.max_files, replace=False)\n assert len(input_files) > 0\n\n # Load un-packed dataset\n sample_sizes = data_file_format(args.max_sequence_length, args.max_predictions_per_sequence)\n\n load_size = 1 if len(input_files) == 1 else 1024\n dataset = CachedDataLoader(input_files, sample_sizes, duplication_factor=args.duplication_factor, batch_size=load_size)\n\n # Put examples into bins depending on their sequence lengths and extract the sequence length\n # as an array\n sequence_lengths = []\n 
examples_by_length = defaultdict(list)\n print(\"Looping through dataset to collect sequence length information...\")\n for data in dataset:\n input_mask = data[1]\n batch_of_lengths = input_mask.sum(1).tolist()\n for i, length in enumerate(batch_of_lengths):\n examples_by_length[length].append([data[k][i] for k in range(len(data))])\n sequence_lengths.extend(batch_of_lengths)\n sequence_lengths = np.array(sequence_lengths)\n\n # Pass the array of sequence lengths to the packing algorithm\n strategy_set, mixture, padding, slicing = get_packing_recipe(sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)\n\n # Add the calculated padding\n for i in range(1, args.max_sequence_length + 1):\n examples_by_length[i].extend([None] * int(padding[i - 1]))\n\n # Shuffle the data\n for key in examples_by_length:\n random.shuffle(examples_by_length[key])\n\n # Pack and store the data\n print(f\"\\nPacking and writing packed dataset to {args.output_dir}.\")\n\n # Slice the data into chunks of max 50k packed examples\n example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)\n print(f\"Splitting work into {len(part_idx)} parts.\")\n\n start = time.time()\n with ProcessPoolExecutor(16) as executor:\n work = repeat(args), part_idx, strategies, example_slices\n for partial_result in executor.map(parallel_pack_according_to_strategy, *work):\n pass\n print(f\"\\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.\")\n",
"import os\nfrom time import time\n\nimport numpy as np\nfrom mpi4py import MPI\nfrom mxnet import nd\n\ndef distribute_mpiranks(local_rank, local_size, size, nodes_for_eval, gpu_per_node):\n # assign top \"nodes_for_eval\" nodes for evaluation. Rest of the nodes go to training\n total_ranks = list(range(size))\n train_ranks = total_ranks[:size - nodes_for_eval * gpu_per_node]\n eval_ranks = train_ranks\n transfer_ranks = []\n if nodes_for_eval:\n eval_ranks = total_ranks[size - nodes_for_eval * gpu_per_node:]\n # print(f\"Training ranks {train_ranks} \\nEval ranks {eval_ranks}\")\n #transfer_ranks = [train_ranks[0], eval_ranks[0]]\n # Form multiple transfer_rank groups, by local_rank\n transfer_ranks = [train_ranks[local_rank], *[x for x in eval_ranks if x % local_size == local_rank]]\n assert train_ranks, \"Training ranks list is empty\"\n assert eval_ranks, \"Evaluation ranks list is empty\"\n # print(f\"TRANSFER RANKS {transfer_ranks}\")\n return train_ranks, eval_ranks, transfer_ranks\n\n\ndef get_group_comm(comm, ranks):\n # Create a grouped mpi communicator with the ranks\n # assert len(ranks) > 0, \"cannot create group as ranks is empty\"\n xcomm = None\n if ranks:\n xgroup = comm.group.Incl(ranks)\n xcomm = comm.Create_group(xgroup)\n\n return xcomm\n\n\ndef sync_training_and_evaluation(flags, global_comm, eval_comm, transfer_comm,\n rank, model, train_ranks, eval_ranks, transfer_ranks,\n cycle, stop_training, ctx):\n\n # Let training threads know if evaluation has reached target\n # All reduce also acts as barrier to make sure parameter save is done\n local_stop_training = np.array([stop_training], dtype=np.int32)\n global_stop_training = np.zeros(1, dtype=np.int32)\n global_comm.Allreduce(local_stop_training, global_stop_training, MPI.SUM)\n\n start = time()\n filename = os.path.join(flags.network_dir, f'model_{cycle}.params')\n if flags.use_mpi_bcast:\n if rank in transfer_ranks:\n broadcast_model(model, transfer_comm, rank, eval_ranks)\n elif flags.use_mpi_transfer:\n if rank == train_ranks[0] or rank in eval_ranks:\n transfer_model(model, global_comm, eval_comm, rank, train_ranks[0], eval_ranks[0], eval_ranks)\n else:\n if rank == train_ranks[0]:\n model.save_parameters(filename)\n\n # Evaluation found end of training\n if global_stop_training != 0:\n stop_training = True\n else:\n if not flags.use_mpi_bcast and not flags.use_mpi_transfer:\n # load model for evaluation\n if rank in eval_ranks:\n if os.path.exists(filename):\n model.load_parameters(filename, ctx=ctx)\n else:\n raise Exception(f\"rank {rank}: model does not exist for {cycle}\")\n\n if rank == train_ranks[0]:\n print(f\"rank {rank}: cycle = {cycle}: time to send the model = {time() - start}\")\n if rank == eval_ranks[0]:\n print(f\"rank {rank}: cycle = {cycle}: time to receive the model = {time() - start}\")\n\n return stop_training, model\n\n\ndef broadcast_model(model, comm, rank, eval_ranks):\n params = model._collect_params_with_prefix()\n\n irequests = []\n result = {}\n for name, p in sorted(params.items()):\n if \"dummy\" in name:\n continue\n result[name] = p.data().asnumpy()\n irequests.append(comm.Ibcast(result[name], root=0))\n\n MPI.Request.waitall(irequests)\n\n if rank in eval_ranks:\n for name, p in sorted(params.items()):\n if \"dummy\" in name:\n continue\n params[name].set_data(result[name])\n\n\ndef transfer_model(model, global_comm, eval_comm, rank, source_rank, target_rank, eval_ranks):\n params = model._collect_params_with_prefix()\n\n irequests = []\n result = {}\n for idx, (name, p) in 
enumerate(sorted(params.items())):\n if \"dummy\" in name:\n continue\n data = p.data().asnumpy()\n if rank == source_rank:\n irequests.append(global_comm.Isend(data, dest=target_rank, tag=idx))\n elif rank == target_rank:\n result[name] = data\n irequests.append(global_comm.Irecv(result[name], source=source_rank, tag=idx))\n else:\n result[name] = data\n\n if rank == source_rank:\n MPI.Request.waitall(irequests)\n\n elif rank in eval_ranks:\n if rank == target_rank:\n MPI.Request.waitall(irequests)\n eval_comm.Barrier()\n for idx, (name, p) in enumerate(sorted(params.items())):\n if \"dummy\" in name or name not in result.keys():\n continue\n # data = p.data().asnumpy()\n eval_comm.Bcast(result[name], root=0)\n # params[name]._load_init(nd.array(result[name]), ctx, cast_dtype=False, dtype_source='current')\n params[name].set_data(result[name])\n"
] | [
[
"numpy.rint",
"numpy.ones",
"numpy.zeros_like",
"numpy.cumsum",
"numpy.zeros",
"numpy.argsort",
"numpy.random.choice",
"numpy.floor",
"numpy.count_nonzero",
"numpy.arange",
"numpy.all",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.around",
"numpy.where",
"numpy.array"
],
[
"numpy.array",
"numpy.zeros"
]
] |
MobileAnalytics/iPython-Framework | [
"da0e598308c067cd5c5290a6364b3ffaf2d2418f"
] | [
"SprityBird/spritybird/python3.5/lib/python3.5/site-packages/plotly/tools.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\ntools\n=====\n\nFunctions that USERS will possibly want access to.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom collections import OrderedDict\n\nimport warnings\n\nimport six\nimport math\nimport decimal\n\nfrom plotly import utils\nfrom plotly import exceptions\nfrom plotly import graph_reference\nfrom plotly import session\nfrom plotly.files import (CONFIG_FILE, CREDENTIALS_FILE, FILE_CONTENT,\n GRAPH_REFERENCE_FILE, check_file_permissions)\n\nDEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',\n 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',\n 'rgb(148, 103, 189)', 'rgb(140, 86, 75)',\n 'rgb(227, 119, 194)', 'rgb(127, 127, 127)',\n 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']\n\n\nREQUIRED_GANTT_KEYS = ['Task', 'Start', 'Finish']\nPLOTLY_SCALES = {'Greys': ['rgb(0,0,0)', 'rgb(255,255,255)'],\n 'YlGnBu': ['rgb(8,29,88)', 'rgb(255,255,217)'],\n 'Greens': ['rgb(0,68,27)', 'rgb(247,252,245)'],\n 'YlOrRd': ['rgb(128,0,38)', 'rgb(255,255,204)'],\n 'Bluered': ['rgb(0,0,255)', 'rgb(255,0,0)'],\n 'RdBu': ['rgb(5,10,172)', 'rgb(178,10,28)'],\n 'Reds': ['rgb(220,220,220)', 'rgb(178,10,28)'],\n 'Blues': ['rgb(5,10,172)', 'rgb(220,220,220)'],\n 'Picnic': ['rgb(0,0,255)', 'rgb(255,0,0)'],\n 'Rainbow': ['rgb(150,0,90)', 'rgb(255,0,0)'],\n 'Portland': ['rgb(12,51,131)', 'rgb(217,30,30)'],\n 'Jet': ['rgb(0,0,131)', 'rgb(128,0,0)'],\n 'Hot': ['rgb(0,0,0)', 'rgb(255,255,255)'],\n 'Blackbody': ['rgb(0,0,0)', 'rgb(160,200,255)'],\n 'Earth': ['rgb(0,0,130)', 'rgb(255,255,255)'],\n 'Electric': ['rgb(0,0,0)', 'rgb(255,250,220)'],\n 'Viridis': ['rgb(68,1,84)', 'rgb(253,231,37)']}\n\n# color constants for violin plot\nDEFAULT_FILLCOLOR = '#1f77b4'\nDEFAULT_HISTNORM = 'probability density'\nALTERNATIVE_HISTNORM = 'probability'\n\n\n# Warning format\ndef warning_on_one_line(message, category, filename, lineno,\n file=None, line=None):\n return '%s:%s: %s:\\n\\n%s\\n\\n' % (filename, lineno, category.__name__,\n message)\nwarnings.formatwarning = warning_on_one_line\n\ntry:\n from . 
import matplotlylib\n _matplotlylib_imported = True\nexcept ImportError:\n _matplotlylib_imported = False\n\ntry:\n import IPython\n import IPython.core.display\n _ipython_imported = True\nexcept ImportError:\n _ipython_imported = False\n\ntry:\n import numpy as np\n _numpy_imported = True\nexcept ImportError:\n _numpy_imported = False\n\ntry:\n import pandas as pd\n _pandas_imported = True\nexcept ImportError:\n _pandas_imported = False\n\ntry:\n import scipy as scp\n _scipy_imported = True\nexcept ImportError:\n _scipy_imported = False\n\ntry:\n import scipy.spatial as scs\n _scipy__spatial_imported = True\nexcept ImportError:\n _scipy__spatial_imported = False\n\ntry:\n import scipy.cluster.hierarchy as sch\n _scipy__cluster__hierarchy_imported = True\nexcept ImportError:\n _scipy__cluster__hierarchy_imported = False\n\ntry:\n import scipy\n import scipy.stats\n _scipy_imported = True\nexcept ImportError:\n _scipy_imported = False\n\n\ndef get_config_defaults():\n \"\"\"\n Convenience function to check current settings against defaults.\n\n Example:\n\n if plotly_domain != get_config_defaults()['plotly_domain']:\n # do something\n\n \"\"\"\n return dict(FILE_CONTENT[CONFIG_FILE]) # performs a shallow copy\n\n\ndef ensure_local_plotly_files():\n \"\"\"Ensure that filesystem is setup/filled out in a valid way.\n If the config or credential files aren't filled out, then write them\n to the disk.\n \"\"\"\n if check_file_permissions():\n for fn in [CREDENTIALS_FILE, CONFIG_FILE]:\n utils.ensure_file_exists(fn)\n contents = utils.load_json_dict(fn)\n for key, val in list(FILE_CONTENT[fn].items()):\n # TODO: removed type checking below, may want to revisit\n if key not in contents:\n contents[key] = val\n contents_keys = list(contents.keys())\n for key in contents_keys:\n if key not in FILE_CONTENT[fn]:\n del contents[key]\n utils.save_json_dict(fn, contents)\n\n # make a request to get graph reference if DNE.\n utils.ensure_file_exists(GRAPH_REFERENCE_FILE)\n utils.save_json_dict(GRAPH_REFERENCE_FILE,\n graph_reference.GRAPH_REFERENCE)\n\n else:\n warnings.warn(\"Looks like you don't have 'read-write' permission to \"\n \"your 'home' ('~') directory or to our '~/.plotly' \"\n \"directory. That means plotly's python api can't setup \"\n \"local configuration files. No problem though! You'll \"\n \"just have to sign-in using 'plotly.plotly.sign_in()'. \"\n \"For help with that: 'help(plotly.plotly.sign_in)'.\"\n \"\\nQuestions? 
[email protected]\")\n\n\n### credentials tools ###\n\ndef set_credentials_file(username=None,\n api_key=None,\n stream_ids=None,\n proxy_username=None,\n proxy_password=None):\n \"\"\"Set the keyword-value pairs in `~/.plotly_credentials`.\n\n :param (str) username: The username you'd use to sign in to Plotly\n :param (str) api_key: The api key associated with above username\n :param (list) stream_ids: Stream tokens for above credentials\n :param (str) proxy_username: The un associated with with your Proxy\n :param (str) proxy_password: The pw associated with your Proxy un\n\n \"\"\"\n if not check_file_permissions():\n raise exceptions.PlotlyError(\"You don't have proper file permissions \"\n \"to run this function.\")\n ensure_local_plotly_files() # make sure what's there is OK\n credentials = get_credentials_file()\n if isinstance(username, six.string_types):\n credentials['username'] = username\n if isinstance(api_key, six.string_types):\n credentials['api_key'] = api_key\n if isinstance(proxy_username, six.string_types):\n credentials['proxy_username'] = proxy_username\n if isinstance(proxy_password, six.string_types):\n credentials['proxy_password'] = proxy_password\n if isinstance(stream_ids, (list, tuple)):\n credentials['stream_ids'] = stream_ids\n utils.save_json_dict(CREDENTIALS_FILE, credentials)\n ensure_local_plotly_files() # make sure what we just put there is OK\n\n\ndef get_credentials_file(*args):\n \"\"\"Return specified args from `~/.plotly_credentials`. as dict.\n\n Returns all if no arguments are specified.\n\n Example:\n get_credentials_file('username')\n\n \"\"\"\n if check_file_permissions():\n ensure_local_plotly_files() # make sure what's there is OK\n return utils.load_json_dict(CREDENTIALS_FILE, *args)\n else:\n return FILE_CONTENT[CREDENTIALS_FILE]\n\n\ndef reset_credentials_file():\n ensure_local_plotly_files() # make sure what's there is OK\n utils.save_json_dict(CREDENTIALS_FILE, {})\n ensure_local_plotly_files() # put the defaults back\n\n\n### config tools ###\n\ndef set_config_file(plotly_domain=None,\n plotly_streaming_domain=None,\n plotly_api_domain=None,\n plotly_ssl_verification=None,\n plotly_proxy_authorization=None,\n world_readable=None,\n sharing=None,\n auto_open=None):\n \"\"\"Set the keyword-value pairs in `~/.plotly/.config`.\n\n :param (str) plotly_domain: ex - https://plot.ly\n :param (str) plotly_streaming_domain: ex - stream.plot.ly\n :param (str) plotly_api_domain: ex - https://api.plot.ly\n :param (bool) plotly_ssl_verification: True = verify, False = don't verify\n :param (bool) plotly_proxy_authorization: True = use plotly proxy auth creds\n :param (bool) world_readable: True = public, False = private\n\n \"\"\"\n if not check_file_permissions():\n raise exceptions.PlotlyError(\"You don't have proper file permissions \"\n \"to run this function.\")\n ensure_local_plotly_files() # make sure what's there is OK\n utils.validate_world_readable_and_sharing_settings({\n 'sharing': sharing, 'world_readable': world_readable})\n settings = get_config_file()\n if isinstance(plotly_domain, six.string_types):\n settings['plotly_domain'] = plotly_domain\n elif plotly_domain is not None:\n raise TypeError('plotly_domain should be a string')\n if isinstance(plotly_streaming_domain, six.string_types):\n settings['plotly_streaming_domain'] = plotly_streaming_domain\n elif plotly_streaming_domain is not None:\n raise TypeError('plotly_streaming_domain should be a string')\n if isinstance(plotly_api_domain, six.string_types):\n 
settings['plotly_api_domain'] = plotly_api_domain\n elif plotly_api_domain is not None:\n raise TypeError('plotly_api_domain should be a string')\n if isinstance(plotly_ssl_verification, (six.string_types, bool)):\n settings['plotly_ssl_verification'] = plotly_ssl_verification\n elif plotly_ssl_verification is not None:\n raise TypeError('plotly_ssl_verification should be a boolean')\n if isinstance(plotly_proxy_authorization, (six.string_types, bool)):\n settings['plotly_proxy_authorization'] = plotly_proxy_authorization\n elif plotly_proxy_authorization is not None:\n raise TypeError('plotly_proxy_authorization should be a boolean')\n if isinstance(auto_open, bool):\n settings['auto_open'] = auto_open\n elif auto_open is not None:\n raise TypeError('auto_open should be a boolean')\n\n if isinstance(world_readable, bool):\n settings['world_readable'] = world_readable\n settings.pop('sharing')\n elif world_readable is not None:\n raise TypeError('Input should be a boolean')\n if isinstance(sharing, six.string_types):\n settings['sharing'] = sharing\n elif sharing is not None:\n raise TypeError('sharing should be a string')\n utils.set_sharing_and_world_readable(settings)\n\n utils.save_json_dict(CONFIG_FILE, settings)\n ensure_local_plotly_files() # make sure what we just put there is OK\n\n\ndef get_config_file(*args):\n \"\"\"Return specified args from `~/.plotly/.config`. as tuple.\n\n Returns all if no arguments are specified.\n\n Example:\n get_config_file('plotly_domain')\n\n \"\"\"\n if check_file_permissions():\n ensure_local_plotly_files() # make sure what's there is OK\n return utils.load_json_dict(CONFIG_FILE, *args)\n else:\n return FILE_CONTENT[CONFIG_FILE]\n\n\ndef reset_config_file():\n ensure_local_plotly_files() # make sure what's there is OK\n f = open(CONFIG_FILE, 'w')\n f.close()\n ensure_local_plotly_files() # put the defaults back\n\n\n### embed tools ###\n\ndef get_embed(file_owner_or_url, file_id=None, width=\"100%\", height=525):\n \"\"\"Returns HTML code to embed figure on a webpage as an <iframe>\n\n Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.\n Since each file is given a corresponding unique url, you may also simply\n pass a valid plotly url as the first argument.\n\n Note, if you're using a file_owner string as the first argument, you MUST\n specify a `file_id` keyword argument. Else, if you're using a url string\n as the first argument, you MUST NOT specify a `file_id` keyword argument,\n or file_id must be set to Python's None value.\n\n Positional arguments:\n file_owner_or_url (string) -- a valid plotly username OR a valid plotly url\n\n Keyword arguments:\n file_id (default=None) -- an int or string that can be converted to int\n if you're using a url, don't fill this in!\n width (default=\"100%\") -- an int or string corresp. to width of the figure\n height (default=\"525\") -- same as width but corresp. to the height of the\n figure\n\n \"\"\"\n plotly_rest_url = (session.get_session_config().get('plotly_domain') or\n get_config_file()['plotly_domain'])\n if file_id is None: # assume we're using a url\n url = file_owner_or_url\n if url[:len(plotly_rest_url)] != plotly_rest_url:\n raise exceptions.PlotlyError(\n \"Because you didn't supply a 'file_id' in the call, \"\n \"we're assuming you're trying to snag a figure from a url. 
\"\n \"You supplied the url, '{0}', we expected it to start with \"\n \"'{1}'.\"\n \"\\nRun help on this function for more information.\"\n \"\".format(url, plotly_rest_url))\n urlsplit = six.moves.urllib.parse.urlparse(url)\n file_owner = urlsplit.path.split('/')[1].split('~')[1]\n file_id = urlsplit.path.split('/')[2]\n\n # to check for share_key we check urlsplit.query\n query_dict = six.moves.urllib.parse.parse_qs(urlsplit.query)\n if query_dict:\n share_key = query_dict['share_key'][-1]\n else:\n share_key = ''\n else:\n file_owner = file_owner_or_url\n share_key = ''\n try:\n test_if_int = int(file_id)\n except ValueError:\n raise exceptions.PlotlyError(\n \"The 'file_id' argument was not able to be converted into an \"\n \"integer number. Make sure that the positional 'file_id' argument \"\n \"is a number that can be converted into an integer or a string \"\n \"that can be converted into an integer.\"\n )\n if int(file_id) < 0:\n raise exceptions.PlotlyError(\n \"The 'file_id' argument must be a non-negative number.\"\n )\n if share_key is '':\n s = (\"<iframe id=\\\"igraph\\\" scrolling=\\\"no\\\" style=\\\"border:none;\\\" \"\n \"seamless=\\\"seamless\\\" \"\n \"src=\\\"{plotly_rest_url}/\"\n \"~{file_owner}/{file_id}.embed\\\" \"\n \"height=\\\"{iframe_height}\\\" width=\\\"{iframe_width}\\\">\"\n \"</iframe>\").format(\n plotly_rest_url=plotly_rest_url,\n file_owner=file_owner, file_id=file_id,\n iframe_height=height, iframe_width=width)\n else:\n s = (\"<iframe id=\\\"igraph\\\" scrolling=\\\"no\\\" style=\\\"border:none;\\\" \"\n \"seamless=\\\"seamless\\\" \"\n \"src=\\\"{plotly_rest_url}/\"\n \"~{file_owner}/{file_id}.embed?share_key={share_key}\\\" \"\n \"height=\\\"{iframe_height}\\\" width=\\\"{iframe_width}\\\">\"\n \"</iframe>\").format(\n plotly_rest_url=plotly_rest_url,\n file_owner=file_owner, file_id=file_id, share_key=share_key,\n iframe_height=height, iframe_width=width)\n\n return s\n\n\ndef embed(file_owner_or_url, file_id=None, width=\"100%\", height=525):\n \"\"\"Embeds existing Plotly figure in IPython Notebook\n\n Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.\n Since each file is given a corresponding unique url, you may also simply\n pass a valid plotly url as the first argument.\n\n Note, if you're using a file_owner string as the first argument, you MUST\n specify a `file_id` keyword argument. Else, if you're using a url string\n as the first argument, you MUST NOT specify a `file_id` keyword argument,\n or file_id must be set to Python's None value.\n\n Positional arguments:\n file_owner_or_url (string) -- a valid plotly username OR a valid plotly url\n\n Keyword arguments:\n file_id (default=None) -- an int or string that can be converted to int\n if you're using a url, don't fill this in!\n width (default=\"100%\") -- an int or string corresp. to width of the figure\n height (default=\"525\") -- same as width but corresp. 
to the height of the\n figure\n\n \"\"\"\n try:\n s = get_embed(file_owner_or_url, file_id=file_id, width=width,\n height=height)\n\n # see if we are in the SageMath Cloud\n from sage_salvus import html\n return html(s, hide=False)\n except:\n pass\n if _ipython_imported:\n if file_id:\n plotly_domain = (\n session.get_session_config().get('plotly_domain') or\n get_config_file()['plotly_domain']\n )\n url = \"{plotly_domain}/~{un}/{fid}\".format(\n plotly_domain=plotly_domain,\n un=file_owner_or_url,\n fid=file_id)\n else:\n url = file_owner_or_url\n return PlotlyDisplay(url, width, height)\n else:\n if (get_config_defaults()['plotly_domain']\n != session.get_session_config()['plotly_domain']):\n feedback_email = '[email protected]'\n else:\n\n # different domain likely means enterprise\n feedback_email = '[email protected]'\n\n warnings.warn(\n \"Looks like you're not using IPython or Sage to embed this \"\n \"plot. If you just want the *embed code*,\\ntry using \"\n \"`get_embed()` instead.\"\n '\\nQuestions? {}'.format(feedback_email))\n\n\n### mpl-related tools ###\[email protected]_doc(**get_config_file())\ndef mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):\n \"\"\"Convert a matplotlib figure to plotly dictionary and send.\n\n All available information about matplotlib visualizations are stored\n within a matplotlib.figure.Figure object. You can create a plot in python\n using matplotlib, store the figure object, and then pass this object to\n the fig_to_plotly function. In the background, mplexporter is used to\n crawl through the mpl figure object for appropriate information. This\n information is then systematically sent to the PlotlyRenderer which\n creates the JSON structure used to make plotly visualizations. Finally,\n these dictionaries are sent to plotly and your browser should open up a\n new tab for viewing! Optionally, if you're working in IPython, you can\n set notebook=True and the PlotlyRenderer will call plotly.iplot instead\n of plotly.plot to have the graph appear directly in the IPython notebook.\n\n Note, this function gives the user access to a simple, one-line way to\n render an mpl figure in plotly. If you need to trouble shoot, you can do\n this step manually by NOT running this fuction and entereing the following:\n\n ===========================================================================\n from mplexporter import Exporter\n from mplexporter.renderers import PlotlyRenderer\n\n # create an mpl figure and store it under a varialble 'fig'\n\n renderer = PlotlyRenderer()\n exporter = Exporter(renderer)\n exporter.run(fig)\n ===========================================================================\n\n You can then inspect the JSON structures by accessing these:\n\n renderer.layout -- a plotly layout dictionary\n renderer.data -- a list of plotly data dictionaries\n\n Positional arguments:\n fig -- a matplotlib figure object\n username -- a valid plotly username **\n api_key -- a valid api_key for the above username **\n notebook -- an option for use with an IPython notebook\n\n ** Don't have a username/api_key? Try looking here:\n {plotly_domain}/plot\n\n ** Forgot your api_key? 
Try signing in and looking here:\n {plotly_domain}/python/getting-started\n\n \"\"\"\n if _matplotlylib_imported:\n renderer = matplotlylib.PlotlyRenderer()\n matplotlylib.Exporter(renderer).run(fig)\n if resize:\n renderer.resize()\n if strip_style:\n renderer.strip_style()\n if verbose:\n print(renderer.msg)\n return renderer.plotly_fig\n else:\n warnings.warn(\n \"To use Plotly's matplotlylib functionality, you'll need to have \"\n \"matplotlib successfully installed with all of its dependencies. \"\n \"You're getting this error because matplotlib or one of its \"\n \"dependencies doesn't seem to be installed correctly.\")\n\n\n### graph_objs related tools ###\n\ndef get_subplots(rows=1, columns=1, print_grid=False, **kwargs):\n \"\"\"Return a dictionary instance with the subplots set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.get_subplots(rows=2)\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x1', yaxis='y1')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 2:\n # print out string showing the subplot grid you've put in the layout\n fig = tools.get_subplots(rows=3, columns=2, print_grid=True)\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows, evenly spaced vertically on the figure.\n\n columns (kwarg, int greater than 0, default=1):\n Number of columns, evenly spaced horizontally on the figure.\n\n horizontal_spacing (kwarg, float in [0,1], default=0.1):\n Space between subplot columns. Applied to all columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.05):\n Space between subplot rows. Applied to all rows.\n\n print_grid (kwarg, True | False, default=False):\n If True, prints a tab-delimited string representation\n of your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / columns):\n Space between subplot columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n warnings.warn(\n \"tools.get_subplots is depreciated. \"\n \"Please use tools.make_subplots instead.\"\n )\n\n # Throw exception for non-integer rows and columns\n if not isinstance(rows, int) or rows <= 0:\n raise Exception(\"Keyword argument 'rows' \"\n \"must be an int greater than 0\")\n if not isinstance(columns, int) or columns <= 0:\n raise Exception(\"Keyword argument 'columns' \"\n \"must be an int greater than 0\")\n\n # Throw exception if non-valid kwarg is sent\n VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing']\n for key in kwargs.keys():\n if key not in VALID_KWARGS:\n raise Exception(\"Invalid keyword argument: '{0}'\".format(key))\n\n # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. 
rows / columns\n try:\n horizontal_spacing = float(kwargs['horizontal_spacing'])\n except KeyError:\n horizontal_spacing = 0.2 / columns\n try:\n vertical_spacing = float(kwargs['vertical_spacing'])\n except KeyError:\n vertical_spacing = 0.3 / rows\n\n fig = dict(layout=graph_objs.Layout()) # will return this at the end\n plot_width = (1 - horizontal_spacing * (columns - 1)) / columns\n plot_height = (1 - vertical_spacing * (rows - 1)) / rows\n plot_num = 0\n for rrr in range(rows):\n for ccc in range(columns):\n xaxis_name = 'xaxis{0}'.format(plot_num + 1)\n x_anchor = 'y{0}'.format(plot_num + 1)\n x_start = (plot_width + horizontal_spacing) * ccc\n x_end = x_start + plot_width\n\n yaxis_name = 'yaxis{0}'.format(plot_num + 1)\n y_anchor = 'x{0}'.format(plot_num + 1)\n y_start = (plot_height + vertical_spacing) * rrr\n y_end = y_start + plot_height\n\n xaxis = graph_objs.XAxis(domain=[x_start, x_end], anchor=x_anchor)\n fig['layout'][xaxis_name] = xaxis\n yaxis = graph_objs.YAxis(domain=[y_start, y_end], anchor=y_anchor)\n fig['layout'][yaxis_name] = yaxis\n plot_num += 1\n\n if print_grid:\n print(\"This is the format of your plot grid!\")\n grid_string = \"\"\n plot = 1\n for rrr in range(rows):\n grid_line = \"\"\n for ccc in range(columns):\n grid_line += \"[{0}]\\t\".format(plot)\n plot += 1\n grid_string = grid_line + '\\n' + grid_string\n print(grid_string)\n\n return graph_objs.Figure(fig) # forces us to validate what we just did...\n\n\ndef make_subplots(rows=1, cols=1,\n shared_xaxes=False, shared_yaxes=False,\n start_cell='top-left', print_grid=True,\n **kwargs):\n \"\"\"Return an instance of plotly.graph_objs.Figure\n with the subplots domain set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.make_subplots(rows=2)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n # or see Figure.append_trace\n\n Example 2:\n # subplots with shared x axes\n fig = tools.make_subplots(rows=2, shared_xaxes=True)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x1,y2 ]\n\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]\n\n Example 3:\n # irregular subplot layout (more examples below under 'specs')\n fig = tools.make_subplots(rows=2, cols=2,\n specs=[[{}, {}],\n [{'colspan': 2}, None]])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ] [ (1,2) x2,y2 ]\n [ (2,1) x3,y3 - ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]\n\n Example 4:\n # insets\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 5:\n # include subplot titles\n fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 6:\n # Include subplot title on one plot (but not all)\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 
0.7, 'b': 0.3}],\n subplot_titles=('','Inset'))\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows in the subplot grid.\n\n cols (kwarg, int greater than 0, default=1):\n Number of columns in the subplot grid.\n\n shared_xaxes (kwarg, boolean or list, default=False)\n Assign shared x axes.\n If True, subplots in the same grid column have one common\n shared x-axis at the bottom of the gird.\n\n To assign shared x axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared x axis)\n of cell index tuples.\n\n shared_yaxes (kwarg, boolean or list, default=False)\n Assign shared y axes.\n If True, subplots in the same grid row have one common\n shared y-axis on the left-hand side of the gird.\n\n To assign shared y axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared y axis)\n of cell index tuples.\n\n start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')\n Choose the starting cell in the subplot grid used to set the\n domains of the subplots.\n\n print_grid (kwarg, boolean, default=True):\n If True, prints a tab-delimited string representation of\n your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):\n Space between subplot columns.\n Applies to all columns (use 'specs' subplot-dependents spacing)\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n Applies to all rows (use 'specs' subplot-dependents spacing)\n\n subplot_titles (kwarg, list of strings, default=empty list):\n Title of each subplot.\n \"\" can be included in the list if no subplot title is desired in\n that space so that the titles are properly indexed.\n\n specs (kwarg, list of lists of dictionaries):\n Subplot specifications.\n\n ex1: specs=[[{}, {}], [{'colspan': 2}, None]]\n\n ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]\n\n - Indices of the outer list correspond to subplot grid rows\n starting from the bottom. The number of rows in 'specs'\n must be equal to 'rows'.\n\n - Indices of the inner lists correspond to subplot grid columns\n starting from the left. The number of columns in 'specs'\n must be equal to 'cols'.\n\n - Each item in the 'specs' list corresponds to one subplot\n in a subplot grid. (N.B. 
The subplot grid has exactly 'rows'\n times 'cols' cells.)\n\n - Use None for blank a subplot cell (or to move pass a col/row span).\n\n - Note that specs[0][0] has the specs of the 'start_cell' subplot.\n\n - Each item in 'specs' is a dictionary.\n The available keys are:\n\n * is_3d (boolean, default=False): flag for 3d scenes\n * colspan (int, default=1): number of subplot columns\n for this subplot to span.\n * rowspan (int, default=1): number of subplot rows\n for this subplot to span.\n * l (float, default=0.0): padding left of cell\n * r (float, default=0.0): padding right of cell\n * t (float, default=0.0): padding right of cell\n * b (float, default=0.0): padding bottom of cell\n\n - Use 'horizontal_spacing' and 'vertical_spacing' to adjust\n the spacing in between the subplots.\n\n insets (kwarg, list of dictionaries):\n Inset specifications.\n\n - Each item in 'insets' is a dictionary.\n The available keys are:\n\n * cell (tuple, default=(1,1)): (row, col) index of the\n subplot cell to overlay inset axes onto.\n * is_3d (boolean, default=False): flag for 3d scenes\n * l (float, default=0.0): padding left of inset\n in fraction of cell width\n * w (float or 'to_end', default='to_end') inset width\n in fraction of cell width ('to_end': to cell right edge)\n * b (float, default=0.0): padding bottom of inset\n in fraction of cell height\n * h (float or 'to_end', default='to_end') inset height\n in fraction of cell height ('to_end': to cell top edge)\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Throw exception for non-integer rows and cols\n if not isinstance(rows, int) or rows <= 0:\n raise Exception(\"Keyword argument 'rows' \"\n \"must be an int greater than 0\")\n if not isinstance(cols, int) or cols <= 0:\n raise Exception(\"Keyword argument 'cols' \"\n \"must be an int greater than 0\")\n\n # Dictionary of things start_cell\n START_CELL_all = {\n 'bottom-left': {\n # 'natural' setup where x & y domains increase monotonically\n 'col_dir': 1,\n 'row_dir': 1\n },\n 'top-left': {\n # 'default' setup visually matching the 'specs' list of lists\n 'col_dir': 1,\n 'row_dir': -1\n }\n # TODO maybe add 'bottom-right' and 'top-right'\n }\n\n # Throw exception for invalid 'start_cell' values\n try:\n START_CELL = START_CELL_all[start_cell]\n except KeyError:\n raise Exception(\"Invalid 'start_cell' value\")\n\n # Throw exception if non-valid kwarg is sent\n VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing',\n 'specs', 'insets', 'subplot_titles']\n for key in kwargs.keys():\n if key not in VALID_KWARGS:\n raise Exception(\"Invalid keyword argument: '{0}'\".format(key))\n\n # Set 'subplot_titles'\n subplot_titles = kwargs.get('subplot_titles', [\"\"] * rows * cols)\n\n # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. 
rows / cols\n try:\n horizontal_spacing = float(kwargs['horizontal_spacing'])\n except KeyError:\n horizontal_spacing = 0.2 / cols\n try:\n vertical_spacing = float(kwargs['vertical_spacing'])\n except KeyError:\n if 'subplot_titles' in kwargs:\n vertical_spacing = 0.5 / rows\n else:\n vertical_spacing = 0.3 / rows\n\n # Sanitize 'specs' (must be a list of lists)\n exception_msg = \"Keyword argument 'specs' must be a list of lists\"\n try:\n specs = kwargs['specs']\n if not isinstance(specs, list):\n raise Exception(exception_msg)\n else:\n for spec_row in specs:\n if not isinstance(spec_row, list):\n raise Exception(exception_msg)\n except KeyError:\n specs = [[{}\n for c in range(cols)]\n for r in range(rows)] # default 'specs'\n\n # Throw exception if specs is over or under specified\n if len(specs) != rows:\n raise Exception(\"The number of rows in 'specs' \"\n \"must be equal to 'rows'\")\n for r, spec_row in enumerate(specs):\n if len(spec_row) != cols:\n raise Exception(\"The number of columns in 'specs' \"\n \"must be equal to 'cols'\")\n\n # Sanitize 'insets'\n try:\n insets = kwargs['insets']\n if not isinstance(insets, list):\n raise Exception(\"Keyword argument 'insets' must be a list\")\n except KeyError:\n insets = False\n\n # Throw exception if non-valid key / fill in defaults\n def _check_keys_and_fill(name, arg, defaults):\n def _checks(item, defaults):\n if item is None:\n return\n if not isinstance(item, dict):\n raise Exception(\"Items in keyword argument '{name}' must be \"\n \"dictionaries or None\".format(name=name))\n for k in item.keys():\n if k not in defaults.keys():\n raise Exception(\"Invalid key '{k}' in keyword \"\n \"argument '{name}'\".format(k=k, name=name))\n for k in defaults.keys():\n if k not in item.keys():\n item[k] = defaults[k]\n for arg_i in arg:\n if isinstance(arg_i, list):\n for arg_ii in arg_i:\n _checks(arg_ii, defaults)\n elif isinstance(arg_i, dict):\n _checks(arg_i, defaults)\n\n # Default spec key-values\n SPEC_defaults = dict(\n is_3d=False,\n colspan=1,\n rowspan=1,\n l=0.0,\n r=0.0,\n b=0.0,\n t=0.0\n # TODO add support for 'w' and 'h'\n )\n _check_keys_and_fill('specs', specs, SPEC_defaults)\n\n # Default inset key-values\n if insets:\n INSET_defaults = dict(\n cell=(1, 1),\n is_3d=False,\n l=0.0,\n w='to_end',\n b=0.0,\n h='to_end'\n )\n _check_keys_and_fill('insets', insets, INSET_defaults)\n\n # Set width & height of each subplot cell (excluding padding)\n width = (1. - horizontal_spacing * (cols - 1)) / cols\n height = (1. 
- vertical_spacing * (rows - 1)) / rows\n\n # Built row/col sequence using 'row_dir' and 'col_dir'\n COL_DIR = START_CELL['col_dir']\n ROW_DIR = START_CELL['row_dir']\n col_seq = range(cols)[::COL_DIR]\n row_seq = range(rows)[::ROW_DIR]\n\n # [grid] Build subplot grid (coord tuple of cell)\n grid = [[((width + horizontal_spacing) * c,\n (height + vertical_spacing) * r)\n for c in col_seq]\n for r in row_seq]\n\n # [grid_ref] Initialize the grid and insets' axis-reference lists\n grid_ref = [[None for c in range(cols)] for r in range(rows)]\n insets_ref = [None for inset in range(len(insets))] if insets else None\n\n layout = graph_objs.Layout() # init layout object\n\n # Function handling logic around 2d axis labels\n # Returns 'x{}' | 'y{}'\n def _get_label(x_or_y, r, c, cnt, shared_axes):\n # Default label (given strictly by cnt)\n label = \"{x_or_y}{cnt}\".format(x_or_y=x_or_y, cnt=cnt)\n\n if isinstance(shared_axes, bool):\n if shared_axes:\n if x_or_y == 'x':\n label = \"{x_or_y}{c}\".format(x_or_y=x_or_y, c=c + 1)\n if x_or_y == 'y':\n label = \"{x_or_y}{r}\".format(x_or_y=x_or_y, r=r + 1)\n\n if isinstance(shared_axes, list):\n if isinstance(shared_axes[0], tuple):\n shared_axes = [shared_axes] # TODO put this elsewhere\n for shared_axis in shared_axes:\n if (r + 1, c + 1) in shared_axis:\n label = {\n 'x': \"x{0}\".format(shared_axis[0][1]),\n 'y': \"y{0}\".format(shared_axis[0][0])\n }[x_or_y]\n\n return label\n\n # Row in grid of anchor row if shared_xaxes=True\n ANCHOR_ROW = 0 if ROW_DIR > 0 else rows - 1\n\n # Function handling logic around 2d axis anchors\n # Return 'x{}' | 'y{}' | 'free' | False\n def _get_anchors(r, c, x_cnt, y_cnt, shared_xaxes, shared_yaxes):\n # Default anchors (give strictly by cnt)\n x_anchor = \"y{y_cnt}\".format(y_cnt=y_cnt)\n y_anchor = \"x{x_cnt}\".format(x_cnt=x_cnt)\n\n if isinstance(shared_xaxes, bool):\n if shared_xaxes:\n if r != ANCHOR_ROW:\n x_anchor = False\n y_anchor = 'free'\n if shared_yaxes and c != 0: # TODO covers all cases?\n y_anchor = False\n return x_anchor, y_anchor\n\n elif isinstance(shared_xaxes, list):\n if isinstance(shared_xaxes[0], tuple):\n shared_xaxes = [shared_xaxes] # TODO put this elsewhere\n for shared_xaxis in shared_xaxes:\n if (r + 1, c + 1) in shared_xaxis[1:]:\n x_anchor = False\n y_anchor = 'free' # TODO covers all cases?\n\n if isinstance(shared_yaxes, bool):\n if shared_yaxes:\n if c != 0:\n y_anchor = False\n x_anchor = 'free'\n if shared_xaxes and r != ANCHOR_ROW: # TODO all cases?\n x_anchor = False\n return x_anchor, y_anchor\n\n elif isinstance(shared_yaxes, list):\n if isinstance(shared_yaxes[0], tuple):\n shared_yaxes = [shared_yaxes] # TODO put this elsewhere\n for shared_yaxis in shared_yaxes:\n if (r + 1, c + 1) in shared_yaxis[1:]:\n y_anchor = False\n x_anchor = 'free' # TODO covers all cases?\n\n return x_anchor, y_anchor\n\n list_of_domains = [] # added for subplot titles\n\n # Function pasting x/y domains in layout object (2d case)\n def _add_domain(layout, x_or_y, label, domain, anchor, position):\n name = label[0] + 'axis' + label[1:]\n graph_obj = '{X_or_Y}Axis'.format(X_or_Y=x_or_y.upper())\n axis = getattr(graph_objs, graph_obj)(domain=domain)\n if anchor:\n axis['anchor'] = anchor\n if isinstance(position, float):\n axis['position'] = position\n layout[name] = axis\n list_of_domains.append(domain) # added for subplot titles\n\n # Function pasting x/y domains in layout object (3d case)\n def _add_domain_is_3d(layout, s_label, x_domain, y_domain):\n scene = graph_objs.Scene(domain={'x': 
x_domain, 'y': y_domain})\n layout[s_label] = scene\n\n x_cnt = y_cnt = s_cnt = 1 # subplot axis/scene counters\n\n # Loop through specs -- (r, c) <-> (row, col)\n for r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n\n if spec is None: # skip over None cells\n continue\n\n c_spanned = c + spec['colspan'] - 1 # get spanned c\n r_spanned = r + spec['rowspan'] - 1 # get spanned r\n\n # Throw exception if 'colspan' | 'rowspan' is too large for grid\n if c_spanned >= cols:\n raise Exception(\"Some 'colspan' value is too large for \"\n \"this subplot grid.\")\n if r_spanned >= rows:\n raise Exception(\"Some 'rowspan' value is too large for \"\n \"this subplot grid.\")\n\n # Get x domain using grid and colspan\n x_s = grid[r][c][0] + spec['l']\n x_e = grid[r][c_spanned][0] + width - spec['r']\n x_domain = [x_s, x_e]\n\n # Get y domain (dep. on row_dir) using grid & r_spanned\n if ROW_DIR > 0:\n y_s = grid[r][c][1] + spec['b']\n y_e = grid[r_spanned][c][1] + height - spec['t']\n else:\n y_s = grid[r_spanned][c][1] + spec['b']\n y_e = grid[r][c][1] + height - spec['t']\n y_domain = [y_s, y_e]\n\n if spec['is_3d']:\n\n # Add scene to layout\n s_label = 'scene{0}'.format(s_cnt)\n _add_domain_is_3d(layout, s_label, x_domain, y_domain)\n grid_ref[r][c] = (s_label, )\n s_cnt += 1\n\n else:\n\n # Get axis label and anchor\n x_label = _get_label('x', r, c, x_cnt, shared_xaxes)\n y_label = _get_label('y', r, c, y_cnt, shared_yaxes)\n x_anchor, y_anchor = _get_anchors(r, c,\n x_cnt, y_cnt,\n shared_xaxes,\n shared_yaxes)\n\n # Add a xaxis to layout (N.B anchor == False -> no axis)\n if x_anchor:\n if x_anchor == 'free':\n x_position = y_domain[0]\n else:\n x_position = False\n _add_domain(layout, 'x', x_label, x_domain,\n x_anchor, x_position)\n x_cnt += 1\n\n # Add a yaxis to layout (N.B anchor == False -> no axis)\n if y_anchor:\n if y_anchor == 'free':\n y_position = x_domain[0]\n else:\n y_position = False\n _add_domain(layout, 'y', y_label, y_domain,\n y_anchor, y_position)\n y_cnt += 1\n\n grid_ref[r][c] = (x_label, y_label) # fill in ref\n\n # Loop through insets\n if insets:\n for i_inset, inset in enumerate(insets):\n\n r = inset['cell'][0] - 1\n c = inset['cell'][1] - 1\n\n # Throw exception if r | c is out of range\n if not (0 <= r < rows):\n raise Exception(\"Some 'cell' row value is out of range. \"\n \"Note: the starting cell is (1, 1)\")\n if not (0 <= c < cols):\n raise Exception(\"Some 'cell' col value is out of range. 
\"\n \"Note: the starting cell is (1, 1)\")\n\n # Get inset x domain using grid\n x_s = grid[r][c][0] + inset['l'] * width\n if inset['w'] == 'to_end':\n x_e = grid[r][c][0] + width\n else:\n x_e = x_s + inset['w'] * width\n x_domain = [x_s, x_e]\n\n # Get inset y domain using grid\n y_s = grid[r][c][1] + inset['b'] * height\n if inset['h'] == 'to_end':\n y_e = grid[r][c][1] + height\n else:\n y_e = y_s + inset['h'] * height\n y_domain = [y_s, y_e]\n\n if inset['is_3d']:\n\n # Add scene to layout\n s_label = 'scene{0}'.format(s_cnt)\n _add_domain_is_3d(layout, s_label, x_domain, y_domain)\n insets_ref[i_inset] = (s_label, )\n s_cnt += 1\n\n else:\n\n # Get axis label and anchor\n x_label = _get_label('x', False, False, x_cnt, False)\n y_label = _get_label('y', False, False, y_cnt, False)\n x_anchor, y_anchor = _get_anchors(r, c,\n x_cnt, y_cnt,\n False, False)\n\n # Add a xaxis to layout (N.B insets always have anchors)\n _add_domain(layout, 'x', x_label, x_domain, x_anchor, False)\n x_cnt += 1\n\n # Add a yayis to layout (N.B insets always have anchors)\n _add_domain(layout, 'y', y_label, y_domain, y_anchor, False)\n y_cnt += 1\n\n insets_ref[i_inset] = (x_label, y_label) # fill in ref\n\n # [grid_str] Set the grid's string representation\n sp = \" \" # space between cell\n s_str = \"[ \" # cell start string\n e_str = \" ]\" # cell end string\n colspan_str = ' -' # colspan string\n rowspan_str = ' |' # rowspan string\n empty_str = ' (empty) ' # empty cell string\n\n # Init grid_str with intro message\n grid_str = \"This is the format of your plot grid:\\n\"\n\n # Init tmp list of lists of strings (sorta like 'grid_ref' but w/ strings)\n _tmp = [['' for c in range(cols)] for r in range(rows)]\n\n # Define cell string as function of (r, c) and grid_ref\n def _get_cell_str(r, c, ref):\n return '({r},{c}) {ref}'.format(r=r + 1, c=c + 1, ref=','.join(ref))\n\n # Find max len of _cell_str, add define a padding function\n cell_len = max([len(_get_cell_str(r, c, ref))\n for r, row_ref in enumerate(grid_ref)\n for c, ref in enumerate(row_ref)\n if ref]) + len(s_str) + len(e_str)\n\n def _pad(s, cell_len=cell_len):\n return ' ' * (cell_len - len(s))\n\n # Loop through specs, fill in _tmp\n for r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n\n ref = grid_ref[r][c]\n if ref is None:\n if _tmp[r][c] == '':\n _tmp[r][c] = empty_str + _pad(empty_str)\n continue\n\n cell_str = s_str + _get_cell_str(r, c, ref)\n\n if spec['colspan'] > 1:\n for cc in range(1, spec['colspan'] - 1):\n _tmp[r][c + cc] = colspan_str + _pad(colspan_str)\n _tmp[r][c + spec['colspan'] - 1] = (\n colspan_str + _pad(colspan_str + e_str)) + e_str\n else:\n cell_str += e_str\n\n if spec['rowspan'] > 1:\n for rr in range(1, spec['rowspan'] - 1):\n _tmp[r + rr][c] = rowspan_str + _pad(rowspan_str)\n for cc in range(spec['colspan']):\n _tmp[r + spec['rowspan'] - 1][c + cc] = (\n rowspan_str + _pad(rowspan_str))\n\n _tmp[r][c] = cell_str + _pad(cell_str)\n\n # Append grid_str using data from _tmp in the correct order\n for r in row_seq[::-1]:\n grid_str += sp.join(_tmp[r]) + '\\n'\n\n # Append grid_str to include insets info\n if insets:\n grid_str += \"\\nWith insets:\\n\"\n for i_inset, inset in enumerate(insets):\n\n r = inset['cell'][0] - 1\n c = inset['cell'][1] - 1\n ref = grid_ref[r][c]\n\n grid_str += (\n s_str + ','.join(insets_ref[i_inset]) + e_str +\n ' over ' +\n s_str + _get_cell_str(r, c, ref) + e_str + '\\n'\n )\n\n # Add subplot titles\n\n # If shared_axes is False (default) use list_of_domains\n 
# This is used for insets and irregular layouts\n if not shared_xaxes and not shared_yaxes:\n x_dom = list_of_domains[::2]\n y_dom = list_of_domains[1::2]\n subtitle_pos_x = []\n subtitle_pos_y = []\n for x_domains in x_dom:\n subtitle_pos_x.append(sum(x_domains) / 2)\n for y_domains in y_dom:\n subtitle_pos_y.append(y_domains[1])\n # If shared_axes is True the domin of each subplot is not returned so the\n # title position must be calculated for each subplot\n else:\n subtitle_pos_x = [None] * cols\n subtitle_pos_y = [None] * rows\n delt_x = (x_e - x_s)\n for index in range(cols):\n subtitle_pos_x[index] = ((delt_x / 2) +\n ((delt_x + horizontal_spacing) * index))\n subtitle_pos_x *= rows\n for index in range(rows):\n subtitle_pos_y[index] = (1 - ((y_e + vertical_spacing) * index))\n subtitle_pos_y *= cols\n subtitle_pos_y = sorted(subtitle_pos_y, reverse=True)\n\n plot_titles = []\n for index in range(len(subplot_titles)):\n if not subplot_titles[index]:\n pass\n else:\n plot_titles.append({'y': subtitle_pos_y[index],\n 'xref': 'paper',\n 'x': subtitle_pos_x[index],\n 'yref': 'paper',\n 'text': subplot_titles[index],\n 'showarrow': False,\n 'font': graph_objs.Font(size=16),\n 'xanchor': 'center',\n 'yanchor': 'bottom'\n })\n\n layout['annotations'] = plot_titles\n\n if print_grid:\n print(grid_str)\n\n fig = graph_objs.Figure(layout=layout)\n\n fig.__dict__['_grid_ref'] = grid_ref\n fig.__dict__['_grid_str'] = grid_str\n\n return fig\n\n\ndef get_valid_graph_obj(obj, obj_type=None):\n \"\"\"Returns a new graph object that won't raise.\n\n CAREFUL: this will *silently* strip out invalid pieces of the object.\n\n \"\"\"\n # TODO: Deprecate or move. #283\n from plotly.graph_objs import graph_objs\n try:\n cls = getattr(graph_objs, obj_type)\n except (AttributeError, KeyError):\n raise exceptions.PlotlyError(\n \"'{}' is not a recognized graph_obj.\".format(obj_type)\n )\n return cls(obj, _raise=False)\n\n\ndef validate(obj, obj_type):\n \"\"\"Validate a dictionary, list, or graph object as 'obj_type'.\n\n This will not alter the 'obj' referenced in the call signature. It will\n raise an error if the 'obj' reference could not be instantiated as a\n valid 'obj_type' graph object.\n\n \"\"\"\n # TODO: Deprecate or move. #283\n from plotly.graph_objs import graph_objs\n\n if obj_type not in graph_reference.CLASSES:\n obj_type = graph_reference.string_to_class_name(obj_type)\n\n try:\n cls = getattr(graph_objs, obj_type)\n except AttributeError:\n raise exceptions.PlotlyError(\n \"'{0}' is not a recognizable graph_obj.\".\n format(obj_type))\n cls(obj) # this will raise on invalid keys/items\n\n\ndef _replace_newline(obj):\n \"\"\"Replaces '\\n' with '<br>' for all strings in a collection.\"\"\"\n if isinstance(obj, dict):\n d = dict()\n for key, val in list(obj.items()):\n d[key] = _replace_newline(val)\n return d\n elif isinstance(obj, list):\n l = list()\n for index, entry in enumerate(obj):\n l += [_replace_newline(entry)]\n return l\n elif isinstance(obj, six.string_types):\n s = obj.replace('\\n', '<br>')\n if s != obj:\n warnings.warn(\"Looks like you used a newline character: '\\\\n'.\\n\\n\"\n \"Plotly uses a subset of HTML escape characters\\n\"\n \"to do things like newline (<br>), bold (<b></b>),\\n\"\n \"italics (<i></i>), etc. Your newline characters \\n\"\n \"have been converted to '<br>' so they will show \\n\"\n \"up right on your Plotly figure!\")\n return s\n else:\n return obj # we return the actual reference... 
but DON'T mutate.\n\n\nif _ipython_imported:\n class PlotlyDisplay(IPython.core.display.HTML):\n \"\"\"An IPython display object for use with plotly urls\n\n PlotlyDisplay objects should be instantiated with a url for a plot.\n IPython will *choose* the proper display representation from any\n Python object, and using provided methods if they exist. By defining\n the following, if an HTML display is unusable, the PlotlyDisplay\n object can provide alternate representations.\n\n \"\"\"\n def __init__(self, url, width, height):\n self.resource = url\n self.embed_code = get_embed(url, width=width, height=height)\n super(PlotlyDisplay, self).__init__(data=self.embed_code)\n\n def _repr_html_(self):\n return self.embed_code\n\n\ndef return_figure_from_figure_or_data(figure_or_data, validate_figure):\n from plotly.graph_objs import graph_objs\n if isinstance(figure_or_data, dict):\n figure = figure_or_data\n elif isinstance(figure_or_data, list):\n figure = {'data': figure_or_data}\n else:\n raise exceptions.PlotlyError(\"The `figure_or_data` positional \"\n \"argument must be either \"\n \"`dict`-like or `list`-like.\")\n if validate_figure:\n\n try:\n graph_objs.Figure(figure)\n except exceptions.PlotlyError as err:\n raise exceptions.PlotlyError(\"Invalid 'figure_or_data' argument. \"\n \"Plotly will not be able to properly \"\n \"parse the resulting JSON. If you \"\n \"want to send this 'figure_or_data' \"\n \"to Plotly anyway (not recommended), \"\n \"you can set 'validate=False' as a \"\n \"plot option.\\nHere's why you're \"\n \"seeing this error:\\n\\n{0}\"\n \"\".format(err))\n if not figure['data']:\n raise exceptions.PlotlyEmptyDataError(\n \"Empty data list found. Make sure that you populated the \"\n \"list of data objects you're sending and try again.\\n\"\n \"Questions? [email protected]\"\n )\n\n return figure\n\n# Default colours for finance charts\n_DEFAULT_INCREASING_COLOR = '#3D9970' # http://clrs.cc\n_DEFAULT_DECREASING_COLOR = '#FF4136'\n\nDIAG_CHOICES = ['scatter', 'histogram', 'box']\nVALID_COLORMAP_TYPES = ['cat', 'seq']\n\n\nclass FigureFactory(object):\n \"\"\"\n BETA functions to create specific chart types.\n\n This is beta as in: subject to change in a backwards incompatible way\n without notice.\n\n Supported chart types include candlestick, open high low close, quiver,\n streamline, distplot, dendrogram, annotated heatmap, and tables. See\n FigureFactory.create_candlestick, FigureFactory.create_ohlc,\n FigureFactory.create_quiver, FigureFactory.create_streamline,\n FigureFactory.create_distplot, FigureFactory.create_dendrogram,\n FigureFactory.create_annotated_heatmap, or FigureFactory.create_table for\n more information and examples of a specific chart type.\n \"\"\"\n\n @staticmethod\n def _make_colorscale(colors, scale=None):\n \"\"\"\n Makes a colorscale from a list of colors and scale\n\n Takes a list of colors and scales and constructs a colorscale based\n on the colors in sequential order. If 'scale' is left empty, a linear-\n interpolated colorscale will be generated. 
If 'scale' is a specificed\n list, it must be the same legnth as colors and must contain all floats\n For documentation regarding to the form of the output, see\n https://plot.ly/python/reference/#mesh3d-colorscale\n \"\"\"\n colorscale = []\n\n if not scale:\n for j, color in enumerate(colors):\n colorscale.append([j * 1./(len(colors) - 1), color])\n return colorscale\n\n else:\n colorscale = [list(tup) for tup in zip(scale, colors)]\n return colorscale\n\n @staticmethod\n def _convert_colorscale_to_rgb(colorscale):\n \"\"\"\n Converts the colors in a colorscale to rgb colors\n\n A colorscale is an array of arrays, each with a numeric value as the\n first item and a color as the second. This function specifically is\n converting a colorscale with tuple colors (each coordinate between 0\n and 1) into a colorscale with the colors transformed into rgb colors\n \"\"\"\n for color in colorscale:\n color[1] = FigureFactory._convert_to_RGB_255(\n color[1]\n )\n\n for color in colorscale:\n color[1] = FigureFactory._label_rgb(\n color[1]\n )\n return colorscale\n\n @staticmethod\n def _make_linear_colorscale(colors):\n \"\"\"\n Makes a list of colors into a colorscale-acceptable form\n\n For documentation regarding to the form of the output, see\n https://plot.ly/python/reference/#mesh3d-colorscale\n \"\"\"\n scale = 1./(len(colors) - 1)\n return[[i * scale, color] for i, color in enumerate(colors)]\n\n @staticmethod\n def create_2D_density(x, y, colorscale='Earth', ncontours=20,\n hist_color=(0, 0, 0.5), point_color=(0, 0, 0.5),\n point_size=2, title='2D Density Plot',\n height=600, width=600):\n \"\"\"\n Returns figure for a 2D density plot\n\n :param (list|array) x: x-axis data for plot generation\n :param (list|array) y: y-axis data for plot generation\n :param (str|tuple|list) colorscale: either a plotly scale name, an rgb\n or hex color, a color tuple or a list or tuple of colors. An rgb\n color is of the form 'rgb(x, y, z)' where x, y, z belong to the\n interval [0, 255] and a color tuple is a tuple of the form\n (a, b, c) where a, b and c belong to [0, 1]. 
If colormap is a\n list, it must contain the valid color types aforementioned as its\n members.\n :param (int) ncontours: the number of 2D contours to draw on the plot\n :param (str) hist_color: the color of the plotted histograms\n :param (str) point_color: the color of the scatter points\n :param (str) point_size: the color of the scatter points\n :param (str) title: set the title for the plot\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple 2D Density Plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n # Make data points\n t = np.linspace(-1,1.2,2000)\n x = (t**3)+(0.3*np.random.randn(2000))\n y = (t**6)+(0.3*np.random.randn(2000))\n\n # Create a figure\n fig = FF.create_2D_density(x, y)\n\n # Plot the data\n py.iplot(fig, filename='simple-2d-density')\n ```\n\n Example 2: Using Parameters\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n # Make data points\n t = np.linspace(-1,1.2,2000)\n x = (t**3)+(0.3*np.random.randn(2000))\n y = (t**6)+(0.3*np.random.randn(2000))\n\n # Create custom colorscale\n colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)',\n (1, 1, 0.2), (0.98,0.98,0.98)]\n\n # Create a figure\n fig = FF.create_2D_density(\n x, y, colorscale=colorscale,\n hist_color='rgb(255, 237, 222)', point_size=3)\n\n # Plot the data\n py.iplot(fig, filename='use-parameters')\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n from numbers import Number\n\n # validate x and y are filled with numbers only\n for array in [x, y]:\n if not all(isinstance(element, Number) for element in array):\n raise exceptions.PlotlyError(\n \"All elements of your 'x' and 'y' lists must be numbers.\"\n )\n\n # validate x and y are the same length\n if len(x) != len(y):\n raise exceptions.PlotlyError(\n \"Both lists 'x' and 'y' must be the same length.\"\n )\n\n colorscale = FigureFactory._validate_colors(colorscale, 'rgb')\n colorscale = FigureFactory._make_linear_colorscale(colorscale)\n\n # validate hist_color and point_color\n hist_color = FigureFactory._validate_colors(hist_color, 'rgb')\n point_color = FigureFactory._validate_colors(point_color, 'rgb')\n\n trace1 = graph_objs.Scatter(\n x=x, y=y, mode='markers', name='points',\n marker=dict(\n color=point_color[0],\n size=point_size,\n opacity=0.4\n )\n )\n trace2 = graph_objs.Histogram2dcontour(\n x=x, y=y, name='density', ncontours=ncontours,\n colorscale=colorscale, reversescale=True, showscale=False\n )\n trace3 = graph_objs.Histogram(\n x=x, name='x density',\n marker=dict(color=hist_color[0]), yaxis='y2'\n )\n trace4 = graph_objs.Histogram(\n y=y, name='y density',\n marker=dict(color=hist_color[0]), xaxis='x2'\n )\n data = [trace1, trace2, trace3, trace4]\n\n layout = graph_objs.Layout(\n showlegend=False,\n autosize=False,\n title=title,\n height=height,\n width=width,\n xaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n yaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n margin=dict(\n t=50\n ),\n hovermode='closest',\n bargap=0,\n xaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n ),\n yaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n )\n )\n\n fig = graph_objs.Figure(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _validate_gantt(df):\n \"\"\"\n Validates the inputted dataframe or list\n \"\"\"\n if _pandas_imported and isinstance(df, 
pd.core.frame.DataFrame):\n # validate that df has all the required keys\n for key in REQUIRED_GANTT_KEYS:\n if key not in df:\n raise exceptions.PlotlyError(\n \"The columns in your dataframe must include the \"\n \"keys\".format(REQUIRED_GANTT_KEYS)\n )\n\n num_of_rows = len(df.index)\n chart = []\n for index in range(num_of_rows):\n task_dict = {}\n for key in df:\n task_dict[key] = df.ix[index][key]\n chart.append(task_dict)\n\n return chart\n\n # validate if df is a list\n if not isinstance(df, list):\n raise exceptions.PlotlyError(\"You must input either a dataframe \"\n \"or a list of dictionaries.\")\n\n # validate if df is empty\n if len(df) <= 0:\n raise exceptions.PlotlyError(\"Your list is empty. It must contain \"\n \"at least one dictionary.\")\n if not isinstance(df[0], dict):\n raise exceptions.PlotlyError(\"Your list must only \"\n \"include dictionaries.\")\n return df\n\n @staticmethod\n def _gantt(chart, colors, title, bar_width, showgrid_x, showgrid_y,\n height, width, tasks=None, task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n color_index = 0\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n # check if colors need to be looped\n if color_index >= len(colors):\n color_index = 0\n tasks[index]['fillcolor'] = colors[color_index]\n # Add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n color_index += 1\n\n layout = dict(\n title=title,\n showlegend=False,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _gantt_colorscale(chart, colors, title, index_col, show_colorbar,\n bar_width, showgrid_x, showgrid_y, height,\n width, tasks=None, task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n from numbers import Number\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n 
shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n # compute the color for task based on indexing column\n if isinstance(chart[0][index_col], Number):\n # check that colors has at least 2 colors\n if len(colors) < 2:\n raise exceptions.PlotlyError(\n \"You must use at least 2 colors in 'colors' if you \"\n \"are using a colorscale. However only the first two \"\n \"colors given will be used for the lower and upper \"\n \"bounds on the colormap.\"\n )\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n # unlabel color\n colors = FigureFactory._color_parser(\n colors, FigureFactory._unlabel_rgb\n )\n lowcolor = colors[0]\n highcolor = colors[1]\n\n intermed = (chart[index][index_col])/100.0\n intermed_color = FigureFactory._find_intermediate_color(\n lowcolor, highcolor, intermed\n )\n intermed_color = FigureFactory._color_parser(\n intermed_color, FigureFactory._label_rgb\n )\n tasks[index]['fillcolor'] = intermed_color\n # relabel colors with 'rgb'\n colors = FigureFactory._color_parser(\n colors, FigureFactory._label_rgb\n )\n\n # add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data for colorscale visibility\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[index, index],\n name='',\n marker={'color': 'white',\n 'colorscale': [[0, colors[0]], [1, colors[1]]],\n 'showscale': True,\n 'cmax': 100,\n 'cmin': 0}\n )\n )\n\n if isinstance(chart[0][index_col], str):\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n if len(colors) < len(index_vals):\n raise exceptions.PlotlyError(\n \"Error. 
The number of colors in 'colors' must be no less \"\n \"than the number of unique index values in your group \"\n \"column.\"\n )\n\n # make a dictionary assignment to each index value\n index_vals_dict = {}\n # define color index\n c_index = 0\n for key in index_vals:\n if c_index > len(colors) - 1:\n c_index = 0\n index_vals_dict[key] = colors[c_index]\n c_index += 1\n\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n tasks[index]['fillcolor'] = index_vals_dict[\n chart[index][index_col]\n ]\n\n # add a line for hover text and autorange\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data to generate legend\n showlegend = True\n for k, index_value in enumerate(index_vals):\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[k, k],\n showlegend=True,\n name=str(index_value),\n hoverinfo='none',\n marker=dict(\n color=colors[k],\n size=1\n )\n )\n )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def _gantt_dict(chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width, tasks=None,\n task_names=None, data=None):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(x0=chart[index]['Start'],\n x1=chart[index]['Finish'],\n name=chart[index]['Task'])\n tasks.append(task)\n\n shape_template = {\n 'type': 'rect',\n 'xref': 'x',\n 'yref': 'y',\n 'opacity': 1,\n 'line': {\n 'width': 0,\n },\n 'yref': 'y',\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # verify each value in index column appears in colors dictionary\n for key in index_vals:\n if key not in colors:\n raise exceptions.PlotlyError(\n \"If you are using colors as a dictionary, all of its \"\n \"keys must be all the values in the index column.\"\n )\n\n for index in range(len(tasks)):\n tn = tasks[index]['name']\n task_names.append(tn)\n del tasks[index]['name']\n tasks[index].update(shape_template)\n tasks[index]['y0'] = index - bar_width\n tasks[index]['y1'] = index + bar_width\n\n tasks[index]['fillcolor'] = colors[chart[index][index_col]]\n\n # add a line for hover text and autorange\n data.append(\n dict(\n 
x=[tasks[index]['x0'], tasks[index]['x1']],\n y=[index, index],\n name='',\n marker={'color': 'white'}\n )\n )\n\n if show_colorbar is True:\n # generate dummy data to generate legend\n showlegend = True\n for k, index_value in enumerate(index_vals):\n data.append(\n dict(\n x=[tasks[index]['x0'], tasks[index]['x0']],\n y=[k, k],\n showlegend=True,\n hoverinfo='none',\n name=str(index_value),\n marker=dict(\n color=colors[index_value],\n size=1\n )\n )\n )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode='closest',\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(tasks))),\n range=[-1, len(tasks) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list([\n dict(count=7,\n label='1w',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1m',\n step='month',\n stepmode='backward'),\n dict(count=6,\n label='6m',\n step='month',\n stepmode='backward'),\n dict(count=1,\n label='YTD',\n step='year',\n stepmode='todate'),\n dict(count=1,\n label='1y',\n step='year',\n stepmode='backward'),\n dict(step='all')\n ])\n ),\n type='date'\n )\n )\n layout['shapes'] = tasks\n\n fig = dict(data=data, layout=layout)\n return fig\n\n @staticmethod\n def create_gantt(df, colors=None, index_col=None, show_colorbar=False,\n reverse_colors=False, title='Gantt Chart',\n bar_width=0.2, showgrid_x=False, showgrid_y=False,\n height=600, width=900, tasks=None,\n task_names=None, data=None):\n \"\"\"\n Returns figure for a gantt chart\n\n :param (array|list) df: input data for gantt chart. Must be either a\n a dataframe or a list. If dataframe, the columns must include\n 'Task', 'Start' and 'Finish'. Other columns can be included and\n used for indexing. If a list, its elements must be dictionaries\n with the same required column headers: 'Task', 'Start' and\n 'Finish'.\n :param (str|list|dict|tuple) colors: either a plotly scale name, an\n rgb or hex color, a color tuple or a list of colors. An rgb color\n is of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colors is a list, it must\n contain the valid color types aforementioned as its members.\n If a dictionary, all values of the indexing column must be keys in\n colors.\n :param (str|float) index_col: the column header (if df is a data\n frame) that will function as the indexing column. 
If df is a list,\n index_col must be one of the keys in all the items of df.\n :param (bool) show_colorbar: determines if colorbar will be visible.\n Only applies if values in the index column are numeric.\n :param (bool) reverse_colors: reverses the order of selected colors\n :param (str) title: the title of the chart\n :param (float) bar_width: the width of the horizontal bars in the plot\n :param (bool) showgrid_x: show/hide the x-axis grid\n :param (bool) showgrid_y: show/hide the y-axis grid\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple Gantt Chart\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01', Finish='2009-02-30'),\n dict(Task=\"Job B\", Start='2009-03-05', Finish='2009-04-15'),\n dict(Task=\"Job C\", Start='2009-02-20', Finish='2009-05-30')]\n\n # Create a figure\n fig = FF.create_gantt(df)\n\n # Plot the data\n py.iplot(fig, filename='Simple Gantt Chart', world_readable=True)\n ```\n\n Example 2: Index by Column with Numerical Entries\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Complete=10),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Complete=60),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Complete=95)]\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors='Blues', index_col='Complete',\n show_colorbar=True, bar_width=0.5,\n showgrid_x=True, showgrid_y=True)\n\n # Plot the data\n py.iplot(fig, filename='Numerical Entries', world_readable=True)\n ```\n\n Example 3: Index by Column with String Entries\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Resource='Apple'),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Resource='Grape'),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Resource='Banana')]\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors=['rgb(200, 50, 25)',\n (1, 0, 1),\n '#6c4774'],\n index_col='Resource',\n reverse_colors=True,\n show_colorbar=True)\n\n # Plot the data\n py.iplot(fig, filename='String Entries', world_readable=True)\n ```\n\n Example 4: Use a dictionary for colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Make data for chart\n df = [dict(Task=\"Job A\", Start='2009-01-01',\n Finish='2009-02-30', Resource='Apple'),\n dict(Task=\"Job B\", Start='2009-03-05',\n Finish='2009-04-15', Resource='Grape'),\n dict(Task=\"Job C\", Start='2009-02-20',\n Finish='2009-05-30', Resource='Banana')]\n\n # Make a dictionary of colors\n colors = {'Apple': 'rgb(255, 0, 0)',\n 'Grape': 'rgb(170, 14, 200)',\n 'Banana': (1, 1, 0.2)}\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors=colors,\n index_col='Resource',\n show_colorbar=True)\n\n # Plot the data\n py.iplot(fig, filename='dictioanry colors', world_readable=True)\n ```\n\n Example 5: Use a pandas dataframe\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import pandas as pd\n\n # Make data as a dataframe\n df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],\n ['Fast', '2011-01-01', '2012-06-05', 55],\n ['Eat', '2012-01-05', 
'2013-07-05', 94]],\n columns=['Task', 'Start', 'Finish', 'Complete'])\n\n # Create a figure with Plotly colorscale\n fig = FF.create_gantt(df, colors='Blues', index_col='Complete',\n show_colorbar=True, bar_width=0.5,\n showgrid_x=True, showgrid_y=True)\n\n # Plot the data\n py.iplot(fig, filename='data with dataframe', world_readable=True)\n ```\n \"\"\"\n # validate gantt input data\n chart = FigureFactory._validate_gantt(df)\n\n if index_col:\n if index_col not in chart[0]:\n raise exceptions.PlotlyError(\n \"In order to use an indexing column and assign colors to \"\n \"the values of the index, you must choose an actual \"\n \"column name in the dataframe or key if a list of \"\n \"dictionaries is being used.\")\n\n # validate gantt index column\n index_list = []\n for dictionary in chart:\n index_list.append(dictionary[index_col])\n FigureFactory._validate_index(index_list)\n\n # Validate colors\n if isinstance(colors, dict):\n colors = FigureFactory._validate_colors_dict(colors, 'rgb')\n else:\n colors = FigureFactory._validate_colors(colors, 'rgb')\n\n if reverse_colors is True:\n colors.reverse()\n\n if not index_col:\n if isinstance(colors, dict):\n raise exceptions.PlotlyError(\n \"Error. You have set colors to a dictionary but have not \"\n \"picked an index. An index is required if you are \"\n \"assigning colors to particular values in a dictioanry.\"\n )\n fig = FigureFactory._gantt(\n chart, colors, title, bar_width, showgrid_x, showgrid_y,\n height, width, tasks=None, task_names=None, data=None\n )\n return fig\n else:\n if not isinstance(colors, dict):\n fig = FigureFactory._gantt_colorscale(\n chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width,\n tasks=None, task_names=None, data=None\n )\n return fig\n else:\n fig = FigureFactory._gantt_dict(\n chart, colors, title, index_col, show_colorbar, bar_width,\n showgrid_x, showgrid_y, height, width,\n tasks=None, task_names=None, data=None\n )\n return fig\n\n @staticmethod\n def _validate_colors(colors, colortype='tuple'):\n \"\"\"\n Validates color(s) and returns a list of color(s) of a specified type\n \"\"\"\n from numbers import Number\n if colors is None:\n colors = DEFAULT_PLOTLY_COLORS\n\n if isinstance(colors, str):\n if colors in PLOTLY_SCALES:\n colors = PLOTLY_SCALES[colors]\n elif 'rgb' in colors or '#' in colors:\n colors = [colors]\n else:\n raise exceptions.PlotlyError(\n \"If your colors variable is a string, it must be a \"\n \"Plotly scale, an rgb color or a hex color.\")\n\n elif isinstance(colors, tuple):\n if isinstance(colors[0], Number):\n colors = [colors]\n else:\n colors = list(colors)\n\n # convert color elements in list to tuple color\n for j, each_color in enumerate(colors):\n if 'rgb' in each_color:\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unlabel_rgb\n )\n for value in each_color:\n if value > 255.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your rgb colors \"\n \"tuples cannot exceed 255.0.\"\n )\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unconvert_from_RGB_255\n )\n colors[j] = each_color\n\n if '#' in each_color:\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._hex_to_rgb\n )\n each_color = FigureFactory._color_parser(\n each_color, FigureFactory._unconvert_from_RGB_255\n )\n\n colors[j] = each_color\n\n if isinstance(each_color, tuple):\n for value in each_color:\n if value > 1.0:\n raise exceptions.PlotlyError(\n \"Whoops! 
The elements in your colors tuples \"\n \"cannot exceed 1.0.\"\n )\n colors[j] = each_color\n\n if colortype == 'rgb':\n for j, each_color in enumerate(colors):\n rgb_color = FigureFactory._color_parser(\n each_color, FigureFactory._convert_to_RGB_255\n )\n colors[j] = FigureFactory._color_parser(\n rgb_color, FigureFactory._label_rgb\n )\n\n return colors\n\n @staticmethod\n def _validate_colors_dict(colors, colortype='tuple'):\n \"\"\"\n Validates dictioanry of color(s)\n \"\"\"\n # validate each color element in the dictionary\n for key in colors:\n if 'rgb' in colors[key]:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unlabel_rgb\n )\n for value in colors[key]:\n if value > 255.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your rgb colors \"\n \"tuples cannot exceed 255.0.\"\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unconvert_from_RGB_255\n )\n\n if '#' in colors[key]:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._hex_to_rgb\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._unconvert_from_RGB_255\n )\n\n if isinstance(colors[key], tuple):\n for value in colors[key]:\n if value > 1.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your colors tuples \"\n \"cannot exceed 1.0.\"\n )\n\n if colortype == 'rgb':\n for key in colors:\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._convert_to_RGB_255\n )\n colors[key] = FigureFactory._color_parser(\n colors[key], FigureFactory._label_rgb\n )\n\n return colors\n\n @staticmethod\n def _calc_stats(data):\n \"\"\"\n Calculate statistics for use in violin plot.\n \"\"\"\n import numpy as np\n\n x = np.asarray(data, np.float)\n vals_min = np.min(x)\n vals_max = np.max(x)\n q2 = np.percentile(x, 50, interpolation='linear')\n q1 = np.percentile(x, 25, interpolation='lower')\n q3 = np.percentile(x, 75, interpolation='higher')\n iqr = q3 - q1\n whisker_dist = 1.5 * iqr\n\n # in order to prevent drawing whiskers outside the interval\n # of data one defines the whisker positions as:\n d1 = np.min(x[x >= (q1 - whisker_dist)])\n d2 = np.max(x[x <= (q3 + whisker_dist)])\n return {\n 'min': vals_min,\n 'max': vals_max,\n 'q1': q1,\n 'q2': q2,\n 'q3': q3,\n 'd1': d1,\n 'd2': d2\n }\n\n @staticmethod\n def _make_half_violin(x, y, fillcolor='#1f77b4',\n linecolor='rgb(0, 0, 0)'):\n \"\"\"\n Produces a sideways probability distribution fig violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n text = ['(pdf(y), y)=(' + '{:0.2f}'.format(x[i]) +\n ', ' + '{:0.2f}'.format(y[i]) + ')'\n for i in range(len(x))]\n\n return graph_objs.Scatter(\n x=x,\n y=y,\n mode='lines',\n name='',\n text=text,\n fill='tonextx',\n fillcolor=fillcolor,\n line=graph_objs.Line(width=0.5, color=linecolor, shape='spline'),\n hoverinfo='text',\n opacity=0.5\n )\n\n @staticmethod\n def _make_violin_rugplot(vals, pdf_max, distance,\n color='#1f77b4'):\n \"\"\"\n Returns a rugplot fig for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n y=vals,\n x=[-pdf_max-distance]*len(vals),\n marker=graph_objs.Marker(\n color=color,\n symbol='line-ew-open'\n ),\n mode='markers',\n name='',\n showlegend=False,\n hoverinfo='y'\n )\n\n @staticmethod\n def _make_quartiles(q1, q3):\n \"\"\"\n Makes the upper and lower quartiles for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0, 0],\n y=[q1, q3],\n 
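# vertical segment at x=0 running from q1 to q3; the hover text below\n            # labels the lower and upper quartiles\n            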
text=['lower-quartile: ' + '{:0.2f}'.format(q1),\n 'upper-quartile: ' + '{:0.2f}'.format(q3)],\n mode='lines',\n line=graph_objs.Line(\n width=4,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text'\n )\n\n @staticmethod\n def _make_median(q2):\n \"\"\"\n Formats the 'median' hovertext for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0],\n y=[q2],\n text=['median: ' + '{:0.2f}'.format(q2)],\n mode='markers',\n marker=dict(symbol='square',\n color='rgb(255,255,255)'),\n hoverinfo='text'\n )\n\n @staticmethod\n def _make_non_outlier_interval(d1, d2):\n \"\"\"\n Returns the scatterplot fig of most of a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n return graph_objs.Scatter(\n x=[0, 0],\n y=[d1, d2],\n name='',\n mode='lines',\n line=graph_objs.Line(width=1.5,\n color='rgb(0,0,0)')\n )\n\n @staticmethod\n def _make_XAxis(xaxis_title, xaxis_range):\n \"\"\"\n Makes the x-axis for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n xaxis = graph_objs.XAxis(title=xaxis_title,\n range=xaxis_range,\n showgrid=False,\n zeroline=False,\n showline=False,\n mirror=False,\n ticks='',\n showticklabels=False,\n )\n return xaxis\n\n @staticmethod\n def _make_YAxis(yaxis_title):\n \"\"\"\n Makes the y-axis for a violin plot.\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n yaxis = graph_objs.YAxis(title=yaxis_title,\n showticklabels=True,\n autorange=True,\n ticklen=4,\n showline=True,\n zeroline=False,\n showgrid=False,\n mirror=False)\n return yaxis\n\n @staticmethod\n def _violinplot(vals, fillcolor='#1f77b4', rugplot=True):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n \"\"\"\n import numpy as np\n from scipy import stats\n\n vals = np.asarray(vals, np.float)\n # summary statistics\n vals_min = FigureFactory._calc_stats(vals)['min']\n vals_max = FigureFactory._calc_stats(vals)['max']\n q1 = FigureFactory._calc_stats(vals)['q1']\n q2 = FigureFactory._calc_stats(vals)['q2']\n q3 = FigureFactory._calc_stats(vals)['q3']\n d1 = FigureFactory._calc_stats(vals)['d1']\n d2 = FigureFactory._calc_stats(vals)['d2']\n\n # kernel density estimation of pdf\n pdf = stats.gaussian_kde(vals)\n # grid over the data interval\n xx = np.linspace(vals_min, vals_max, 100)\n # evaluate the pdf at the grid xx\n yy = pdf(xx)\n max_pdf = np.max(yy)\n # distance from the violin plot to rugplot\n distance = (2.0 * max_pdf)/10 if rugplot else 0\n # range for x values in the plot\n plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]\n plot_data = [FigureFactory._make_half_violin(\n -yy, xx, fillcolor=fillcolor),\n FigureFactory._make_half_violin(\n yy, xx, fillcolor=fillcolor),\n FigureFactory._make_non_outlier_interval(d1, d2),\n FigureFactory._make_quartiles(q1, q3),\n FigureFactory._make_median(q2)]\n if rugplot:\n plot_data.append(FigureFactory._make_violin_rugplot(\n vals,\n max_pdf,\n distance=distance,\n color=fillcolor)\n )\n return plot_data, plot_xrange\n\n @staticmethod\n def _violin_no_colorscale(data, data_header, group_header, colors,\n use_colorscale, group_stats,\n height, width, title):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n 
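# one subplot column per group (shared y-axis); colors are cycled across\n        # the columns and each column receives the mirrored half-violins and\n        # summary-statistic traces returned by _violinplot()\n        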
fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n color_index = 0\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n if color_index >= len(colors):\n color_index = 0\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor=colors[color_index]\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n color_index += 1\n\n # add violin plot labels\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def _violin_colorscale(data, data_header, group_header, colors,\n use_colorscale, group_stats, height, width, title):\n \"\"\"\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot with colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n # make sure all group names are keys in group_stats\n for group in group_name:\n if group not in group_stats:\n raise exceptions.PlotlyError(\"All values/groups in the index \"\n \"column must be represented \"\n \"as a key in group_stats.\")\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n\n # prepare low and high color for colorscale\n lowcolor = FigureFactory._color_parser(\n colors[0], FigureFactory._unlabel_rgb\n )\n highcolor = FigureFactory._color_parser(\n colors[1], FigureFactory._unlabel_rgb\n )\n\n # find min and max values in group_stats\n group_stats_values = []\n for key in group_stats:\n group_stats_values.append(group_stats[key])\n\n max_value = max(group_stats_values)\n min_value = min(group_stats_values)\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n\n # find intermediate color from colorscale\n intermed = (group_stats[gr] - min_value) / (max_value - min_value)\n intermed_color = FigureFactory._find_intermediate_color(\n lowcolor, highcolor, intermed\n )\n\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor='rgb{}'.format(intermed_color)\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n # add colorbar to plot\n trace_dummy = graph_objs.Scatter(\n x=[0],\n y=[0],\n mode='markers',\n marker=dict(\n size=2,\n cmin=min_value,\n cmax=max_value,\n colorscale=[[0, colors[0]],\n [1, colors[1]]],\n showscale=True),\n showlegend=False,\n )\n fig.append_trace(trace_dummy, 1, L)\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def _violin_dict(data, data_header, group_header, colors, use_colorscale,\n group_stats, height, width, title):\n \"\"\"\n Refer to 
FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n import numpy as np\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n group_name.sort()\n\n # check if all group names appear in colors dict\n for group in group_name:\n if group not in colors:\n raise exceptions.PlotlyError(\"If colors is a dictionary, all \"\n \"the group names must appear as \"\n \"keys in colors.\")\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(rows=1, cols=L,\n shared_yaxes=True,\n horizontal_spacing=0.025,\n print_grid=True)\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], np.float)\n plot_data, plot_xrange = FigureFactory._violinplot(\n vals,\n fillcolor=colors[gr]\n )\n layout = graph_objs.Layout()\n\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n\n # add violin plot labels\n fig['layout'].update({'xaxis{}'.format(k + 1):\n FigureFactory._make_XAxis(group_name[k],\n plot_xrange)})\n\n # set the sharey axis style\n fig['layout'].update(\n {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}\n )\n fig['layout'].update(\n title=title,\n showlegend=False,\n hovermode='closest',\n autosize=False,\n height=height,\n width=width\n )\n\n return fig\n\n @staticmethod\n def create_violin(data, data_header=None, group_header=None,\n colors=None, use_colorscale=False, group_stats=None,\n height=450, width=600, title='Violin and Rug Plot'):\n \"\"\"\n Returns figure for a violin plot\n\n :param (list|array) data: accepts either a list of numerical values,\n a list of dictionaries all with identical keys and at least one\n column of numeric values, or a pandas dataframe with at least one\n column of numbers\n :param (str) data_header: the header of the data column to be used\n from an inputted pandas dataframe. Not applicable if 'data' is\n a list of numeric values\n :param (str) group_header: applicable if grouping data by a variable.\n 'group_header' must be set to the name of the grouping variable.\n :param (str|tuple|list|dict) colors: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colors is a list, it must contain valid color types as its\n members.\n :param (bool) use_colorscale: Only applicable if grouping by another\n variable. Will implement a colorscale based on the first 2 colors\n of param colors. This means colors must be a list with at least 2\n colors in it (Plotly colorscales are accepted since they map to a\n list of two rgb colors)\n :param (dict) group_stats: a dictioanry where each key is a unique\n value from the group_header column in data. 
Each value must be a\n number and will be used to color the violin plots if a colorscale\n is being used\n :param (float) height: the height of the violin plot\n :param (float) width: the width of the violin plot\n :param (str) title: the title of the violin plot\n\n Example 1: Single Violin Plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n from scipy import stats\n\n # create list of random values\n data_list = np.random.randn(100)\n data_list.tolist()\n\n # create violin fig\n fig = FF.create_violin(data_list, colors='#604d9e')\n\n # plot\n py.iplot(fig, filename='Violin Plot')\n ```\n\n Example 2: Multiple Violin Plots with Qualitative Coloring\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n import pandas as pd\n from scipy import stats\n\n # create dataframe\n np.random.seed(619517)\n Nr=250\n y = np.random.randn(Nr)\n gr = np.random.choice(list(\"ABCDE\"), Nr)\n norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n for i, letter in enumerate(\"ABCDE\"):\n y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n df = pd.DataFrame(dict(Score=y, Group=gr))\n\n # create violin fig\n fig = FF.create_violin(df, data_header='Score', group_header='Group',\n height=600, width=1000)\n\n # plot\n py.iplot(fig, filename='Violin Plot with Coloring')\n ```\n\n Example 3: Violin Plots with Colorscale\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n import numpy as np\n import pandas as pd\n from scipy import stats\n\n # create dataframe\n np.random.seed(619517)\n Nr=250\n y = np.random.randn(Nr)\n gr = np.random.choice(list(\"ABCDE\"), Nr)\n norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n for i, letter in enumerate(\"ABCDE\"):\n y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n df = pd.DataFrame(dict(Score=y, Group=gr))\n\n # define header params\n data_header = 'Score'\n group_header = 'Group'\n\n # make groupby object with pandas\n group_stats = {}\n groupby_data = df.groupby([group_header])\n\n for group in \"ABCDE\":\n data_from_group = groupby_data.get_group(group)[data_header]\n # take a stat of the grouped data\n stat = np.median(data_from_group)\n # add to dictionary\n group_stats[group] = stat\n\n # create violin fig\n fig = FF.create_violin(df, data_header='Score', group_header='Group',\n height=600, width=1000, use_colorscale=True,\n group_stats=group_stats)\n\n # plot\n py.iplot(fig, filename='Violin Plot with Colorscale')\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n from numbers import Number\n\n # Validate colors\n if isinstance(colors, dict):\n valid_colors = FigureFactory._validate_colors_dict(colors, 'rgb')\n else:\n valid_colors = FigureFactory._validate_colors(colors, 'rgb')\n\n # validate data and choose plot type\n if group_header is None:\n if isinstance(data, list):\n if len(data) <= 0:\n raise exceptions.PlotlyError(\"If data is a list, it must be \"\n \"nonempty and contain either \"\n \"numbers or dictionaries.\")\n\n if not all(isinstance(element, Number) for element in data):\n raise exceptions.PlotlyError(\"If data is a list, it must \"\n \"contain only numbers.\")\n\n if _pandas_imported and isinstance(data, pd.core.frame.DataFrame):\n if data_header is None:\n raise exceptions.PlotlyError(\"data_header must be the \"\n 
\"column name with the \"\n \"desired numeric data for \"\n \"the violin plot.\")\n\n data = data[data_header].values.tolist()\n\n # call the plotting functions\n plot_data, plot_xrange = FigureFactory._violinplot(\n data, fillcolor=valid_colors[0]\n )\n\n layout = graph_objs.Layout(\n title=title,\n autosize=False,\n font=graph_objs.Font(size=11),\n height=height,\n showlegend=False,\n width=width,\n xaxis=FigureFactory._make_XAxis('', plot_xrange),\n yaxis=FigureFactory._make_YAxis(''),\n hovermode='closest'\n )\n layout['yaxis'].update(dict(showline=False,\n showticklabels=False,\n ticks=''))\n\n fig = graph_objs.Figure(data=graph_objs.Data(plot_data),\n layout=layout)\n\n return fig\n\n else:\n if not isinstance(data, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\"Error. You must use a pandas \"\n \"DataFrame if you are using a \"\n \"group header.\")\n\n if data_header is None:\n raise exceptions.PlotlyError(\"data_header must be the column \"\n \"name with the desired numeric \"\n \"data for the violin plot.\")\n\n if use_colorscale is False:\n if isinstance(valid_colors, dict):\n # validate colors dict choice below\n fig = FigureFactory._violin_dict(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n else:\n fig = FigureFactory._violin_no_colorscale(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n else:\n if isinstance(valid_colors, dict):\n raise exceptions.PlotlyError(\"The colors param cannot be \"\n \"a dictionary if you are \"\n \"using a colorscale.\")\n\n if len(valid_colors) < 2:\n raise exceptions.PlotlyError(\"colors must be a list with \"\n \"at least 2 colors. A \"\n \"Plotly scale is allowed.\")\n\n if not isinstance(group_stats, dict):\n raise exceptions.PlotlyError(\"Your group_stats param \"\n \"must be a dictionary.\")\n\n fig = FigureFactory._violin_colorscale(\n data, data_header, group_header, valid_colors,\n use_colorscale, group_stats, height, width, title\n )\n return fig\n\n @staticmethod\n def _find_intermediate_color(lowcolor, highcolor, intermed):\n \"\"\"\n Returns the color at a given distance between two colors\n\n This function takes two color tuples, where each element is between 0\n and 1, along with a value 0 < intermed < 1 and returns a color that is\n intermed-percent from lowcolor to highcolor\n\n \"\"\"\n diff_0 = float(highcolor[0] - lowcolor[0])\n diff_1 = float(highcolor[1] - lowcolor[1])\n diff_2 = float(highcolor[2] - lowcolor[2])\n\n return (lowcolor[0] + intermed * diff_0,\n lowcolor[1] + intermed * diff_1,\n lowcolor[2] + intermed * diff_2)\n\n @staticmethod\n def _color_parser(colors, function):\n \"\"\"\n Takes color(s) and a function and applies the function on the color(s)\n\n In particular, this function identifies whether the given color object\n is an iterable or not and applies the given color-parsing function to\n the color or iterable of colors. 
If given an iterable, it will only be\n able to work with it if all items in the iterable are of the same type\n - rgb string, hex string or tuple\n\n \"\"\"\n from numbers import Number\n if isinstance(colors, str):\n return function(colors)\n\n if isinstance(colors, tuple) and isinstance(colors[0], Number):\n return function(colors)\n\n if hasattr(colors, '__iter__'):\n if isinstance(colors, tuple):\n new_color_tuple = tuple(function(item) for item in colors)\n return new_color_tuple\n\n else:\n new_color_list = [function(item) for item in colors]\n return new_color_list\n\n @staticmethod\n def _unconvert_from_RGB_255(colors):\n \"\"\"\n Return a tuple where each element gets divided by 255\n\n Takes a (list of) color tuple(s) where each element is between 0 and\n 255. Returns the same tuples where each tuple element is normalized to\n a value between 0 and 1\n\n \"\"\"\n return (colors[0]/(255.0),\n colors[1]/(255.0),\n colors[2]/(255.0))\n\n @staticmethod\n def _map_face2color(face, colormap, vmin, vmax):\n \"\"\"\n Normalize facecolor values by vmin/vmax and return rgb-color strings\n\n This function takes a tuple color along with a colormap and a minimum\n (vmin) and maximum (vmax) range of possible mean distances for the\n given parametrized surface. It returns an rgb color based on the mean\n distance between vmin and vmax\n\n \"\"\"\n if vmin >= vmax:\n raise exceptions.PlotlyError(\"Incorrect relation between vmin \"\n \"and vmax. The vmin value cannot be \"\n \"bigger than or equal to the value \"\n \"of vmax.\")\n\n if len(colormap) == 1:\n # color each triangle face with the same color in colormap\n face_color = colormap[0]\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n else:\n if face == vmax:\n # pick last color in colormap\n face_color = colormap[-1]\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n else:\n # find the normalized distance t of a triangle face between\n # vmin and vmax where the distance is between 0 and 1\n t = (face - vmin) / float((vmax - vmin))\n low_color_index = int(t / (1./(len(colormap) - 1)))\n\n face_color = FigureFactory._find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n t * (len(colormap) - 1) - low_color_index\n )\n\n face_color = FigureFactory._convert_to_RGB_255(face_color)\n face_color = FigureFactory._label_rgb(face_color)\n return face_color\n\n @staticmethod\n def _trisurf(x, y, z, simplices, show_colorbar, edges_color,\n colormap=None, color_func=None, plot_edges=False,\n x_edge=None, y_edge=None, z_edge=None, facecolor=None):\n \"\"\"\n Refer to FigureFactory.create_trisurf() for docstring\n \"\"\"\n # numpy import check\n if _numpy_imported is False:\n raise ImportError(\"FigureFactory._trisurf() requires \"\n \"numpy imported.\")\n import numpy as np\n from plotly.graph_objs import graph_objs\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n # Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\"If color_func is a list/array, it must \"\n \"be the same length as simplices.\")\n\n # convert 
all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if '#' in color_func[index]:\n foo = FigureFactory._hex_to_rgb(color_func[index])\n color_func[index] = FigureFactory._label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = FigureFactory._convert_to_RGB_255(color_func[index])\n color_func[index] = FigureFactory._label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = FigureFactory._map_face2color(mean_dists[index],\n colormap,\n min_mean_dists,\n max_mean_dists)\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = list(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(x=x, y=y, z=z, facecolor=facecolor,\n i=ii, j=jj, k=kk, name='')\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = FigureFactory._make_colorscale(colormap)\n colorscale = FigureFactory._convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[0],\n y=y[0],\n z=z[0],\n mode='markers',\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True),\n hoverinfo='None',\n showlegend=False\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return graph_objs.Data([triangles, colorbar])\n else:\n return graph_objs.Data([triangles])\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\"If any (x_edge, y_edge, z_edge) is None, \"\n \"all must be None\")\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack([pull_edges[:, :, 0],\n np.tile(None, [pull_edges.shape[0], 1])])\n y_edge_pull = np.hstack([pull_edges[:, :, 1],\n np.tile(None, [pull_edges.shape[0], 1])])\n z_edge_pull = np.hstack([pull_edges[:, :, 2],\n np.tile(None, [pull_edges.shape[0], 1])])\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\"The lengths of x_edge, y_edge and \"\n \"z_edge are not the same.\")\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge, y=y_edge, z=z_edge, 
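\n            # x_edge/y_edge/z_edge hold each triangle's closed edge loop,\n            # flattened and separated by None entries, so this single trace\n            # draws the whole wireframe with gaps between triangles\n            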
mode='lines',\n line=graph_objs.Line(\n color=edges_color,\n width=1.5\n ),\n showlegend=False\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return graph_objs.Data([triangles, lines, colorbar])\n else:\n return graph_objs.Data([triangles, lines])\n\n @staticmethod\n def create_trisurf(x, y, z, simplices, colormap=None, show_colorbar=True,\n color_func=None, title='Trisurf Plot', plot_edges=True,\n showbackground=True,\n backgroundcolor='rgb(230, 230, 230)',\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n edges_color='rgb(50, 50, 50)',\n height=800, width=800,\n aspectratio=dict(x=1, y=1, z=1)):\n \"\"\"\n Returns figure for a triangulated surface plot\n\n :param (array) x: data values of x in a 1D array\n :param (array) y: data values of y in a 1D array\n :param (array) z: data values of z in a 1D array\n :param (array) simplices: an array of shape (ntri, 3) where ntri is\n the number of triangles in the triangularization. Each row of the\n array contains the indicies of the verticies of each triangle\n :param (str|tuple|list) colormap: either a plotly scale name, an rgb\n or hex color, a color tuple or a list of colors. An rgb color is\n of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colormap is a list, it must\n contain the valid color types aforementioned as its members\n :param (bool) show_colorbar: determines if colorbar is visible\n :param (function|list) color_func: The parameter that determines the\n coloring of the surface. Takes either a function with 3 arguments\n x, y, z or a list/array of color values the same length as\n simplices. If None, coloring will only depend on the z axis\n :param (str) title: title of the plot\n :param (bool) plot_edges: determines if the triangles on the trisurf\n are visible\n :param (bool) showbackground: makes background in plot visible\n :param (str) backgroundcolor: color of background. Takes a string of\n the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) gridcolor: color of the gridlines besides the axes. Takes\n a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255\n inclusive\n :param (str) zerolinecolor: color of the axes. Takes a string of the\n form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) edges_color: color of the edges, if plot_edges is True\n :param (int|float) height: the height of the plot (in pixels)\n :param (int|float) width: the width of the plot (in pixels)\n :param (dict) aspectratio: a dictionary of the aspect ratio values for\n the x, y and z axes. 
'x', 'y' and 'z' take (int|float) values\n\n Example 1: Sphere\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 20)\n v = np.linspace(0, np.pi, 20)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n x = np.sin(v)*np.cos(u)\n y = np.sin(v)*np.sin(u)\n z = np.cos(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=\"Blues\",\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-sphere')\n ```\n\n Example 2: Torus\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 20)\n v = np.linspace(0, 2*np.pi, 20)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n x = (3 + (np.cos(v)))*np.cos(u)\n y = (3 + (np.cos(v)))*np.sin(u)\n z = np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=\"Greys\",\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-torus')\n ```\n\n Example 3: Mobius Band\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u = np.linspace(0, 2*np.pi, 24)\n v = np.linspace(-1, 1, 8)\n u,v = np.meshgrid(u,v)\n u = u.flatten()\n v = v.flatten()\n\n tp = 1 + 0.5*v*np.cos(u/2.)\n x = tp*np.cos(u)\n y = tp*np.sin(u)\n z = 0.5*v*np.sin(u/2.)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],\n simplices=simplices)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-mobius-band')\n ```\n\n Example 4: Using a Custom Colormap Function with Light Cone\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # Make data for plot\n u=np.linspace(-np.pi, np.pi, 30)\n v=np.linspace(-np.pi, np.pi, 30)\n u,v=np.meshgrid(u,v)\n u=u.flatten()\n v=v.flatten()\n\n x = u\n y = u*np.cos(v)\n z = u*np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n # Define distance function\n def dist_origin(x, y, z):\n return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)\n\n # Create a figure\n fig1 = FF.create_trisurf(x=x, y=y, z=z,\n colormap=['#604d9e',\n 'rgb(50, 150, 255)',\n (0.2, 0.2, 0.8)],\n simplices=simplices,\n color_func=dist_origin)\n # Plot the data\n py.iplot(fig1, filename='trisurf-plot-custom-coloring')\n ```\n\n Example 5: Enter color_func as a list of colors\n ```\n # Necessary Imports for Trisurf\n import numpy as np\n from scipy.spatial import Delaunay\n import random\n\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import graph_objs\n\n # 
Make data for plot\n u=np.linspace(-np.pi, np.pi, 30)\n v=np.linspace(-np.pi, np.pi, 30)\n u,v=np.meshgrid(u,v)\n u=u.flatten()\n v=v.flatten()\n\n x = u\n y = u*np.cos(v)\n z = u*np.sin(v)\n\n points2D = np.vstack([u,v]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n\n\n colors = []\n color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']\n\n for index in range(len(simplices)):\n colors.append(random.choice(color_choices))\n\n fig = FF.create_trisurf(\n x, y, z, simplices,\n color_func=colors,\n show_colorbar=True,\n edges_color='rgb(2, 85, 180)',\n title=' Modern Art'\n )\n\n py.iplot(fig, filename=\"trisurf-plot-modern-art\")\n ```\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n # Validate colormap\n colormap = FigureFactory._validate_colors(colormap, 'tuple')\n\n data1 = FigureFactory._trisurf(x, y, z, simplices,\n show_colorbar=show_colorbar,\n color_func=color_func,\n colormap=colormap,\n edges_color=edges_color,\n plot_edges=plot_edges)\n axis = dict(\n showbackground=showbackground,\n backgroundcolor=backgroundcolor,\n gridcolor=gridcolor,\n zerolinecolor=zerolinecolor,\n )\n layout = graph_objs.Layout(\n title=title,\n width=width,\n height=height,\n scene=graph_objs.Scene(\n xaxis=graph_objs.XAxis(axis),\n yaxis=graph_objs.YAxis(axis),\n zaxis=graph_objs.ZAxis(axis),\n aspectratio=dict(\n x=aspectratio['x'],\n y=aspectratio['y'],\n z=aspectratio['z']),\n )\n )\n\n return graph_objs.Figure(data=data1, layout=layout)\n\n @staticmethod\n def _scatterplot(dataframe, headers, diag, size,\n height, width, title, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix without index\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n # Insert traces into trace_list\n for listy in dataframe:\n for listx in dataframe:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n name=None,\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n trace_list.append(trace)\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size),\n showlegend=False,\n **kwargs\n )\n trace_list.append(trace)\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True\n )\n return fig\n\n @staticmethod\n def _scatterplot_dict(dataframe, headers, diag, size,\n height, width, title, index, index_vals,\n endpts, colormap, colormap_type, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked.\n Used if colormap is a dictionary with index values as keys pointing to\n colors. 
Forces colormap_type to behave categorically because it would\n not make sense colors are assigned to each index value and thus\n implies that a categorical approach should be taken\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n theme = colormap\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[name]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[name]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[name]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[name]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[name]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[name]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(\n trace_list[trace_index][name],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n @staticmethod\n def _scatterplot_theme(dataframe, 
headers, diag, size, height,\n width, title, index, index_vals, endpts,\n colormap, colormap_type, **kwargs):\n \"\"\"\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked\n\n \"\"\"\n from plotly.graph_objs import graph_objs\n\n # Check if index is made of string values\n if isinstance(index_vals[0], str):\n unique_index_vals = []\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals.append(name)\n n_colors_len = len(unique_index_vals)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == 'seq':\n foo = FigureFactory._color_parser(\n colormap, FigureFactory._unlabel_rgb\n )\n foo = FigureFactory._n_colors(foo[0],\n foo[1],\n n_colors_len)\n theme = FigureFactory._color_parser(\n foo, FigureFactory._label_rgb\n )\n\n if colormap_type == 'cat':\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n c_indx += 1\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(\n 
trace_list[trace_index][name],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n if endpts:\n intervals = FigureFactory._endpts_to_intervals(endpts)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == 'seq':\n foo = FigureFactory._color_parser(\n colormap, FigureFactory._unlabel_rgb\n )\n foo = FigureFactory._n_colors(foo[0],\n foo[1],\n len(intervals))\n theme = FigureFactory._color_parser(\n foo, FigureFactory._label_rgb\n )\n\n if colormap_type == 'cat':\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n interval_labels = {}\n for interval in intervals:\n interval_labels[str(interval)] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for interval in intervals:\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if interval[0] < index_vals[j] <= interval[1]:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=True\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n (kwargs['marker']\n ['color']) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n showlegend=True,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=True,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(\n color=theme[c_indx]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n (kwargs['marker']\n ['color']) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode='markers',\n name=str(interval),\n marker=dict(\n size=size,\n color=theme[c_indx]),\n showlegend=False,\n **kwargs\n )\n # Push the trace into dictionary\n interval_labels[str(interval)] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n 
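# colour index cycles through the supplied theme: after the last colour is used it is reset (to -1 here, then incremented back to 0 on the next line) so the following interval re-uses theme[0]\n 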
c_indx += 1\n trace_list.append(interval_labels)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for interval in intervals:\n fig.append_trace(\n trace_list[trace_index][str(interval)],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n theme = colormap\n\n # add a copy of rgb color to theme if it contains one color\n if len(theme) <= 1:\n theme.append(theme[0])\n\n color = []\n for incr in range(len(theme)):\n color.append([1./(len(theme)-1)*incr, theme[incr]])\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim)\n trace_list = []\n legend_param = 0\n # Run through all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = index_vals\n kwargs['marker']['colorscale'] = color\n kwargs['marker']['showscale'] = True\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=True),\n showlegend=False,\n **kwargs\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == 'histogram'):\n trace = graph_objs.Histogram(\n x=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n elif (listx == listy) and (diag == 'box'):\n trace = graph_objs.Box(\n y=listx,\n marker=dict(\n color=theme[0]),\n showlegend=False\n )\n else:\n if 'marker' in kwargs:\n kwargs['marker']['size'] = size\n kwargs['marker']['color'] = index_vals\n kwargs['marker']['colorscale'] = color\n kwargs['marker']['showscale'] = False\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n showlegend=False,\n **kwargs\n )\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode='markers',\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=False),\n showlegend=False,\n **kwargs\n )\n # Push the trace into list\n trace_list.append(trace)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index],\n y_index,\n x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)\n fig['layout'][xaxis_key].update(title=headers[j])\n for j in range(dim):\n 
yaxis_key = 'yaxis{}'.format(1 + (dim * j))\n fig['layout'][yaxis_key].update(title=headers[j])\n\n if diag == 'histogram':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True,\n barmode='stack')\n return fig\n\n elif diag == 'box':\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n else:\n fig['layout'].update(\n height=height, width=width,\n title=title,\n showlegend=True)\n return fig\n\n @staticmethod\n def _validate_index(index_vals):\n \"\"\"\n Validates if a list contains all numbers or all strings\n\n :raises: (PlotlyError) If there are any two items in the list whose\n types differ\n \"\"\"\n from numbers import Number\n if isinstance(index_vals[0], Number):\n if not all(isinstance(item, Number) for item in index_vals):\n raise exceptions.PlotlyError(\"Error in indexing column. \"\n \"Make sure all entries of each \"\n \"column are all numbers or \"\n \"all strings.\")\n\n elif isinstance(index_vals[0], str):\n if not all(isinstance(item, str) for item in index_vals):\n raise exceptions.PlotlyError(\"Error in indexing column. \"\n \"Make sure all entries of each \"\n \"column are all numbers or \"\n \"all strings.\")\n\n @staticmethod\n def _validate_dataframe(array):\n \"\"\"\n Validates all strings or numbers in each dataframe column\n\n :raises: (PlotlyError) If there are any two items in any list whose\n types differ\n \"\"\"\n from numbers import Number\n for vector in array:\n if isinstance(vector[0], Number):\n if not all(isinstance(item, Number) for item in vector):\n raise exceptions.PlotlyError(\"Error in dataframe. \"\n \"Make sure all entries of \"\n \"each column are either \"\n \"numbers or strings.\")\n elif isinstance(vector[0], str):\n if not all(isinstance(item, str) for item in vector):\n raise exceptions.PlotlyError(\"Error in dataframe. \"\n \"Make sure all entries of \"\n \"each column are either \"\n \"numbers or strings.\")\n\n @staticmethod\n def _validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):\n \"\"\"\n Validates basic inputs for FigureFactory.create_scatterplotmatrix()\n\n :raises: (PlotlyError) If pandas is not imported\n :raises: (PlotlyError) If pandas dataframe is not inputted\n :raises: (PlotlyError) If pandas dataframe has <= 1 columns\n :raises: (PlotlyError) If diagonal plot choice (diag) is not one of\n the viable options\n :raises: (PlotlyError) If colormap_type is not a valid choice\n :raises: (PlotlyError) If kwargs contains 'size', 'color' or\n 'colorscale'\n \"\"\"\n if _pandas_imported is False:\n raise ImportError(\"FigureFactory.scatterplotmatrix requires \"\n \"a pandas DataFrame.\")\n\n # Check if pandas dataframe\n if not isinstance(df, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\"Dataframe not inputed. Please \"\n \"use a pandas dataframe to pro\"\n \"duce a scatterplot matrix.\")\n\n # Check if dataframe is 1 column or less\n if len(df.columns) <= 1:\n raise exceptions.PlotlyError(\"Dataframe has only one column. To \"\n \"use the scatterplot matrix, use at \"\n \"least 2 columns.\")\n\n # Check that diag parameter is a valid selection\n if diag not in DIAG_CHOICES:\n raise exceptions.PlotlyError(\"Make sure diag is set to \"\n \"one of {}\".format(DIAG_CHOICES))\n\n # Check that colormap_types is a valid selection\n if colormap_type not in VALID_COLORMAP_TYPES:\n raise exceptions.PlotlyError(\"Must choose a valid colormap type. 
\"\n \"Either 'cat' or 'seq' for a cate\"\n \"gorical and sequential colormap \"\n \"respectively.\")\n\n # Check for not 'size' or 'color' in 'marker' of **kwargs\n if 'marker' in kwargs:\n FORBIDDEN_PARAMS = ['size', 'color', 'colorscale']\n if any(param in kwargs['marker'] for param in FORBIDDEN_PARAMS):\n raise exceptions.PlotlyError(\"Your kwargs dictionary cannot \"\n \"include the 'size', 'color' or \"\n \"'colorscale' key words inside \"\n \"the marker dict since 'size' is \"\n \"already an argument of the \"\n \"scatterplot matrix function and \"\n \"both 'color' and 'colorscale \"\n \"are set internally.\")\n\n @staticmethod\n def _endpts_to_intervals(endpts):\n \"\"\"\n Returns a list of intervals for categorical colormaps\n\n Accepts a list or tuple of sequentially increasing numbers and returns\n a list representation of the mathematical intervals with these numbers\n as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]\n\n :raises: (PlotlyError) If input is not a list or tuple\n :raises: (PlotlyError) If the input contains a string\n :raises: (PlotlyError) If any number does not increase after the\n previous one in the sequence\n \"\"\"\n length = len(endpts)\n # Check if endpts is a list or tuple\n if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):\n raise exceptions.PlotlyError(\"The intervals_endpts argument must \"\n \"be a list or tuple of a sequence \"\n \"of increasing numbers.\")\n # Check if endpts contains only numbers\n for item in endpts:\n if isinstance(item, str):\n raise exceptions.PlotlyError(\"The intervals_endpts argument \"\n \"must be a list or tuple of a \"\n \"sequence of increasing \"\n \"numbers.\")\n # Check if numbers in endpts are increasing\n for k in range(length-1):\n if endpts[k] >= endpts[k+1]:\n raise exceptions.PlotlyError(\"The intervals_endpts argument \"\n \"must be a list or tuple of a \"\n \"sequence of increasing \"\n \"numbers.\")\n else:\n intervals = []\n # add -inf to intervals\n intervals.append([float('-inf'), endpts[0]])\n for k in range(length - 1):\n interval = []\n interval.append(endpts[k])\n interval.append(endpts[k + 1])\n intervals.append(interval)\n # add +inf to intervals\n intervals.append([endpts[length - 1], float('inf')])\n return intervals\n\n @staticmethod\n def _convert_to_RGB_255(colors):\n \"\"\"\n Multiplies each element of a triplet by 255\n\n Each coordinate of the color tuple is rounded to the nearest float and\n then is turned into an integer. If a number is of the form x.5, then\n if x is odd, the number rounds up to (x+1). Otherwise, it rounds down\n to just x. 
This is the way rounding works in Python 3 and in current\n statistical analysis to avoid rounding bias\n \"\"\"\n rgb_components = []\n\n for component in colors:\n rounded_num = decimal.Decimal(str(component*255.0)).quantize(\n decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN\n )\n # convert rounded number to an integer from 'Decimal' form\n rounded_num = int(rounded_num)\n rgb_components.append(rounded_num)\n\n return (rgb_components[0], rgb_components[1], rgb_components[2])\n\n @staticmethod\n def _n_colors(lowcolor, highcolor, n_colors):\n \"\"\"\n Splits a low and high color into a list of n_colors colors in it\n\n Accepts two color tuples and returns a list of n_colors colors\n which form the intermediate colors between lowcolor and highcolor\n from linearly interpolating through RGB space\n\n \"\"\"\n diff_0 = float(highcolor[0] - lowcolor[0])\n incr_0 = diff_0/(n_colors - 1)\n diff_1 = float(highcolor[1] - lowcolor[1])\n incr_1 = diff_1/(n_colors - 1)\n diff_2 = float(highcolor[2] - lowcolor[2])\n incr_2 = diff_2/(n_colors - 1)\n color_tuples = []\n\n for index in range(n_colors):\n new_tuple = (lowcolor[0] + (index * incr_0),\n lowcolor[1] + (index * incr_1),\n lowcolor[2] + (index * incr_2))\n color_tuples.append(new_tuple)\n\n return color_tuples\n\n @staticmethod\n def _label_rgb(colors):\n \"\"\"\n Takes tuple (a, b, c) and returns an rgb color 'rgb(a, b, c)'\n \"\"\"\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))\n\n @staticmethod\n def _unlabel_rgb(colors):\n \"\"\"\n Takes rgb color(s) 'rgb(a, b, c)' and returns tuple(s) (a, b, c)\n\n This function takes either an 'rgb(a, b, c)' color or a list of\n such colors and returns the color tuples in tuple(s) (a, b, c)\n\n \"\"\"\n str_vals = ''\n for index in range(len(colors)):\n try:\n float(colors[index])\n str_vals = str_vals + colors[index]\n except ValueError:\n if colors[index] == ',' or colors[index] == '.':\n str_vals = str_vals + colors[index]\n\n str_vals = str_vals + ','\n numbers = []\n str_num = ''\n for char in str_vals:\n if char != ',':\n str_num = str_num + char\n else:\n numbers.append(float(str_num))\n str_num = ''\n return (numbers[0], numbers[1], numbers[2])\n\n @staticmethod\n def create_scatterplotmatrix(df, index=None, endpts=None, diag='scatter',\n height=500, width=500, size=6,\n title='Scatterplot Matrix', colormap=None,\n colormap_type='cat', dataframe=None,\n headers=None, index_vals=None, **kwargs):\n \"\"\"\n Returns data for a scatterplot matrix.\n\n :param (array) df: array of the data with column headers\n :param (str) index: name of the index column in data array\n :param (list|tuple) endpts: takes an increasing sequece of numbers\n that defines intervals on the real line. They are used to group\n the entries in an index of numbers into their corresponding\n interval and therefore can be treated as categorical data\n :param (str) diag: sets the chart type for the main diagonal plots\n :param (int|float) height: sets the height of the chart\n :param (int|float) width: sets the width of the chart\n :param (float) size: sets the marker size (in px)\n :param (str) title: the title label of the scatterplot matrix\n :param (str|tuple|list|dict) colormap: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. 
An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colormap is a list, it must contain valid color types as its\n members.\n If colormap is a dictionary, all the string entries in\n the index column must be a key in colormap. In this case, the\n colormap_type is forced to 'cat' or categorical\n :param (str) colormap_type: determines how colormap is interpreted.\n Valid choices are 'seq' (sequential) and 'cat' (categorical). If\n 'seq' is selected, only the first two colors in colormap will be\n considered (when colormap is a list) and the index values will be\n linearly interpolated between those two colors. This option is\n forced if all index values are numeric.\n If 'cat' is selected, a color from colormap will be assigned to\n each category from index, including the intervals if endpts is\n being used\n :param (dict) **kwargs: a dictionary of scatterplot arguments\n The only forbidden parameters are 'size', 'color' and\n 'colorscale' in 'marker'\n\n Example 1: Vanilla Scatterplot Matrix\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe\n df = pd.DataFrame(np.random.randn(10, 2),\n columns=['Column 1', 'Column 2'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df)\n\n # Plot\n py.iplot(fig, filename='Vanilla Scatterplot Matrix')\n ```\n\n Example 2: Indexing a Column\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with index\n df = pd.DataFrame(np.random.randn(10, 2),\n columns=['A', 'B'])\n\n # Add another column of strings to the dataframe\n df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n 'grape', 'pear', 'pear', 'apple', 'pear'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df, index='Fruit', size=10)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix with Index')\n ```\n\n Example 3: Styling the Diagonal Subplots\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with index\n df = pd.DataFrame(np.random.randn(10, 4),\n columns=['A', 'B', 'C', 'D'])\n\n # Add another column of strings to the dataframe\n df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n 'grape', 'pear', 'pear', 'apple', 'pear'])\n\n # Create scatterplot matrix\n fig = FF.create_scatterplotmatrix(df, diag='box', index='Fruit',\n height=1000, width=1000)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Diagonal Styling')\n ```\n\n Example 4: Use a Theme to Style the Subplots\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['A', 'B', 'C'])\n\n # Create scatterplot matrix using a built-in\n # Plotly palette scale and indexing column 'A'\n fig = FF.create_scatterplotmatrix(df, diag='histogram',\n index='A', colormap='Blues',\n height=800, width=800)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Colormap Theme')\n ```\n\n Example 5: 
Example 4 with Interval Factoring\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['A', 'B', 'C'])\n\n # Create scatterplot matrix using a list of 2 rgb tuples\n # and endpoints at -1, 0 and 1\n fig = FF.create_scatterplotmatrix(df, diag='histogram', index='A',\n colormap=['rgb(140, 255, 50)',\n 'rgb(170, 60, 115)',\n '#6c4774',\n (0.5, 0.1, 0.8)],\n endpts=[-1, 0, 1],\n height=800, width=800)\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - Intervals')\n ```\n\n Example 6: Using the colormap as a Dictionary\n ```\n import plotly.plotly as py\n from plotly.graph_objs import graph_objs\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n import random\n\n # Create dataframe with random data\n df = pd.DataFrame(np.random.randn(100, 3),\n columns=['Column A',\n 'Column B',\n 'Column C'])\n\n # Add new color column to dataframe\n new_column = []\n strange_colors = ['turquoise', 'limegreen', 'goldenrod']\n\n for j in range(100):\n new_column.append(random.choice(strange_colors))\n df['Colors'] = pd.Series(new_column, index=df.index)\n\n # Create scatterplot matrix using a dictionary of hex color values\n # which correspond to actual color names in 'Colors' column\n fig = FF.create_scatterplotmatrix(\n df, diag='box', index='Colors',\n colormap= dict(\n turquoise = '#00F5FF',\n limegreen = '#32CD32',\n goldenrod = '#DAA520'\n ),\n colormap_type='cat',\n height=800, width=800\n )\n\n # Plot\n py.iplot(fig, filename = 'Scatterplot Matrix - colormap dictionary ')\n ```\n \"\"\"\n # TODO: protected until #282\n if dataframe is None:\n dataframe = []\n if headers is None:\n headers = []\n if index_vals is None:\n index_vals = []\n\n FigureFactory._validate_scatterplotmatrix(df, index, diag,\n colormap_type, **kwargs)\n\n # Validate colormap\n if isinstance(colormap, dict):\n colormap = FigureFactory._validate_colors_dict(colormap, 'rgb')\n else:\n colormap = FigureFactory._validate_colors(colormap, 'rgb')\n\n if not index:\n for name in df:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n # Check for same data-type in df columns\n FigureFactory._validate_dataframe(dataframe)\n figure = FigureFactory._scatterplot(dataframe, headers, diag,\n size, height, width, title,\n **kwargs)\n return figure\n else:\n # Validate index selection\n if index not in df:\n raise exceptions.PlotlyError(\"Make sure you set the index \"\n \"input variable to one of the \"\n \"column names of your \"\n \"dataframe.\")\n index_vals = df[index].values.tolist()\n for name in df:\n if name != index:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n\n # check for same data-type in each df column\n FigureFactory._validate_dataframe(dataframe)\n FigureFactory._validate_index(index_vals)\n\n # check if all colormap keys are in the index\n # if colormap is a dictionary\n if isinstance(colormap, dict):\n for key in colormap:\n if not all(index in colormap for index in index_vals):\n raise exceptions.PlotlyError(\"If colormap is a \"\n \"dictionary, all the \"\n \"names in the index \"\n \"must be keys.\")\n figure = FigureFactory._scatterplot_dict(\n dataframe, headers, diag, size, height, width, title,\n index, index_vals, endpts, colormap, colormap_type,\n **kwargs\n )\n return 
figure\n\n else:\n figure = FigureFactory._scatterplot_theme(\n dataframe, headers, diag, size, height, width, title,\n index, index_vals, endpts, colormap, colormap_type,\n **kwargs\n )\n return figure\n\n @staticmethod\n def _validate_equal_length(*args):\n \"\"\"\n Validates that data lists or ndarrays are the same length.\n\n :raises: (PlotlyError) If any data lists are not the same length.\n \"\"\"\n length = len(args[0])\n if any(len(lst) != length for lst in args):\n raise exceptions.PlotlyError(\"Oops! Your data lists or ndarrays \"\n \"should be the same length.\")\n\n @staticmethod\n def _validate_ohlc(open, high, low, close, direction, **kwargs):\n \"\"\"\n ohlc and candlestick specific validations\n\n Specifically, this checks that the high value is the greatest value and\n the low value is the lowest value in each unit.\n\n See FigureFactory.create_ohlc() or FigureFactory.create_candlestick()\n for params\n\n :raises: (PlotlyError) If the high value is not the greatest value in\n each unit.\n :raises: (PlotlyError) If the low value is not the lowest value in each\n unit.\n :raises: (PlotlyError) If direction is not 'increasing' or 'decreasing'\n \"\"\"\n for lst in [open, low, close]:\n for index in range(len(high)):\n if high[index] < lst[index]:\n raise exceptions.PlotlyError(\"Oops! Looks like some of \"\n \"your high values are less \"\n \"the corresponding open, \"\n \"low, or close values. \"\n \"Double check that your data \"\n \"is entered in O-H-L-C order\")\n\n for lst in [open, high, close]:\n for index in range(len(low)):\n if low[index] > lst[index]:\n raise exceptions.PlotlyError(\"Oops! Looks like some of \"\n \"your low values are greater \"\n \"than the corresponding high\"\n \", open, or close values. \"\n \"Double check that your data \"\n \"is entered in O-H-L-C order\")\n\n direction_opts = ('increasing', 'decreasing', 'both')\n if direction not in direction_opts:\n raise exceptions.PlotlyError(\"direction must be defined as \"\n \"'increasing', 'decreasing', or \"\n \"'both'\")\n\n @staticmethod\n def _validate_distplot(hist_data, curve_type):\n \"\"\"\n Distplot-specific validations\n\n :raises: (PlotlyError) If hist_data is not a list of lists\n :raises: (PlotlyError) If curve_type is not valid (i.e. not 'kde' or\n 'normal').\n \"\"\"\n try:\n import pandas as pd\n _pandas_imported = True\n except ImportError:\n _pandas_imported = False\n\n hist_data_types = (list,)\n if _numpy_imported:\n hist_data_types += (np.ndarray,)\n if _pandas_imported:\n hist_data_types += (pd.core.series.Series,)\n\n if not isinstance(hist_data[0], hist_data_types):\n raise exceptions.PlotlyError(\"Oops, this function was written \"\n \"to handle multiple datasets, if \"\n \"you want to plot just one, make \"\n \"sure your hist_data variable is \"\n \"still a list of lists, i.e. 
x = \"\n \"[1, 2, 3] -> x = [[1, 2, 3]]\")\n\n curve_opts = ('kde', 'normal')\n if curve_type not in curve_opts:\n raise exceptions.PlotlyError(\"curve_type must be defined as \"\n \"'kde' or 'normal'\")\n\n if _scipy_imported is False:\n raise ImportError(\"FigureFactory.create_distplot requires scipy\")\n\n @staticmethod\n def _validate_positive_scalars(**kwargs):\n \"\"\"\n Validates that all values given in key/val pairs are positive.\n\n Accepts kwargs to improve Exception messages.\n\n :raises: (PlotlyError) If any value is < 0 or raises.\n \"\"\"\n for key, val in kwargs.items():\n try:\n if val <= 0:\n raise ValueError('{} must be > 0, got {}'.format(key, val))\n except TypeError:\n raise exceptions.PlotlyError('{} must be a number, got {}'\n .format(key, val))\n\n @staticmethod\n def _validate_streamline(x, y):\n \"\"\"\n Streamline-specific validations\n\n Specifically, this checks that x and y are both evenly spaced,\n and that the package numpy is available.\n\n See FigureFactory.create_streamline() for params\n\n :raises: (ImportError) If numpy is not available.\n :raises: (PlotlyError) If x is not evenly spaced.\n :raises: (PlotlyError) If y is not evenly spaced.\n \"\"\"\n if _numpy_imported is False:\n raise ImportError(\"FigureFactory.create_streamline requires numpy\")\n for index in range(len(x) - 1):\n if ((x[index + 1] - x[index]) - (x[1] - x[0])) > .0001:\n raise exceptions.PlotlyError(\"x must be a 1 dimensional, \"\n \"evenly spaced array\")\n for index in range(len(y) - 1):\n if ((y[index + 1] - y[index]) -\n (y[1] - y[0])) > .0001:\n raise exceptions.PlotlyError(\"y must be a 1 dimensional, \"\n \"evenly spaced array\")\n\n @staticmethod\n def _validate_annotated_heatmap(z, x, y, annotation_text):\n \"\"\"\n Annotated-heatmap-specific validations\n\n Check that if a text matrix is supplied, it has the same\n dimensions as the z matrix.\n\n See FigureFactory.create_annotated_heatmap() for params\n\n :raises: (PlotlyError) If z and text matrices do not have the same\n dimensions.\n \"\"\"\n if annotation_text is not None and isinstance(annotation_text, list):\n FigureFactory._validate_equal_length(z, annotation_text)\n for lst in range(len(z)):\n if len(z[lst]) != len(annotation_text[lst]):\n raise exceptions.PlotlyError(\"z and text should have the \"\n \"same dimensions\")\n\n if x:\n if len(x) != len(z[0]):\n raise exceptions.PlotlyError(\"oops, the x list that you \"\n \"provided does not match the \"\n \"width of your z matrix \")\n\n if y:\n if len(y) != len(z):\n raise exceptions.PlotlyError(\"oops, the y list that you \"\n \"provided does not match the \"\n \"length of your z matrix \")\n\n @staticmethod\n def _validate_table(table_text, font_colors):\n \"\"\"\n Table-specific validations\n\n Check that font_colors is supplied correctly (1, 3, or len(text)\n colors).\n\n :raises: (PlotlyError) If font_colors is supplied incorretly.\n\n See FigureFactory.create_table() for params\n \"\"\"\n font_colors_len_options = [1, 3, len(table_text)]\n if len(font_colors) not in font_colors_len_options:\n raise exceptions.PlotlyError(\"Oops, font_colors should be a list \"\n \"of length 1, 3 or len(text)\")\n\n @staticmethod\n def _flatten(array):\n \"\"\"\n Uses list comprehension to flatten array\n\n :param (array): An iterable to flatten\n :raises (PlotlyError): If iterable is not nested.\n :rtype (list): The flattened list.\n \"\"\"\n try:\n return [item for sublist in array for item in sublist]\n except TypeError:\n raise exceptions.PlotlyError(\"Your data array 
could not be \"\n \"flattened! Make sure your data is \"\n \"entered as lists or ndarrays!\")\n\n @staticmethod\n def _hex_to_rgb(value):\n \"\"\"\n Calculates rgb values from a hex color code.\n\n :param (string) value: Hex color string\n\n :rtype (tuple) (r_value, g_value, b_value): tuple of rgb values\n \"\"\"\n value = value.lstrip('#')\n hex_total_length = len(value)\n rgb_section_length = hex_total_length // 3\n return tuple(int(value[i:i + rgb_section_length], 16)\n for i in range(0, hex_total_length, rgb_section_length))\n\n @staticmethod\n def create_quiver(x, y, u, v, scale=.1, arrow_scale=.3,\n angle=math.pi / 9, **kwargs):\n \"\"\"\n Returns data for a quiver plot.\n\n :param (list|ndarray) x: x coordinates of the arrow locations\n :param (list|ndarray) y: y coordinates of the arrow locations\n :param (list|ndarray) u: x components of the arrow vectors\n :param (list|ndarray) v: y components of the arrow vectors\n :param (float in [0,1]) scale: scales size of the arrows(ideally to\n avoid overlap). Default = .1\n :param (float in [0,1]) arrow_scale: value multiplied to length of barb\n to get length of arrowhead. Default = .3\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of quiver figure.\n\n Example 1: Trivial Quiver\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import math\n\n # 1 Arrow from (0,0) to (1,1)\n fig = FF.create_quiver(x=[0], y=[0],\n u=[1], v=[1],\n scale=1)\n\n py.plot(fig, filename='quiver')\n ```\n\n Example 2: Quiver plot using meshgrid\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))\n u = np.cos(x)*y\n v = np.sin(x)*y\n\n #Create quiver\n fig = FF.create_quiver(x, y, u, v)\n\n # Plot\n py.plot(fig, filename='quiver')\n ```\n\n Example 3: Styling the quiver plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n import math\n\n # Add data\n x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),\n np.arange(-math.pi, math.pi, .5))\n u = np.cos(x)*y\n v = np.sin(x)*y\n\n # Create quiver\n fig = FF.create_quiver(x, y, u, v, scale=.2,\n arrow_scale=.3,\n angle=math.pi/6,\n name='Wind Velocity',\n line=Line(width=1))\n\n # Add title to layout\n fig['layout'].update(title='Quiver Plot')\n\n # Plot\n py.plot(fig, filename='quiver')\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_equal_length(x, y, u, v)\n FigureFactory._validate_positive_scalars(arrow_scale=arrow_scale,\n scale=scale)\n\n barb_x, barb_y = _Quiver(x, y, u, v, scale,\n arrow_scale, angle).get_barbs()\n arrow_x, arrow_y = _Quiver(x, y, u, v, scale,\n arrow_scale, angle).get_quiver_arrows()\n quiver = graph_objs.Scatter(x=barb_x + arrow_x,\n y=barb_y + arrow_y,\n mode='lines', **kwargs)\n\n data = [quiver]\n layout = graph_objs.Layout(hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_streamline(x, y, u, v,\n density=1, angle=math.pi / 9,\n arrow_scale=.09, **kwargs):\n \"\"\"\n Returns data for a streamline plot.\n\n :param (list|ndarray) x: 1 dimensional, evenly spaced list or array\n :param (list|ndarray) y: 1 dimensional, evenly spaced 
list or array\n :param (ndarray) u: 2 dimensional array\n :param (ndarray) v: 2 dimensional array\n :param (float|int) density: controls the density of streamlines in\n plot. This is multiplied by 30 to scale similiarly to other\n available streamline functions such as matplotlib.\n Default = 1\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of streamline figure.\n\n Example 1: Plot simple streamline and increase arrow size\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n x = np.linspace(-3, 3, 100)\n y = np.linspace(-3, 3, 100)\n Y, X = np.meshgrid(x, y)\n u = -1 - X**2 + Y\n v = 1 + X - Y**2\n u = u.T # Transpose\n v = v.T # Transpose\n\n # Create streamline\n fig = FF.create_streamline(x, y, u, v,\n arrow_scale=.1)\n\n # Plot\n py.plot(fig, filename='streamline')\n ```\n\n Example 2: from nbviewer.ipython.org/github/barbagroup/AeroPython\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import math\n\n # Add data\n N = 50\n x_start, x_end = -2.0, 2.0\n y_start, y_end = -1.0, 1.0\n x = np.linspace(x_start, x_end, N)\n y = np.linspace(y_start, y_end, N)\n X, Y = np.meshgrid(x, y)\n ss = 5.0\n x_s, y_s = -1.0, 0.0\n\n # Compute the velocity field on the mesh grid\n u_s = ss/(2*np.pi) * (X-x_s)/((X-x_s)**2 + (Y-y_s)**2)\n v_s = ss/(2*np.pi) * (Y-y_s)/((X-x_s)**2 + (Y-y_s)**2)\n\n # Create streamline\n fig = FF.create_streamline(x, y, u_s, v_s,\n density=2, name='streamline')\n\n # Add source point\n point = Scatter(x=[x_s], y=[y_s], mode='markers',\n marker=Marker(size=14), name='source point')\n\n # Plot\n fig['data'].append(point)\n py.plot(fig, filename='streamline')\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_equal_length(x, y)\n FigureFactory._validate_equal_length(u, v)\n FigureFactory._validate_streamline(x, y)\n FigureFactory._validate_positive_scalars(density=density,\n arrow_scale=arrow_scale)\n\n streamline_x, streamline_y = _Streamline(x, y, u, v,\n density, angle,\n arrow_scale).sum_streamlines()\n arrow_x, arrow_y = _Streamline(x, y, u, v,\n density, angle,\n arrow_scale).get_streamline_arrows()\n\n streamline = graph_objs.Scatter(x=streamline_x + arrow_x,\n y=streamline_y + arrow_y,\n mode='lines', **kwargs)\n\n data = [streamline]\n layout = graph_objs.Layout(hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def _make_increasing_ohlc(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes increasing ohlc sticks\n\n _make_increasing_ohlc() and _make_decreasing_ohlc separate the\n increasing trace from the decreasing trace so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_incr_data: Scatter trace of all increasing ohlc\n sticks.\n \"\"\"\n (flat_increase_x,\n flat_increase_y,\n text_increase) = _OHLC(open, high, low, close, dates).get_increase()\n\n if 'name' in kwargs:\n showlegend = True\n else:\n kwargs.setdefault('name', 'Increasing')\n showlegend = False\n\n kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR,\n width=1))\n kwargs.setdefault('text', text_increase)\n\n ohlc_incr = dict(type='scatter',\n x=flat_increase_x,\n y=flat_increase_y,\n mode='lines',\n showlegend=showlegend,\n **kwargs)\n return ohlc_incr\n\n @staticmethod\n def _make_decreasing_ohlc(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes decreasing ohlc sticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_decr_data: Scatter trace of all decreasing ohlc\n sticks.\n \"\"\"\n (flat_decrease_x,\n flat_decrease_y,\n text_decrease) = _OHLC(open, high, low, close, dates).get_decrease()\n\n kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR,\n width=1))\n kwargs.setdefault('text', text_decrease)\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('name', 'Decreasing')\n\n ohlc_decr = dict(type='scatter',\n x=flat_decrease_x,\n y=flat_decrease_y,\n mode='lines',\n **kwargs)\n return ohlc_decr\n\n @staticmethod\n def create_ohlc(open, high, low, close,\n dates=None, direction='both',\n **kwargs):\n \"\"\"\n BETA function that creates an ohlc chart\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all units where the close value is greater than the\n corresponding open value, and when the direction is 'decreasing',\n the returned figure consists of all units where the close value is\n less than or equal to the corresponding open value. When the\n direction is 'both', both increasing and decreasing units are\n returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of an ohlc chart figure.\n\n Example 1: Simple OHLC chart from a Pandas DataFrame\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))\n fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)\n\n py.plot(fig, filename='finance/aapl-ohlc')\n ```\n\n Example 2: Add text and annotations to the OHLC chart\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))\n fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)\n\n # Update the fig - all options here: https://plot.ly/python/reference/#Layout\n fig['layout'].update({\n 'title': 'The Great Recession',\n 'yaxis': {'title': 'AAPL Stock'},\n 'shapes': [{\n 'x0': '2008-09-15', 'x1': '2008-09-15', 'type': 'line',\n 'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',\n 'line': {'color': 'rgb(40,40,40)', 'width': 0.5}\n }],\n 'annotations': [{\n 'text': \"the fall of Lehman Brothers\",\n 'x': '2008-09-15', 'y': 1.02,\n 'xref': 'x', 'yref': 'paper',\n 'showarrow': False, 'xanchor': 'left'\n }]\n })\n\n py.plot(fig, filename='finance/aapl-recession-ohlc', validate=False)\n ```\n\n Example 3: Customize the OHLC colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import Line, Marker\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))\n\n # Make increasing ohlc sticks and customize their color and name\n fig_increasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='increasing', name='AAPL',\n line=Line(color='rgb(150, 200, 250)'))\n\n # Make decreasing ohlc sticks and customize their color and name\n fig_decreasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='decreasing',\n line=Line(color='rgb(128, 128, 128)'))\n\n # Initialize the figure\n fig = fig_increasing\n\n # Add decreasing data with .extend()\n fig['data'].extend(fig_decreasing['data'])\n\n py.iplot(fig, filename='finance/aapl-ohlc-colors', validate=False)\n ```\n\n Example 4: OHLC chart with datetime objects\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n from datetime import datetime\n\n # Add data\n open_data = [33.0, 33.3, 33.5, 33.0, 34.1]\n high_data = [33.1, 33.3, 33.6, 33.2, 34.8]\n low_data = [32.7, 32.7, 32.8, 32.6, 32.8]\n close_data = [33.0, 32.9, 33.3, 33.1, 33.1]\n dates = [datetime(year=2013, month=10, day=10),\n datetime(year=2013, month=11, day=10),\n datetime(year=2013, month=12, day=10),\n datetime(year=2014, month=1, day=10),\n datetime(year=2014, month=2, day=10)]\n\n # Create ohlc\n fig = FF.create_ohlc(open_data, high_data,\n low_data, close_data, dates=dates)\n\n py.iplot(fig, filename='finance/simple-ohlc', validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n if dates is not None:\n FigureFactory._validate_equal_length(open, high, low, close, dates)\n else:\n FigureFactory._validate_equal_length(open, high, low, close)\n 
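# Illustrative sketch (hypothetical single-bar values) of what the validator below enforces: it raises a PlotlyError whenever a high value is not the largest, or a low value is not the smallest, in its O-H-L-C unit, e.g.\n # FigureFactory._validate_ohlc([33.0], [33.2], [32.9], [33.1], 'both') # passes\n # FigureFactory._validate_ohlc([33.0], [32.0], [32.9], [33.1], 'both') # raises PlotlyError\n 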
FigureFactory._validate_ohlc(open, high, low, close, direction,\n **kwargs)\n\n if direction == 'increasing':\n ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_incr]\n elif direction == 'decreasing':\n ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_decr]\n else:\n ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,\n low, close,\n dates, **kwargs)\n data = [ohlc_incr, ohlc_decr]\n\n layout = graph_objs.Layout(xaxis=dict(zeroline=False),\n hovermode='closest')\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def _make_increasing_candle(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes boxplot trace for increasing candlesticks\n\n _make_increasing_candle() and _make_decreasing_candle separate the\n increasing traces from the decreasing traces so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_incr_data: list of the box trace for\n increasing candlesticks.\n \"\"\"\n increase_x, increase_y = _Candlestick(\n open, high, low, close, dates, **kwargs).get_candle_increase()\n\n if 'line' in kwargs:\n kwargs.setdefault('fillcolor', kwargs['line']['color'])\n else:\n kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)\n if 'name' in kwargs:\n kwargs.setdefault('showlegend', True)\n else:\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('name', 'Increasing')\n kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))\n\n candle_incr_data = dict(type='box',\n x=increase_x,\n y=increase_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs)\n\n return [candle_incr_data]\n\n @staticmethod\n def _make_decreasing_candle(open, high, low, close, dates, **kwargs):\n \"\"\"\n Makes boxplot trace for decreasing candlesticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to decreasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_decr_data: list of the box trace for\n decreasing candlesticks.\n \"\"\"\n\n decrease_x, decrease_y = _Candlestick(\n open, high, low, close, dates, **kwargs).get_candle_decrease()\n\n if 'line' in kwargs:\n kwargs.setdefault('fillcolor', kwargs['line']['color'])\n else:\n kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)\n kwargs.setdefault('showlegend', False)\n kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))\n kwargs.setdefault('name', 'Decreasing')\n\n candle_decr_data = dict(type='box',\n x=decrease_x,\n y=decrease_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs)\n\n return [candle_decr_data]\n\n @staticmethod\n def create_candlestick(open, high, low, close,\n dates=None, direction='both', **kwargs):\n \"\"\"\n BETA function that creates a candlestick chart\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all candlesticks where the close value is greater than\n the corresponding open value, and when the direction is\n 'decreasing', the returned figure consists of all candlesticks\n where the close value is less than or equal to the corresponding\n open value. When the direction is 'both', both increasing and\n decreasing candlesticks are returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of candlestick chart figure.\n\n Example 1: Simple candlestick chart from a Pandas DataFrame\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2007, 10, 1), datetime(2009, 4, 1))\n fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)\n py.plot(fig, filename='finance/aapl-candlestick', validate=False)\n ```\n\n Example 2: Add text and annotations to the candlestick chart\n ```\n fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)\n # Update the fig - all options here: https://plot.ly/python/reference/#Layout\n fig['layout'].update({\n 'title': 'The Great Recession',\n 'yaxis': {'title': 'AAPL Stock'},\n 'shapes': [{\n 'x0': '2007-12-01', 'x1': '2007-12-01',\n 'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',\n 'line': {'color': 'rgb(30,30,30)', 'width': 1}\n }],\n 'annotations': [{\n 'x': '2007-12-01', 'y': 0.05, 'xref': 'x', 'yref': 'paper',\n 'showarrow': False, 'xanchor': 'left',\n 'text': 'Official start of the recession'\n }]\n })\n py.plot(fig, filename='finance/aapl-recession-candlestick', validate=False)\n ```\n\n Example 3: Customize the candlestick colors\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n from plotly.graph_objs import Line, Marker\n from datetime import datetime\n\n import pandas.io.data as web\n\n df = web.DataReader(\"aapl\", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))\n\n # Make increasing candlesticks and customize their color and name\n fig_increasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='increasing', name='AAPL',\n marker=Marker(color='rgb(150, 200, 250)'),\n line=Line(color='rgb(150, 200, 250)'))\n\n # Make decreasing candlesticks and customize their color and name\n fig_decreasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,\n direction='decreasing',\n marker=Marker(color='rgb(128, 128, 128)'),\n line=Line(color='rgb(128, 128, 128)'))\n\n # Initialize the figure\n fig = fig_increasing\n\n # Add decreasing data with .extend()\n fig['data'].extend(fig_decreasing['data'])\n\n py.iplot(fig, filename='finance/aapl-candlestick-custom', validate=False)\n ```\n\n Example 4: Candlestick chart with datetime objects\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n from datetime import datetime\n\n # Add data\n open_data = [33.0, 33.3, 33.5, 33.0, 34.1]\n high_data = [33.1, 33.3, 33.6, 33.2, 34.8]\n low_data = [32.7, 32.7, 32.8, 32.6, 32.8]\n close_data = [33.0, 32.9, 33.3, 33.1, 33.1]\n dates = [datetime(year=2013, month=10, day=10),\n datetime(year=2013, month=11, day=10),\n datetime(year=2013, month=12, day=10),\n datetime(year=2014, month=1, day=10),\n datetime(year=2014, month=2, day=10)]\n\n # Create ohlc\n fig = FF.create_candlestick(open_data, high_data,\n low_data, close_data, dates=dates)\n\n py.iplot(fig, filename='finance/simple-candlestick', validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n if dates is not None:\n FigureFactory._validate_equal_length(open, high, low, close, dates)\n else:\n FigureFactory._validate_equal_length(open, high, low, close)\n FigureFactory._validate_ohlc(open, high, low, close, direction,\n 
**kwargs)\n\n if direction is 'increasing':\n candle_incr_data = FigureFactory._make_increasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_incr_data\n elif direction is 'decreasing':\n candle_decr_data = FigureFactory._make_decreasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_decr_data\n else:\n candle_incr_data = FigureFactory._make_increasing_candle(\n open, high, low, close, dates, **kwargs)\n candle_decr_data = FigureFactory._make_decreasing_candle(\n open, high, low, close, dates, **kwargs)\n data = candle_incr_data + candle_decr_data\n\n layout = graph_objs.Layout()\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_distplot(hist_data, group_labels,\n bin_size=1., curve_type='kde',\n colors=[], rug_text=[], histnorm=DEFAULT_HISTNORM,\n show_hist=True, show_curve=True,\n show_rug=True):\n \"\"\"\n BETA function that creates a distplot similar to seaborn.distplot\n\n The distplot can be composed of all or any combination of the following\n 3 components: (1) histogram, (2) curve: (a) kernel density estimation\n or (b) normal curve, and (3) rug plot. Additionally, multiple distplots\n (from multiple datasets) can be created in the same plot.\n\n :param (list[list]) hist_data: Use list of lists to plot multiple data\n sets on the same plot.\n :param (list[str]) group_labels: Names for each data set.\n :param (list[float]|float) bin_size: Size of histogram bins.\n Default = 1.\n :param (str) curve_type: 'kde' or 'normal'. Default = 'kde'\n :param (str) histnorm: 'probability density' or 'probability'\n Default = 'probability density'\n :param (bool) show_hist: Add histogram to distplot? Default = True\n :param (bool) show_curve: Add curve to distplot? Default = True\n :param (bool) show_rug: Add rug to distplot? 
Default = True\n :param (list[str]) colors: Colors for traces.\n :param (list[list]) rug_text: Hovertext values for rug_plot,\n :return (dict): Representation of a distplot figure.\n\n Example 1: Simple distplot of 1 data set\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,\n 3.5, 4.1, 4.4, 4.5, 4.5,\n 5.0, 5.0, 5.2, 5.5, 5.5,\n 5.5, 5.5, 5.5, 6.1, 7.0]]\n\n group_labels = ['distplot example']\n\n fig = FF.create_distplot(hist_data, group_labels)\n\n url = py.plot(fig, filename='Simple distplot', validate=False)\n ```\n\n Example 2: Two data sets and added rug text\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n # Add histogram data\n hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,\n -0.9, -0.07, 1.95, 0.9, -0.2,\n -0.5, 0.3, 0.4, -0.37, 0.6]\n hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,\n 1.0, 0.8, 1.7, 0.5, 0.8,\n -0.3, 1.2, 0.56, 0.3, 2.2]\n\n # Group data together\n hist_data = [hist1_x, hist2_x]\n\n group_labels = ['2012', '2013']\n\n # Add text\n rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',\n 'f1', 'g1', 'h1', 'i1', 'j1',\n 'k1', 'l1', 'm1', 'n1', 'o1']\n\n rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',\n 'f2', 'g2', 'h2', 'i2', 'j2',\n 'k2', 'l2', 'm2', 'n2', 'o2']\n\n # Group text together\n rug_text_all = [rug_text_1, rug_text_2]\n\n # Create distplot\n fig = FF.create_distplot(\n hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)\n\n # Add title\n fig['layout'].update(title='Dist Plot')\n\n # Plot!\n url = py.plot(fig, filename='Distplot with rug text', validate=False)\n ```\n\n Example 3: Plot with normal curve and hide rug plot\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n\n x1 = np.random.randn(190)\n x2 = np.random.randn(200)+1\n x3 = np.random.randn(200)-1\n x4 = np.random.randn(210)+2\n\n hist_data = [x1, x2, x3, x4]\n group_labels = ['2012', '2013', '2014', '2015']\n\n fig = FF.create_distplot(\n hist_data, group_labels, curve_type='normal',\n show_rug=False, bin_size=.4)\n\n url = py.plot(fig, filename='hist and normal curve', validate=False)\n\n Example 4: Distplot with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n import numpy as np\n import pandas as pd\n\n df = pd.DataFrame({'2012': np.random.randn(200),\n '2013': np.random.randn(200)+1})\n py.iplot(FF.create_distplot([df[c] for c in df.columns], df.columns),\n filename='examples/distplot with pandas',\n validate=False)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n FigureFactory._validate_distplot(hist_data, curve_type)\n FigureFactory._validate_equal_length(hist_data, group_labels)\n\n if isinstance(bin_size, (float, int)):\n bin_size = [bin_size]*len(hist_data)\n\n hist = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_hist()\n\n if curve_type == 'normal':\n curve = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_normal()\n else:\n curve = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_kde()\n\n rug = _Distplot(\n hist_data, histnorm, group_labels, bin_size,\n curve_type, colors, rug_text,\n show_hist, show_curve).make_rug()\n\n data = []\n if show_hist:\n data.append(hist)\n if show_curve:\n data.append(curve)\n if show_rug:\n data.append(rug)\n layout = 
graph_objs.Layout(\n barmode='overlay',\n hovermode='closest',\n legend=dict(traceorder='reversed'),\n xaxis1=dict(domain=[0.0, 1.0],\n anchor='y2',\n zeroline=False),\n yaxis1=dict(domain=[0.35, 1],\n anchor='free',\n position=0.0),\n yaxis2=dict(domain=[0, 0.25],\n anchor='x1',\n dtick=1,\n showticklabels=False))\n else:\n layout = graph_objs.Layout(\n barmode='overlay',\n hovermode='closest',\n legend=dict(traceorder='reversed'),\n xaxis1=dict(domain=[0.0, 1.0],\n anchor='y2',\n zeroline=False),\n yaxis1=dict(domain=[0., 1],\n anchor='free',\n position=0.0))\n\n data = sum(data, [])\n return graph_objs.Figure(data=data, layout=layout)\n\n\n @staticmethod\n def create_dendrogram(X, orientation=\"bottom\", labels=None,\n colorscale=None):\n \"\"\"\n BETA function that returns a dendrogram Plotly figure object.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (str) orientation: 'top', 'right', 'bottom', or 'left'\n :param (list) labels: List of axis category labels(observation labels)\n :param (list) colorscale: Optional colorscale for dendrogram tree\n clusters\n\n Example 1: Simple bottom oriented dendrogram\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n X = np.random.rand(10,10)\n dendro = FF.create_dendrogram(X)\n plot_url = py.plot(dendro, filename='simple-dendrogram')\n\n ```\n\n Example 2: Dendrogram to put on the left of the heatmap\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n\n X = np.random.rand(5,5)\n names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']\n dendro = FF.create_dendrogram(X, orientation='right', labels=names)\n dendro['layout'].update({'width':700, 'height':500})\n\n py.iplot(dendro, filename='vertical-dendrogram')\n ```\n\n Example 3: Dendrogram with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import numpy as np\n import pandas as pd\n\n Index= ['A','B','C','D','E','F','G','H','I','J']\n df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)\n fig = FF.create_dendrogram(df, labels=Index)\n url = py.plot(fig, filename='pandas-dendrogram')\n ```\n \"\"\"\n dependencies = (_scipy_imported and _scipy__spatial_imported and\n _scipy__cluster__hierarchy_imported)\n\n if dependencies is False:\n raise ImportError(\"FigureFactory.create_dendrogram requires scipy, \\\n scipy.spatial and scipy.hierarchy\")\n\n s = X.shape\n if len(s) != 2:\n exceptions.PlotlyError(\"X should be 2-dimensional array.\")\n\n dendrogram = _Dendrogram(X, orientation, labels, colorscale)\n\n return {'layout': dendrogram.layout,\n 'data': dendrogram.data}\n\n @staticmethod\n def create_annotated_heatmap(z, x=None, y=None, annotation_text=None,\n colorscale='RdBu', font_colors=None,\n showscale=False, reversescale=False,\n **kwargs):\n \"\"\"\n BETA function that creates annotated heatmaps\n\n This function adds annotations to each cell of the heatmap.\n\n :param (list[list]|ndarray) z: z matrix to create heatmap.\n :param (list) x: x axis labels.\n :param (list) y: y axis labels.\n :param (list[list]|ndarray) annotation_text: Text strings for\n annotations. Should have the same dimensions as the z matrix. If no\n text is added, the values of the z matrix are annotated. 
Default =\n z matrix values.\n :param (list|str) colorscale: heatmap colorscale.\n :param (list) font_colors: List of two color strings: [min_text_color,\n max_text_color] where min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. If font_colors is not\n defined, the colors are defined logically as black or white\n depending on the heatmap's colorscale.\n :param (bool) showscale: Display colorscale. Default = False\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple annotated heatmap with default configuration\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n z = [[0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.4300],\n [0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.00000]]\n\n figure = FF.create_annotated_heatmap(z)\n py.iplot(figure)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Avoiding mutables in the call signature\n font_colors = font_colors if font_colors is not None else []\n FigureFactory._validate_annotated_heatmap(z, x, y, annotation_text)\n annotations = _AnnotatedHeatmap(z, x, y, annotation_text,\n colorscale, font_colors, reversescale,\n **kwargs).make_annotations()\n\n if x or y:\n trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,\n showscale=showscale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', dtick=1, side='top',\n gridcolor='rgb(0, 0, 0)'),\n yaxis=dict(ticks='', dtick=1, ticksuffix=' '))\n else:\n trace = dict(type='heatmap', z=z, colorscale=colorscale,\n showscale=showscale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', side='top',\n gridcolor='rgb(0, 0, 0)',\n showticklabels=False),\n yaxis=dict(ticks='', ticksuffix=' ',\n showticklabels=False))\n\n data = [trace]\n\n return graph_objs.Figure(data=data, layout=layout)\n\n @staticmethod\n def create_table(table_text, colorscale=None, font_colors=None,\n index=False, index_title='', annotation_offset=.45,\n height_constant=30, hoverinfo='none', **kwargs):\n \"\"\"\n BETA function that creates data tables\n\n :param (pandas.Dataframe | list[list]) text: data for table.\n :param (str|list[list]) colorscale: Colorscale for table where the\n color at value 0 is the header color, .5 is the first table color\n and 1 is the second table color. (Set .5 and 1 to avoid the striped\n table effect). Default=[[0, '#66b2ff'], [.5, '#d9d9d9'],\n [1, '#ffffff']]\n :param (list) font_colors: Color for fonts in table. Can be a single\n color, three colors, or a color for each row in the table.\n Default=['#000000'] (black text for the entire table)\n :param (int) height_constant: Constant multiplied by # of rows to\n create table height. Default=30.\n :param (bool) index: Create (header-colored) index column index from\n Pandas dataframe or list[0] for each list in text. Default=False.\n :param (string) index_title: Title for index column. Default=''.\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. 
For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple Plotly Table\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n text = [['Country', 'Year', 'Population'],\n ['US', 2000, 282200000],\n ['Canada', 2000, 27790000],\n ['US', 2010, 309000000],\n ['Canada', 2010, 34000000]]\n\n table = FF.create_table(text)\n py.iplot(table)\n ```\n\n Example 2: Table with Custom Coloring\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n text = [['Country', 'Year', 'Population'],\n ['US', 2000, 282200000],\n ['Canada', 2000, 27790000],\n ['US', 2010, 309000000],\n ['Canada', 2010, 34000000]]\n\n table = FF.create_table(text,\n colorscale=[[0, '#000000'],\n [.5, '#80beff'],\n [1, '#cce5ff']],\n font_colors=['#ffffff', '#000000',\n '#000000'])\n py.iplot(table)\n ```\n Example 3: Simple Plotly Table with Pandas\n ```\n import plotly.plotly as py\n from plotly.tools import FigureFactory as FF\n\n import pandas as pd\n\n df = pd.read_csv('http://www.stat.ubc.ca/~jenny/notOcto/STAT545A/examples/gapminder/data/gapminderDataFiveYear.txt', sep='\\t')\n df_p = df[0:25]\n\n table_simple = FF.create_table(df_p)\n py.iplot(table_simple)\n ```\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n # Avoiding mutables in the call signature\n colorscale = \\\n colorscale if colorscale is not None else [[0, '#00083e'],\n [.5, '#ededee'],\n [1, '#ffffff']]\n font_colors = font_colors if font_colors is not None else ['#ffffff',\n '#000000',\n '#000000']\n\n FigureFactory._validate_table(table_text, font_colors)\n table_matrix = _Table(table_text, colorscale, font_colors, index,\n index_title, annotation_offset,\n **kwargs).get_table_matrix()\n annotations = _Table(table_text, colorscale, font_colors, index,\n index_title, annotation_offset,\n **kwargs).make_table_annotations()\n\n trace = dict(type='heatmap', z=table_matrix, opacity=.75,\n colorscale=colorscale, showscale=False,\n hoverinfo=hoverinfo, **kwargs)\n\n data = [trace]\n layout = dict(annotations=annotations,\n height=len(table_matrix)*height_constant + 50,\n margin=dict(t=0, b=0, r=0, l=0),\n yaxis=dict(autorange='reversed', zeroline=False,\n gridwidth=2, ticks='', dtick=1, tick0=.5,\n showticklabels=False),\n xaxis=dict(zeroline=False, gridwidth=2, ticks='',\n dtick=1, tick0=-0.5, showticklabels=False))\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Quiver(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_quiver() for docstring\n \"\"\"\n def __init__(self, x, y, u, v,\n scale, arrow_scale, angle, **kwargs):\n try:\n x = FigureFactory._flatten(x)\n except exceptions.PlotlyError:\n pass\n\n try:\n y = FigureFactory._flatten(y)\n except exceptions.PlotlyError:\n pass\n\n try:\n u = FigureFactory._flatten(u)\n except exceptions.PlotlyError:\n pass\n\n try:\n v = FigureFactory._flatten(v)\n except exceptions.PlotlyError:\n pass\n\n self.x = x\n self.y = y\n self.u = u\n self.v = v\n self.scale = scale\n self.arrow_scale = arrow_scale\n self.angle = angle\n self.end_x = []\n self.end_y = []\n self.scale_uv()\n barb_x, barb_y = self.get_barbs()\n arrow_x, arrow_y = self.get_quiver_arrows()\n\n def scale_uv(self):\n \"\"\"\n Scales u and v to avoid overlap of the arrows.\n\n u and v are added to x and y to get the\n endpoints of the arrows so a smaller scale value will\n result in less overlap of arrows.\n \"\"\"\n self.u = [i * self.scale for i in self.u]\n self.v = [i * self.scale for i in 
self.v]\n\n def get_barbs(self):\n \"\"\"\n Creates x and y startpoint and endpoint pairs\n\n After finding the endpoint of each barb this zips startpoint and\n endpoint pairs to create 2 lists: x_values for barbs and y values\n for barbs\n\n :rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint\n x_value pairs separated by a None to create the barb of the arrow,\n and list of startpoint and endpoint y_value pairs separated by a\n None to create the barb of the arrow.\n \"\"\"\n self.end_x = [i + j for i, j in zip(self.x, self.u)]\n self.end_y = [i + j for i, j in zip(self.y, self.v)]\n empty = [None] * len(self.x)\n barb_x = FigureFactory._flatten(zip(self.x, self.end_x, empty))\n barb_y = FigureFactory._flatten(zip(self.y, self.end_y, empty))\n return barb_x, barb_y\n\n def get_quiver_arrows(self):\n \"\"\"\n Creates lists of x and y values to plot the arrows\n\n Gets length of each barb then calculates the length of each side of\n the arrow. Gets angle of barb and applies angle to each side of the\n arrowhead. Next uses arrow_scale to scale the length of arrowhead and\n creates x and y values for arrowhead point1 and point2. Finally x and y\n values for point1, endpoint and point2s for each arrowhead are\n separated by a None and zipped to create lists of x and y values for\n the arrows.\n\n :rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint, point2\n x_values separated by a None to create the arrowhead and list of\n point1, endpoint, point2 y_values separated by a None to create\n the barb of the arrow.\n \"\"\"\n dif_x = [i - j for i, j in zip(self.end_x, self.x)]\n dif_y = [i - j for i, j in zip(self.end_y, self.y)]\n\n # Get barb lengths(default arrow length = 30% barb length)\n barb_len = [None] * len(self.x)\n for index in range(len(barb_len)):\n barb_len[index] = math.hypot(dif_x[index], dif_y[index])\n\n # Make arrow lengths\n arrow_len = [None] * len(self.x)\n arrow_len = [i * self.arrow_scale for i in barb_len]\n\n # Get barb angles\n barb_ang = [None] * len(self.x)\n for index in range(len(barb_ang)):\n barb_ang[index] = math.atan2(dif_y[index], dif_x[index])\n\n # Set angles to create arrow\n ang1 = [i + self.angle for i in barb_ang]\n ang2 = [i - self.angle for i in barb_ang]\n\n cos_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n cos_ang1[index] = math.cos(ang1[index])\n seg1_x = [i * j for i, j in zip(arrow_len, cos_ang1)]\n\n sin_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n sin_ang1[index] = math.sin(ang1[index])\n seg1_y = [i * j for i, j in zip(arrow_len, sin_ang1)]\n\n cos_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n cos_ang2[index] = math.cos(ang2[index])\n seg2_x = [i * j for i, j in zip(arrow_len, cos_ang2)]\n\n sin_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n sin_ang2[index] = math.sin(ang2[index])\n seg2_y = [i * j for i, j in zip(arrow_len, sin_ang2)]\n\n # Set coordinates to create arrow\n for index in range(len(self.end_x)):\n point1_x = [i - j for i, j in zip(self.end_x, seg1_x)]\n point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]\n point2_x = [i - j for i, j in zip(self.end_x, seg2_x)]\n point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]\n\n # Combine lists to create arrow\n empty = [None] * len(self.end_x)\n arrow_x = FigureFactory._flatten(zip(point1_x, self.end_x,\n point2_x, empty))\n arrow_y = FigureFactory._flatten(zip(point1_y, self.end_y,\n point2_y, empty))\n return arrow_x, arrow_y\n\n\nclass _Streamline(FigureFactory):\n \"\"\"\n Refer to 
FigureFactory.create_streamline() for docstring\n \"\"\"\n def __init__(self, x, y, u, v,\n density, angle,\n arrow_scale, **kwargs):\n self.x = np.array(x)\n self.y = np.array(y)\n self.u = np.array(u)\n self.v = np.array(v)\n self.angle = angle\n self.arrow_scale = arrow_scale\n self.density = int(30 * density) # Scale similarly to other functions\n self.delta_x = self.x[1] - self.x[0]\n self.delta_y = self.y[1] - self.y[0]\n self.val_x = self.x\n self.val_y = self.y\n\n # Set up spacing\n self.blank = np.zeros((self.density, self.density))\n self.spacing_x = len(self.x) / float(self.density - 1)\n self.spacing_y = len(self.y) / float(self.density - 1)\n self.trajectories = []\n\n # Rescale speed onto axes-coordinates\n self.u = self.u / (self.x[-1] - self.x[0])\n self.v = self.v / (self.y[-1] - self.y[0])\n self.speed = np.sqrt(self.u ** 2 + self.v ** 2)\n\n # Rescale u and v for integrations.\n self.u *= len(self.x)\n self.v *= len(self.y)\n self.st_x = []\n self.st_y = []\n self.get_streamlines()\n streamline_x, streamline_y = self.sum_streamlines()\n arrows_x, arrows_y = self.get_streamline_arrows()\n\n def blank_pos(self, xi, yi):\n \"\"\"\n Set up positions for trajectories to be used with rk4 function.\n \"\"\"\n return (int((xi / self.spacing_x) + 0.5),\n int((yi / self.spacing_y) + 0.5))\n\n def value_at(self, a, xi, yi):\n \"\"\"\n Set up for RK4 function, based on Bokeh's streamline code\n \"\"\"\n if isinstance(xi, np.ndarray):\n self.x = xi.astype(np.int)\n self.y = yi.astype(np.int)\n else:\n self.val_x = np.int(xi)\n self.val_y = np.int(yi)\n a00 = a[self.val_y, self.val_x]\n a01 = a[self.val_y, self.val_x + 1]\n a10 = a[self.val_y + 1, self.val_x]\n a11 = a[self.val_y + 1, self.val_x + 1]\n xt = xi - self.val_x\n yt = yi - self.val_y\n a0 = a00 * (1 - xt) + a01 * xt\n a1 = a10 * (1 - xt) + a11 * xt\n return a0 * (1 - yt) + a1 * yt\n\n def rk4_integrate(self, x0, y0):\n \"\"\"\n RK4 forward and back trajectories from the initial conditions.\n\n Adapted from Bokeh's streamline -uses Runge-Kutta method to fill\n x and y trajectories then checks length of traj (s in units of axes)\n \"\"\"\n def f(xi, yi):\n dt_ds = 1. / self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return ui * dt_ds, vi * dt_ds\n\n def g(xi, yi):\n dt_ds = 1. 
/ self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return -ui * dt_ds, -vi * dt_ds\n\n check = lambda xi, yi: (0 <= xi < len(self.x) - 1 and\n 0 <= yi < len(self.y) - 1)\n xb_changes = []\n yb_changes = []\n\n def rk4(x0, y0, f):\n ds = 0.01\n stotal = 0\n xi = x0\n yi = y0\n xb, yb = self.blank_pos(xi, yi)\n xf_traj = []\n yf_traj = []\n while check(xi, yi):\n xf_traj.append(xi)\n yf_traj.append(yi)\n try:\n k1x, k1y = f(xi, yi)\n k2x, k2y = f(xi + .5 * ds * k1x, yi + .5 * ds * k1y)\n k3x, k3y = f(xi + .5 * ds * k2x, yi + .5 * ds * k2y)\n k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)\n except IndexError:\n break\n xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.\n yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.\n if not check(xi, yi):\n break\n stotal += ds\n new_xb, new_yb = self.blank_pos(xi, yi)\n if new_xb != xb or new_yb != yb:\n if self.blank[new_yb, new_xb] == 0:\n self.blank[new_yb, new_xb] = 1\n xb_changes.append(new_xb)\n yb_changes.append(new_yb)\n xb = new_xb\n yb = new_yb\n else:\n break\n if stotal > 2:\n break\n return stotal, xf_traj, yf_traj\n\n sf, xf_traj, yf_traj = rk4(x0, y0, f)\n sb, xb_traj, yb_traj = rk4(x0, y0, g)\n stotal = sf + sb\n x_traj = xb_traj[::-1] + xf_traj[1:]\n y_traj = yb_traj[::-1] + yf_traj[1:]\n\n if len(x_traj) < 1:\n return None\n if stotal > .2:\n initxb, inityb = self.blank_pos(x0, y0)\n self.blank[inityb, initxb] = 1\n return x_traj, y_traj\n else:\n for xb, yb in zip(xb_changes, yb_changes):\n self.blank[yb, xb] = 0\n return None\n\n def traj(self, xb, yb):\n \"\"\"\n Integrate trajectories\n\n :param (int) xb: results of passing xi through self.blank_pos\n :param (int) xy: results of passing yi through self.blank_pos\n\n Calculate each trajectory based on rk4 integrate method.\n \"\"\"\n\n if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:\n return\n if self.blank[yb, xb] == 0:\n t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)\n if t is not None:\n self.trajectories.append(t)\n\n def get_streamlines(self):\n \"\"\"\n Get streamlines by building trajectory set.\n \"\"\"\n for indent in range(self.density // 2):\n for xi in range(self.density - 2 * indent):\n self.traj(xi + indent, indent)\n self.traj(xi + indent, self.density - 1 - indent)\n self.traj(indent, xi + indent)\n self.traj(self.density - 1 - indent, xi + indent)\n\n self.st_x = [np.array(t[0]) * self.delta_x + self.x[0] for t in\n self.trajectories]\n self.st_y = [np.array(t[1]) * self.delta_y + self.y[0] for t in\n self.trajectories]\n\n for index in range(len(self.st_x)):\n self.st_x[index] = self.st_x[index].tolist()\n self.st_x[index].append(np.nan)\n\n for index in range(len(self.st_y)):\n self.st_y[index] = self.st_y[index].tolist()\n self.st_y[index].append(np.nan)\n\n def get_streamline_arrows(self):\n \"\"\"\n Makes an arrow for each streamline.\n\n Gets angle of streamline at 1/3 mark and creates arrow coordinates\n based off of user defined angle and arrow_scale.\n\n :param (array) st_x: x-values for all streamlines\n :param (array) st_y: y-values for all streamlines\n :param (angle in radians) angle: angle of arrowhead. 
Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :rtype (list, list) arrows_x: x-values to create arrowhead and\n arrows_y: y-values to create arrowhead\n \"\"\"\n arrow_end_x = np.empty((len(self.st_x)))\n arrow_end_y = np.empty((len(self.st_y)))\n arrow_start_x = np.empty((len(self.st_x)))\n arrow_start_y = np.empty((len(self.st_y)))\n for index in range(len(self.st_x)):\n arrow_end_x[index] = (self.st_x[index]\n [int(len(self.st_x[index]) / 3)])\n arrow_start_x[index] = (self.st_x[index]\n [(int(len(self.st_x[index]) / 3)) - 1])\n arrow_end_y[index] = (self.st_y[index]\n [int(len(self.st_y[index]) / 3)])\n arrow_start_y[index] = (self.st_y[index]\n [(int(len(self.st_y[index]) / 3)) - 1])\n\n dif_x = arrow_end_x - arrow_start_x\n dif_y = arrow_end_y - arrow_start_y\n\n streamline_ang = np.arctan(dif_y / dif_x)\n\n ang1 = streamline_ang + (self.angle)\n ang2 = streamline_ang - (self.angle)\n\n seg1_x = np.cos(ang1) * self.arrow_scale\n seg1_y = np.sin(ang1) * self.arrow_scale\n seg2_x = np.cos(ang2) * self.arrow_scale\n seg2_y = np.sin(ang2) * self.arrow_scale\n\n point1_x = np.empty((len(dif_x)))\n point1_y = np.empty((len(dif_y)))\n point2_x = np.empty((len(dif_x)))\n point2_y = np.empty((len(dif_y)))\n\n for index in range(len(dif_x)):\n if dif_x[index] >= 0:\n point1_x[index] = arrow_end_x[index] - seg1_x[index]\n point1_y[index] = arrow_end_y[index] - seg1_y[index]\n point2_x[index] = arrow_end_x[index] - seg2_x[index]\n point2_y[index] = arrow_end_y[index] - seg2_y[index]\n else:\n point1_x[index] = arrow_end_x[index] + seg1_x[index]\n point1_y[index] = arrow_end_y[index] + seg1_y[index]\n point2_x[index] = arrow_end_x[index] + seg2_x[index]\n point2_y[index] = arrow_end_y[index] + seg2_y[index]\n\n space = np.empty((len(point1_x)))\n space[:] = np.nan\n\n # Combine arrays into matrix\n arrows_x = np.matrix([point1_x, arrow_end_x, point2_x, space])\n arrows_x = np.array(arrows_x)\n arrows_x = arrows_x.flatten('F')\n arrows_x = arrows_x.tolist()\n\n # Combine arrays into matrix\n arrows_y = np.matrix([point1_y, arrow_end_y, point2_y, space])\n arrows_y = np.array(arrows_y)\n arrows_y = arrows_y.flatten('F')\n arrows_y = arrows_y.tolist()\n\n return arrows_x, arrows_y\n\n def sum_streamlines(self):\n \"\"\"\n Makes all streamlines readable as a single trace.\n\n :rtype (list, list): streamline_x: all x values for each streamline\n combined into single list and streamline_y: all y values for each\n streamline combined into single list\n \"\"\"\n streamline_x = sum(self.st_x, [])\n streamline_y = sum(self.st_y, [])\n return streamline_x, streamline_y\n\n\nclass _OHLC(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_ohlc_increase() for docstring.\n \"\"\"\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.empty = [None] * len(open)\n self.dates = dates\n\n self.all_x = []\n self.all_y = []\n self.increase_x = []\n self.increase_y = []\n self.decrease_x = []\n self.decrease_y = []\n self.get_all_xy()\n self.separate_increase_decrease()\n\n def get_all_xy(self):\n \"\"\"\n Zip data to create OHLC shape\n\n OHLC shape: low to high vertical bar with\n horizontal branches for open and close values.\n If dates were added, the smallest date difference is calculated and\n multiplied by .2 to get the length of the open and close branches.\n If no date data was provided, the x-axis is a list of integers and the\n length of the open and 
close branches is .2.\n \"\"\"\n self.all_y = list(zip(self.open, self.open, self.high,\n self.low, self.close, self.close, self.empty))\n if self.dates is not None:\n date_dif = []\n for i in range(len(self.dates) - 1):\n date_dif.append(self.dates[i + 1] - self.dates[i])\n date_dif_min = (min(date_dif)) / 5\n self.all_x = [[x - date_dif_min, x, x, x, x, x +\n date_dif_min, None] for x in self.dates]\n else:\n self.all_x = [[x - .2, x, x, x, x, x + .2, None]\n for x in range(len(self.open))]\n\n def separate_increase_decrease(self):\n \"\"\"\n Separate data into two groups: increase and decrease\n\n (1) Increase, where close > open and\n (2) Decrease, where close <= open\n \"\"\"\n for index in range(len(self.open)):\n if self.close[index] is None:\n pass\n elif self.close[index] > self.open[index]:\n self.increase_x.append(self.all_x[index])\n self.increase_y.append(self.all_y[index])\n else:\n self.decrease_x.append(self.all_x[index])\n self.decrease_y.append(self.all_y[index])\n\n def get_increase(self):\n \"\"\"\n Flatten increase data and get increase text\n\n :rtype (list, list, list): flat_increase_x: x-values for the increasing\n trace, flat_increase_y: y=values for the increasing trace and\n text_increase: hovertext for the increasing trace\n \"\"\"\n flat_increase_x = FigureFactory._flatten(self.increase_x)\n flat_increase_y = FigureFactory._flatten(self.increase_y)\n text_increase = ((\"Open\", \"Open\", \"High\",\n \"Low\", \"Close\", \"Close\", '')\n * (len(self.increase_x)))\n\n return flat_increase_x, flat_increase_y, text_increase\n\n def get_decrease(self):\n \"\"\"\n Flatten decrease data and get decrease text\n\n :rtype (list, list, list): flat_decrease_x: x-values for the decreasing\n trace, flat_decrease_y: y=values for the decreasing trace and\n text_decrease: hovertext for the decreasing trace\n \"\"\"\n flat_decrease_x = FigureFactory._flatten(self.decrease_x)\n flat_decrease_y = FigureFactory._flatten(self.decrease_y)\n text_decrease = ((\"Open\", \"Open\", \"High\",\n \"Low\", \"Close\", \"Close\", '')\n * (len(self.decrease_x)))\n\n return flat_decrease_x, flat_decrease_y, text_decrease\n\n\nclass _Candlestick(FigureFactory):\n \"\"\"\n Refer to FigureFactory.create_candlestick() for docstring.\n \"\"\"\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n if dates is not None:\n self.x = dates\n else:\n self.x = [x for x in range(len(self.open))]\n self.get_candle_increase()\n\n def get_candle_increase(self):\n \"\"\"\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n \"\"\"\n increase_y = []\n increase_x = []\n for index in range(len(self.open)):\n if self.close[index] > self.open[index]:\n increase_y.append(self.low[index])\n increase_y.append(self.open[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.high[index])\n increase_x.append(self.x[index])\n\n increase_x = [[x, x, x, x, x, x] for x in increase_x]\n increase_x = FigureFactory._flatten(increase_x)\n\n return increase_x, increase_y\n\n def get_candle_decrease(self):\n \"\"\"\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n \"\"\"\n decrease_y = []\n decrease_x = []\n for index in 
range(len(self.open)):\n if self.close[index] <= self.open[index]:\n decrease_y.append(self.low[index])\n decrease_y.append(self.open[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.high[index])\n decrease_x.append(self.x[index])\n\n decrease_x = [[x, x, x, x, x, x] for x in decrease_x]\n decrease_x = FigureFactory._flatten(decrease_x)\n\n return decrease_x, decrease_y\n\n\nclass _Distplot(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_distplot() for docstring\n \"\"\"\n def __init__(self, hist_data, histnorm, group_labels,\n bin_size, curve_type, colors,\n rug_text, show_hist, show_curve):\n self.hist_data = hist_data\n self.histnorm = histnorm\n self.group_labels = group_labels\n self.bin_size = bin_size\n self.show_hist = show_hist\n self.show_curve = show_curve\n self.trace_number = len(hist_data)\n if rug_text:\n self.rug_text = rug_text\n else:\n self.rug_text = [None] * self.trace_number\n\n self.start = []\n self.end = []\n if colors:\n self.colors = colors\n else:\n self.colors = [\n \"rgb(31, 119, 180)\", \"rgb(255, 127, 14)\",\n \"rgb(44, 160, 44)\", \"rgb(214, 39, 40)\",\n \"rgb(148, 103, 189)\", \"rgb(140, 86, 75)\",\n \"rgb(227, 119, 194)\", \"rgb(127, 127, 127)\",\n \"rgb(188, 189, 34)\", \"rgb(23, 190, 207)\"]\n self.curve_x = [None] * self.trace_number\n self.curve_y = [None] * self.trace_number\n\n for trace in self.hist_data:\n self.start.append(min(trace) * 1.)\n self.end.append(max(trace) * 1.)\n\n def make_hist(self):\n \"\"\"\n Makes the histogram(s) for FigureFactory.create_distplot().\n\n :rtype (list) hist: list of histogram representations\n \"\"\"\n hist = [None] * self.trace_number\n\n for index in range(self.trace_number):\n hist[index] = dict(type='histogram',\n x=self.hist_data[index],\n xaxis='x1',\n yaxis='y1',\n histnorm=self.histnorm,\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n marker=dict(color=self.colors[index]),\n autobinx=False,\n xbins=dict(start=self.start[index],\n end=self.end[index],\n size=self.bin_size[index]),\n opacity=.7)\n return hist\n\n def make_kde(self):\n \"\"\"\n Makes the kernel density estimation(s) for create_distplot().\n\n This is called when curve_type = 'kde' in create_distplot().\n\n :rtype (list) curve: list of kde representations\n \"\"\"\n curve = [None] * self.trace_number\n for index in range(self.trace_number):\n self.curve_x[index] = [self.start[index] +\n x * (self.end[index] - self.start[index])\n / 500 for x in range(500)]\n self.curve_y[index] = (scipy.stats.gaussian_kde\n (self.hist_data[index])\n (self.curve_x[index]))\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(type='scatter',\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis='x1',\n yaxis='y1',\n mode='lines',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index]))\n return curve\n\n def make_normal(self):\n \"\"\"\n Makes the normal curve(s) for create_distplot().\n\n This is called when curve_type = 'normal' in create_distplot().\n\n :rtype (list) curve: list of normal curve representations\n \"\"\"\n curve = [None] * self.trace_number\n mean = [None] * self.trace_number\n sd = [None] * self.trace_number\n\n for index in range(self.trace_number):\n mean[index], sd[index] = (scipy.stats.norm.fit\n 
(self.hist_data[index]))\n self.curve_x[index] = [self.start[index] +\n x * (self.end[index] - self.start[index])\n / 500 for x in range(500)]\n self.curve_y[index] = scipy.stats.norm.pdf(\n self.curve_x[index], loc=mean[index], scale=sd[index])\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(type='scatter',\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis='x1',\n yaxis='y1',\n mode='lines',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index]))\n return curve\n\n def make_rug(self):\n \"\"\"\n Makes the rug plot(s) for create_distplot().\n\n :rtype (list) rug: list of rug plot representations\n \"\"\"\n rug = [None] * self.trace_number\n for index in range(self.trace_number):\n\n rug[index] = dict(type='scatter',\n x=self.hist_data[index],\n y=([self.group_labels[index]] *\n len(self.hist_data[index])),\n xaxis='x1',\n yaxis='y2',\n mode='markers',\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=(False if self.show_hist or\n self.show_curve else True),\n text=self.rug_text[index],\n marker=dict(color=self.colors[index],\n symbol='line-ns-open'))\n return rug\n\n\nclass _Dendrogram(FigureFactory):\n \"\"\"Refer to FigureFactory.create_dendrogram() for docstring.\"\"\"\n\n def __init__(self, X, orientation='bottom', labels=None, colorscale=None,\n width=\"100%\", height=\"100%\", xaxis='xaxis', yaxis='yaxis'):\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n self.orientation = orientation\n self.labels = labels\n self.xaxis = xaxis\n self.yaxis = yaxis\n self.data = []\n self.leaves = []\n self.sign = {self.xaxis: 1, self.yaxis: 1}\n self.layout = {self.xaxis: {}, self.yaxis: {}}\n\n if self.orientation in ['left', 'bottom']:\n self.sign[self.xaxis] = 1\n else:\n self.sign[self.xaxis] = -1\n\n if self.orientation in ['right', 'bottom']:\n self.sign[self.yaxis] = 1\n else:\n self.sign[self.yaxis] = -1\n\n (dd_traces, xvals, yvals,\n ordered_labels, leaves) = self.get_dendrogram_traces(X, colorscale)\n\n self.labels = ordered_labels\n self.leaves = leaves\n yvals_flat = yvals.flatten()\n xvals_flat = xvals.flatten()\n\n self.zero_vals = []\n\n for i in range(len(yvals_flat)):\n if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:\n self.zero_vals.append(xvals_flat[i])\n\n self.zero_vals.sort()\n\n self.layout = self.set_figure_layout(width, height)\n self.data = graph_objs.Data(dd_traces)\n\n def get_color_dict(self, colorscale):\n \"\"\"\n Returns colorscale used for dendrogram tree clusters.\n\n :param (list) colorscale: Colors to use for the plot in rgb format.\n :rtype (dict): A dict of default colors mapped to the user colorscale.\n\n \"\"\"\n\n # These are the color codes returned for dendrograms\n # We're replacing them with nicer colors\n d = {'r': 'red',\n 'g': 'green',\n 'b': 'blue',\n 'c': 'cyan',\n 'm': 'magenta',\n 'y': 'yellow',\n 'k': 'black',\n 'w': 'white'}\n default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))\n\n if colorscale is None:\n colorscale = [\n 'rgb(0,116,217)', # blue\n 'rgb(35,205,205)', # cyan\n 'rgb(61,153,112)', # green\n 'rgb(40,35,35)', # black\n 'rgb(133,20,75)', # magenta\n 'rgb(255,65,54)', # red\n 'rgb(255,255,255)', # white\n 'rgb(255,220,0)'] # yellow\n\n for i in range(len(default_colors.keys())):\n k = list(default_colors.keys())[i] # PY3 
won't index keys\n if i < len(colorscale):\n default_colors[k] = colorscale[i]\n\n return default_colors\n\n def set_axis_layout(self, axis_key):\n \"\"\"\n Sets and returns default axis object for dendrogram figure.\n\n :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.\n :rtype (dict): An axis_key dictionary with set parameters.\n\n \"\"\"\n axis_defaults = {\n 'type': 'linear',\n 'ticks': 'outside',\n 'mirror': 'allticks',\n 'rangemode': 'tozero',\n 'showticklabels': True,\n 'zeroline': False,\n 'showgrid': False,\n 'showline': True,\n }\n\n if len(self.labels) != 0:\n axis_key_labels = self.xaxis\n if self.orientation in ['left', 'right']:\n axis_key_labels = self.yaxis\n if axis_key_labels not in self.layout:\n self.layout[axis_key_labels] = {}\n self.layout[axis_key_labels]['tickvals'] = \\\n [zv*self.sign[axis_key] for zv in self.zero_vals]\n self.layout[axis_key_labels]['ticktext'] = self.labels\n self.layout[axis_key_labels]['tickmode'] = 'array'\n\n self.layout[axis_key].update(axis_defaults)\n\n return self.layout[axis_key]\n\n def set_figure_layout(self, width, height):\n \"\"\"\n Sets and returns default layout object for dendrogram figure.\n\n \"\"\"\n self.layout.update({\n 'showlegend': False,\n 'autosize': False,\n 'hovermode': 'closest',\n 'width': width,\n 'height': height\n })\n\n self.set_axis_layout(self.xaxis)\n self.set_axis_layout(self.yaxis)\n\n return self.layout\n\n def get_dendrogram_traces(self, X, colorscale):\n \"\"\"\n Calculates all the elements needed for plotting a dendrogram.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (list) colorscale: Color scale for dendrogram tree clusters\n :rtype (tuple): Contains all the traces in the following order:\n (a) trace_list: List of Plotly trace objects for dendrogram tree\n (b) icoord: All X points of the dendrogram tree as array of arrays\n with length 4\n (c) dcoord: All Y points of the dendrogram tree as array of arrays\n with length 4\n (d) ordered_labels: leaf labels in the order they are going to\n appear on the plot\n (e) P['leaves']: left-to-right traversal of the leaves\n\n \"\"\"\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n d = scs.distance.pdist(X)\n Z = sch.linkage(d, method='complete')\n P = sch.dendrogram(Z, orientation=self.orientation,\n labels=self.labels, no_plot=True)\n\n icoord = scp.array(P['icoord'])\n dcoord = scp.array(P['dcoord'])\n ordered_labels = scp.array(P['ivl'])\n color_list = scp.array(P['color_list'])\n colors = self.get_color_dict(colorscale)\n\n trace_list = []\n\n for i in range(len(icoord)):\n # xs and ys are arrays of 4 points that make up the '∩' shapes\n # of the dendrogram tree\n if self.orientation in ['top', 'bottom']:\n xs = icoord[i]\n else:\n xs = dcoord[i]\n\n if self.orientation in ['top', 'bottom']:\n ys = dcoord[i]\n else:\n ys = icoord[i]\n color_key = color_list[i]\n trace = graph_objs.Scatter(\n x=np.multiply(self.sign[self.xaxis], xs),\n y=np.multiply(self.sign[self.yaxis], ys),\n mode='lines',\n marker=graph_objs.Marker(color=colors[color_key])\n )\n\n try:\n x_index = int(self.xaxis[-1])\n except ValueError:\n x_index = ''\n\n try:\n y_index = int(self.yaxis[-1])\n except ValueError:\n y_index = ''\n\n trace['xaxis'] = 'x' + x_index\n trace['yaxis'] = 'y' + y_index\n\n trace_list.append(trace)\n\n return trace_list, icoord, dcoord, ordered_labels, P['leaves']\n\n\nclass _AnnotatedHeatmap(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_annotated_heatmap() for docstring\n 
\"\"\"\n def __init__(self, z, x, y, annotation_text, colorscale,\n font_colors, reversescale, **kwargs):\n from plotly.graph_objs import graph_objs\n\n self.z = z\n if x:\n self.x = x\n else:\n self.x = range(len(z[0]))\n if y:\n self.y = y\n else:\n self.y = range(len(z))\n if annotation_text is not None:\n self.annotation_text = annotation_text\n else:\n self.annotation_text = self.z\n self.colorscale = colorscale\n self.reversescale = reversescale\n self.font_colors = font_colors\n\n def get_text_color(self):\n \"\"\"\n Get font color for annotations.\n\n The annotated heatmap can feature two text colors: min_text_color and\n max_text_color. The min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. The user can define these\n two colors. Otherwise the colors are defined logically as black or\n white depending on the heatmap's colorscale.\n\n :rtype (string, string) min_text_color, max_text_color: text\n color for annotations for heatmap values <\n (max_value - min_value)/2 and text color for annotations for\n heatmap values >= (max_value - min_value)/2\n \"\"\"\n # Plotly colorscales ranging from a lighter shade to a darker shade\n colorscales = ['Greys', 'Greens', 'Blues',\n 'YIGnBu', 'YIOrRd', 'RdBu',\n 'Picnic', 'Jet', 'Hot', 'Blackbody',\n 'Earth', 'Electric', 'Viridis']\n # Plotly colorscales ranging from a darker shade to a lighter shade\n colorscales_reverse = ['Reds']\n if self.font_colors:\n min_text_color = self.font_colors[0]\n max_text_color = self.font_colors[-1]\n elif self.colorscale in colorscales and self.reversescale:\n min_text_color = '#000000'\n max_text_color = '#FFFFFF'\n elif self.colorscale in colorscales:\n min_text_color = '#FFFFFF'\n max_text_color = '#000000'\n elif self.colorscale in colorscales_reverse and self.reversescale:\n min_text_color = '#FFFFFF'\n max_text_color = '#000000'\n elif self.colorscale in colorscales_reverse:\n min_text_color = '#000000'\n max_text_color = '#FFFFFF'\n elif isinstance(self.colorscale, list):\n if 'rgb' in self.colorscale[0][1]:\n min_col = map(int,\n self.colorscale[0][1].strip('rgb()').split(','))\n max_col = map(int,\n self.colorscale[-1][1].strip('rgb()').split(','))\n elif '#' in self.colorscale[0][1]:\n min_col = FigureFactory._hex_to_rgb(self.colorscale[0][1])\n max_col = FigureFactory._hex_to_rgb(self.colorscale[-1][1])\n else:\n min_col = [255, 255, 255]\n max_col = [255, 255, 255]\n\n if (min_col[0]*0.299 + min_col[1]*0.587 + min_col[2]*0.114) > 186:\n min_text_color = '#000000'\n else:\n min_text_color = '#FFFFFF'\n if (max_col[0]*0.299 + max_col[1]*0.587 + max_col[2]*0.114) > 186:\n max_text_color = '#000000'\n else:\n max_text_color = '#FFFFFF'\n else:\n min_text_color = '#000000'\n max_text_color = '#000000'\n return min_text_color, max_text_color\n\n def get_z_mid(self):\n \"\"\"\n Get the mid value of z matrix\n\n :rtype (float) z_avg: average val from z matrix\n \"\"\"\n if _numpy_imported and isinstance(self.z, np.ndarray):\n z_min = np.amin(self.z)\n z_max = np.amax(self.z)\n else:\n z_min = min(min(self.z))\n z_max = max(max(self.z))\n z_mid = (z_max+z_min) / 2\n return z_mid\n\n def make_annotations(self):\n \"\"\"\n Get annotations for each cell of the heatmap with graph_objs.Annotation\n\n :rtype (list[dict]) annotations: list of annotations for each cell of\n the heatmap\n \"\"\"\n from plotly.graph_objs import graph_objs\n min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)\n z_mid = _AnnotatedHeatmap.get_z_mid(self)\n annotations = []\n for 
n, row in enumerate(self.z):\n for m, val in enumerate(row):\n font_color = min_text_color if val < z_mid else max_text_color\n annotations.append(\n graph_objs.Annotation(\n text=str(self.annotation_text[n][m]),\n x=self.x[m],\n y=self.y[n],\n xref='x1',\n yref='y1',\n font=dict(color=font_color),\n showarrow=False))\n return annotations\n\n\nclass _Table(FigureFactory):\n \"\"\"\n Refer to TraceFactory.create_table() for docstring\n \"\"\"\n def __init__(self, table_text, colorscale, font_colors, index,\n index_title, annotation_offset, **kwargs):\n from plotly.graph_objs import graph_objs\n if _pandas_imported and isinstance(table_text, pd.DataFrame):\n headers = table_text.columns.tolist()\n table_text_index = table_text.index.tolist()\n table_text = table_text.values.tolist()\n table_text.insert(0, headers)\n if index:\n table_text_index.insert(0, index_title)\n for i in range(len(table_text)):\n table_text[i].insert(0, table_text_index[i])\n self.table_text = table_text\n self.colorscale = colorscale\n self.font_colors = font_colors\n self.index = index\n self.annotation_offset = annotation_offset\n self.x = range(len(table_text[0]))\n self.y = range(len(table_text))\n\n def get_table_matrix(self):\n \"\"\"\n Create z matrix to make heatmap with striped table coloring\n\n :rtype (list[list]) table_matrix: z matrix to make heatmap with striped\n table coloring.\n \"\"\"\n header = [0] * len(self.table_text[0])\n odd_row = [.5] * len(self.table_text[0])\n even_row = [1] * len(self.table_text[0])\n table_matrix = [None] * len(self.table_text)\n table_matrix[0] = header\n for i in range(1, len(self.table_text), 2):\n table_matrix[i] = odd_row\n for i in range(2, len(self.table_text), 2):\n table_matrix[i] = even_row\n if self.index:\n for array in table_matrix:\n array[0] = 0\n return table_matrix\n\n def get_table_font_color(self):\n \"\"\"\n Fill font-color array.\n\n Table text color can vary by row so this extends a single color or\n creates an array to set a header color and two alternating colors to\n create the striped table pattern.\n\n :rtype (list[list]) all_font_colors: list of font colors for each row\n in table.\n \"\"\"\n if len(self.font_colors) == 1:\n all_font_colors = self.font_colors*len(self.table_text)\n elif len(self.font_colors) == 3:\n all_font_colors = list(range(len(self.table_text)))\n all_font_colors[0] = self.font_colors[0]\n for i in range(1, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[1]\n for i in range(2, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[2]\n elif len(self.font_colors) == len(self.table_text):\n all_font_colors = self.font_colors\n else:\n all_font_colors = ['#000000']*len(self.table_text)\n return all_font_colors\n\n def make_table_annotations(self):\n \"\"\"\n Generate annotations to fill in table text\n\n :rtype (list) annotations: list of annotations for each cell of the\n table.\n \"\"\"\n from plotly.graph_objs import graph_objs\n table_matrix = _Table.get_table_matrix(self)\n all_font_colors = _Table.get_table_font_color(self)\n annotations = []\n for n, row in enumerate(self.table_text):\n for m, val in enumerate(row):\n # Bold text in header and index\n format_text = ('<b>' + str(val) + '</b>' if n == 0 or\n self.index and m < 1 else str(val))\n # Match font color of index to font color of header\n font_color = (self.font_colors[0] if self.index and m == 0\n else all_font_colors[n])\n annotations.append(\n graph_objs.Annotation(\n text=format_text,\n x=self.x[m] - self.annotation_offset,\n 
y=self.y[n],\n xref='x1',\n yref='y1',\n align=\"left\",\n xanchor=\"left\",\n font=dict(color=font_color),\n showarrow=False))\n return annotations\n"
] | [
[
"scipy.spatial.distance.pdist",
"scipy.stats.norm.fit",
"numpy.multiply",
"numpy.asarray",
"scipy.stats.gaussian_kde",
"numpy.amax",
"numpy.vstack",
"scipy.cluster.hierarchy.dendrogram",
"numpy.cos",
"scipy.cluster.hierarchy.linkage",
"numpy.linspace",
"numpy.mean",
"numpy.int",
"scipy.array",
"numpy.tile",
"numpy.atleast_2d",
"numpy.zeros",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.percentile",
"numpy.matrix",
"numpy.arctan",
"scipy.stats.norm.pdf",
"numpy.amin",
"numpy.sqrt",
"numpy.sin"
]
] |
vbod/text | [
"07c044b8b851ace1e9a033c9597cdb1bee2d69e0"
] | [
"tensorflow_text/python/ops/sentencepiece_tokenizer_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for SentencePieceProcessor Tensorflow op.\"\"\"\n\nimport sys\nimport tempfile\nfrom absl.testing import parameterized\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer\n\n\ndef _utf8(tokens):\n if sys.version_info[0] == 2:\n return tokens\n if isinstance(tokens, list):\n return [_utf8(t) for t in tokens]\n else:\n return tokens.encode('utf-8')\n\n\nclass TestSavedModelModule(module.Module):\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[None], dtype=dtypes.string)\n ])\n def tokenize(self, inputs):\n return self.tokenizer.tokenize(inputs)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SentencepieceTokenizerOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def getTokenizerAndSetOptions(self, reverse, add_bos, add_eos, out_type):\n self.reverse = reverse\n self.add_bos = add_bos\n self.add_eos = add_eos\n self.out_type = out_type\n return SentencepieceTokenizer(\n self.model,\n reverse=reverse,\n add_bos=add_bos,\n add_eos=add_eos,\n out_type=out_type)\n\n def transformExpected(self, expected, is_offsets=False):\n bos = _utf8('<s>')\n eos = _utf8('</s>')\n if is_offsets:\n bos = 0\n eos = 0\n elif self.out_type == dtypes.int32:\n bos = 1\n eos = 2\n if not isinstance(expected[0], list):\n if self.add_bos:\n expected = [bos] + expected\n if self.add_eos:\n expected = expected + [eos]\n if self.reverse:\n expected = [x for x in reversed(expected)]\n else:\n return [self.transformExpected(x) for x in expected]\n return expected\n\n def setUp(self):\n super(SentencepieceTokenizerOpTest, self).setUp()\n sentencepiece_model_file = (\n 'tensorflow_text/python/ops/test_data/'\n 'test_oss_model.model')\n self.model = gfile.GFile(sentencepiece_model_file, 'rb').read()\n\n def testGetVocabSize(self):\n sp = SentencepieceTokenizer(self.model)\n self.assertAllEqual(1000, sp.vocab_size())\n\n def testIdToStringScalar(self):\n sp = SentencepieceTokenizer(self.model)\n result = sp.id_to_string(125)\n self.assertAllEqual('ve', result)\n\n def testIdToStringVector(self):\n sp = SentencepieceTokenizer(self.model)\n 
pieces = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])\n ids = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]]\n result = sp.id_to_string(ragged_factory_ops.constant(ids))\n self.assertAllEqual(pieces, result)\n\n def testIdToStringRagged(self):\n sp = SentencepieceTokenizer(self.model)\n pieces = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd', 's']]])\n ids = [[[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]],\n [[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n result = sp.id_to_string(ragged_factory_ops.constant(ids, dtypes.int32))\n self.assertAllEqual(pieces, result)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeScalar(self, reverse, add_bos, add_eos,\n out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentence = 'I love lamp.'\n expected = []\n if out_type == dtypes.int32:\n expected = [9, 169, 21, 125, 169, 579, 6]\n else:\n expected = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])\n expected = self.transformExpected(expected)\n result = sp.tokenize(sentence)\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentence), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeVec(self, reverse, add_bos, add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = ['I love carpet', 'I love desk.', 'I love lamp.']\n expected = []\n if out_type == dtypes.int32:\n expected = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],\n [9, 169, 21, 125, 169, 579, 6]]\n else:\n expected = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])\n expected = self.transformExpected(expected)\n result = sp.tokenize(sentences)\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n 
@parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeUniformTensorMatrix(self, reverse, add_bos,\n add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = [['I love carpet', 'I love desk.'],\n ['I love lamp.', 'Never tell me the odds']]\n expected = []\n if out_type == dtypes.int32:\n expected = [[[9, 169, 21, 125, 78, 48, 132, 15],\n [9, 169, 21, 125, 727, 6]],\n [[9, 169, 21, 125, 169, 579, 6],\n [4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n else:\n expected = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.']],\n [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'],\n ['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',\n 's']]])\n expected = self.transformExpected(expected)\n result = sp.tokenize(constant_op.constant(sentences))\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeRaggedMatrix(self, reverse, add_bos, add_eos,\n out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n expected = []\n if out_type == dtypes.int32:\n expected = [[[9, 169, 21, 125, 78, 48, 132, 15],\n [9, 169, 21, 125, 727, 6], [9, 169, 21, 125, 169, 579, 6]],\n [[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]\n else:\n expected = _utf8(\n [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',\n 's']]])\n expected = self.transformExpected(expected)\n result = sp.tokenize(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected, result)\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (False, False, False, dtypes.int32),\n (False, False, True, dtypes.int32),\n (False, True, False, dtypes.int32),\n (False, True, True, dtypes.int32),\n (True, False, False, dtypes.int32),\n (True, False, True, dtypes.int32),\n (True, True, False, dtypes.int32),\n (True, True, True, dtypes.int32),\n (False, False, False, 
dtypes.string),\n (False, False, True, dtypes.string),\n (False, True, False, dtypes.string),\n (False, True, True, dtypes.string),\n (True, False, False, dtypes.string),\n (True, False, True, dtypes.string),\n (True, True, False, dtypes.string),\n (True, True, True, dtypes.string),\n ])\n def testTokenizeAndDetokenizeWithOffsetsScalar(self, reverse, add_bos,\n add_eos, out_type):\n sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)\n sentence = 'I love lamp.'\n expected_tok = []\n expected_starts = [0, 1, 3, 4, 6, 8, 11]\n expected_limits = [1, 3, 4, 6, 8, 11, 12]\n if out_type == dtypes.int32:\n expected_tok = [9, 169, 21, 125, 169, 579, 6]\n else:\n expected_tok = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])\n expected_tok = self.transformExpected(expected_tok)\n expected_starts = self.transformExpected(expected_starts, True)\n expected_limits = self.transformExpected(expected_limits, True)\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentence))\n self.assertAllEqual(expected_tok, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentence), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsSingleElementVector(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = ['I love lamp.']\n expected_tokens = [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[0, 1, 3, 4, 6, 8, 11]]\n expected_limits = [[1, 3, 4, 6, 8, 11, 12]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsVector(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = ['I love carpet.', 'I love desk.', 'I love lamp.']\n expected_tokens = [['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],\n [0, 1, 3, 4, 6, 8, 11]]\n expected_limits = [[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],\n [1, 3, 4, 6, 8, 11, 12]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testTokenizeAndDetokenizeWithOffsetsMatrix(self):\n sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)\n sentences = [['I love carpet.', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n expected_tokens = [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],\n ['▁I', '▁l', 'o', 've', '▁desk', '.'],\n ['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],\n [[\n '▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o',\n 'd', 'd', 's'\n ]]]\n expected_tokens = _utf8(expected_tokens)\n expected_starts = [[[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],\n [0, 1, 3, 4, 6, 8, 11]],\n [[0, 0, 1, 5, 10, 13, 17, 18, 19, 20, 21]]]\n expected_limits = [[[1, 
3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],\n [1, 3, 4, 6, 8, 11, 12]],\n [[0, 1, 5, 10, 13, 17, 18, 19, 20, 21, 22]]]\n (tokens, starts,\n limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))\n self.assertAllEqual(expected_tokens, tokens)\n self.assertAllEqual(expected_starts, starts)\n self.assertAllEqual(expected_limits, limits)\n detokenized = sp.detokenize(tokens)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n @parameterized.parameters([\n (-1, 0.1, dtypes.int32),\n (64, 0.1, dtypes.int32),\n (0, 0.0, dtypes.int32),\n (-1, 0.1, dtypes.string),\n (64, 0.1, dtypes.string),\n (0, 0.0, dtypes.string),\n ])\n def testSampleTokenizeAndDetokenize(self, nbest_size, alpha, out_type):\n sp = SentencepieceTokenizer(\n self.model, nbest_size=nbest_size, alpha=alpha, out_type=out_type)\n sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],\n ['Never tell me the odds']]\n result = sp.tokenize(ragged_factory_ops.constant(sentences))\n detokenized = sp.detokenize(result)\n self.assertAllEqual(_utf8(sentences), detokenized)\n\n def testSavedModel(self):\n sp = SentencepieceTokenizer(self.model)\n test_module = TestSavedModelModule(sp)\n inputs = constant_op.constant(['hello world'])\n expected_result = test_module.tokenize(inputs)\n temp_dir = tempfile.mkdtemp(dir=test.get_temp_dir())\n save.save(test_module, temp_dir)\n restored_model = load.load(temp_dir)\n self.assertAllEqual(restored_model.tokenize(inputs), expected_result)\n file_io.delete_recursively(temp_dir)\n\n def testBasicPipeline(self):\n if not context.executing_eagerly():\n self.skipTest('testBasicPipeline only supported in eager mode.')\n\n sp = SentencepieceTokenizer(self.model)\n\n strings = ['hello', 'world']\n dataset = dataset_ops.Dataset.from_tensor_slices(strings)\n # Ensure we can map the tokenizer across the dataset.\n dataset1 = dataset.map(sp.tokenize)\n # Ensure there's no error with a second map call.\n dataset2 = dataset.map(sp.tokenize)\n\n expected = sp.tokenize(strings)\n for i, result in enumerate(dataset1):\n self.assertAllEqual(result, expected[i])\n for i, result in enumerate(dataset2):\n self.assertAllEqual(result, expected[i])\n\n def testEmptyModel(self):\n with self.cached_session():\n with self.assertRaises(errors.InvalidArgumentError):\n sp = SentencepieceTokenizer()\n result = sp.tokenize('whatever')\n result.eval()\n\n def testInvalidModel(self):\n with self.cached_session():\n with self.assertRaises(errors.InternalError):\n sp = SentencepieceTokenizer('invalid model')\n result = sp.tokenize('whatever')\n result.eval()\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.platform.test.get_temp_dir",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.platform.test.main",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.lib.io.file_io.delete_recursively",
"tensorflow.python.framework.constant_op.constant"
]
] |
jmkinder1/code-samples | [
"9c6cd3c6f16579a6c1f5210779b8ec6ad53fbdba"
] | [
"projectile.py"
] | [
"# projectile.py\n# -----------------------------------------------------------------------------\n# Calculate how long an object is in the air when thrown from a specified height\n# with a range of initial speeds assuming constant acceleration due to gravity:\n# \t0.5 * g * t**2 - v0 * t - y0 = 0\n# ----------------------------------------------------------------------------- \nimport numpy as np\n\n#%% Initialization of variables.\ninitial_speed = 0.0\t\t\t# v0 = initial vertical speed of ball in [m/s]\nimpact_time = 0.0\t\t\t# t = time of impact in [s] (computed in loop)\n\n#%% Initialization of parameters.\ng = 9.8066\t\t\t\t\t# gravitational acceleration in [m/s^2]\ninitial_height = 2.0\t\t# y0 = height ball is thrown from in [m]\nspeed_increment = 5.0\t\t# how much to increase speed in [m/s] for each iteration\ncutoff_time = 10.0\t\t\t# stop computing after impact time exceeds cutoff\n\n#%% Calculate and display impact time. Increment initial speed each step.\n#\tRepeat until impact time exceeds cutoff.\nwhile impact_time < cutoff_time:\n\t# Use quadratic equation to solve kinematic equation for impact time:\n\timpact_time = (np.sqrt(initial_speed**2 + 2 * g * initial_height) + initial_speed) / g\n\tprint(\"speed= {} m/s; time= {:.1f} s\".format(initial_speed, impact_time))\n\tinitial_speed += speed_increment\nprint(\"Calculation complete.\")\n"
] | [
[
"numpy.sqrt"
]
] |
yil8/GPN | [
"e0ccba70db6f1d3264f8d3dd38fc4c62bcebd7ad"
] | [
"model/functional.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.7):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n scores: (N) FloatTensor\n boxes: (N, 4) FloatTensor\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n Return:\n The indices of the kept boxes with respect to N.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(dim=0, descending=True) # sort in ascending order\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[0] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[1:] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n\n keep = keep[:count]\n\n return keep\n\n\ndef n_proposals(out_cls):\n vals, idcs = out_cls.view(-1, 2).max(1)\n n_proposals = idcs.eq(1).type(torch.cuda.FloatTensor).sum() / len(out_cls)\n\n return n_proposals\n\n\ndef acc(out_cls, labels):\n pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)\n out_cls_pos = torch.index_select(out_cls.view(-1, 2), 0, pos_idcs)\n prob_pos = F.softmax(out_cls_pos, dim=1)[:, 1]\n acc_pos = prob_pos.ge(0.5).type(\n torch.cuda.FloatTensor).sum() / len(prob_pos)\n\n neg_idcs = labels.view(-1).eq(0).nonzero().view(-1)\n out_cls_neg = torch.index_select(out_cls.view(-1, 2), 0, neg_idcs)\n prob_neg = F.softmax(out_cls_neg, dim=1)[:, 0]\n acc_neg = prob_neg.ge(0.5).type(\n torch.cuda.FloatTensor).sum() / len(prob_neg)\n\n return (acc_pos, acc_neg)\n\n\ndef angle_err(out_ellipse, labels, ellipse_targets):\n pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)\n out_ellipse_keep = torch.index_select(out_ellipse.view(-1, 5), 0, pos_idcs)\n ellipse_targets_keep = torch.index_select(ellipse_targets.view(-1, 5), 0,\n pos_idcs)\n\n out_tan = out_ellipse_keep[:, 4]\n out_angle = torch.atan(out_tan) * 180 / np.pi\n targets_tan = ellipse_targets_keep[:, 4]\n targets_angle = torch.atan(targets_tan) * 180 / np.pi\n\n err = torch.abs(out_angle - targets_angle).sum() / len(out_angle)\n\n return err\n"
] | [
[
"torch.atan",
"torch.nn.functional.softmax",
"torch.mul",
"torch.abs",
"torch.index_select",
"torch.clamp"
]
] |
AhmedElshaarany/RoboND-Rover-Project | [
"9dad356d4585bb567ee436062afdd82d9d7eb4de"
] | [
"code/supporting_functions.py"
] | [
"import numpy as np\nimport cv2\nfrom PIL import Image\nfrom io import BytesIO, StringIO\nimport base64\nimport time\n\n# Define a function to convert telemetry strings to float independent of decimal convention\ndef convert_to_float(string_to_convert):\n if ',' in string_to_convert:\n float_value = np.float(string_to_convert.replace(',','.'))\n else: \n float_value = np.float(string_to_convert)\n return float_value\n\ndef update_rover(Rover, data):\n # Initialize start time and sample positions\n if Rover.start_time == None:\n Rover.start_time = time.time()\n Rover.total_time = 0\n samples_xpos = np.int_([convert_to_float(pos.strip()) for pos in data[\"samples_x\"].split(';')])\n samples_ypos = np.int_([convert_to_float(pos.strip()) for pos in data[\"samples_y\"].split(';')])\n Rover.samples_pos = (samples_xpos, samples_ypos)\n Rover.samples_to_find = np.int(data[\"sample_count\"])\n # Or just update elapsed time\n else:\n tot_time = time.time() - Rover.start_time\n if np.isfinite(tot_time):\n Rover.total_time = tot_time\n # Print out the fields in the telemetry data dictionary\n print(data.keys())\n # The current speed of the rover in m/s\n Rover.vel = convert_to_float(data[\"speed\"])\n # The current position of the rover\n Rover.pos = [convert_to_float(pos.strip()) for pos in data[\"position\"].split(';')]\n # The current yaw angle of the rover\n Rover.yaw = convert_to_float(data[\"yaw\"])\n # The current yaw angle of the rover\n Rover.pitch = convert_to_float(data[\"pitch\"])\n # The current yaw angle of the rover\n Rover.roll = convert_to_float(data[\"roll\"])\n # The current throttle setting\n Rover.throttle = convert_to_float(data[\"throttle\"])\n # The current steering angle\n Rover.steer = convert_to_float(data[\"steering_angle\"])\n # Near sample flag\n Rover.near_sample = np.int(data[\"near_sample\"])\n # Picking up flag\n Rover.picking_up = np.int(data[\"picking_up\"])\n # Update number of rocks collected\n Rover.samples_collected = Rover.samples_to_find - np.int(data[\"sample_count\"])\n\n print('speed =',Rover.vel, 'position =', Rover.pos, 'throttle =', \n Rover.throttle, 'steer_angle =', Rover.steer, 'near_sample:', Rover.near_sample, \n 'picking_up:', data[\"picking_up\"], 'sending pickup:', Rover.send_pickup, \n 'total time:', Rover.total_time, 'samples remaining:', data[\"sample_count\"], \n 'samples collected:', Rover.samples_collected)\n # Get the current image from the center camera of the rover\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n Rover.img = np.asarray(image)\n\n # Return updated Rover and separate image for optional saving\n return Rover, image\n\n# Define a function to create display output given worldmap results\ndef create_output_images(Rover):\n\n # Create a scaled map for plotting and clean up obs/nav pixels a bit\n if np.max(Rover.worldmap[:,:,2]) > 0:\n nav_pix = Rover.worldmap[:,:,2] > 0\n navigable = Rover.worldmap[:,:,2] * (255 / np.mean(Rover.worldmap[nav_pix, 2]))\n else: \n navigable = Rover.worldmap[:,:,2]\n if np.max(Rover.worldmap[:,:,0]) > 0:\n obs_pix = Rover.worldmap[:,:,0] > 0\n obstacle = Rover.worldmap[:,:,0] * (255 / np.mean(Rover.worldmap[obs_pix, 0]))\n else:\n obstacle = Rover.worldmap[:,:,0]\n\n likely_nav = navigable >= obstacle\n obstacle[likely_nav] = 0\n plotmap = np.zeros_like(Rover.worldmap)\n plotmap[:, :, 0] = obstacle\n plotmap[:, :, 2] = navigable\n plotmap = plotmap.clip(0, 255)\n # Overlay obstacle and navigable terrain map with ground truth map\n map_add = 
cv2.addWeighted(plotmap, 1, Rover.ground_truth, 0.5, 0)\n\n # Check whether any rock detections are present in worldmap\n rock_world_pos = Rover.worldmap[:,:,1].nonzero()\n # If there are, we'll step through the known sample positions\n # to confirm whether detections are real\n samples_located = 0\n if rock_world_pos[0].any():\n \n rock_size = 2\n for idx in range(len(Rover.samples_pos[0])):\n test_rock_x = Rover.samples_pos[0][idx]\n test_rock_y = Rover.samples_pos[1][idx]\n rock_sample_dists = np.sqrt((test_rock_x - rock_world_pos[1])**2 + \\\n (test_rock_y - rock_world_pos[0])**2)\n # If rocks were detected within 3 meters of known sample positions\n # consider it a success and plot the location of the known\n # sample on the map\n if np.min(rock_sample_dists) < 3:\n samples_located += 1\n Rover.samples_located = samples_located\n map_add[test_rock_y-rock_size:test_rock_y+rock_size, \n test_rock_x-rock_size:test_rock_x+rock_size, :] = 255\n\n # Calculate some statistics on the map results\n # First get the total number of pixels in the navigable terrain map\n tot_nav_pix = np.float(len((plotmap[:,:,2].nonzero()[0])))\n # Next figure out how many of those correspond to ground truth pixels\n good_nav_pix = np.float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] > 0)).nonzero()[0]))\n # Next find how many do not correspond to ground truth pixels\n bad_nav_pix = np.float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] == 0)).nonzero()[0]))\n # Grab the total number of map pixels\n tot_map_pix = np.float(len((Rover.ground_truth[:,:,1].nonzero()[0])))\n # Calculate the percentage of ground truth map that has been successfully found\n perc_mapped = round(100*good_nav_pix/tot_map_pix, 1)\n # Calculate the number of good map pixel detections divided by total pixels \n # found to be navigable terrain\n if tot_nav_pix > 0:\n fidelity = round(100*good_nav_pix/(tot_nav_pix), 1)\n else:\n fidelity = 0\n # Flip the map for plotting so that the y-axis points upward in the display\n map_add = np.flipud(map_add).astype(np.float32)\n # Add some text about map and rock sample detection results\n cv2.putText(map_add,\"Time: \"+str(np.round(Rover.total_time, 1))+' s', (0, 10), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Mapped: \"+str(perc_mapped)+'%', (0, 25), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Fidelity: \"+str(fidelity)+'%', (0, 40), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\"Rocks\", (0, 55), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\" Located: \"+str(samples_located), (0, 70), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n cv2.putText(map_add,\" Collected: \"+str(Rover.samples_collected), (0, 85), \n cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)\n # Convert map and vision image to base64 strings for sending to server\n pil_img = Image.fromarray(map_add.astype(np.uint8))\n buff = BytesIO()\n pil_img.save(buff, format=\"JPEG\")\n encoded_string1 = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n \n pil_img = Image.fromarray(Rover.vision_image.astype(np.uint8))\n buff = BytesIO()\n pil_img.save(buff, format=\"JPEG\")\n encoded_string2 = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n\n return encoded_string1, encoded_string2\n\n\n\n"
] | [
[
"numpy.zeros_like",
"numpy.flipud",
"numpy.mean",
"numpy.asarray",
"numpy.float",
"numpy.max",
"numpy.min",
"numpy.sqrt",
"numpy.round",
"numpy.isfinite",
"numpy.int"
]
] |
QinchengZhang/PathologySegmentation | [
"7a2c21346739a79c33e7a7ccc081018821868eb7"
] | [
"Training/pytorch/train.py"
] | [
"# -*- coding: utf-8 -*-\n'''\nAuthor: TJUZQC\nDate: 2020-10-26 10:26:51\nLastEditors: TJUZQC\nLastEditTime: 2020-11-20 19:23:55\nDescription: None\n'''\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport yaml\nfrom torch import optim\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom evaluation import eval_net\nfrom models import ChooseModel, init_weights\nfrom utils.dataset import BasicDataset\n\nconf = yaml.load(open(os.path.join(\n sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)\ndir_img = conf['DATASET']['IMGS_DIR']\ndir_mask = conf['DATASET']['MASKS_DIR']\ndir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']\n\n\ndef train_net(net,\n device,\n epochs=5,\n batch_size=16,\n lr=0.001,\n val_percent=0.1,\n save_cp=True,\n img_scale=0.5,\n use_apex=False,\n optimizer='adam',\n classes=2,\n lr_scheduler='steplr',\n lr_scheduler_cfgs: dict = {'step_size': 10}):\n\n dataset = BasicDataset(dir_img, dir_mask, img_scale,\n train=True, classes=classes)\n n_val = int(len(dataset) * val_percent)\n n_train = len(dataset) - n_val\n train, val = random_split(dataset, [n_train, n_val])\n train_loader = DataLoader(\n train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)\n val_loader = DataLoader(val, batch_size=batch_size,\n shuffle=False, num_workers=8, pin_memory=True)\n\n writer = SummaryWriter(\n comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')\n global_step = 0\n\n logging.info(f'''Starting training:\n Epochs: {epochs}\n Batch size: {batch_size}\n Learning rate: {lr}\n Training size: {n_train}\n Validation size: {n_val}\n Checkpoints: {save_cp}\n Device: {device.type}\n Images scaling: {img_scale}\n Use apex: {use_apex}\n ''')\n optimizers = {\n 'adadelta': optim.Adadelta,\n 'adagrad': optim.Adagrad,\n 'adam': optim.Adam,\n 'adamw': optim.AdamW,\n 'sparseadam': optim.SparseAdam,\n 'adamax': optim.Adamax,\n 'asgd': optim.ASGD,\n 'lbfgs': optim.LBFGS,\n 'rmsprop': optim.RMSprop,\n 'rprop': optim.Rprop,\n 'sgd': optim.SGD,\n }\n optimizer = optimizers.get(optimizer, None)(\n net.parameters(), lr=lr, weight_decay=1e-8)\n lr_scheduler_getter = {\n 'lambdalr': torch.optim.lr_scheduler.LambdaLR,\n 'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,\n 'steplr': torch.optim.lr_scheduler.StepLR,\n 'multisteplr': torch.optim.lr_scheduler.MultiStepLR,\n 'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,\n 'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,\n 'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,\n 'cycliclr': torch.optim.lr_scheduler.CyclicLR,\n 'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,\n }\n lr_scheduler = lr_scheduler_getter.get(\n lr_scheduler.lower(), None)(optimizer, **lr_scheduler_cfgs)\n if use_apex:\n try:\n from apex import amp\n net, optimizer = amp.initialize(net, optimizer, opt_level=\"O1\")\n except ImportError as e:\n print(e)\n use_apex = False\n\n if net.n_classes > 1:\n criterion = nn.CrossEntropyLoss()\n else:\n criterion = nn.BCEWithLogitsLoss()\n\n for epoch in range(epochs):\n net.train()\n\n epoch_loss = 0\n with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:\n for batch in train_loader:\n imgs = batch['image']\n true_masks = batch['mask']\n assert imgs.shape[1] == net.n_channels, \\\n f'Network has been defined with {net.n_channels} input channels, ' \\\n f'but loaded images have {imgs.shape[1]} 
channels. Please check that ' \\\n 'the images are loaded correctly.'\n\n imgs = imgs.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n if net.n_classes > 1:\n b, c, w, h = true_masks.shape\n true_masks = true_masks.view(b, w, h)\n masks_pred = net(imgs)\n loss = criterion(masks_pred, true_masks)\n epoch_loss += loss.item()\n writer.add_scalar('Loss/train', loss.item(), global_step)\n\n pbar.set_postfix(**{'loss (batch)': loss.item()})\n\n optimizer.zero_grad()\n if not use_apex:\n loss.backward()\n else:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n optimizer.step()\n\n pbar.update(imgs.shape[0])\n global_step += 1\n dataset_len = len(dataset)\n a1 = dataset_len // 10\n a2 = dataset_len / 10\n b1 = global_step % a1\n b2 = global_step % a2\n\n if global_step % (len(dataset) // (10 * batch_size)) == 0:\n dice_coeff, pA, oA, precision, recall, f1score = eval_net(\n net, val_loader, device, n_val)\n if net.n_classes > 1:\n logging.info(\n 'Validation cross entropy: {}'.format(dice_coeff))\n writer.add_scalar('Loss/test', dice_coeff, global_step)\n\n else:\n logging.info(\n 'Validation Dice Coeff: {}'.format(dice_coeff))\n writer.add_scalar('Dice/test', dice_coeff, global_step)\n logging.info(\n 'Validation Pixel Accuracy: {}'.format(pA))\n writer.add_scalar('pA/test', pA, global_step)\n logging.info(\n 'Validation Overall Accuracy: {}'.format(oA))\n writer.add_scalar('oA/test', oA, global_step)\n logging.info(\n 'Validation Precision: {}'.format(precision))\n writer.add_scalar('precision/test',\n precision, global_step)\n logging.info(\n 'Validation Recall: {}'.format(recall))\n writer.add_scalar('recall/test', recall, global_step)\n logging.info(\n 'Validation F1-score: {}'.format(f1score))\n writer.add_scalar(\n 'F1-score/test', f1score, global_step)\n\n writer.add_images('images', imgs, global_step)\n if net.n_classes == 1:\n writer.add_images(\n 'masks/true', true_masks, global_step)\n writer.add_images(\n 'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)\n lr_scheduler.step()\n\n if save_cp:\n try:\n os.mkdir(dir_checkpoint)\n logging.info('Created checkpoint directory')\n except OSError:\n pass\n torch.save(net.state_dict(),\n os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))\n logging.info(\n f'Checkpoint {epoch + 1} saved ! 
loss (batch) = ' + str(loss.item()))\n\n writer.close()\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-n', '--network', metavar='NETWORK', type=str,\n default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')\n parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],\n help='Number of epochs', dest='epochs')\n parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],\n help='Batch size', dest='batchsize')\n parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],\n help='Learning rate', dest='lr')\n parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],\n help='Load model from a .pth file')\n parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],\n help='Downscaling factor of the images')\n parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],\n help='Percent of the data that is used as validation (0-100)')\n parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],\n help='Init weights type')\n parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],\n help='Automatic Mixed Precision')\n parser.add_argument('-o', '--optimizer', dest='optimizer',\n type=str, default=conf['OPTIMIZER'], help='Optimizer type')\n parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',\n type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(levelname)s: %(message)s')\n args = get_args()\n device = torch.device('cuda' if torch.cuda.is_available(\n ) and conf['DEVICE'].lower() == 'cuda' else 'cpu')\n logging.info(f'Using device {device}')\n\n network = args.network.lower()\n # Change here to adapt to your data\n # n_channels=3 for RGB images\n # n_classes is the number of probabilities you want to get per pixel\n # - For 1 class and background, use n_classes=1\n # - For 2 classes, use n_classes=1\n # - For N > 2 classes, use n_classes=N\n net = ChooseModel(network)(\n n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])\n assert net is not None, f'check your argument --network'\n\n logging.info(f'Network:\\n'\n f'\\t{net.n_channels} input channels\\n'\n f'\\t{net.n_classes} output channels (classes)\\n'\n f'\\t{\"Bilinear\" if net.bilinear else \"Dilated conv\"} upscaling\\n'\n f'\\tApex is {\"using\" if args.use_apex == \"True\" else \"not using\"}')\n init_weights(net, args.init_type)\n if args.load:\n net.load_state_dict(\n torch.load(args.load, map_location=device)\n )\n logging.info(f'Model loaded from {args.load}')\n\n net.to(device=device)\n # faster convolutions, but more memory\n # cudnn.benchmark = True\n\n try:\n train_net(net=net,\n epochs=args.epochs,\n batch_size=args.batchsize,\n lr=args.lr,\n device=device,\n img_scale=args.scale,\n val_percent=args.val / 100,\n use_apex=(args.use_apex == \"True\"),\n optimizer=args.optimizer.lower(),\n classes=conf['DATASET']['NUM_CLASSES'],\n lr_scheduler=args.lr_scheduler,\n lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n logging.info('Saved interrupt')\n try:\n sys.exit(0)\n 
except SystemExit:\n os._exit(0)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.utils.data.random_split",
"torch.nn.BCEWithLogitsLoss",
"torch.utils.tensorboard.SummaryWriter",
"torch.sigmoid"
]
] |
kastman/nipype | [
"15a8d6f57067494196fe639095253217a9235c3c"
] | [
"nipype/utils/misc.py"
] | [
"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Miscellaneous utility functions\n\"\"\"\nfrom __future__ import (print_function, unicode_literals, division,\n absolute_import)\nfrom builtins import next, str\n\nimport os\nimport sys\nimport re\nfrom collections import Iterator\nfrom warnings import warn\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom future.utils import raise_from\nfrom future import standard_library\ntry:\n from textwrap import indent as textwrap_indent\nexcept ImportError:\n\n def textwrap_indent(text, prefix):\n \"\"\" A textwrap.indent replacement for Python < 3.3 \"\"\"\n if not prefix:\n return text\n splittext = text.splitlines(True)\n return prefix + prefix.join(splittext)\n\n\nstandard_library.install_aliases()\n\n\ndef human_order_sorted(l):\n \"\"\"Sorts string in human order (i.e. 'stat10' will go after 'stat2')\"\"\"\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n if isinstance(text, tuple):\n text = text[0]\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n return sorted(l, key=natural_keys)\n\n\ndef trim(docstring, marker=None):\n if isinstance(docstring, bytes):\n docstring = str(docstring, 'utf-8')\n\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n for line in lines[1:]:\n # replace existing REST marker with doc level marker\n stripped = line.lstrip().strip().rstrip()\n if marker is not None and stripped and \\\n all([s == stripped[0] for s in stripped]) and \\\n stripped[0] not in [':']:\n line = line.replace(stripped[0], marker)\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)\n\n\ndef find_indices(condition):\n \"Return the indices where ravel(condition) is true\"\n res, = np.nonzero(np.ravel(condition))\n return res\n\n\ndef is_container(item):\n \"\"\"Checks if item is a container (list, tuple, dict, set)\n\n Parameters\n ----------\n item : object\n object to check for .__iter__\n\n Returns\n -------\n output : Boolean\n True if container\n False if not (eg string)\n \"\"\"\n if isinstance(item, str):\n return False\n elif hasattr(item, '__iter__'):\n return True\n else:\n return False\n\n\ndef container_to_string(cont):\n \"\"\"Convert a container to a command line string.\n\n Elements of the container are joined with a space between them,\n suitable for a command line parameter.\n\n If the container `cont` is only a sequence, like a string and not a\n container, it is returned unmodified.\n\n Parameters\n ----------\n cont : container\n A container object like a list, tuple, dict, or a set.\n\n Returns\n -------\n cont_str : string\n Container elements joined into a string.\n\n \"\"\"\n if hasattr(cont, '__iter__') and not isinstance(cont, str):\n cont = ' '.join(cont)\n return str(cont)\n\n\n# Dependency checks. 
Copied this from Nipy, with some modificiations\n# (added app as a parameter).\ndef package_check(pkg_name,\n version=None,\n app=None,\n checker=LooseVersion,\n exc_failed_import=ImportError,\n exc_failed_check=RuntimeError):\n \"\"\"Check that the minimal version of the required package is installed.\n\n Parameters\n ----------\n pkg_name : string\n Name of the required package.\n version : string, optional\n Minimal version number for required package.\n app : string, optional\n Application that is performing the check. For instance, the\n name of the tutorial being executed that depends on specific\n packages. Default is *Nipype*.\n checker : object, optional\n The class that will perform the version checking. Default is\n distutils.version.LooseVersion.\n exc_failed_import : Exception, optional\n Class of the exception to be thrown if import failed.\n exc_failed_check : Exception, optional\n Class of the exception to be thrown if version check failed.\n\n Examples\n --------\n package_check('numpy', '1.3')\n package_check('scipy', '0.7', 'tutorial1')\n\n \"\"\"\n\n if app:\n msg = '%s requires %s' % (app, pkg_name)\n else:\n msg = 'Nipype requires %s' % pkg_name\n if version:\n msg += ' with version >= %s' % (version, )\n try:\n mod = __import__(pkg_name)\n except ImportError as e:\n raise_from(exc_failed_import(msg), e)\n if not version:\n return\n try:\n have_version = mod.__version__\n except AttributeError as e:\n raise_from(\n exc_failed_check('Cannot find version for %s' % pkg_name), e)\n if checker(have_version) < checker(version):\n raise exc_failed_check(msg)\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n lower = v.lower()\n if lower in (\"yes\", \"true\", \"t\", \"1\"):\n return True\n elif lower in (\"no\", \"false\", \"n\", \"f\", \"0\"):\n return False\n else:\n raise ValueError(\"%s cannot be converted to bool\" % v)\n\n\ndef flatten(S):\n if S == []:\n return S\n if isinstance(S[0], list):\n return flatten(S[0]) + flatten(S[1:])\n return S[:1] + flatten(S[1:])\n\n\ndef unflatten(in_list, prev_structure):\n if not isinstance(in_list, Iterator):\n in_list = iter(in_list)\n\n if not isinstance(prev_structure, list):\n return next(in_list)\n\n out = []\n for item in prev_structure:\n out.append(unflatten(in_list, item))\n return out\n\n\ndef normalize_mc_params(params, source):\n \"\"\"\n Normalize a single row of motion parameters to the SPM format.\n\n SPM saves motion parameters as:\n x Right-Left (mm)\n y Anterior-Posterior (mm)\n z Superior-Inferior (mm)\n rx Pitch (rad)\n ry Roll (rad)\n rz Yaw (rad)\n \"\"\"\n if source.upper() == 'FSL':\n params = params[[3, 4, 5, 0, 1, 2]]\n elif source.upper() in ('AFNI', 'FSFAST'):\n params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]\n params[3:] = params[3:] * np.pi / 180.\n elif source.upper() == 'NIPY':\n from nipy.algorithms.registration import to_matrix44, aff2euler\n matrix = to_matrix44(params)\n params = np.zeros(6)\n params[:3] = matrix[:3, 3]\n params[-1:2:-1] = aff2euler(matrix)\n\n return params\n\n\ndef dict_diff(dold, dnew, indent=0):\n \"\"\"Helper to log what actually changed from old to new values of\n dictionaries.\n\n typical use -- log difference for hashed_inputs\n \"\"\"\n # First check inputs, since they usually are lists of tuples\n # and dicts are required.\n if isinstance(dnew, list):\n dnew = dict(dnew)\n if isinstance(dold, list):\n dold = dict(dold)\n\n # Compare against hashed_inputs\n # Keys: should rarely differ\n new_keys = set(dnew.keys())\n old_keys = 
set(dold.keys())\n\n diff = []\n if new_keys - old_keys:\n diff += [\" * keys not previously seen: %s\" % (new_keys - old_keys)]\n\n if old_keys - new_keys:\n diff += [\" * keys not presently seen: %s\" % (old_keys - new_keys)]\n\n # Add topical message\n if diff:\n diff.insert(0, \"Dictionaries had differing keys:\")\n\n diffkeys = len(diff)\n\n # Values in common keys would differ quite often,\n # so we need to join the messages together\n for k in new_keys.intersection(old_keys):\n try:\n new, old = dnew[k], dold[k]\n same = new == old\n if not same:\n # Since JSON does not discriminate between lists and\n # tuples, we might need to cast them into the same type\n # as the last resort. And lets try to be more generic\n same = old.__class__(new) == old\n except Exception:\n same = False\n if not same:\n diff += [\" * %s: %r != %r\" % (k, dnew[k], dold[k])]\n\n if len(diff) > diffkeys:\n diff.insert(diffkeys, \"Some dictionary entries had differing values:\")\n\n return textwrap_indent('\\n'.join(diff), ' ' * indent)\n\n\ndef rgetcwd(error=True):\n \"\"\"\n Robust replacement for getcwd when folders get removed\n If error==True, this is just an alias for os.getcwd()\n \"\"\"\n if error:\n return os.getcwd()\n\n try:\n cwd = os.getcwd()\n except OSError as exc:\n # Changing back to cwd is probably not necessary\n # but this makes sure there's somewhere to change to.\n cwd = os.getenv('PWD')\n if cwd is None:\n raise OSError((\n exc.errno, 'Current directory does not exist anymore, '\n 'and nipype was not able to guess it from the environment'))\n warn('Current folder does not exist, replacing with \"%s\" instead.' % cwd)\n return cwd\n"
] | [
[
"numpy.ravel",
"numpy.asarray",
"numpy.zeros"
]
] |
jscsmk/CenterNet | [
"d7c643bba2b373c15abfa3d25ffd5304a313fa49"
] | [
"src/lib/models/networks/msra_resnet.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\nBN_MOMENTUM = 0.1\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, heads, head_conv, **kwargs):\n self.inplanes = 64\n self.deconv_with_bias = False\n self.heads = heads\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = 
self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n 3,\n [256, 256, 256],\n [4, 4, 4],\n )\n # self.final_layer = []\n\n for head in sorted(self.heads):\n num_output = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(256, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, num_output, \n kernel_size=1, stride=1, padding=0))\n else:\n fc = nn.Conv2d(\n in_channels=256,\n out_channels=num_output,\n kernel_size=1,\n stride=1,\n padding=0)\n self.__setattr__(head, fc)\n\n # self.final_layer = nn.ModuleList(self.final_layer)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = x.float()\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n ret = {}\n for head in self.heads:\n ret[head] = self.__getattr__(head)(x)\n return [ret]\n\n def init_weights(self, num_layers, pretrained=True):\n if pretrained:\n # print('=> init resnet deconv weights from normal distribution')\n for _, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n # print('=> init {}.weight as normal(0, 0.001)'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n # print('=> init {}.weight as 1'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n # print('=> init final conv weights from normal distribution')\n for head in self.heads:\n final_layer = self.__getattr__(head)\n for i, m in enumerate(final_layer.modules()):\n if isinstance(m, nn.Conv2d):\n # 
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n # print('=> init {}.weight as normal(0, 0.001)'.format(name))\n # print('=> init {}.bias as 0'.format(name))\n if m.weight.shape[0] == self.heads[head]:\n if 'hm' in head:\n nn.init.constant_(m.bias, -2.19)\n else:\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n #pretrained_state_dict = torch.load(pretrained)\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n else:\n print('=> imagenet pretrained model dose not exist')\n print('=> please download it first')\n raise ValueError('imagenet pretrained model does not exist')\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net(num_layers, heads, head_conv):\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, heads, head_conv=head_conv)\n #model.init_weights(num_layers, pretrained=True)\n return model\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d"
]
] |
1chimaruGin/Oject_classifier | [
"d27ca8f47d2d0af107582c25a0756dda15361c2e"
] | [
"objifier/data_loader.py"
] | [
"import torch\nfrom torchvision import datasets, transforms\nimport os\n\ntransform = {\n \"train\": transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]\n ),\n ]\n ),\n \"val\": transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]\n ),\n ]\n ),\n}\n\n\ndef get_loader(root, batch_size, num_workers):\n\n dataset = {\n x: datasets.ImageFolder(os.path.join(root, x), transform=transform[x])\n for x in [\"train\", \"val\"]\n }\n\n data_loader = {\n x: torch.utils.data.DataLoader(\n dataset[x], batch_size=batch_size, shuffle=(x == \"train\"),\n num_workers=num_workers,\n )\n for x in [\"train\", \"val\"]\n }\n\n dataset_size = {x: len(dataset[x]) for x in [\"train\", \"val\"]}\n\n return data_loader, dataset_size\n\n\ndef CIFAR10(batch_size, root=\"data/\"):\n dataset = {\n x: datasets.CIFAR10(\n root, train=(x == \"train\"), download=True, transform=transform[x]\n )\n for x in [\"train\", \"val\"]\n }\n\n data_loader = {\n x: torch.utils.data.DataLoader(\n dataset[x], batch_size=batch_size, shuffle=(x == \"train\")\n )\n for x in [\"train\", \"val\"]\n }\n\n dataset_size = {x: len(dataset[x]) for x in [\"train\", \"val\"]}\n\n return data_loader, dataset_size\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
alexweav/Deep-Learning | [
"f245708e40f36c4734ea0d4a7e6587624e4b116f"
] | [
"LearnyMcLearnface/Layers/AffineLayer.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 06 14:54:11 2016\n\n@author: Alexander Weaver\n\"\"\"\n\n\"\"\"\nPerforms an affine (fully connected) operation on its input\nAn affine layer with out_dim neurons takes a data array of size Nx(in_dim), x\nand returns a linearly transformed Nx(out_dim) data array\nThe transformation result, z, is determined by a (in_dim)x(out_dim) weight matrix, W, and\na (out_dim) bias vector, b. The transformation of any one data point (one row in x) is given by:\n z = Wx + b\nConstructing this object initializes the parameters following a gaussian random distribution with \nstandard deviation given by weight_scale. \nForward propagating this object performs the affine transformation on the given array, X.\nBackpropagating this object returns the derivatives of x, W, and b with respect to the final output of\nthe network.\n\"\"\"\n\nimport numpy as np\n\nclass AffineLayer(object):\n \n def __init__(self, in_dim, out_dim, weight_scale, data_type=np.float32):\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.weight_scale = weight_scale\n self.data_type = data_type\n self.W = np.random.randn(in_dim, out_dim) * weight_scale\n self.W = self.W.astype(self.data_type)\n self.b = np.zeros(out_dim)\n self.b = self.b.astype(self.data_type)\n \n def forward(self, x, W=None, b=None):\n if W is None:\n W = self.W\n if b is None:\n b = self.b\n N = x.shape[0]\n reshaped_x = x.reshape(N, np.prod(x.shape[1:]))\n out = reshaped_x.dot(W) + b\n self.cache_x = x\n return out\n \n def backward(self, dout):\n x = self.cache_x\n N = x.shape[0] \n reshaped_x = x.reshape(N, np.prod(x.shape[1:]))\n dx = dout.dot(np.transpose(self.W)).reshape(x.shape)\n self.dW = np.transpose(reshaped_x).dot(dout)\n self.db = np.sum(dout, axis=0)\n return dx"
] | [
[
"numpy.sum",
"numpy.transpose",
"numpy.zeros",
"numpy.random.randn",
"numpy.prod"
]
] |
Arushacked/tensorflow | [
"9abd61ae0b2d239d3060cdd3d46b54a105159828",
"9abd61ae0b2d239d3060cdd3d46b54a105159828"
] | [
"tensorflow/python/distribute/collective_all_reduce_strategy.py",
"tensorflow/python/ops/clip_ops.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport weakref\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# TODO(yuefengz): support in-graph replication.\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for synchronous training on multiple workers.\n\n This strategy implements synchronous distributed training across multiple\n workers, each with potentially multiple GPUs. Similar to\n `tf.distribute.MirroredStrategy`, it creates copies of all variables in the\n model on each device across all workers.\n\n It uses CollectiveOps's implementation of multi-worker all-reduce to\n to keep variables in sync. A collective op is a single op in the\n TensorFlow graph which can automatically choose an all-reduce algorithm in\n the TensorFlow runtime according to hardware, network topology and tensor\n sizes.\n\n By default it uses all local GPUs or CPU for single-worker training.\n\n When 'TF_CONFIG' environment variable is set, it parses cluster_spec,\n task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy\n which mirrored models on GPUs of all machines in a cluster. In the current\n implementation, it uses all GPUs in a cluster and it assumes all workers have\n the same number of GPUs.\n\n You can also pass a `distribute.cluster_resolver.ClusterResolver` instance\n when instantiating the strategy. The task_type, task_id etc. 
will be parsed\n from the resolver instance instead of from the `TF_CONFIG` env var.\n\n It supports both eager mode and graph mode. However, for eager mode, it has to\n set up the eager context in its constructor and therefore all ops in eager\n mode have to run after the strategy object is created.\n\n \"\"\"\n # TODO(anjalisridhar): Update our guides with examples showing how we can use\n # the cluster_resolver argument.\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO,\n cluster_resolver=None):\n \"\"\"Creates the strategy.\n\n Args:\n communication: optional Enum of type\n `distribute.experimental.CollectiveCommunication`. This provides a way\n for the user to override the choice of collective op communication.\n Possible values include `AUTO`, `RING`, and `NCCL`.\n cluster_resolver: optional `distribute.cluster_resolver.ClusterResolver`\n object. The default ClusterResolver that is used is the\n TFConfigClusterResolver which is instantiated from the TF_CONFIG env\n var.\n \"\"\"\n # TODO(b/150151677): consider move communication to CollectiveHints.\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication,\n cluster_resolver=cluster_resolver))\n\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended._num_gpus_per_worker)\n\n @classmethod\n def _from_local_devices(\n cls,\n devices,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n def scope(self): # pylint: disable=useless-super-delegation\n \"\"\"Returns a context manager selecting this Strategy as current.\n\n Inside a `with strategy.scope():` code block, this thread\n will use a variable creator set by `strategy`, and will\n enter its \"cross-replica context\".\n\n In `MultiWorkerMirroredStrategy`, all variables created inside\n `strategy.scope() will be mirrored on all replicas of each worker.\n Moreover, it also sets a default device scope so that ops without\n specified devices will end up on the correct worker.\n\n Returns:\n A context manager to use for creating variables with this strategy.\n \"\"\"\n return super(CollectiveAllReduceStrategy, self).scope()\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"]) # pylint: disable=missing-docstring\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO,\n cluster_resolver=None):\n \"\"\"Initializes the object.\"\"\"\n super(CollectiveAllReduceStrategyV1, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication,\n cluster_resolver=cluster_resolver))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n 
distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_gpu_per_worker\").set(self.extended._num_gpus_per_worker)\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n communication,\n cluster_resolver):\n self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n assert isinstance(\n communication,\n cross_device_ops_lib.CollectiveCommunication)\n self._communication = communication\n self._initialize_strategy(self._cluster_resolver)\n self._cfer_fn_cache = weakref.WeakKeyDictionary()\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local(self, cluster_resolver, devices=None):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. \"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if devices:\n local_devices = devices\n else:\n if num_gpus:\n local_devices = tuple(\"/device:GPU:%d\" % i for i in range(num_gpus))\n else:\n local_devices = (\"/device:CPU:0\",)\n\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices),\n collective_keys=self._collective_keys,\n communication=self._communication)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys,\n communication=cross_device_ops_lib.CollectiveCommunication.RING,\n )\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. 
Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\"Single-worker MultiWorkerMirroredStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices, self._communication)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = config_pb2.ConfigProto()\n config_proto = self._update_config_proto(config_proto)\n\n if hasattr(cluster_resolver, \"port\"):\n port = cluster_resolver.port\n else:\n port = 0\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\",\n port=port)\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. It\n # assumes all workers have the same number of GPUs. 
We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"%s/device:GPU:%d\" % (self._worker_device, i)\n for i in range(num_gpus))\n else:\n local_devices = (self._worker_device,)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices) * self._num_workers,\n collective_keys=self._collective_keys,\n communication=self._communication)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys,\n communication=cross_device_ops_lib.CollectiveCommunication.RING,\n )\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n host_device = device_util.get_host_for_device(self._worker_device)\n self._input_workers = input_lib.InputWorkers(\n [(host_device, self.worker_devices)])\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, \"\n \"task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type,\n task_id, self._num_workers, local_devices,\n self._communication)\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n if replica_id == 0: # First replica on each worker.\n assert device is not None\n assert primary_var is None\n\n def initial_value_fn(): # pylint: disable=g-missing-docstring\n # Only the first device participates in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([device])\n group_size = self._num_workers\n collective_instance_key = (\n self._collective_keys.get_variable_instance_key())\n\n with ops.device(device):\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value = initial_value()\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(\n initial_value, dtype=kwargs.get(\"dtype\", None))\n\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(initial_value.shape,\n initial_value.dtype,\n group_size, group_key,\n collective_instance_key)\n return initial_value\n\n return initial_value_fn\n else:\n return super(CollectiveAllReduceExtended,\n self)._get_variable_creator_initial_value(\n replica_id=replica_id,\n device=device,\n primary_var=primary_var,\n **kwargs)\n\n def _make_input_context(self):\n if self._cluster_spec is None:\n input_pipeline_id = 0\n else:\n 
input_pipeline_id = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=input_pipeline_id,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset, options):\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _experimental_distribute_datasets_from_function(self, dataset_fn,\n options):\n input_context = self._make_input_context()\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn=dataset_fn,\n input_workers=self._input_workers,\n input_contexts=[input_context],\n strategy=self._container_strategy())\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. 
Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if (not ops.executing_eagerly_outside_functions() and\n self._communication ==\n cross_device_ops_lib.CollectiveCommunication.NCCL):\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _get_cross_device_ops(self, value):\n # CollectiveAllReduce works on a predefined set of devices. In most cases\n # they should be the compute devices, but certain use cases may reduce host\n # tensors as well (e.g. early stopping). We infer the cross_device_ops to\n # use based on the number of devices, since inputs don't always have device\n # annotations. The compute devices one is preferred since we can potentially\n # leverage NCCL.\n if isinstance(value, values.DistributedValues):\n num_devices = len(value._values) # pylint: disable=protected-access\n else:\n num_devices = 1\n if num_devices == len(self.worker_devices):\n return self._cross_device_ops\n else:\n return self._host_cross_device_ops\n\n def _reduce_to(self, reduce_op, value, destinations, experimental_hints):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. 
For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, len(self.worker_devices))\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n experimental_hints=experimental_hints)\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication ==\n cross_device_ops_lib.CollectiveCommunication.NCCL) and\n self._num_gpus_per_worker == 0):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return self._num_workers > 1\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Operations for clipping (gradient, weight) tensors to min/max values.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"clip_by_value\")\[email protected]_dispatch_support\ndef clip_by_value(t, clip_value_min, clip_value_max,\n name=None):\n \"\"\"Clips tensor values to a specified min and max.\n\n Given a tensor `t`, this operation returns a tensor of the same type and\n shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\n Any values less than `clip_value_min` are set to `clip_value_min`. Any values\n greater than `clip_value_max` are set to `clip_value_max`.\n\n Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for\n correct results.\n\n For example:\n\n Basic usage passes a scalar as the min and max value.\n\n >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])\n >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)\n >>> t2.numpy()\n array([[-1., -1., 0.],\n [ 0., 1., 1.]], dtype=float32)\n\n The min and max can be the same size as `t`, or broadcastable to that size.\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[2],[1]]\n >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n >>> t3.numpy()\n array([[ 2., 2., 10.],\n [ 1., 1., 10.]], dtype=float32)\n\n Broadcasting fails, intentionally, if you would expand the dimensions of `t`\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[[2, 1]]] # Has a third axis\n >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]\n\n It throws a `TypeError` if you try to clip an `int` to a `float` value\n (`tf.cast` the input to `float` first).\n\n >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)\n >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)\n Traceback (most recent call last):\n ...\n TypeError: Cannot convert ...\n\n\n Args:\n t: A `Tensor` or `IndexedSlices`.\n clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n clip_value_max: The minimum value to clip to. 
A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array\n broadcasting that would make the returned tensor larger than the input.\n TypeError: If dtype of the input is `int32` and dtype of\n the `clip_value_min` or `clip_value_max` is `float32`\n \"\"\"\n with ops.name_scope(name, \"clip_by_value\",\n [t, clip_value_min, clip_value_max]) as name:\n values = ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t, name=\"t\")\n\n # Go through list of tensors, for each value in each tensor clip\n t_min = math_ops.minimum(values, clip_value_max)\n # Assert that the shape is compatible with the initial shape,\n # to prevent unintentional broadcasting.\n _ = values.shape.merge_with(t_min.shape)\n\n t_max = math_ops.maximum(t_min, clip_value_min, name=name)\n _ = values.shape.merge_with(t_max.shape)\n\n if isinstance(t, ops.IndexedSlices):\n t_max = ops.IndexedSlices(t_max, t.indices, t.dense_shape)\n\n return t_max\n # TODO(scottzhu): switch to use new implementation in 2 weeks.\n # return gen_math_ops.clip_by_value(\n # t, clip_value_min, clip_value_max, name=name)\n\n\n# TODO(scottzhu): switch to use new implementation in 2 weeks.\n# @ops.RegisterGradient(\"ClipByValue\")\ndef _clip_by_value_grad(op, grad):\n \"\"\"Returns grad of clip_by_value.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n z = op.inputs[2]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n sz = array_ops.shape(z)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xymask = math_ops.less(x, y)\n xzmask = math_ops.greater(x, z)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n rx, rz = gen_array_ops.broadcast_gradient_args(sx, sz)\n xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)\n ygrad = array_ops.where(xymask, grad, zeros)\n zgrad = array_ops.where(xzmask, grad, zeros)\n gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)\n return (gx, gy, gz)\n\n\n@tf_export(\"clip_by_norm\")\[email protected]_dispatch_support\ndef clip_by_norm(t, clip_norm, axes=None, name=None):\n \"\"\"Clips tensor values to a maximum L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,\n along the dimensions given in `axes`. Specifically, in the default case\n where all dimensions are used for calculation, if the L2-norm of `t` is\n already less than or equal to `clip_norm`, then `t` is not modified. If\n the L2-norm is greater than `clip_norm`, then this operation returns a\n tensor of the same type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm(t)`\n\n In this case, the L2-norm of the output tensor is `clip_norm`.\n\n As another example, if `t` is a matrix and `axes == [1]`, then each row\n of the output will have L2-norm less than or equal to `clip_norm`. 
If\n `axes == [0]` instead, each column of the output will be clipped.\n\n Code example:\n\n >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32)\n >>> tf.clip_by_norm(some_nums, 2.0).numpy()\n array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]],\n dtype=float32)\n\n This operation is typically used to clip gradients before applying them with\n an optimizer. Most gradient data is a collection of different shaped tensors\n for different parts of the model. Thus, this is a common usage:\n\n ```\n # Get your gradients after training\n loss_value, grads = grad(model, features, labels)\n\n # Apply some clipping\n grads = [tf.clip_by_norm(g, norm)\n for g in grads]\n\n # Continue on with training\n optimizer.apply_gradients(grads)\n ```\n\n Args:\n t: A `Tensor` or `IndexedSlices`. This must be a floating point type.\n clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also\n floating point\n axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions\n to use for computing the L2-norm. If `None` (the default), uses all\n dimensions.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n ValueError: If the clip_norm tensor is not a 0-D scalar tensor.\n TypeError: If dtype of the input is not a floating point or\n complex type.\n \"\"\"\n with ops.name_scope(name, \"clip_by_norm\", [t, clip_norm]) as name:\n values = ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t, name=\"t\")\n\n # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm\n l2sum = math_ops.reduce_sum(values * values, axes, keepdims=True)\n pred = l2sum > 0\n # Two-tap tf.where trick to bypass NaN gradients\n l2sum_safe = array_ops.where(pred, l2sum, array_ops.ones_like(l2sum))\n l2norm = array_ops.where(pred, math_ops.sqrt(l2sum_safe), l2sum)\n intermediate = values * clip_norm\n # Assert that the shape is compatible with the initial shape,\n # to prevent unintentional broadcasting.\n _ = values.shape.merge_with(intermediate.shape)\n values_clip = array_ops.identity(\n intermediate / math_ops.maximum(l2norm, clip_norm), name=name)\n\n if isinstance(t, ops.IndexedSlices):\n return ops.IndexedSlices(values_clip, t.indices, t.dense_shape)\n\n return values_clip\n\n\n@tf_export(\"linalg.global_norm\", v1=[\"linalg.global_norm\", \"global_norm\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"global_norm\")\ndef global_norm(t_list, name=None):\n \"\"\"Computes the global norm of multiple tensors.\n\n Given a tuple or list of tensors `t_list`, this operation returns the\n global norm of the elements in all tensors in `t_list`. 
The global norm is\n computed as:\n\n `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`\n\n Any entries in `t_list` that are of type None are ignored.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n name: A name for the operation (optional).\n\n Returns:\n A 0-D (scalar) `Tensor` of type `float`.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n \"\"\"\n if (not isinstance(t_list, collections_abc.Sequence) or\n isinstance(t_list, six.string_types)):\n raise TypeError(\"t_list should be a sequence\")\n t_list = list(t_list)\n with ops.name_scope(name, \"global_norm\", t_list) as name:\n values = [\n ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t,\n name=\"t_%d\" % i)\n if t is not None else t\n for i, t in enumerate(t_list)]\n half_squared_norms = []\n for v in values:\n if v is not None:\n with ops.colocate_with(v):\n half_squared_norms.append(gen_nn_ops.l2_loss(v))\n\n half_squared_norm = math_ops.reduce_sum(array_ops.stack(half_squared_norms))\n\n norm = math_ops.sqrt(\n half_squared_norm *\n constant_op.constant(2.0, dtype=half_squared_norm.dtype),\n name=\"global_norm\")\n\n return norm\n\n\n@tf_export(\"clip_by_global_norm\")\[email protected]_dispatch_support\ndef clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):\n \"\"\"Clips values of multiple tensors by the ratio of the sum of their norms.\n\n Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,\n this operation returns a list of clipped tensors `list_clipped`\n and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,\n if you've already computed the global norm for `t_list`, you can specify\n the global norm with `use_norm`.\n\n To perform the clipping, the values `t_list[i]` are set to:\n\n t_list[i] * clip_norm / max(global_norm, clip_norm)\n\n where:\n\n global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))\n\n If `clip_norm > global_norm` then the entries in `t_list` remain as they are,\n otherwise they're all shrunk by the global ratio.\n\n If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`\n to signal that an error occurred.\n\n Any of the entries of `t_list` that are of type `None` are ignored.\n\n This is the correct way to perform gradient clipping (Pascanu et al., 2012).\n\n However, it is slower than `clip_by_norm()` because all the parameters must be\n ready before the clipping operation can be performed.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.\n use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global\n norm to use. 
If not provided, `global_norm()` is used to compute the norm.\n name: A name for the operation (optional).\n\n Returns:\n list_clipped: A list of `Tensors` of the same type as `list_t`.\n global_norm: A 0-D (scalar) `Tensor` representing the global norm.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n\n References:\n On the difficulty of training Recurrent Neural Networks:\n [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html)\n ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))\n \"\"\"\n if (not isinstance(t_list, collections_abc.Sequence) or\n isinstance(t_list, six.string_types)):\n raise TypeError(\"t_list should be a sequence\")\n t_list = list(t_list)\n if use_norm is None:\n use_norm = global_norm(t_list, name)\n\n with ops.name_scope(name, \"clip_by_global_norm\",\n t_list + [clip_norm]) as name:\n # Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm\n scale_for_finite = clip_norm * math_ops.minimum(\n 1.0 / use_norm,\n constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)\n # If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,\n # this will make scale NaN.\n scale = scale_for_finite + (use_norm - use_norm)\n\n values = [\n ops.convert_to_tensor(\n t.values if isinstance(t, ops.IndexedSlices) else t,\n name=\"t_%d\" % i)\n if t is not None else t\n for i, t in enumerate(t_list)]\n\n values_clipped = []\n for i, v in enumerate(values):\n if v is None:\n values_clipped.append(None)\n else:\n with ops.colocate_with(v):\n values_clipped.append(\n array_ops.identity(v * scale, name=\"%s_%d\" % (name, i)))\n\n list_clipped = [\n ops.IndexedSlices(c_v, t.indices, t.dense_shape)\n if isinstance(t, ops.IndexedSlices)\n else c_v\n for (c_v, t) in zip(values_clipped, t_list)]\n\n return list_clipped, use_norm\n\n\[email protected](\n date=None,\n instructions=\"clip_by_average_norm is deprecated in TensorFlow 2.0. Please \"\n \"use clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32), name) \"\n \"instead.\")\n@tf_export(v1=[\"clip_by_average_norm\"])\[email protected]_dispatch_support\ndef clip_by_average_norm(t, clip_norm, name=None):\n \"\"\"Clips tensor values to a maximum average L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its average L2-norm is less than or equal to\n `clip_norm`. Specifically, if the average L2-norm is already less than or\n equal to `clip_norm`, then `t` is not modified. If the average L2-norm is\n greater than `clip_norm`, then this operation returns a tensor of the same\n type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm_avg(t)`\n\n In this case, the average L2-norm of the output tensor is `clip_norm`.\n\n This operation is typically used to clip gradients before applying them with\n an optimizer.\n\n Args:\n t: A `Tensor`.\n clip_norm: A 0-D (scalar) `Tensor` > 0. 
A maximum clipping value.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"clip_by_average_norm\", [t, clip_norm]) as name:\n t = ops.convert_to_tensor(t, name=\"t\")\n\n # Calculate L2-norm per element, clip elements by ratio of clip_norm to\n # L2-norm per element\n n_element = math_ops.cast(array_ops.size(t), dtypes.float32)\n l2norm_inv = math_ops.rsqrt(\n math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))\n tclip = array_ops.identity(\n t * clip_norm * math_ops.minimum(\n l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),\n name=name)\n\n return tclip\n"
] | [
[
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.framework.ops.device",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver"
],
[
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_nn_ops.l2_loss",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.gen_array_ops.broadcast_gradient_args"
]
] |
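The clip_ops entry above catalogues the public clipping APIs `tf.clip_by_value`, `tf.clip_by_norm` and `tf.clip_by_global_norm`. A minimal usage sketch of those calls (assuming TensorFlow 2.x eager execution; the tensor values are illustrative only):

import tensorflow as tf

# Element-wise clipping: values outside [-1, 1] are saturated to the bounds.
t = tf.constant([[-10.0, -1.0, 0.0], [0.0, 2.0, 10.0]])
clipped = tf.clip_by_value(t, clip_value_min=-1.0, clip_value_max=1.0)

# L2-norm clipping: rescales the tensor so its L2-norm is at most 2.0.
norm_clipped = tf.clip_by_norm(tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]]), 2.0)

# Global-norm clipping over a list of gradients, the usual form of gradient clipping.
grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]
clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

# When applying clipped gradients with a Keras optimizer, pair them with their
# variables, e.g. optimizer.apply_gradients(zip(clipped_grads, variables)).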
hirakjyoti08/jina | [
"437943dd2dab87e22b0662b2081f13250918ec01"
] | [
"tests/unit/clients/python/test_on_err.py"
] | [
"from typing import Optional\n\nimport aiohttp\nimport grpc\n\nfrom jina.excepts import BadClientCallback\nfrom jina import Flow, Client\n\nimport numpy as np\nimport pytest\nfrom docarray import DocumentArray\nfrom docarray.document.generators import from_ndarray\n\n\ndef validate(x):\n raise NotImplementedError\n\n\[email protected](\n reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'\n)\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_error(protocol):\n # In this particular test, when you write two tests in a row, you are testing the following case:\n #\n # You are testing exception in client's callback, not error in client's request generator\n # 1. The exception breaks the `async for req in stub.Call(req_iter)` on the client\n # 2. Server probably has something hold in the stream\n # 3. Restart the client, keep server untouched.\n # 4. Now, server stucks (because it considers the last connection wasn't end yet)\n def validate(x):\n raise NotImplementedError\n\n with Flow(protocol=protocol).add() as f:\n t = 0\n try:\n f.index(\n from_ndarray(np.random.random([5, 4])),\n on_done=validate,\n continue_on_error=False,\n )\n except BadClientCallback:\n # bad client callback will break the `async for req in stub.Call(req_iter)`\n t = 1\n # now query the gateway again, make sure gateway's channel is still usable\n f.index(\n from_ndarray(np.random.random([5, 4])),\n on_done=validate,\n continue_on_error=True,\n )\n assert t == 1\n\n\[email protected](\n 'protocol,exception',\n [\n ('websocket', aiohttp.ClientError),\n ('grpc', grpc.aio._call.AioRpcError),\n ('http', aiohttp.ClientError),\n ],\n)\ndef test_client_on_error_call(protocol, exception):\n\n with pytest.raises(exception):\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n )\n\n\[email protected](\n 'protocol,exception',\n [\n ('websocket', aiohttp.client_exceptions.ClientConnectorError),\n ('grpc', grpc.aio._call.AioRpcError),\n ('http', aiohttp.client_exceptions.ClientConnectorError),\n ],\n)\ndef test_client_on_error_raise_exception(protocol, exception):\n class OnError:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response, exception_param: Optional[Exception] = None):\n self.is_called = True\n assert type(exception_param) == exception\n\n on_error = OnError()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n on_error=on_error,\n )\n\n assert on_error.is_called\n\n\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_error_deprecation(protocol):\n class OnError:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response): # this is deprecated\n self.is_called = True\n\n on_error = OnError()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n inputs=DocumentArray.empty(10),\n on_error=on_error,\n )\n\n assert on_error.is_called\n\n\[email protected]('protocol', ['websocket', 'grpc', 'http'])\ndef test_client_on_always_after_exception(protocol):\n class OnAlways:\n def __init__(self):\n self.is_called = False\n\n def __call__(self, response):\n self.is_called = True\n\n on_always = OnAlways()\n\n Client(host='0.0.0.0', protocol=protocol, port=12345).post(\n '/blah',\n 
inputs=DocumentArray.empty(10),\n on_always=on_always,\n )\n\n assert on_always.is_called\n"
] | [
[
"numpy.random.random"
]
] |
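For reference, the error-callback pattern exercised in the test file above can be sketched as follows (host, port and endpoint are placeholders taken from the test; nothing is expected to listen on the port, so the request fails and the callback receives the exception):

from docarray import DocumentArray
from jina import Client


def on_error(response, exception=None):
    # Invoked when the request fails; the second argument carries the underlying error.
    print('request failed:', exception)


# The call fails to connect, and the client routes the failure to on_error
# instead of raising, mirroring test_client_on_error_raise_exception above.
Client(host='0.0.0.0', protocol='grpc', port=12345).post(
    '/blah',
    inputs=DocumentArray.empty(10),
    on_error=on_error,
)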
thundergolfer/catboost | [
"60942dee40f1407466d0b1e486f0a1d445e6aa91"
] | [
"catboost/pytest/test.py"
] | [
"from itertools import permutations\nimport yatest.common\nfrom yatest.common import ExecutionTimeoutError, ExecutionError\nimport pytest\nimport os\nimport filecmp\nimport numpy as np\nimport pandas as pd\nimport timeit\nimport json\n\nimport catboost\n\nfrom catboost_pytest_lib import (\n apply_catboost,\n compare_evals_with_precision,\n compare_fit_evals_with_precision,\n compare_evals,\n data_file,\n execute_catboost_fit,\n execute_dist_train,\n format_crossvalidation,\n generate_concatenated_random_labeled_dataset,\n get_catboost_binary_path,\n get_limited_precision_dsv_diff_tool,\n local_canonical_file,\n permute_dataset_columns,\n remove_time_from_json,\n)\n\nCATBOOST_PATH = yatest.common.binary_path(\"catboost/app/catboost\")\n\nBOOSTING_TYPE = ['Ordered', 'Plain']\nGROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']\nBOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),\n ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]\n\nPREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']\n\nBINCLASS_LOSSES = ['Logloss', 'CrossEntropy']\nMULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']\nCLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES\nREGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'RMSEWithUncertainty', 'LogLinQuantile', 'Lq']\nPAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']\nGROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']\nRANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES\nALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES\n\nSAMPLING_UNIT_TYPES = ['Object', 'Group']\n\nOVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']\n\nLOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',\n 'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']\n\nLEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']\n\n# test both parallel in and non-parallel modes\n# default block size (5000000) is too big to run in parallel on these tests\nSCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']\nSCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']\n\nSEPARATOR_TYPES = [\n 'ByDelimiter',\n 'BySense',\n]\n\nTEXT_FEATURE_ESTIMATORS = [\n 'BoW',\n 'NaiveBayes',\n 'BM25',\n 'BoW,NaiveBayes',\n 'BoW,NaiveBayes,BM25'\n]\n\nROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE = data_file('rotten_tomatoes_small_with_embeddings', 'train')\nROTTEN_TOMATOES_WITH_EMBEDDINGS_CD_BINCLASS_FILE = data_file(\n 'rotten_tomatoes_small_with_embeddings',\n 'cd_binclass'\n)\nROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE = data_file(\n 'rotten_tomatoes_small_with_embeddings',\n 'cd_binclass_only_embeddings'\n)\n\n\ndef diff_tool(threshold=None):\n return get_limited_precision_dsv_diff_tool(threshold, True)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_multiregression(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiRMSE',\n '-f', data_file('multiregression', 'train'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--cv-rand', '42',\n '--eval-file', output_eval_path,\n )\n 
execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--boost-from-average', '0'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiregression_single(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiRMSE',\n pool='multiregression',\n train='train',\n test='test',\n cd='train_single.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--boost-from-average', '0'))))]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('n_trees', [100, 500])\ndef test_multiregression(boosting_type, grow_policy, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\[email protected]('target_count', [1, 2, 3])\ndef test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):\n np.random.seed(42)\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = 
lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n evals = []\n for perm in permutations(range(target_count)):\n inv_perm = range(target_count)\n for i, j in enumerate(perm):\n inv_perm[j] = i\n\n np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test[:, perm], x_test]), delimiter='\\t')\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1)).reshape((-1, target_count))\n evals.append(eval[:, inv_perm])\n\n for eva in evals:\n assert np.allclose(eva, evals[0])\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [10, 100, 1000])\[email protected]('target_count', [1, 2, 3])\ndef test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):\n np.random.seed(42)\n ERR_PERC = 0.1\n\n X_COUNT = 200\n X_DIM = 5\n\n x = np.random.randn(X_COUNT, X_DIM)\n y = np.stack([\n np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))\n for i in range(target_count)\n ], axis=1)\n\n test_size = X_COUNT // 2\n x_test, y_test = x[:test_size], y[:test_size]\n x_train, y_train = x[test_size:], y[test_size:]\n\n train_file = yatest.common.test_output_path('train')\n test_file = yatest.common.test_output_path('test')\n np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\\t')\n np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\\t')\n\n get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))\n get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))\n get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))\n\n with open(get_cd_path(target_count), 'w') as cd:\n cd.write(''.join(('{}\\tTarget\\tm\\n'.format(i) for i in range(target_count))))\n\n fit_cmd = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(target_count),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(target_count),\n '--eval-file', get_eval_path(target_count),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', fit_cmd)\n\n for i in range(target_count):\n with open(get_cd_path(i), 'w') as cd:\n cd.write(''.join((('{}\\tTarget\\n'.format(j) if j == i else '{}\\tAuxiliary\\n'.format(j)) for j in range(target_count))))\n\n rmse_fit_cmd = (\n '--loss-function', 'RMSE',\n '--boosting-type', boosting_type,\n '-f', train_file,\n '-t', test_file,\n '--column-description', get_cd_path(i),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', get_model_path(i),\n '--eval-file', get_eval_path(i),\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', rmse_fit_cmd)\n\n multirmse_eval = np.loadtxt(get_eval_path(target_count), delimiter='\\t', skiprows=1, usecols=range(1, target_count + 1))\n rmse_eval = 
np.stack([\n np.loadtxt(get_eval_path(i), delimiter='\\t', skiprows=1, usecols=1)\n for i in range(target_count)\n ], axis=1)\n\n # cannot compare approxes because they are very different due to different boosting algorithms\n multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)\n rmse_loss = np.mean((rmse_eval - y_test)**2)\n\n assert rmse_loss.shape == multi_rmse_loss.shape\n assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\ndef test_multiregression_single(boosting_type, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_calc_path = yatest.common.test_output_path('test.calc')\n output_metric_path = yatest.common.test_output_path('test.metric')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_calc_path\n )\n yatest.common.execute(cmd_calc)\n\n cmd_metric = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--column-description', data_file('multiregression', 'train_single.cd'),\n '-T', '4',\n '-m', output_model_path,\n '--input-path', data_file('multiregression', 'test'),\n '-o', output_metric_path,\n '--metrics', 'MultiRMSE'\n )\n yatest.common.execute(cmd_metric)\n return [\n local_canonical_file(output_eval_path),\n local_canonical_file(output_calc_path),\n local_canonical_file(output_metric_path)\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('n_trees', [100, 500])\ndef test_multiregression_with_cat_features(boosting_type, n_trees):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd_fit = (\n '--loss-function', 'MultiRMSE',\n '--boosting-type', boosting_type,\n '-f', data_file('multiregression', 'train'),\n '-t', data_file('multiregression', 'test'),\n '--column-description', data_file('multiregression', 'train_with_cat_features.cd'),\n '-i', '{}'.format(n_trees),\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', 
output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):\n newton_eval_path = yatest.common.test_output_path('newton.eval')\n gradient_eval_path = yatest.common.test_output_path('gradient.eval')\n\n def run_catboost(eval_path, leaf_estimation_method):\n cmd = [\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--leaf-estimation-method', leaf_estimation_method,\n '-i', '20',\n '-T', '4',\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(newton_eval_path, 'Newton')\n run_catboost(gradient_eval_path, 'Gradient')\n assert filecmp.cmp(newton_eval_path, gradient_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_pool_with_QueryId(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.query_id'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--grow-policy', grow_policy\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_averagegain(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n 
'--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\ndef test_queryauc(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'QueryAUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\ndef test_queryaverage(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
\n\n\n@pytest.mark.parametrize('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])\n@pytest.mark.parametrize('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])\ndef test_stochastic_filter(sigma, num_estimations):\n model_path = yatest.common.test_output_path('model.bin')\n cd_path = yatest.common.test_output_path('pool.cd')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n\n prng = np.random.RandomState(seed=0)\n\n n_samples_by_query = 20\n n_features = 10\n n_queries = 50\n\n n_samples = n_samples_by_query * n_queries\n\n features = prng.uniform(0, 1, size=(n_samples, n_features))\n weights = prng.uniform(0, 1, size=n_features)\n\n labels = np.dot(features, weights)\n query_ids = np.arange(0, n_samples) // n_queries\n money = (n_queries - np.arange(0, n_samples) % n_queries) * 10\n\n labels = labels.reshape((n_samples, 1))\n query_ids = query_ids.reshape((n_samples, 1))\n money = money.reshape((n_samples, 1))\n\n features = np.hstack((labels, query_ids, money, features))\n\n n_learn = int(0.7 * n_samples)\n learn = features[:n_learn, :]\n test = features[n_learn:, :]\n np.savetxt(train_path, learn, fmt='%.5f', delimiter='\\t')\n np.savetxt(test_path, test, fmt='%.5f', delimiter='\\t')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')\n test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')\n loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations\n\n cmd = [\n '--loss-function', loss_description,\n '--leaf-estimation-backtracking', 'No',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-m', model_path,\n '--use-best-model', 'false',\n ]\n\n cmd_one_thread = cmd + [\n '--learn-err-log', learn_error_one_thread_path,\n '--test-err-log', test_error_one_thread_path,\n '-T', '1'\n ]\n\n cmd_four_thread = cmd + [\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '-T', '4'\n ]\n execute_catboost_fit('CPU', cmd_one_thread)\n execute_catboost_fit('CPU', cmd_four_thread)\n\n compare_evals(learn_error_one_thread_path, learn_error_path)\n compare_evals(test_error_one_thread_path, test_error_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]
\n\n\n@pytest.mark.parametrize('metric', ['DCG', 'NDCG'])\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('dcg_type', ['Base', 'Exp'])\n@pytest.mark.parametrize('denominator', ['Position', 'LogPosition'])\ndef test_stochastic_rank(metric, top, dcg_type, denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(\n metric, top, dcg_type, denominator)\n\n cmd = (\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]
\n\n\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]
\n\n\n@pytest.mark.parametrize('top', [-1, 1, 10])\n@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])\ndef test_stochastic_rank_pfound_with_many_ones(top, decay):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)\n\n np.random.seed(0)\n train_with_ones = yatest.common.test_output_path('train_with_ones')\n TARGET_COLUMN = 2\n with open(data_file('querywise', 'train')) as fin:\n with open(train_with_ones, 'w') as fout:\n for line in fin.readlines():\n if np.random.random() < 0.25:\n parts = line.split('\\t')\n parts[TARGET_COLUMN] = '1.0'\n line = '\\t'.join(parts)\n fout.write(line)\n\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', loss,\n '-f', train_with_ones,\n '--cd', data_file('querywise', 'train.cd.query_id'),\n '-i', '10',\n '--learn-err-log', learn_error_path\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(learn_error_path)]
\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('top', [2, 100])\ndef test_averagegain_with_query_weights(boosting_type, top):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
\n\n\n@pytest.mark.parametrize('top_size', [2, 5, 10, -1])\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('cd_file', ['train.cd', 'train.cd.subgroup_id'])\ndef test_pfound(top_size, boosting_type, cd_file):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
\n\n\ndef test_params_ordering():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n learn_error_reversed_path = yatest.common.test_output_path('learn_error_reversed.tsv')\n test_error_path = yatest.common.test_output_path('ignored.tsv')\n\n def get_cmd(custom_metric, learn_error_path):\n return (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '--custom-metric', custom_metric,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', get_cmd(\"PFound:top=1;decay=0.6;hints=skip_train~false\", learn_error_path))\n execute_catboost_fit('CPU', get_cmd(\"PFound:decay=0.6;top=1;hints=skip_train~false\", learn_error_reversed_path))\n\n with open(learn_error_path) as f:\n assert 'PFound:top=1;decay=0.6' in f.read()\n with open(learn_error_reversed_path) as f:\n assert 'PFound:decay=0.6;top=1' in f.read()
\n\n\ndef test_recall_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'RecallAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), 
local_canonical_file(test_error_path)]\n\n\ndef test_precision_at_k():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'PrecisionAt:top=3',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mapk(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'MAP:top={}'.format(10),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ndcg_power_mode', ['Base', 'Exp'])\[email protected]('metric_type', ['DCG', 'NDCG'])\[email protected]('ndcg_denominator', ['None', 'LogPosition', 'Position'])\ndef test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n denominator = '' if ndcg_denominator == 'None' else ';denominator={}'.format(ndcg_denominator)\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '--custom-metric', '{}:top={};type={};hints=skip_train~false{}'.format(metric_type, 10, ndcg_power_mode, denominator),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_queryrmse_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairlogit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path 
= yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path, learn_pairs):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', learn_pairs),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path, 'train.pairs')\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path)]\n\n\ndef test_pairs_generation_with_max_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n def run_catboost(eval_path):\n cmd = [\n '--loss-function', 'PairLogit:max_pairs=30',\n '--eval-metric', 'PairLogit:max_pairs=30',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--ctr', 'Borders,Counter',\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--fstr-file', output_fstr_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(output_eval_path)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(output_eval_path),\n local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_pairlogit_no_target(boosting_type):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_pairlogit_approx_on_full_history():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--approx-on-full-history',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'PairLogitPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_pairwise_reproducibility(loss_function):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 
loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '5',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\ndef test_pairs_vs_grouped_pairs():\n output_model_path = yatest.common.test_output_path('model.bin')\n\n def run_catboost(learn_pairs_path_with_scheme, test_pairs_path_with_scheme, eval_path):\n cmd = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', learn_pairs_path_with_scheme,\n '--test-pairs', test_pairs_path_with_scheme,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n ]\n execute_catboost_fit('CPU', cmd)\n\n eval_path_ungrouped = yatest.common.test_output_path('test_eval_ungrouped')\n run_catboost(\n data_file('querywise', 'train.pairs'),\n data_file('querywise', 'test.pairs'),\n eval_path_ungrouped\n )\n\n eval_path_grouped = yatest.common.test_output_path('test_eval_grouped')\n run_catboost(\n 'dsv-grouped://' + data_file('querywise', 'train.grouped_pairs'),\n 'dsv-grouped://' + data_file('querywise', 'test.grouped_pairs'),\n eval_path_grouped\n )\n\n assert filecmp.cmp(eval_path_ungrouped, eval_path_grouped)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRank:permutations=5;decay=0.9',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_yetirank_pairwise(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'YetiRankPairwise',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return 
[local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', ('YetiRank', 'YetiRankPairwise'))\ndef test_yetirank_default_metric(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--model-file', output_model_path,\n '--boosting-type', 'Plain',\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path)]\n\n\[email protected]('eval_metric', ['MRR', 'MRR:top=1', 'ERR', 'ERR:top=1'])\ndef test_reciprocal_rank_metrics(eval_metric):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', 'YetiRank',\n '--eval-metric', eval_metric,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.query_id'),\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\nNAN_MODE = ['Min', 'Max']\n\n\[email protected]('nan_mode', NAN_MODE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode(nan_mode, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult_nan', 'train_small'),\n '-t', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', nan_mode,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_nan', 'test_small'),\n '--column-description', data_file('adult_nan', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('border_count', [64, 255, 350, 1000, 2500])\ndef test_different_border_count(border_count):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '20',\n '-T', '4',\n '-x', str(border_count),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n 
'--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode_forbidden(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--nan-mode', 'Forbidden',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_iter(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_overfit_detector_inc_to_dec(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '2000',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--od-pval', '0.5',\n '--od-type', 'IncToDec',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)\ndef test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n snapshot_path = yatest.common.test_output_path('snapshot')\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.5',\n '--rsm', '1',\n '--leaf-estimation-iterations', '10',\n 
'--max-ctr-complexity', '4',\n '--snapshot-file', snapshot_path,\n '--od-type', overfitting_detector_type\n )\n if overfitting_detector_type == 'IncToDec':\n cmd_prefix += (\n '--od-wait', '2',\n '--od-pval', '0.5'\n )\n elif overfitting_detector_type == 'Iter':\n cmd_prefix += ('--od-wait', '2')\n\n cmd_first = cmd_prefix + ('-i', '10')\n execute_catboost_fit('CPU', cmd_first)\n\n cmd_second = cmd_prefix + ('-i', '2000')\n execute_catboost_fit('CPU', cmd_second)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\ndef test_per_object_approx_on_full_history(leaf_estimation_method):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '--approx-on-full-history',\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-w', '0.5',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '20',\n '--use-best-model', 'false')\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_shrink_model(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '1',\n '--od-pval', '0.99',\n '--rsm', '1',\n '--use-best-model', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', leaf_estimation_method,\n '--leaf-estimation-iterations', '2',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n 
'--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'column_name',\n ['doc_id', 'sample_id'],\n ids=['column_name=doc_id', 'column_name=sample_id']\n)\ndef test_sample_id(loss_function, column_name):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n column_description = data_file('adult_' + column_name, 'train.cd')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('adult_doc_id', 'train'),\n '-t', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_doc_id', 'test'),\n '--column-description', column_description,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nPOOLS = ['amazon', 'adult']\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_apply_missing_vals(boosting_type, grow_policy):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('test_adult_missing_val.tsv'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_crossentropy(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'CrossEntropy',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n 
execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_permutation_block(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-permutation-block', '239',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_ignored_features(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '-I', '0:1:3:5-7:10000',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_names():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'RMSE',\n '--has-header',\n '--learn-set', data_file('black_friday', 'train'),\n '--test-set', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_ignored_features_not_read():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n input_cd_path = data_file('adult', 'train.cd')\n cd_path = yatest.common.test_output_path('train.cd')\n\n with open(input_cd_path, \"rt\") as f:\n cd_lines = f.readlines()\n with open(cd_path, \"wt\") as f:\n for cd_line in cd_lines:\n # Corrupt some features by making them 'Num'\n if cd_line.split() == ('5', 'Categ'): # column 5 --> feature 4\n cd_line = cd_line.replace('Categ', 'Num')\n if cd_line.split() == ('7', 'Categ'): # column 7 --> feature 6\n cd_line = cd_line.replace('Categ', 'Num')\n f.write(cd_line)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '-I', '4:6', # Ignore the corrupted features\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n # Not needed: return [local_canonical_file(output_eval_path)]\n\n\ndef 
test_ignored_features_not_read_names():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n input_cd_path = data_file('black_friday', 'cd')\n cd_path = yatest.common.test_output_path('cd')\n\n with open(input_cd_path, \"rt\") as f:\n cd_lines = f.readlines()\n with open(cd_path, \"wt\") as f:\n for cd_line in cd_lines:\n if cd_line.split() == ('2', 'Categ', 'Gender'):\n cd_line = cd_line.replace('Categ', 'Num')\n if cd_line.split() == ('10', 'Categ', 'Product_Category_3'):\n cd_line = cd_line.replace('Categ', 'Num')\n f.write(cd_line)\n\n cmd = (\n '--loss-function', 'RMSE',\n '--has-header',\n '--learn-set', data_file('black_friday', 'train'),\n '--test-set', data_file('black_friday', 'test'),\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-I', 'Gender:Product_Category_3',\n )\n execute_catboost_fit('CPU', cmd)
\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_baseline(boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]
\n\n\n@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)\n@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline(boosting_type, loss_function):\n labels = ['0', '1', '2', '3']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4'\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = 
yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(eval_path, formula_predict_path))\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_lost_class(boosting_type, loss_function):\n labels = [0, 1, 2, 3]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '--bootstrap-type', 'No',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n 
execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Gradient'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_not_binarized', 'train_small'),\n '-t', data_file('adult_not_binarized', 'test_small'),\n '--column-description', data_file('adult_not_binarized', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--target-border', '0.5',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_without_test = yatest.common.test_output_path('model_without_test.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n base_cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '--counter-calc-method', 'SkipTest', # TODO(kirillovs): remove after setting SkipTest as default type\n '-w', '0.03',\n '-T', '4',\n )\n\n train_with_test_cmd = base_cmd + (\n '-t', data_file('adult', 'test_small'),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', train_with_test_cmd)\n\n train_without_test_cmd = base_cmd + (\n '-m', output_model_path_without_test,\n )\n execute_catboost_fit('CPU', train_without_test_cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n formula_predict_without_test_path = 
yatest.common.test_output_path('predict_without_test.eval')\n\n base_calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--prediction-type', 'RawFormulaVal'\n )\n calc_cmd = base_calc_cmd + (\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n )\n calc_cmd_without_test = base_calc_cmd + (\n '-m', output_model_path_without_test,\n '--output-path', formula_predict_without_test_path,\n )\n yatest.common.execute(calc_cmd)\n yatest.common.execute(calc_cmd_without_test)\n if loss_function == 'MAPE':\n # TODO(kirillovs): uncomment this after resolving MAPE problems\n # assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path), local_canonical_file(formula_predict_path)]\n else:\n assert(compare_evals(output_eval_path, formula_predict_path))\n assert(filecmp.cmp(formula_predict_without_test_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_cv(is_inverted, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_query(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_pairs(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('bad_cv_params', ['XX', 'YY', 'XY'])\ndef test_multiple_cv_spec(bad_cv_params):\n 
output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if bad_cv_params == 'XX':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=False, n=4, k=7))\n elif bad_cv_params == 'XY':\n cmd += ('--cv', format_crossvalidation(is_inverted=False, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n elif bad_cv_params == 'YY':\n cmd += ('--cv', format_crossvalidation(is_inverted=True, n=2, k=10),\n '--cv', format_crossvalidation(is_inverted=True, n=4, k=7))\n else:\n raise Exception('bad bad_cv_params value:' + bad_cv_params)\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('error_type', ['0folds', 'fold_idx_overflow'])\ndef test_bad_fold_cv_spec(is_inverted, error_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n ('--cv:Inverted' if is_inverted else '--cv:Classical'),\n {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type],\n '--eval-file', output_eval_path,\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_empty_eval(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_time(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--has-time',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 
'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'loss_function',\n LOSS_FUNCTIONS_SHORT,\n ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]\n)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Gradient',\n '--eval-file', output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Plain',\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-iterations', '1',\n '--leaf-estimation-method', 'Newton',\n '--eval-file', 
output_eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '40',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--leaf-estimation-iterations', '7',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_priors(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,'\n 'Counter:Prior=0',\n '--per-feature-ctr', '4:Borders:Prior=0.444,Counter:Prior=0.444;'\n '6:Borders:Prior=0.666,Counter:Prior=0.666;'\n '8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Buckets'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 
'MultiClass',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--fold-len-multiplier', '1.5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nFSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues', 'PredictionDiff']\nDATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr(fstr_type, boosting_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr_with_text_features(fstr_type, boosting_type, grow_policy):\n pool = 'rotten_tomatoes'\n\n separator_type = 'ByDelimiter'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train'),\n cd_path=data_file(pool, 'cd_binclass'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--text-processing', json.dumps(text_processing)) +\n (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', ['LossFunctionChange', 'ShapValues'])\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_fstr_with_text_features_shap(fstr_type, boosting_type, grow_policy):\n pool = 'rotten_tomatoes'\n\n separator_type = 'ByDelimiter'\n feature_estimators = 'NaiveBayes'\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': 
feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train'),\n cd_path=data_file(pool, 'cd_binclass'),\n boosting_type=boosting_type,\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--random-strength', '0', '--text-processing', json.dumps(text_processing)) +\n (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_normalized_model(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=True,\n additional_train_params=(('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_target_border(fstr_type, grow_policy):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--target-border', '0.4')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_weights(fstr_type, grow_policy):\n return do_test_fstr(\n fstr_type,\n loss_function='RMSE',\n input_path=data_file('querywise', 'train'),\n cd_path=data_file('querywise', 'train.cd.weight'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\[email protected]('grow_policy', GROW_POLICIES)\ndef test_fstr_with_class_weights(fstr_type, grow_policy):\n pool = 'adult' if fstr_type != 'PredictionDiff' else 'higgs'\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=data_file(pool, 'train_small'),\n cd_path=data_file(pool, 'train.cd'),\n boosting_type='Plain',\n grow_policy=grow_policy,\n normalize=False,\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\[email protected]('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)\ndef test_fstr_with_target_border_and_class_weights(fstr_type):\n if fstr_type == 'PredictionDiff':\n # because PredictionDiff needs pool without categorical features\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd')\n else:\n train_path = data_file('adult_not_binarized', 'train_small')\n cd_path = data_file('adult_not_binarized', 'train.cd')\n\n return do_test_fstr(\n fstr_type,\n loss_function='Logloss',\n input_path=train_path,\n cd_path=cd_path,\n boosting_type='Plain',\n grow_policy='SymmetricTree',\n normalize=False,\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\ndef do_test_fstr(\n fstr_type,\n loss_function,\n input_path,\n cd_path,\n boosting_type,\n grow_policy,\n normalize,\n additional_train_params=()\n):\n 
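    # Shared driver for the fstr tests above. Roughly: train a model with
    # `catboost fit` using the given loss function, boosting type and grow policy
    # (for 'PredictionDiff' the input pool is first trimmed to its first two lines),
    # optionally renormalize the model via `catboost normalize-model`
    # (make_model_normalized), then run `catboost fstr --fstr-type <fstr_type>`
    # and canonize the resulting fstr.tsv. For a normalized model only
    # 'PredictionValuesChange' (and 'InternalFeatureImportance' for non-ranking
    # losses) are expected to succeed; the other fstr types are expected to fail,
    # which is checked with pytest.raises(yatest.common.ExecutionError).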
model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', input_path,\n '--column-description', cd_path,\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '-m', model_path\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n if fstr_type == 'PredictionDiff':\n with open(input_path) as input:\n fstr_pool_path = yatest.common.test_output_path('input.tsv')\n with open(fstr_pool_path, \"w\") as output:\n output.write(input.readline())\n output.write(input.readline())\n input_path = fstr_pool_path\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', input_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', output_fstr_path,\n '--fstr-type', fstr_type\n )\n\n if normalize:\n make_model_normalized(model_path)\n if not(\n fstr_type == 'PredictionValuesChange' or\n fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES\n ):\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\ndef make_model_normalized(model_path):\n yatest.common.execute([\n CATBOOST_PATH,\n 'normalize-model',\n '--model-path', model_path,\n '--output-model', model_path,\n '--set-scale', '0.5',\n '--set-bias', '0.125',\n ])\n\n\[email protected]('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])\ndef test_loss_change_fstr(loss_function):\n return do_test_loss_change_fstr(loss_function, normalize=False)\n\n\ndef test_loss_change_fstr_normalized():\n return do_test_loss_change_fstr('QueryRMSE', normalize=True)\n\n\ndef do_test_loss_change_fstr(loss_function, normalize):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')\n\n def add_loss_specific_params(cmd, fstr_mode):\n if loss_function in ['PairLogit', 'PairLogitPairwise']:\n cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))\n if fstr_mode:\n cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))\n else:\n cmd += ('--column-description', data_file('querywise', 'train.cd'))\n return cmd\n\n cmd_prefix = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '--learn-set', data_file('querywise', 'train'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', model_path\n )\n cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd_prefix = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)\n if normalize:\n make_model_normalized(model_path)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n return\n\n yatest.common.execute(fstr_cmd)\n\n fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n 
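    # fit_output (loaded just above from the --fstr-file written during training) and
    # fstr_output (loaded below from the standalone `catboost fstr` run on the same
    # pool) hold per-feature LossFunctionChange scores; they are compared element-wise
    # with np.allclose(fit_output, fstr_output, rtol=1e-6), i.e. each pair must satisfy
    # |a - b| <= atol + rtol * |b| with numpy's default atol of 1e-8.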
fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fit_output, fstr_output, rtol=1e-6))\n\n return [local_canonical_file(output_fstr_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('ranking_parameters', [\n {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},\n {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}\n])\ndef test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):\n model_path = yatest.common.test_output_path('model.bin')\n fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')\n fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')\n internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')\n internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')\n\n pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'\n pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')\n cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')\n has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()\n\n cmd = (\n '--use-best-model', 'false',\n '--learn-set', pool_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path,\n '--loss-function', ranking_parameters['loss-function']\n ) + has_header_suffix\n\n if ranking_parameters['loss-function'] == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_0,\n '--fstr-internal-file', internal_fstr_path_0,\n '--fstr-type', 'FeatureImportance')\n )\n execute_catboost_fit(\n 'CPU',\n cmd + ('--fstr-file', fstr_path_1,\n '--fstr-internal-file', internal_fstr_path_1,\n '--fstr-type', ranking_parameters['fstr-type'])\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', pool_path,\n '--column-description', cd_path,\n '--model-file', model_path,\n ) + has_header_suffix\n\n yatest.common.execute(\n fstr_cmd + ('--output-path', fstr_path_1,\n '--fstr-type', 'FeatureImportance')\n )\n yatest.common.execute(\n fstr_cmd + ('--output-path', internal_fstr_path_1,\n '--fstr-type', 'InternalFeatureImportance')\n )\n\n assert filecmp.cmp(fstr_path_0, fstr_path_1)\n assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loss_change_fstr_without_pairs(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '--model-file', model_path\n\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--model-file', model_path,\n '--output-path', output_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n\n try:\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', 
data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--model-file', model_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(fstr_cmd)\n except:\n return [local_canonical_file(output_fstr_path)]\n\n assert False\n\n\ndef test_loss_change_fstr_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')\n output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')\n train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '--learn-set', get_pool_path('train', True),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '-i', '10',\n '-T', '4',\n '--fstr-file', train_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train'),\n '--column-description', cd_file,\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_dsv_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', get_pool_path('train', True),\n '--input-pairs', data_file('querywise', 'train.pairs'),\n '--model-file', output_model_path,\n '--output-path', output_quantized_fstr_path,\n '--fstr-type', 'LossFunctionChange',\n )\n yatest.common.execute(cmd)\n\n fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\\t')\n fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\\t')\n train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\\t')\n assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))\n assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('grow_policy', GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):\n\n def run_catboost(threads, model_path, eval_path):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '25',\n '-T', str(threads),\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n model_1 = yatest.common.test_output_path('model_1.bin')\n eval_1 = yatest.common.test_output_path('test_1.eval')\n run_catboost(1, model_1, eval_1)\n model_4 = yatest.common.test_output_path('model_4.bin')\n eval_4 = yatest.common.test_output_path('test_4.eval')\n run_catboost(4, model_4, eval_4)\n assert filecmp.cmp(eval_1, eval_4)\n\n\nBORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']\n\n\[email protected]('border_type', BORDER_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef 
test_feature_border_types(border_type, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--feature-border-type', border_type,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('depth', [4, 8])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_deep_tree_classification(depth, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--depth', str(depth),\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--leaf-estimation-method', 'Newton',\n '--eval-file', output_eval_path,\n '--l2-leaf-reg', '5'\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\nREG_LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',\n 'Huber:delta=1.0']\n\n\[email protected]('loss_function', REG_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', 
cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_eval_path_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n\n if boosting_type == 'Plain':\n cmd = cmd[:-1] + [output_eval_path_dev_approxes, '--dev-leafwise-approxes']\n execute_catboost_fit('CPU', cmd)\n assert filecmp.cmp(output_eval_path, output_eval_path_dev_approxes)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n assert(compare_evals(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path)]\n\n\nBORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']\n\n\[email protected](\n 'border_type',\n BORDER_TYPES,\n ids=lambda border_type: 'border_type=%s' % border_type\n)\[email protected](\n 'border_count',\n [1, 3, 10],\n ids=lambda border_count: 'border_count=%d' % border_count\n)\[email protected](\n 'boosting_type',\n BOOSTING_TYPE,\n ids=lambda boosting_type: 'boosting_type=%s' % boosting_type\n)\ndef test_ctr_target_quantization(border_type, border_count, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', 'Borders:TargetBorderType=' + border_type,\n '--ctr-target-border-count', str(border_count)\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCOUNTER_METHODS = ['Full', 'SkipTest']\n\n\[email protected]('counter_calc_method', COUNTER_METHODS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_counter_calc(counter_calc_method, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', 
data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '60',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--counter-calc-method', counter_calc_method\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nCTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']\n\n\[email protected]('ctr_type', CTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--ctr', ctr_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_overfitting_detector_metric(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC:hints=skip_train~false',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_same_metric_skip_different(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path_with_custom_metric = yatest.common.test_output_path('test_error_with_custom_metric.tsv')\n learn_error_path_with_custom_metric = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')\n\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n ]\n\n cmd_without_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n ]\n cmd_with_custom_metric = cmd + [\n '--eval-metric', 'AUC:hints=skip_train~true',\n '--custom-metric', 
'AUC:hints=skip_train~false',\n '--learn-err-log', learn_error_path_with_custom_metric,\n '--test-err-log', test_error_path_with_custom_metric,\n ]\n\n execute_catboost_fit('CPU', cmd_without_custom_metric)\n execute_catboost_fit('CPU', cmd_with_custom_metric)\n\n assert filecmp.cmp(learn_error_path_with_custom_metric, learn_error_path)\n\n\[email protected]('loss_function', BINCLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_classification(loss_function, boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n custom_metrics = [\n metric for metric in\n [\n 'AUC:hints=skip_train~false',\n 'Logloss',\n 'CrossEntropy',\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'BalancedAccuracy',\n 'BalancedErrorRate',\n 'Kappa',\n 'WKappa',\n 'BrierScore',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss',\n 'NormalizedGini'\n ]\n if metric != loss_function\n ]\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', ','.join(custom_metrics),\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_loglikelihood_of_prediction(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '--boosting-type', boosting_type,\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', 'LogLikelihoodOfPrediction',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path, diff_tool(1e-7)), local_canonical_file(test_error_path, diff_tool(1e-7))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_multiclassification(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_small', 'train_small'),\n '-t', data_file('cloudness_small', 'test_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric',\n 'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',\n '--learn-err-log', learn_error_path,\n '--test-err-log', 
test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_prediction_type(boosting_type):\n model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'Probability'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_calc_no_target(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n fit_output_eval_path = yatest.common.test_output_path('fit_test.eval')\n calc_output_eval_path = yatest.common.test_output_path('calc_test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--counter-calc-method', 'SkipTest',\n '--eval-file', fit_output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('train_notarget.cd'),\n '-m', model_path,\n '--output-path', calc_output_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(fit_output_eval_path, calc_output_eval_path))\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_classification_progress_restore(boosting_type):\n\n def run_catboost(iters, model_path, eval_path, additional_params=None):\n import random\n import shutil\n import string\n letters = string.ascii_lowercase\n train_random_name = ''.join(random.choice(letters) for i in xrange(8))\n shutil.copy(data_file('adult', 'train_small'), train_random_name)\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', train_random_name,\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', str(iters),\n '-T', '4',\n '-m', model_path,\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n execute_catboost_fit('CPU', cmd)\n\n canon_model_path = yatest.common.test_output_path('canon_model.bin')\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n run_catboost(30, canon_model_path, canon_eval_path)\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n run_catboost(15, model_path, eval_path, additional_params=['--snapshot-file', progress_path])\n run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])\n assert filecmp.cmp(canon_eval_path, eval_path)\n # TODO(kirillovs): make this active when 
progress_file parameter will be deleted from json params\n # assert filecmp.cmp(canon_model_path, model_path)\n\n\[email protected]('loss_function', CLASSIFICATION_LOSSES)\[email protected]('prediction_type', PREDICTION_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_prediction_type(prediction_type, loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--prediction-type', prediction_type\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_const_feature(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n train_path = yatest.common.test_output_path('train_small')\n test_path = yatest.common.test_output_path('test_small')\n train_dataset = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\\t')\n test_dataset = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\\t')\n train_dataset[:, 14] = '0'\n test_dataset[:, 14] = '0'\n np.savetxt(train_path, train_dataset, fmt='%s', delimiter='\\t')\n np.savetxt(test_path, test_dataset[:10, :], fmt='%s', delimiter='\\t')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nQUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']\n\n\[email protected]('loss_function', QUANTILE_LOSS_FUNCTIONS)\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\ndef test_quantile_targets(loss_function, boosting_type, grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function + ':alpha=0.9',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_targets_exact(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult', 
'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_weights(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('higgs', 'train_small'),\n '-t', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantile_categorical(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Quantile:alpha=0.9',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--leaf-estimation-method', 'Exact'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_quantile_exact_distributed():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MAE',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n other_options=(\n '--leaf-estimation-method', 'Exact',\n '--boost-from-average', 'False'\n )\n )))]\n\n\nCUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',\n 'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',\n 'FairLoss:smoothness=0.9']\n\n\[email protected]('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss(custom_loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '50',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--custom-metric', custom_loss_function,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n eps = 0 if 'MSLE' not in custom_loss_function else 1e-9\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool(eps)),\n local_canonical_file(test_error_path, 
diff_tool=diff_tool(eps))]\n\n\ndef test_train_dir():\n output_model_path = 'model.bin'\n output_eval_path = 'test.eval'\n train_dir_path = 'trainDir'\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--train-dir', train_dir_path,\n '--fstr-file', 'fstr.tsv',\n '--fstr-internal-file', 'ifstr.tsv'\n )\n execute_catboost_fit('CPU', cmd)\n outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path, output_eval_path, 'fstr.tsv', 'ifstr.tsv']\n for output in outputs:\n assert os.path.isfile(train_dir_path + '/' + output)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n borders_file = yatest.common.test_output_path('borders.tsv')\n borders_file_output = borders_file + '.out'\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--output-borders-file': borders_file_output,\n }\n\n params_binarized = dict(params)\n params_binarized['--input-borders-file'] = borders_file_output\n params_binarized['--output-borders-file'] = borders_file\n params_binarized['-m'] = output_model_path_binarized\n\n execute_catboost_fit(task_type='CPU', params=params)\n\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n execute_catboost_fit(\n task_type='CPU',\n params=params_binarized,\n )\n\n apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)\n apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)\n\n assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))\n assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(predictions_path_test),\n local_canonical_file(predictions_path_learn),\n local_canonical_file(borders_file)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_feature_id_fstr(boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_fstr_path = 
yatest.common.test_output_path('fstr.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n fstr_cmd = (\n CATBOOST_PATH,\n 'fstr',\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train_with_id.cd'),\n '-m', model_path,\n '-o', output_fstr_path,\n )\n yatest.common.execute(fstr_cmd)\n\n return local_canonical_file(output_fstr_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-names', '1,0'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,1.,0.25,0.75'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_names_multiclass_last_class_missed(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'test_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path,\n '--class-names', '0.,0.5,0.25,0.75,1.',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_logloss(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', 
data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_multiclass(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--class-weights', '0.5,2'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_params_from_file(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '6',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--params-file', data_file('params.json')\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_lost_class(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_class_weight_with_lost_class(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'MultiClass',\n '-f', data_file('cloudness_lost_class', 'train_small'),\n '-t', data_file('cloudness_lost_class', 'test_small'),\n '--column-description', data_file('cloudness_lost_class', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--classes-count', '3',\n '--class-weights', '0.5,2,2',\n '--prediction-type', 'RawFormulaVal,Class',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 
'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_one_hot(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--one-hot-max-size', '10'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', calc_eval_path\n )\n yatest.common.execute(calc_cmd)\n\n assert(compare_evals(output_eval_path, calc_eval_path))\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_random_strength(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n '--random-strength', '100'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult_all_categorical.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '100',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-x', '1',\n '-n', '8',\n '-w', '0.1',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path 
= yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('used_ram_limit', ['1Kb', '4Gb'])\[email protected](\n 'dev_score_calc_obj_block_size',\n ['600', '5000000'],\n ids=['calc_block=600', 'calc_block=5000000']\n)\ndef test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--used-ram-limit', used_ram_limit,\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '--depth', '7',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected](\n 'ignored_features',\n [True, False],\n ids=['ignored_features=True', 'ignored_features=False']\n)\ndef test_apply_with_permuted_columns(ignored_features):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '-i', '20',\n '-w', '0.03',\n '-T', '6',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if ignored_features:\n cmd += ('--ignore-features', '0:2:5')\n\n execute_catboost_fit('CPU', cmd)\n\n permuted_test_path, permuted_cd_path = permute_dataset_columns(\n data_file('airlines_5K', 'test'),\n data_file('airlines_5K', 'cd'),\n seed=123)\n\n permuted_predict_path = yatest.common.test_output_path('permuted_predict.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', permuted_test_path,\n '--has-header',\n '--column-description', permuted_cd_path,\n '-m', output_model_path,\n '--output-path', permuted_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal,Label'\n )\n yatest.common.execute(calc_cmd)\n assert filecmp.cmp(output_eval_path, permuted_predict_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTree',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--sampling-frequency', 'PerTreeLevel',\n '--bootstrap-type', 'Bernoulli',\n '--subsample', '0.5',\n )\n if grow_policy == 'Lossguide':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n else:\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--bagging-temperature', '0.5',\n )\n execute_catboost_fit('CPU', cmd)\n return 
local_canonical_file(output_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_plain(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--boosting-type', 'Plain',\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_bootstrap(boosting_type, dev_score_calc_obj_block_size):\n bootstrap_option = {\n 'no': ('--bootstrap-type', 'No',),\n 'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),\n 'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)\n }\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n )\n for bootstrap in bootstrap_option:\n model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')\n eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')\n execute_catboost_fit('CPU', cmd + ('-m', model_path, '--eval-file', eval_path,) + bootstrap_option[bootstrap])\n\n ref_eval_path = yatest.common.test_output_path('test_no.eval')\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))\n assert(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_json_logging():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_json_logging_metric_period():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n json_path = yatest.common.test_output_path('catboost_training.json')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n 
'-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--json-log', json_path,\n '--metric-period', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(remove_time_from_json(json_path))]\n\n\ndef test_output_columns_format():\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n # Intentionally skipped: -t ...\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--output-columns', 'SampleId,RawFormulaVal,#2,Label',\n '--eval-file', output_eval_path\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--output-columns', 'SampleId,RawFormulaVal'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(output_eval_path, formula_predict_path)\n\n\ndef test_eval_period():\n model_path = yatest.common.test_output_path('adult_model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '--output-path', formula_predict_path,\n '--eval-period', '2'\n )\n yatest.common.execute(calc_cmd)\n\n return local_canonical_file(formula_predict_path)\n\n\ndef test_weights_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('adult_weight', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult_weight', 'train_weight'),\n '-t', data_file('adult_weight', 'test_weight'),\n '--column-description', data_file('train_adult_baseline.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_baseline_from_file_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 
'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '-i', '10',\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', data_file('higgs', 'train_small'),\n '--test-set', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train_weight.cd'),\n '--learn-baseline', data_file('higgs', 'train_baseline'),\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '-i', '10',\n '--ignore-features', '0', # baseline column\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_1_path,\n '--output-columns', 'SampleId,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_group_weight_output():\n model_path = yatest.common.test_output_path('model.bin')\n fit_eval_path = yatest.common.test_output_path('test_0.eval')\n calc_eval_path = yatest.common.test_output_path('test_1.eval')\n\n fit_cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'QueryRMSE',\n '--learn-set', data_file('querywise', 'train'),\n '--test-set', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '-i', '10',\n '-m', model_path,\n '--eval-file', fit_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(fit_cmd)\n fit_eval = pd.read_csv(fit_eval_path, sep='\\t')\n test_group_weight = pd.read_csv(data_file('querywise', 'test'), sep='\\t', header=None)[0]\n assert 'GroupWeight' in fit_eval.columns\n assert np.allclose(fit_eval['GroupWeight'], test_group_weight)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.group_weight'),\n '--output-path', calc_eval_path,\n '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'\n )\n yatest.common.execute(calc_cmd)\n calc_eval = pd.read_csv(calc_eval_path, sep='\\t')\n assert 'GroupWeight' in calc_eval.columns\n assert np.allclose(calc_eval['GroupWeight'], test_group_weight)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_from_file(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path_0 = yatest.common.test_output_path('test_0.eval')\n output_eval_path_1 = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', output_eval_path_0,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 
'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.,0.25,0.5,0.75',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n\n try:\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('precipitation_small', 'train_small'),\n '-t', data_file('precipitation_small', 'train_small'),\n '--column-description', data_file('precipitation_small', 'train.cd'),\n '--learn-baseline', output_eval_path_0,\n '--test-baseline', output_eval_path_0,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--prediction-type', 'RawFormulaVal,Class',\n '--class-names', '0.5,0.25,0.75.,0.',\n '--eval-file', output_eval_path_1,\n )\n execute_catboost_fit('CPU', cmd)\n except:\n return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]\n\n assert False\n\n\ndef test_baseline_from_file_output_on_quantized_pool():\n output_model_path = yatest.common.test_output_path('model.bin')\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n '--column-description', data_file('higgs', 'train_baseline.cd'),\n '--learning-rate', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_0_path,\n )\n execute_catboost_fit('CPU', cmd + ('-i', '10'))\n execute_catboost_fit('CPU', cmd + (\n '-i', '10',\n '--learn-baseline', eval_0_path,\n '--test-baseline', eval_0_path,\n '--eval-file', eval_0_path))\n\n execute_catboost_fit('CPU', cmd + (\n '-i', '20',\n '--eval-file', eval_1_path))\n\n compare_evals(eval_0_path, eval_1_path)\n\n\ndef test_query_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_subgroup_output():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.subgroup_id'),\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef 
test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-w', '0.1',\n '--one-hot-max-size', '102',\n '--bootstrap-type', 'No',\n '--random-strength', '0',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):\n pool_path = schema + data_file(pool, train)\n test_path = test_schema + data_file(pool, test)\n cd_path = data_file(pool, cd)\n cmd = (\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--random-strength', '0',\n '--has-time',\n '--bootstrap-type', 'No',\n '--boosting-type', 'Plain',\n )\n if dev_score_calc_obj_block_size:\n cmd += ('--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size)\n return cmd + other_options\n\n\ndef run_dist_train(cmd, output_file_switch='--eval-file'):\n eval_0_path = yatest.common.test_output_path('test_0.eval')\n execute_catboost_fit('CPU', cmd + (output_file_switch, eval_0_path,))\n\n eval_1_path = yatest.common.test_output_path('test_1.eval')\n execute_dist_train(cmd + (output_file_switch, eval_1_path,))\n\n eval_0 = np.loadtxt(eval_0_path, dtype='float', delimiter='\\t', skiprows=1)\n eval_1 = np.loadtxt(eval_1_path, dtype='float', delimiter='\\t', skiprows=1)\n assert(np.allclose(eval_0, eval_1, atol=1e-5))\n return eval_1_path\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_with_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef 
test_dist_train_multiclass(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='MultiClass',\n pool='cloudness_small',\n train='train_small',\n test='test_small',\n cd='train_float_weight.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='test_small',\n cd='train.cd',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\[email protected]('target', ['PairLogitPairwise', 'QuerySoftMax'])\ndef test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=target,\n pool='querywise',\n train='train_x128_greedylogsum_aqtaa.bin',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-pairs', data_file('querywise', pairs_file)))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train.quantized',\n test='test',\n cd='train.cd.query_id',\n schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--learn-group-weights', data_file('querywise', 'train.group_weights')))))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='higgs',\n train='train_small_x128_greedylogsum.bin',\n test='train_small_x128_greedylogsum.bin',\n cd='train_baseline.cd',\n schema='quantized://',\n test_schema='quantized://',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',\n '--test-baseline', data_file('higgs', 'test_baseline'),\n '--learn-baseline', data_file('higgs', 'train_baseline')))))]\n\n\[email protected](\n 
'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_queryrmse(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_subgroup(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QueryRMSE',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--eval-metric', 'PFound')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_pairlogit(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogit',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.query_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))\n )))]\n\n\[email protected]('pairs_file', ['train.pairs', 'train.pairs.weighted'])\ndef test_dist_train_pairlogitpairwise(pairs_file):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='PairLogitPairwise',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd',\n other_options=('--learn-pairs', data_file('querywise', pairs_file))\n )))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_dist_train_querysoftmax(dev_score_calc_obj_block_size):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='QuerySoftMax',\n pool='querywise',\n train='train',\n test='test',\n cd='train.cd.subgroup_id',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_baseline.cd',\n other_options=('--eval-metric', 'AUC')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected]('loss_func', ['Logloss', 'RMSE'])\ndef test_dist_train_auc_weight(loss_func):\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function=loss_func,\n pool='higgs',\n train='train_small',\n test='test_small',\n cd='train_weight.cd',\n other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](reason='Boost from average for distributed training')\[email protected]('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])\ndef test_dist_train_snapshot(schema, train):\n train_cmd = make_deterministic_train_cmd(\n loss_function='RMSE',\n pool='higgs',\n train=train,\n test='test_small',\n schema=schema,\n cd='train.cd')\n\n eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')\n 
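# The single-process 10-iteration fit below is the reference run; a distributed 5-iteration run resumed from its snapshot for 5 more iterations is then expected to reproduce the same eval file (compared with filecmp).\n    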
execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))\n\n snapshot_path = yatest.common.test_output_path('snapshot')\n execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))\n\n eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')\n execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))\n\n assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))\n return [local_canonical_file(eval_5_plus_5_trees_path)]\n\n\ndef test_dist_train_yetirank():\n return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(\n loss_function='YetiRank',\n pool='querywise',\n train='repeat_same_query_8_times',\n test='repeat_same_query_8_times',\n cd='train.cd'\n ), output_file_switch='--test-err-log'))]\n\n\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\[email protected](\n 'one_hot_max_size',\n [2, 255],\n ids=['one_hot_max_size=2', 'one_hot_max_size=255']\n)\ndef test_dist_train_with_cat_features(dev_score_calc_obj_block_size, one_hot_max_size):\n cmd = make_deterministic_train_cmd(\n loss_function='Logloss',\n pool='adult',\n train='train_small',\n test='test_small',\n cd='train.cd',\n dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,\n other_options=('--one-hot-max-size', str(one_hot_max_size))\n )\n\n if one_hot_max_size == 2:\n with pytest.raises(yatest.common.ExecutionError):\n run_dist_train(cmd)\n else:\n return [local_canonical_file(run_dist_train(cmd))]\n\n\ndef test_no_target():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n pairs_path = yatest.common.test_output_path('pairs')\n\n np.savetxt(train_path, [[0], [1], [2], [3], [4]], delimiter='\\t', fmt='%.4f')\n np.savetxt(cd_path, [('0', 'Num')], delimiter='\\t', fmt='%s')\n np.savetxt(pairs_path, [[0, 1], [0, 2], [0, 3], [2, 4]], delimiter='\\t', fmt='%i')\n\n cmd = (\n '-f', train_path,\n '--cd', cd_path,\n '--learn-pairs', pairs_path\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('loss_function', ALL_LOSSES)\ndef test_const_target(loss_function):\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n np.savetxt(\n train_path,\n [[0, 0, 0],\n [0, 0, 1],\n [0, 0, 2],\n [0, 0, 3],\n [0, 0, 4]],\n delimiter='\\t',\n fmt='%.4f'\n )\n np.savetxt(cd_path, [('0', 'Target'), ('1', 'GroupId')], delimiter='\\t', fmt='%s')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_negative_weights():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write('0\\tNum\\n1\\tWeight\\n2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, -1, 1]], delimiter='\\t', fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_zero_learning_rate():\n train_path = yatest.common.test_output_path('train')\n cd_path = yatest.common.test_output_path('train.cd')\n\n open(cd_path, 'wt').write(\n '0\\tNum\\n'\n '1\\tNum\\n'\n '2\\tTarget\\n')\n np.savetxt(train_path, [\n [0, 1, 2],\n [1, 1, 1]], delimiter='\\t', 
fmt='%.4f')\n cmd = ('-f', train_path,\n '--cd', cd_path,\n '--learning-rate', '0.0',\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=(), additional_eval_params=()):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n ) + additional_eval_params\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])\ndef test_eval_metrics(metric, metric_period):\n if metric == 'PFound':\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'QueryRMSE'\n elif metric == 'PairAccuracy':\n # note: pairs are autogenerated\n train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'PairLogitPairwise'\n else:\n train, test, cd, loss_function = data_file('adult', 'train_small'), data_file('adult', 'test_small'), data_file('adult', 'train.cd'), 'Logloss'\n\n return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)\n\n\ndef test_eval_metrics_with_target_border():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_eval_metrics_with_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult', 'train_small'),\n test=data_file('adult', 'test_small'),\n cd=data_file('adult', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_eval_metrics_with_target_border_and_class_weights():\n return do_test_eval_metrics(\n metric='Logloss',\n metric_period='1',\n train=data_file('adult_not_binarized', 'train_small'),\n test=data_file('adult_not_binarized', 'test_small'),\n cd=data_file('adult_not_binarized', 'train.cd'),\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef 
test_eval_metrics_with_boost_from_average_and_model_shrinkage(config):\n mode, rate, lr = config\n train = data_file('higgs', 'train_small')\n test = data_file('higgs', 'test_small')\n cd = data_file('higgs', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Logloss',\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', '10',\n '--learn-err-log', learn_error_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--boost-from-average', 'true'\n )\n execute_catboost_fit('CPU', cmd)\n\n test_eval_path = yatest.common.test_output_path('test_output.tsv')\n learn_eval_path = yatest.common.test_output_path('learn_output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', train,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', learn_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'Logloss',\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', test_eval_path,\n '--block-size', '100',\n '--eval-period', '10',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n test_first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1:], 8)\n test_second_metrics = np.round(np.loadtxt(test_eval_path, skiprows=1)[:, 1:], 8)\n learn_first_metrics = np.round(np.loadtxt(learn_error_path, skiprows=1)[:, 1:], 8)\n learn_second_metrics = np.round(np.loadtxt(learn_eval_path, skiprows=1)[:, 1:], 8)\n assert test_first_metrics[-1] == test_second_metrics[-1]\n assert learn_first_metrics[-1] == learn_second_metrics[-1]\n\n\[email protected]('metrics', ['AUC', 'AUC,Precision'])\ndef test_eval_metrics_with_binarized_target(metrics):\n train = data_file('adult', 'train_small')\n test = data_file('adult', 'test_small')\n cd = data_file('adult', 'train.cd')\n loss_function = 'Logloss'\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--target-border', '0.25',\n '--custom-metric', metrics,\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metrics,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats',\n )\n yatest.common.execute(cmd)\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])\[email protected]('loss_function', 
MULTICLASS_LOSSES)\[email protected]('dataset', ['cloudness_small', 'cloudness_lost_class'])\ndef test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):\n if metric in MULTICLASS_LOSSES and metric != loss_function:\n # MultiClass and MultiClassOneVsAll are incompatible\n return\n\n train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '3',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n start_index = 1 if metric == loss_function else 2\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\ndef test_eval_metrics_class_names():\n labels = ['a', 'b', 'c', 'd']\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n cmd = (\n '--loss-function', 'MultiClass',\n '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--class-names', ','.join(labels),\n )\n execute_catboost_fit('CPU', cmd)\n\n eval_cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--save-stats'\n )\n execute_catboost_fit('CPU', cmd)\n yatest.common.execute(eval_cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy', 'AUC:type=Ranking'])\ndef 
test_eval_metrics_with_baseline(metric_period, metric):\n train = data_file('adult_weight', 'train_weight')\n test = data_file('adult_weight', 'test_weight')\n cd = data_file('train_adult_baseline.cd')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', metric,\n '-f', train,\n '-t', test,\n '--column-description', cd,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test,\n '--column-description', cd,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['Accuracy'])\ndef test_eval_metrics_multiclass_with_baseline(metric_period, metric):\n labels = [0, 1, 2, 3]\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_path = yatest.common.test_output_path('output.tsv')\n\n cmd = (\n '--loss-function', 'MultiClass',\n '--eval-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--classes-count', '4',\n '--metric-period', metric_period\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metric,\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '-o', eval_path,\n '--block-size', '100',\n '--eval-period', metric_period,\n '--save-stats'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)\n second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n 
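# The option under test is '--ctr-leaf-count-limit' (set to 10 below); it roughly caps the number of CTR entries kept for categorical features; see the CatBoost CLI help for its exact semantics.\n        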
'--loss-function', 'Logloss',\n        '-f', data_file('adult', 'train_small'),\n        '-t', data_file('adult', 'test_small'),\n        '--column-description', data_file('adult', 'train.cd'),\n        '--boosting-type', boosting_type,\n        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n        '--ctr-leaf-count-limit', '10',\n        '-i', '30',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n        '--eval-file', output_eval_path,\n    )\n    execute_catboost_fit('CPU', cmd)\n\n    return [local_canonical_file(output_eval_path)]\n\n\n@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\n@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])\ndef test_boost_from_average(boosting_type, grow_policy, loss_function):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')\n    output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')\n    output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')\n    baselined_train = yatest.common.test_output_path('baselined_train')\n    baselined_test = yatest.common.test_output_path('baselined_test')\n    baselined_cd = yatest.common.test_output_path('baselined.cd')\n\n    train_path = data_file('adult', 'train_small')\n    test_path = data_file('adult', 'test_small')\n    original_cd = data_file('adult', 'train.cd')\n\n    # use float32 because we use float in C++\n    sum_target = np.float32(0)\n    obj_count = np.float32(0)\n    with open(train_path) as train_f:\n        for line in train_f:\n            obj_count += 1\n            sum_target += np.float32(line.split()[1])\n\n    mean_target = sum_target / obj_count\n    if loss_function in ['Logloss', 'CrossEntropy']:\n        mean_target = -np.log(1 / mean_target - 1)\n    mean_target_str = str(mean_target)\n\n    def append_baseline_to_pool(source, target):\n        with open(source) as source_f, open(target, 'w') as target_f:\n            for line in source_f:\n                target_f.write(line.rstrip('\\n') + '\\t' + mean_target_str + '\\n')\n\n    append_baseline_to_pool(train_path, baselined_train)\n    append_baseline_to_pool(test_path, baselined_test)\n\n    with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:\n        for line in cd_input:\n            cd_output.write(line)\n        cd_output.write('18\\tBaseline\\n')\n\n    base_cmd = (\n        '--loss-function', loss_function,\n        '--boosting-type', boosting_type,\n        '--grow-policy', grow_policy,\n        '-i', '30',\n        '-w', '0.03',\n        '-T', '4',\n        '-m', output_model_path,\n    )\n\n    execute_catboost_fit('CPU', base_cmd + (\n        '-f', baselined_train,\n        '-t', baselined_test,\n        '--boost-from-average', '0',\n        '--column-description', baselined_cd,\n        '--eval-file', output_eval_path_with_baseline,\n    ))\n    execute_catboost_fit('CPU', base_cmd + (\n        '-f', train_path,\n        '-t', test_path,\n        '--boost-from-average', '1',\n        '--column-description', original_cd,\n        '--eval-file', output_eval_path_with_avg,\n    ))\n    yatest.common.execute((\n        CATBOOST_PATH, 'calc',\n        '--cd', original_cd,\n        '--input-path', test_path,\n        '-m', output_model_path,\n        '-T', '1',\n        '--output-path', output_calc_eval_path,\n    ))\n\n    assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)\n    assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)\n    return [local_canonical_file(output_eval_path_with_avg)]\n\n\n@pytest.mark.parametrize('eval_period', ['1', '2'])\ndef test_eval_non_additive_metric(eval_period):\n    output_model_path = yatest.common.test_output_path('model.bin')\n    output_eval_path = yatest.common.test_output_path('test.eval')\n    cmd = (\n        
'--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_path,\n '--eval-period', eval_period,\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n output_eval_in_parts = yatest.common.test_output_path('eval_in_parts.eval')\n cmd = (\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', 'AUC:hints=skip_train~false',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '-o', output_eval_in_parts,\n '--eval-period', eval_period,\n '--calc-on-parts',\n '--block-size', '10'\n )\n yatest.common.execute(cmd)\n\n first_metrics = np.loadtxt(output_eval_path, skiprows=1)\n second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)\n assert np.all(first_metrics == second_metrics)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('max_ctr_complexity', [1, 2])\ndef test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):\n one_hot_max_size = 2\n cd_path = yatest.common.test_output_path('cd.txt')\n train_path = yatest.common.test_output_path('train.txt')\n test_path = yatest.common.test_output_path('test.txt')\n model_path = yatest.common.test_output_path('model.bin')\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n np.savetxt(cd_path, [['0', 'Target'],\n ['1', 'Categ'],\n ['2', 'Categ']\n ], fmt='%s', delimiter='\\t')\n np.savetxt(train_path, [['1', 'A', 'X'],\n ['1', 'B', 'Y'],\n ['1', 'C', 'Y'],\n ['0', 'A', 'Z'],\n ['0', 'B', 'Z'],\n ], fmt='%s', delimiter='\\t')\n np.savetxt(test_path, [['1', 'A', 'Y'],\n ['1', 'D', 'U'],\n ['1', 'D', 'U']\n ], fmt='%s', delimiter='\\t')\n cmd_fit = ('--loss-function', 'Logloss',\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--cd', cd_path,\n '-f', train_path,\n '-t', test_path,\n '-m', model_path,\n '--eval-file', test_eval_path,\n '-i', '5',\n '-T', '1',\n '--max-ctr-complexity', str(max_ctr_complexity),\n '--one-hot-max-size', str(one_hot_max_size),\n )\n cmd_calc = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_path,\n '-m', model_path,\n '-T', '1',\n '--output-path', calc_eval_path,\n )\n execute_catboost_fit('CPU', cmd_fit)\n yatest.common.execute(cmd_calc)\n assert(compare_evals(test_eval_path, calc_eval_path))\n\n\ndef do_test_object_importances(pool, loss_function, additional_train_params):\n output_model_path = yatest.common.test_output_path('model.bin')\n object_importances_path = yatest.common.test_output_path('object_importances.tsv')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '10',\n '--boosting-type', 'Plain',\n '-T', '4',\n '-m', output_model_path,\n '--use-best-model', 'false'\n ) + additional_train_params\n execute_catboost_fit('CPU', cmd)\n\n cmd = (\n CATBOOST_PATH,\n 'ostr',\n 
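# 'ostr' is the CatBoost CLI mode that estimates the influence of each training object on the model's predictions for the test set (object importances); its output goes to object_importances_path.\n        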
'-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path,\n '-o', object_importances_path,\n )\n yatest.common.execute(cmd)\n\n return [local_canonical_file(object_importances_path)]\n\n\[email protected]('loss_function', ['RMSE', 'Logloss', 'Poisson'])\[email protected]('leaf_estimation_iteration', ['1', '2'])\ndef test_object_importances(loss_function, leaf_estimation_iteration):\n additional_train_params = (\n '--leaf-estimation-method', 'Gradient',\n '--leaf-estimation-iterations', leaf_estimation_iteration\n )\n return do_test_object_importances(\n pool='adult',\n loss_function=loss_function,\n additional_train_params=additional_train_params\n )\n\n\ndef test_object_importances_with_target_border():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4')\n )\n\n\ndef test_object_importances_with_class_weights():\n return do_test_object_importances(\n pool='adult',\n loss_function='Logloss',\n additional_train_params=('--class-weights', '0.25,0.75')\n )\n\n\ndef test_object_importances_with_target_border_and_class_weights():\n return do_test_object_importances(\n pool='adult_not_binarized',\n loss_function='Logloss',\n additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')\n )\n\n\n# Create `num_tests` test files from `test_input_path`.\ndef split_test_to(num_tests, test_input_path):\n test_input_lines = open(test_input_path).readlines()\n test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]\n for testno in range(num_tests):\n test_path = test_paths[testno]\n test_lines = test_input_lines[testno::num_tests]\n open(test_path, 'wt').write(''.join(test_lines))\n return test_paths\n\n\n# Create a few shuffles from list of test files, for use with `-t` option.\ndef create_test_shuffles(test_paths, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n num_tests = len(test_paths)\n num_shuffles = num_tests # if num_tests < 3 else num_tests * (num_tests - 1)\n test_shuffles = set()\n while len(test_shuffles) < num_shuffles:\n test_shuffles.add(tuple(prng.permutation(test_paths)))\n return [','.join(shuffle) for shuffle in test_shuffles]\n\n\ndef fit_calc_cksum(fit_stem, calc_stem, test_shuffles):\n import hashlib\n last_cksum = None\n for i, shuffle in enumerate(test_shuffles):\n model_path = yatest.common.test_output_path('model{}.bin'.format(i))\n eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))\n execute_catboost_fit('CPU', fit_stem + (\n '-t', shuffle,\n '-m', model_path,\n ))\n yatest.common.execute(calc_stem + (\n '-m', model_path,\n '--output-path', eval_path,\n ))\n cksum = hashlib.md5(open(eval_path).read()).hexdigest()\n if last_cksum is None:\n last_cksum = cksum\n continue\n assert(last_cksum == cksum)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_order_independent(boosting_type, num_tests):\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = (\n '--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (\n CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', 
test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\[email protected]('num_tests', [3, 4])\[email protected]('boosting_type', ['Plain', 'Ordered'])\ndef test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n fit_stem = (\n '--loss-function', 'QueryRMSE',\n '-f', train_path,\n '--cd', cd_path,\n '--boosting-type', boosting_type,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n calc_stem = (CATBOOST_PATH, 'calc',\n '--cd', cd_path,\n '--input-path', test_input_path,\n '-T', '4',\n )\n # We use a few shuffles of tests and check equivalence of resulting models\n prng = np.random.RandomState(seed=20181219)\n test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)\n fit_calc_cksum(fit_stem, calc_stem, test_shuffles)\n\n\ndef test_multiple_eval_sets_no_empty():\n train_path = data_file('adult', 'train_small')\n cd_path = data_file('adult', 'train.cd')\n test_input_path = data_file('adult', 'test_small')\n fit_stem = ('--loss-function', 'RMSE',\n '-f', train_path,\n '--cd', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n )\n test0_path = yatest.common.test_output_path('test0.txt')\n open(test0_path, 'wt').write('')\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', fit_stem + (\n '-t', ','.join((test_input_path, test0_path))\n ))\n\n\[email protected]('loss_function', ['RMSE', 'QueryRMSE'])\ndef test_multiple_eval_sets(loss_function):\n num_tests = 5\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n eval_path = yatest.common.test_output_path('test.eval')\n test_paths = list(reversed(split_test_to(num_tests, test_input_path)))\n cmd = ('--loss-function', loss_function,\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--use-best-model', 'false',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(eval_path)]\n\n\ndef test_multiple_eval_sets_err_log():\n num_tests = 3\n train_path = data_file('querywise', 'train')\n cd_path = data_file('querywise', 'train.cd.query_id')\n test_input_path = data_file('querywise', 'test')\n test_err_log_path = yatest.common.test_output_path('test-err.log')\n json_log_path = yatest.common.test_output_path('json.log')\n test_paths = reversed(split_test_to(num_tests, test_input_path))\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', ','.join(test_paths),\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--test-err-log', test_err_log_path,\n '--json-log', json_log_path,\n )\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(test_err_log_path),\n local_canonical_file(remove_time_from_json(json_log_path))]\n\n\n# Cast<float>(CityHash('Quvena')) is QNaN\n# Cast<float>(CityHash('Sineco')) is SNaN\[email protected]('cat_value', ['Normal', 'Quvena', 'Sineco'])\ndef test_const_cat_feature(cat_value):\n\n def make_a_set(nrows, value, seed=20181219, prng=None):\n if prng is None:\n prng = 
np.random.RandomState(seed=seed)\n label = prng.randint(0, nrows, [nrows, 1])\n feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))\n return np.concatenate([label, feature], axis=1)\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=20181219)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n cmd = ('--loss-function', 'RMSE',\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_model_metadata():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '2',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '-w', '0.1',\n '--set-metadata-from-freeargs',\n 'A', 'A',\n 'BBB', 'BBB',\n 'CCC', 'A'\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'metadata', 'set',\n '-m', output_model_path,\n '--key', 'CCC',\n '--value', 'CCC'\n )\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(output_model_path)\n\n assert 'A' == py_catboost.get_metadata()['A']\n assert 'BBB' == py_catboost.get_metadata()['BBB']\n assert 'CCC' == py_catboost.get_metadata()['CCC']\n\n\ndef test_fit_multiclass_with_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '--use-best-model', 'false',\n '--prediction-type', 'RawFormulaVal,Class',\n '--eval-file', eval_path\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_extract_multiclass_labels_from_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n 
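# build a synthetic train pool: the Target column takes values from the string class labels defined above\n    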
np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '--class-names', ','.join(labels),\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-T', '4',\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])\ndef test_save_class_labels_from_data(loss_function):\n labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n if loss_function == 'Logloss':\n cmd += ('--target-border', '0.5')\n\n execute_catboost_fit('CPU', cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n if loss_function in MULTICLASS_LOSSES:\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n elif loss_function == 'Logloss':\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n else:\n assert 'class_params' not in py_catboost.get_metadata()\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_apply_multiclass_labels_from_data(prediction_type):\n 
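# numeric target values are stored as string class names in the model metadata and appear\n    # in the eval header as Class=0.0, Class=7.0, etc. (verified by the assertions below)\n    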
labels = [10000000, 7, 0, 9999]\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type,\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n yatest.common.execute(calc_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n if prediction_type in ['Probability', 'RawFormulaVal']:\n with open(eval_path, \"rt\") as f:\n for line in f:\n assert line[:-1] == 'SampleId\\t{}:Class=0.0\\t{}:Class=7.0\\t{}:Class=9999.0\\t{}:Class=10000000.0' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n break\n else: # Class\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in labels\n\n return [local_canonical_file(eval_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', loss_function,\n '--classes-count', '4',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert 
json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type\n )\n\n yatest.common.execute(calc_cmd)\n\n if prediction_type == 'RawFormulaVal':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity\n\n if prediction_type == 'Probability':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert (abs(float(line[:-1].split()[1])) < 1e-307\n and abs(float(line[:-1].split()[4])) < 1e-307) # fictitious probabilities must be virtually zero\n\n if prediction_type == 'Class':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero\n\n return [local_canonical_file(eval_path)]\n\n\ndef test_set_class_names_implicitly():\n INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']\n SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_cmd = (\n '--loss-function', 'MultiClass',\n '-f', train_path,\n '--column-description', cd_path,\n '-i', '10',\n '-T', '4',\n '-m', model_path,\n '--use-best-model', 'false',\n )\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', 'RawFormulaVal,Class',\n )\n\n execute_catboost_fit('CPU', fit_cmd)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3, 4]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == SAVED_CLASS_LABELS\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n yatest.common.execute(calc_cmd)\n\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if not i:\n assert 
line[:-1] == 'SampleId\\t{}:Class=19.2\\t{}:Class=7.\\t{}:Class=8.0\\t{}:Class=a\\t{}:Class=bc\\tClass' \\\n .format(*(['RawFormulaVal'] * 5))\n else:\n label = line[:-1].split()[-1]\n assert label in SAVED_CLASS_LABELS\n\n return [local_canonical_file(eval_path)]\n\n\nCANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')\n\n\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_multiclass_model_backward_compatibility(prediction_type):\n model = catboost.CatBoost()\n model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)\n\n assert 'class_params' not in model.get_metadata()\n\n pool = catboost.Pool(data_file('cloudness_small', 'train_small'),\n column_description=data_file('cloudness_small', 'train.cd'))\n model.predict(data=pool, prediction_type='Class')\n model.eval_metrics(data=pool, metrics=['Accuracy'])\n\n output_path = yatest.common.test_output_path('out.txt')\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('cloudness_small', 'train_small'),\n '--column-description', data_file('cloudness_small', 'train.cd'),\n '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,\n '--prediction-type', prediction_type,\n '--output-path', output_path,\n )\n\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(output_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('use_best_model', ['true', 'false'])\ndef test_learning_rate_auto_set(boosting_type, use_best_model):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', use_best_model,\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--od-type', 'Iter',\n '--od-wait', '2',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_paths_with_dsv_scheme():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', 'dsv://' + data_file('querywise', 'train'),\n '-t', 'dsv://' + data_file('querywise', 'test'),\n '--column-description', 'dsv://' + data_file('querywise', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_skip_train():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n json_log_path = yatest.common.test_output_path('json_log.json')\n cmd = (\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false',\n '--json-log', json_log_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n 
local_canonical_file(remove_time_from_json(json_log_path))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight(boosting_type, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', 'YetiRank',\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd', output_eval_path_first)\n run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('loss_function', ['QueryRMSE', 'RMSE'])\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):\n\n def run_catboost(train_path, test_path, cd_path, eval_path):\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', train_path),\n '-t', data_file('querywise', test_path),\n '--column-description', data_file('querywise', cd_path),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '10',\n '-T', '4',\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n output_eval_path_first = yatest.common.test_output_path('test_first.eval')\n output_eval_path_second = yatest.common.test_output_path('test_second.eval')\n run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path_first)\n run_catboost('train', 'test', 'train.cd.weight', output_eval_path_second)\n assert filecmp.cmp(output_eval_path_first, output_eval_path_second)\n\n\ndef test_snapshot_without_random_seed():\n\n def run_catboost(iters, eval_path, additional_params=None):\n cmd = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', str(iters),\n '-T', '4',\n '--use-best-model', 'False',\n '--eval-file', eval_path,\n ]\n if additional_params:\n cmd += additional_params\n tmpfile = 'test_data_dumps'\n with open(tmpfile, 'w') as f:\n execute_catboost_fit('CPU', cmd, stdout=f)\n with open(tmpfile, 'r') as output:\n line_count = sum(1 for line in output)\n return line_count\n\n model_path = yatest.common.test_output_path('model.bin')\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = 
yatest.common.test_output_path('test.cbp')\n additional_params = ['--snapshot-file', progress_path, '-m', model_path]\n\n first_line_count = run_catboost(15, eval_path, additional_params=additional_params)\n second_line_count = run_catboost(30, eval_path, additional_params=additional_params)\n third_line_count = run_catboost(45, eval_path, additional_params=additional_params)\n assert first_line_count == second_line_count == third_line_count\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n cb_model = catboost.CatBoost()\n cb_model.load_model(model_path)\n random_seed = cb_model.random_seed_\n run_catboost(45, canon_eval_path, additional_params=['-r', str(random_seed)])\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_interval():\n\n def run_with_timeout(cmd, timeout):\n try:\n execute_catboost_fit('CPU', cmd, timeout=timeout)\n except ExecutionTimeoutError:\n return True\n return False\n\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n ]\n\n measure_time_iters = 100\n exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', cmd + ['-i', str(measure_time_iters)]), number=1)\n\n SNAPSHOT_INTERVAL = 1\n TIMEOUT = 5\n TOTAL_TIME = 25\n iters = int(TOTAL_TIME / (exec_time / measure_time_iters))\n\n canon_eval_path = yatest.common.test_output_path('canon_test.eval')\n canon_params = cmd + ['--eval-file', canon_eval_path, '-i', str(iters)]\n execute_catboost_fit('CPU', canon_params)\n\n eval_path = yatest.common.test_output_path('test.eval')\n progress_path = yatest.common.test_output_path('test.cbp')\n model_path = yatest.common.test_output_path('model.bin')\n params = cmd + ['--snapshot-file', progress_path,\n '--snapshot-interval', str(SNAPSHOT_INTERVAL),\n '-m', model_path,\n '--eval-file', eval_path,\n '-i', str(iters)]\n\n was_timeout = False\n while run_with_timeout(params, TIMEOUT):\n was_timeout = True\n assert was_timeout\n assert filecmp.cmp(canon_eval_path, eval_path)\n\n\ndef test_snapshot_with_different_params():\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-T', '4',\n '-i', '10',\n '--snapshot-file', 'snapshot.cbp'\n ]\n\n cmd_1 = cmd + ['--eval-metric', 'Logloss']\n cmd_2 = cmd + ['--eval-metric', 'Accuracy']\n execute_catboost_fit('CPU', cmd_1)\n try:\n execute_catboost_fit('CPU', cmd_2)\n except ExecutionError:\n return\n\n assert False\n\n\[email protected]('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'QuerySoftMax',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '--grow-policy', grow_policy,\n '--leaf-estimation-method', leaf_estimation_method,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n 
'-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_shap_verbose():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n output_log = yatest.common.test_output_path('log')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '12',\n '--fstr-type', 'ShapValues',\n '-T', '4',\n '-m', output_model_path,\n ]\n with open(output_log, 'w') as log:\n yatest.common.execute(cmd_shap, stdout=log)\n with open(output_log, 'r') as log:\n line_count = sum(1 for line in log)\n assert line_count == 5\n\n\ndef test_shap_approximate():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n execute_catboost_fit('CPU', cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Approximate',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\ndef test_shap_exact():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_values_path = yatest.common.test_output_path('shapval')\n cmd_fit = [\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '--learning-rate', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '250',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_fit)\n cmd_shap = [\n CATBOOST_PATH,\n 'fstr',\n '-o', output_values_path,\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--verbose', '0',\n '--fstr-type', 'ShapValues',\n '--shap-calc-type', 'Exact',\n '-T', '4',\n '-m', output_model_path,\n ]\n yatest.common.execute(cmd_shap)\n\n return [local_canonical_file(output_values_path)]\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', 
sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_querywise_bernoulli_bootstrap(subsample, sampling_unit, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']\n\n\[email protected]('bagging_temperature', ['0', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':\n return pytest.xfail(reason='MLTOOLS-1801')\n\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bayesian',\n '--sampling-unit', sampling_unit,\n '--bagging-temperature', bagging_temperature,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('subsample', ['0.5', '1'])\[email protected]('sampling_unit', SAMPLING_UNIT_TYPES)\[email protected]('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)\[email protected](\n 'dev_score_calc_obj_block_size',\n SCORE_CALC_OBJ_BLOCK_SIZES,\n ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS\n)\ndef test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', loss_function,\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n 
'--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--bootstrap-type', 'Bernoulli',\n '--sampling-unit', sampling_unit,\n '--subsample', subsample,\n '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd, env=dict(MKL_CBWR='SSE4_2'))\n eps = 0 if yatest.common.context.sanitize is None else 0.1\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool(eps))]\n\n\[email protected]('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])\[email protected]('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])\ndef test_bad_metrics_combination(loss_function, metric):\n BAD_PAIRS = {\n 'Logloss': ['RMSE', 'MultiClass'],\n 'RMSE': ['Logloss', 'MultiClass'],\n 'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],\n 'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],\n 'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],\n 'YetiRank': ['Logloss', 'RMSE', 'MultiClass']\n }\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\\t')\n\n data = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, data, fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, data, fmt='%s', delimiter='\\t')\n\n cmd = (\n '--loss-function', loss_function,\n '--custom-metric', metric,\n '-f', train_path,\n '-t', test_path,\n '--column-description', cd_path,\n '-i', '4',\n '-T', '4',\n )\n\n try:\n execute_catboost_fit('CPU', cmd)\n except Exception:\n assert metric in BAD_PAIRS[loss_function]\n return\n\n assert metric not in BAD_PAIRS[loss_function]\n\n\[email protected]('metric', [('good', ',AUC,'), ('bad', ',')])\ndef test_extra_commas(metric):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-w', '0.03',\n '-i', '10',\n '-T', '4',\n '--custom-metric', metric[1]\n )\n if metric[0] == 'good':\n execute_catboost_fit('CPU', cmd)\n if metric[0] == 'bad':\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,\n border_count=128, other_options=()):\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--cd', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-x', str(border_count),\n '--feature-border-type', 'GreedyLogSum',\n '-m', model_path,\n '--eval-file', eval_path,\n )\n execute_catboost_fit('CPU', cmd + other_options)\n\n\ndef test_quantized_pool():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = 
yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_features():\n test_path = data_file('higgs', 'test_small')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path=data_file('higgs', 'train_small'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=tsv_eval_path,\n other_options=('-I', '5',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='Logloss',\n pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),\n test_path=test_path,\n cd_path=data_file('higgs', 'train.cd'),\n eval_path=quantized_eval_path,\n other_options=('-I', '5',)\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_groupid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_ignored_during_quantization():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n other_options=('-I', '18-36',)\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_quantized_test():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),\n test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),\n 
cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_quantized_pool_with_large_grid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n border_count=1024\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),\n test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert filecmp.cmp(tsv_eval_path, quantized_eval_path)\n\n\ndef test_learn_without_header_eval_with_header():\n train_path = yatest.common.test_output_path('airlines_without_header')\n with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:\n with open(train_path, 'w') as without_header_file:\n without_header_file.writelines(with_header_file.readlines()[1:])\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cmd_fit = (\n '--loss-function', 'Logloss',\n '-f', train_path,\n '--cd', data_file('airlines_5K', 'cd'),\n '-i', '10',\n '-m', model_path\n )\n execute_catboost_fit('CPU', cmd_fit)\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('airlines_5K', 'test'),\n '--cd', data_file('airlines_5K', 'cd'),\n '-m', model_path,\n '--has-header'\n )\n yatest.common.execute(cmd_calc)\n\n\ndef test_group_weights_file():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, cd_file, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', cd_file),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.cd', True)\n run_catboost(second_eval_path, 'train.cd.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_group_weights_file_quantized():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n\n def run_catboost(eval_path, train, test, is_additional_query_weights):\n cmd = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', 'quantized://' + data_file('querywise', train),\n '-t', 'quantized://' + data_file('querywise', test),\n '-i', '5',\n '-T', '4',\n '--eval-file', eval_path,\n ]\n if is_additional_query_weights:\n cmd += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n execute_catboost_fit('CPU', cmd)\n\n run_catboost(first_eval_path, 'train.quantized', 'test.quantized', 
True)\n run_catboost(second_eval_path, 'train.quantized.group_weight', 'test.quantized.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_mode_roc():\n eval_path = yatest.common.test_output_path('eval.tsv')\n output_roc_path = yatest.common.test_output_path('test.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--counter-calc-method', 'SkipTest',\n '--eval-file', eval_path,\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n roc_cmd = (\n CATBOOST_PATH,\n 'roc',\n '--eval-file', eval_path,\n '--output-path', output_roc_path\n )\n yatest.common.execute(roc_cmd)\n\n return local_canonical_file(output_roc_path)\n\n\[email protected]('pool', ['adult', 'higgs', 'adult_nan'])\ndef test_convert_model_to_json(pool):\n output_model_path = yatest.common.test_output_path('model')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--use-best-model', 'false',\n '-f', data_file(pool, 'train_small'),\n '-t', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '-m', output_model_path,\n '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',\n '--model-format', 'CatboostBinary,Json'\n )\n execute_catboost_fit('CPU', cmd)\n formula_predict_path_bin = yatest.common.test_output_path('predict_test_bin.eval')\n formula_predict_path_json = yatest.common.test_output_path('predict_test_json.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.json',\n '--model-format', 'Json',\n '--output-path', formula_predict_path_json\n )\n yatest.common.execute(calc_cmd)\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file(pool, 'test_small'),\n '--column-description', data_file(pool, 'train.cd'),\n '-m', output_model_path + '.bin',\n '--output-path', formula_predict_path_bin\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_bin))\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path_json))\n\n\nLOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']\n\n\[email protected]('loss_function', LOSS_FUNCTIONS_NO_MAPE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_adult_pool(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')\n quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', quantized_train_file,\n '-t', quantized_test_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n cd_file = data_file('quantized_adult', 'pool.cd')\n test_file = data_file('quantized_adult', 'test_small.tsv')\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return 
[local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_with_one_thread(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n quantized_train_file = 'quantized://' + data_file('querywise', 'train.quantized')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', quantized_train_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '1',\n '-m', output_model_path,\n '--target-border', '0.5',\n )\n print(cmd)\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_eval_result_on_different_pool_type():\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def run_catboost(train, test, eval_path):\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--border-count', '128',\n '-f', train,\n '-t', test,\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--eval-file', eval_path,\n )\n\n execute_catboost_fit('CPU', cmd)\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n run_catboost(get_pool_path('train'), get_pool_path('test'), output_eval_path)\n run_catboost(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path)\n\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_apply_on_different_pool_type():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n cd_file = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '--learn-set', get_pool_path('train', True),\n '--test-set', get_pool_path('test', True),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--model-file', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test'),\n '--column-description', cd_file,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', get_pool_path('test', True),\n '--model-file', output_model_path,\n '--output-path', output_quantized_eval_path,\n '--prediction-type', 'RawFormulaVal'\n )\n yatest.common.execute(cmd)\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n\n\ndef test_apply_output_column_by_idx():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n learn = data_file('black_friday', 'train')\n test = data_file('black_friday', 'test')\n cd = data_file('black_friday', 'cd')\n\n cmd = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '--learn-set', learn,\n '--test-set', test,\n '--column-description', cd,\n '-i', '10',\n '-T', '4',\n '--model-file', output_model_path,\n '--has-header'\n )\n 
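# fit on black_friday first; the calc command below re-emits the input columns by index so the eval\n    # output can be compared field-by-field with the original test file\n    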
execute_catboost_fit('CPU', cmd)\n\n column_names = [\n 'Gender',\n 'Age',\n 'Occupation',\n 'City_Category',\n 'Stay_In_Current_City_Years',\n 'Marital_Status',\n 'Product_Category_1',\n 'Product_Category_2',\n 'Product_Category_3',\n ]\n output_columns = ['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)]\n output_columns = ['RawFormulaVal'] + ['GroupId', 'SampleId'] + output_columns + ['Label']\n output_columns = ','.join(output_columns)\n\n cmd = (\n CATBOOST_PATH, 'calc',\n '--input-path', test,\n '--column-description', cd,\n '--model-file', output_model_path,\n '--output-path', output_eval_path,\n '--output-columns', output_columns,\n '--has-header'\n )\n yatest.common.execute(cmd)\n\n with open(output_eval_path, 'r') as f:\n f.readline()\n eval_lines = f.readlines()\n with open(test, 'r') as f:\n f.readline()\n test_lines = f.readlines()\n\n assert len(eval_lines) == len(test_lines)\n for i in range(len(eval_lines)):\n eval_line = eval_lines[i].split('\\t')[1:] # skip RawFormulaVal\n test_line = test_lines[i].split('\\t')\n\n for eval_column, test_column in zip(eval_line, test_line):\n assert eval_column == test_column\n\n\[email protected](\n 'dataset_name,loss_function,has_pairs,has_group_weights',\n [\n ('adult_small_broken_features', 'Logloss', False, False),\n ('querywise_broken_pairs', 'RMSE', True, False),\n ('querywise_broken_group_weights', 'RMSE', False, True),\n ]\n)\ndef test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n # iterations and threads are specified just to finish fast if test is xpass\n cmd = (\n '--loss-function', loss_function,\n '--learn-set', data_file('broken_format', dataset_name, 'train'),\n '--test-set', data_file('broken_format', dataset_name, 'test'),\n '--column-description', data_file('broken_format', dataset_name, 'train.cd'),\n '-i', '1',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n if has_pairs:\n cmd += (\n '--learn-pairs', data_file('broken_format', dataset_name, 'train.pairs'),\n '--test-pairs', data_file('broken_format', dataset_name, 'test.pairs'),\n )\n if has_group_weights:\n cmd += (\n '--learn-group-weights', data_file('broken_format', dataset_name, 'train.group_weights'),\n '--test-group-weights', data_file('broken_format', dataset_name, 'test.group_weights'),\n )\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]_fixtures('compressed_data')\[email protected](\n 'loss_function,eval_metric,boosting_type',\n [\n ('QueryRMSE', 'NDCG', 'Plain'),\n ('QueryRMSE', 'NDCG', 'Ordered'),\n # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise\n ('YetiRankPairwise', 'NDCG', 'Plain'),\n ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),\n ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),\n ],\n ids=[\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',\n 'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',\n 'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',\n 
'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'\n ]\n)\ndef test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n cmd = (\n '--loss-function', loss_function,\n '-f', os.path.join(compressed_data.name, 'mslr_web1k', 'train'),\n '-t', os.path.join(compressed_data.name, 'mslr_web1k', 'test'),\n '--column-description', os.path.join(compressed_data.name, 'mslr_web1k', 'cd.with_cat_features'),\n '--boosting-type', boosting_type,\n '-i', '100',\n '-T', '8',\n '--eval-metric', eval_metric,\n '--metric-period', '100',\n '--use-best-model', 'false',\n '--test-err-log', test_error_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]\n\n\ndef test_gradient_walker():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--boosting-type', 'Ordered',\n '--max-ctr-complexity', '4',\n '--leaf-estimation-iterations', '10',\n '--leaf-estimation-backtracking', 'AnyImprovement',\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\n# training with pairwise scoring with categorical features on CPU does not yet support one-hot features\n# so they are disabled by default, explicit non-default specification should be an error\[email protected](\n 'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],\n ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']\n)\ndef test_groupwise_with_bad_one_hot_max_size(loss_function):\n cmd = (\n '--loss-function', loss_function,\n '--has-header',\n '-f', data_file('black_friday', 'train'),\n '-t', data_file('black_friday', 'test'),\n '--column-description', data_file('black_friday', 'cd'),\n '--boosting-type', 'Plain',\n '-i', '10',\n '-T', '4',\n '--eval-metric', 'NDCG',\n '--one_hot_max_size', '10'\n )\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_load_quantized_pool_with_double_baseline():\n # Dataset with 3 random columns, first column is Target, seconds columns is Num, third column\n # is Baseline.\n #\n # There are only 10 rows in dataset.\n cmd = (\n '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),\n '-i', '10')\n\n execute_catboost_fit('CPU', cmd)\n\n\ndef test_write_predictions_to_streams():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval')\n\n cmd = (\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--eval-file', output_eval_path,\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', output_model_path\n )\n execute_catboost_fit('CPU', cmd)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stdout',\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stdout:\n yatest.common.execute(calc_cmd, stdout=catboost_stdout)\n\n assert 
compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', output_model_path,\n '--output-path', 'stream://stderr'\n )\n with open(calc_output_eval_path_redirected, 'w') as catboost_stderr:\n yatest.common.execute(calc_cmd, stderr=catboost_stderr)\n\n assert compare_evals(output_eval_path, calc_output_eval_path_redirected)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_mvs_bootstrap(boosting_type):\n def run_catboost(eval_path, mvs_sample_rate):\n cmd = [\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '5',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', data_file('airlines_5K', 'cd'),\n '--has-header',\n '--boosting-type', boosting_type,\n '--bootstrap-type', 'MVS',\n '--subsample', mvs_sample_rate,\n '-i', '50',\n '-w', '0.03',\n '-T', '6',\n '-r', '0',\n '--leaf-estimation-iterations', '10',\n '--eval-file', eval_path,\n ]\n execute_catboost_fit('CPU', cmd)\n\n ref_eval_path = yatest.common.test_output_path('test.eval')\n run_catboost(ref_eval_path, '0.5')\n\n for sample_rate in ('0.1', '0.9'):\n eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))\n run_catboost(eval_path, sample_rate)\n assert (filecmp.cmp(ref_eval_path, eval_path) is False)\n\n return [local_canonical_file(ref_eval_path)]\n\n\ndef test_simple_ctr():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n simple_ctr = ','.join((\n 'Borders:TargetBorderCount=15',\n 'Buckets:TargetBorderCount=15',\n 'Borders:TargetBorderType=MinEntropy',\n 'Counter:CtrBorderCount=20',\n ))\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', 'Ordered',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--simple-ctr', simple_ctr,\n ))\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_output_options():\n output_options_path = 'training_options.json'\n train_dir = 'catboost_info'\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--train-dir', train_dir,\n '--training-options-file', output_options_path,\n )\n execute_catboost_fit('CPU', cmd)\n return local_canonical_file(os.path.join(train_dir, output_options_path))\n\n\ndef test_target_border():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n '--target-border', '0.3'\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_monotonic_constraint():\n train_pool = catboost.Pool(\n data_file('higgs', 'train_small'),\n column_description=data_file('higgs', 'train.cd')\n )\n test_pool = catboost.Pool(\n data_file('higgs', 'test_small'),\n 
column_description=data_file('higgs', 'train.cd')\n )\n monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]\n model = catboost.CatBoostRegressor(\n n_estimators=100,\n learning_rate=0.2,\n monotone_constraints=monotone_constraints,\n verbose=False\n ).fit(train_pool, eval_set=test_pool)\n\n dummy_data = np.zeros((1, test_pool.num_col()))\n dummy_target = np.zeros(len(dummy_data))\n feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)\n for feature_index, feature_name in enumerate(model.feature_names_):\n monotonicity = monotone_constraints[feature_index]\n if monotonicity == 0:\n continue\n feature_borders = feature_stats[feature_name]['borders']\n if len(feature_borders) == 0:\n continue\n mid_values = (feature_borders[:-1] + feature_borders[1:]) / 2\n min_value = feature_borders[0] - 1\n max_value = feature_borders[-1] + 1\n feature_values = np.array([min_value] + list(mid_values) + [max_value])\n for obj in test_pool.get_features():\n obj_variations = np.zeros((len(feature_values), test_pool.num_col()))\n obj_variations[:] = obj.reshape((1, -1))\n obj_variations[:, feature_index] = feature_values\n model_predicts = model.predict(obj_variations)\n prediction_deltas = model_predicts[1:] - model_predicts[:-1]\n assert np.all(prediction_deltas * monotonicity >= 0)\n\n\ndef test_different_formats_of_monotone_constraints():\n eval_path = yatest.common.test_output_path('eval.tsv')\n eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')\n eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train_with_id.cd'),\n '-i', '20'\n ]\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])\n assert not filecmp.cmp(eval_path_with_monotone1, eval_path)\n\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n params_file = yatest.common.test_output_path(\"params.json\")\n for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:\n json.dump({'monotone_constraints': constraints}, open(params_file, 'w'))\n execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])\n assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)\n\n\nclass TestModelWithoutParams(object):\n\n @pytest.fixture(\n params=[\n ('cut-info', 'RMSE'),\n ('cut-params', 'RMSE'),\n ('cut-info', 'QueryRMSE'),\n ('cut-params', 'QueryRMSE'),\n ],\n ids=lambda param: '-'.join(param),\n )\n def model_etc(self, request):\n cut, loss = request.param\n model_json = yatest.common.test_output_path('model.json')\n learn_set = data_file('querywise', 'train')\n test_set = data_file('querywise', 'test')\n cd = data_file('querywise', 'train.cd')\n cmd = (\n '--loss-function', loss,\n '--learn-set', learn_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--iterations', '10',\n '--model-file', model_json,\n '--model-format', 'Json',\n '--use-best-model', 'false'\n )\n execute_catboost_fit('CPU', cmd)\n model = 
json.load(open(model_json))\n if cut == 'cut-info':\n model.pop('model_info')\n if cut == 'cut-params':\n model['model_info'].pop('params')\n json.dump(model, open(model_json, 'wt'))\n return model_json, learn_set, test_set, cd\n\n def test_ostr(self, model_etc):\n model_json, train_set, test_set, cd = model_etc\n ostr_result = yatest.common.test_output_path('result.txt')\n ostr_cmd = (\n CATBOOST_PATH, 'ostr',\n '--learn-set', train_set,\n '--test-set', test_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', ostr_result,\n )\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(ostr_cmd)\n\n @pytest.mark.parametrize('should_fail,fstr_type', [\n (False, 'FeatureImportance'),\n (False, 'PredictionValuesChange'),\n (True, 'LossFunctionChange'),\n (False, 'ShapValues'),\n ])\n def test_fstr(self, model_etc, fstr_type, should_fail):\n model_json, train_set, _, cd = model_etc\n fstr_result = yatest.common.test_output_path('result.txt')\n fstr_cmd = (\n CATBOOST_PATH, 'fstr',\n '--input-path', train_set,\n '--column-description', cd,\n '--model-file', model_json,\n '--model-format', 'Json',\n '--output-path', fstr_result,\n '--fstr-type', fstr_type,\n )\n if should_fail:\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(fstr_cmd)\n else:\n yatest.common.execute(fstr_cmd)\n\n\ndef test_equal_feature_names():\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', (\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd.equal_names'),\n ))\n\n\ndef enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):\n if eval_mode == 'OneVsOthers':\n baseline = 'Baseline_set_{set_idx}_fold_{fold_idx}'\n else:\n baseline = 'Baseline_fold_{fold_idx}'\n if not only_baseline:\n testing = 'Testing_set_{set_idx}_fold_{fold_idx}'\n dirs = []\n for set_idx in range(set_count):\n for fold_idx in range(offset, offset + fold_count):\n fold = baseline.format(fold_idx=fold_idx, set_idx=set_idx)\n if fold not in dirs:\n dirs += [fold]\n if not only_baseline:\n fold = testing.format(fold_idx=fold_idx, set_idx=set_idx)\n dirs += [fold]\n return dirs\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\ndef test_eval_feature(eval_mode, features_to_eval, offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in 
enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('offset', [0, 2])\ndef test_eval_feature_empty_feature_set(offset):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n eval_mode = 'OneVsNone'\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = 1\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=True):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('fold_size_unit', ['Object', 'Group'])\ndef test_eval_feature_timesplit(eval_mode, fold_size_unit):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n fstr_file = 'fstrs'\n train_dir = yatest.common.test_output_path('')\n fold_count = 2\n features_to_eval = '2-5;10-15'\n offset = 2\n fold_size = 500\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '-i', '30',\n '-T', '4',\n '-w', '0.7',\n '--feature-eval-output-file', output_eval_path,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', fold_size_unit,\n '--fold-size', str(fold_size),\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n '--fstr-file', fstr_file,\n '--learn-timestamps', data_file('querywise', 'train.timestamps'),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(cmd)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n artifacts += [\n local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),\n local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),\n ]\n return artifacts\n\n\[email protected]('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])\[email protected]('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])\[email protected]('offset', [0, 2])\[email protected]('fstr_mode', ['fstr', 'model'])\ndef test_eval_feature_snapshot(eval_mode, features_to_eval, offset, fstr_mode):\n test_err_log = 
'test_error.log'\n fstr_file = 'fstrs'\n model_file = 'model.bin'\n fold_count = 2\n snapshot_interval = 1\n\n def make_cmd(summary, train_dir):\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '200',\n '-T', '4',\n '-w', '0.1',\n '--boost-from-average', 'False',\n '--permutations', '1',\n '--snapshot-interval', str(snapshot_interval),\n '--features-to-evaluate', features_to_eval,\n '--feature-eval-mode', eval_mode,\n '--feature-eval-output-file', summary,\n '--offset', str(offset),\n '--fold-count', str(fold_count),\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--test-err-log', test_err_log,\n '--train-dir', train_dir,\n )\n if fstr_mode == 'fstr':\n cmd += ('--fstr-file', fstr_file,)\n else:\n cmd += (\n '--model-file', model_file,\n '--use-best-model', 'False',\n )\n return cmd\n\n reference_summary = yatest.common.test_output_path('reference_feature.eval')\n reference_dir = yatest.common.test_output_path('reference')\n yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))\n\n snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')\n snapshot_dir = yatest.common.test_output_path('snapshot')\n snapshot = yatest.common.test_output_path('eval_feature.snapshot')\n eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n resume_from_snapshot_count = 15\n for idx in range(resume_from_snapshot_count):\n timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1\n stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)\n yatest.common.execute(['rm', '-rf', snapshot_dir])\n yatest.common.execute(eval_with_snapshot_cmd)\n\n assert filecmp.cmp(reference_summary, snapshot_summary)\n\n pj = os.path.join\n set_count = len(features_to_eval.split(';'))\n for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):\n assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))\n if fstr_mode == 'fstr':\n assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))\n else:\n def load_json_model(model_path):\n model = catboost.CatBoost()\n model.load_model(model_path)\n model.save_model(model_path + '.json', format='json')\n with open(model_path + '.json') as json_model_file:\n json_model = json.load(json_model_file)\n json_model[\"model_info\"][\"output_options\"] = \"\"\n json_model[\"model_info\"][\"train_finish_time\"] = \"\"\n json_model[\"model_info\"][\"model_guid\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"snapshot_file\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"save_snapshot\"] = \"\"\n json_model[\"model_info\"][\"params\"][\"flat_params\"][\"train_dir\"] = \"\"\n return json_model\n assert load_json_model(pj(reference_dir, output_dir, model_file)) == load_json_model(pj(snapshot_dir, output_dir, model_file))\n\n\ndef test_eval_feature_snapshot_wrong_options():\n summary = yatest.common.test_output_path('eval_feature_summary')\n snapshot = yatest.common.test_output_path('eval_feature_snapshot')\n\n def make_cmd(fold_size):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'RMSE',\n '-f', data_file('querywise', 'train'),\n 
'--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', str(fold_size),\n '--snapshot-file', snapshot\n )\n\n def stop_after_timeout(cmd, timeout):\n try:\n yatest.common.execute(cmd, timeout=timeout)\n except ExecutionTimeoutError:\n pass\n\n stop_after_timeout(cmd=make_cmd(fold_size=40), timeout=3)\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd(fold_size=20))\n\n\ndef test_eval_feature_parse_timestamps():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd(timestamps_file):\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '600',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--fold-size', '40',\n '--learn-timestamps', data_file('querywise', timestamps_file),\n '--timesplit-quantile', '0.75'\n )\n\n yatest.common.execute(make_cmd('train.timestamps'))\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd('train.group_weights'))\n\n\ndef test_eval_feature_relative_fold_size():\n summary = yatest.common.test_output_path('eval_feature_summary')\n\n def make_cmd():\n return (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '100',\n '-T', '4',\n '-w', '0.1',\n '--permutations', '1',\n '--snapshot-interval', '1',\n '--features-to-evaluate', '2-5',\n '--feature-eval-mode', 'OneVsAll',\n '--feature-eval-output-file', summary,\n '--offset', '0',\n '--fold-count', '5',\n '--fold-size-unit', 'Group',\n '--relative-fold-size', '0.1',\n )\n\n yatest.common.execute(make_cmd())\n\n with pytest.raises(yatest.common.ExecutionError):\n yatest.common.execute(make_cmd() + ('--fold-size', '40',))\n\n\nTEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']\n\n\[email protected]('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])\[email protected]('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('eval_metric_use_weights', [True, False, None],\n ids=['eval_weights=' + str(mode) for mode in [True, False, None]])\[email protected]('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,\n ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])\[email protected]('custom_metric_use_weights', [True, False, None],\n ids=['custom_weights=' + str(mode) for mode in [True, False, None]])\ndef test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n if dataset_has_weights:\n train_pool_filename = data_file('adult_weight', 'train_weight')\n test_pool_filename = 
data_file('adult_weight', 'test_weight')\n pool_cd_filename = data_file('adult_weight', 'train.cd')\n else:\n train_pool_filename = data_file('adult', 'train_small')\n test_pool_filename = data_file('adult', 'test_small')\n pool_cd_filename = data_file('adult', 'train.cd')\n\n eval_metric = eval_metric_loss\n if eval_metric == 'AUC':\n eval_metric += ':hints=skip_train~false'\n if eval_metric_use_weights is not None:\n eval_metric += ';' if eval_metric_loss == 'AUC' else ':'\n eval_metric += 'use_weights=' + str(eval_metric_use_weights)\n\n custom_metric = custom_metric_loss\n if custom_metric == 'AUC':\n custom_metric += ':hints=skip_train~false'\n if custom_metric_use_weights is not None:\n custom_metric += ';' if custom_metric_loss == 'AUC' else ':'\n custom_metric += 'use_weights=' + str(custom_metric_use_weights)\n\n cmd = (\n '--loss-function', 'Logloss',\n '-f', train_pool_filename,\n '-t', test_pool_filename,\n '--cd', pool_cd_filename,\n '-i', '10',\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-metric', eval_metric,\n '--custom-metric', custom_metric,\n )\n should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)\n try:\n execute_catboost_fit('CPU', cmd)\n except ExecutionError:\n assert should_fail\n return\n for filename in [learn_error_path, test_error_path]:\n with open(filename, 'r') as f:\n metrics_descriptions = f.readline().split('\\t')[1:] # without 'iter' column\n metrics_descriptions[-1] = metrics_descriptions[-1][:-1] # remove '\\n' symbol\n unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])\n assert len(metrics_descriptions) == len(unique_metrics_descriptions)\n expected_objective_metric_description = 'Logloss'\n\n if dataset_has_weights:\n expected_eval_metric_description = \\\n eval_metric_loss if eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)\n\n if custom_metric_loss == 'AUC':\n expected_custom_metrics_descriptions = \\\n ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]\n else:\n expected_custom_metrics_descriptions = (\n [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']\n if custom_metric_use_weights is None\n else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])\n else:\n expected_eval_metric_description = eval_metric_loss\n expected_custom_metrics_descriptions = [custom_metric_loss]\n assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_leafwise_scoring():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', cmd)\n learn_errors_log = open(learn_error_path).read()\n execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])\n new_learn_errors_log = open(learn_error_path).read()\n assert new_learn_errors_log == learn_errors_log\n\n\ndef test_group_features():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_predictions_path = 
yatest.common.test_output_path('test_predictions.tsv')\n model_path = yatest.common.test_output_path('model.bin')\n fit_cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '-m', model_path,\n '--learn-err-log', learn_error_path\n ]\n execute_catboost_fit('CPU', fit_cmd)\n calc_cmd = [\n CATBOOST_PATH,\n 'calc',\n '-m', model_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', test_predictions_path,\n '--output-columns', 'Probability'\n ]\n yatest.common.execute(calc_cmd)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_predictions_path)]\n\n\ndef test_model_sum():\n model_path = yatest.common.test_output_path('model.bin')\n model_eval = yatest.common.test_output_path('model_eval.txt')\n execute_catboost_fit('CPU', [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '10',\n '-m', model_path,\n '-t', data_file('adult', 'test_small'),\n '--eval-file', model_eval,\n '--output-columns', 'SampleId,RawFormulaVal',\n ])\n\n sum_path = yatest.common.test_output_path('sum.bin')\n yatest.common.execute([\n CATBOOST_PATH,\n 'model-sum',\n '--model-with-weight', '{}={}'.format(model_path, 0.75),\n '--model-with-weight', '{}={}'.format(model_path, 0.25),\n '--output-path', sum_path,\n ])\n\n sum_eval = yatest.common.test_output_path('sum_eval.txt')\n yatest.common.execute([\n CATBOOST_PATH,\n 'calc',\n '-m', sum_path,\n '--input-path', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '--output-path', sum_eval,\n ])\n yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])\n\n\ndef test_external_feature_names():\n fstr_cd_with_id_path = yatest.common.test_output_path('fstr_cd_with_id.tsv')\n fstr_cd_without_id_path = yatest.common.test_output_path('fstr_cd_without_id.tsv')\n\n for cd_has_feature_names in [False, True]:\n if cd_has_feature_names:\n cd_file = data_file('adult', 'train_with_id.cd')\n fstr_path = fstr_cd_with_id_path\n else:\n cd_file = data_file('adult', 'train.cd')\n fstr_path = fstr_cd_without_id_path\n\n cmd = (\n '--loss-function', 'Logloss',\n '--target-border', '0.5',\n '-f', data_file('adult', 'train_small'),\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '--feature-names-path', data_file('adult', 'feature_names'),\n '--fstr-type', 'FeatureImportance',\n '--fstr-file', fstr_path\n )\n execute_catboost_fit('CPU', cmd)\n\n assert filecmp.cmp(fstr_cd_with_id_path, fstr_cd_without_id_path)\n\n return [local_canonical_file(fstr_cd_with_id_path)]\n\n\ndef test_diffusion_temperature():\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--langevin', 'True',\n '--diffusion-temperature', '1000',\n '--eval-file', output_eval_path\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])\ndef test_model_shrink_correct(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', 
data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n execute_catboost_fit('CPU', cmd)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])\ndef test_model_shrink_incorrect(config):\n mode, rate, lr = config\n output_eval_path = yatest.common.test_output_path('test.eval')\n cmd = [\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--cd', data_file('adult', 'train.cd'),\n '-i', '50',\n '-r', '0',\n '--eval-file', output_eval_path,\n '--model-shrink-mode', mode,\n '--model-shrink-rate', str(rate),\n '--learning-rate', str(lr)\n ]\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', cmd)\n\n\[email protected]('average', ['Macro', 'Micro', 'Weighted'])\ndef test_total_f1_params(average):\n return do_test_eval_metrics(\n metric='TotalF1:average=' + average,\n metric_period='1',\n train=data_file('cloudness_small', 'train_small'),\n test=data_file('cloudness_small', 'test_small'),\n cd=data_file('cloudness_small', 'train.cd'),\n loss_function='MultiClass'\n )\n\n\ndef test_eval_metrics_with_pairs():\n do_test_eval_metrics(\n metric='PairAccuracy',\n metric_period='1',\n train=data_file('querywise', 'train'),\n test=data_file('querywise', 'test'),\n cd=data_file('querywise', 'train.cd'),\n loss_function='PairLogit',\n additional_train_params=(\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs')\n ),\n additional_eval_params=(\n '--input-pairs', data_file('querywise', 'test.pairs')\n )\n )\n\n\ndef test_tweedie():\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n cmd = (\n '--loss-function', 'Tweedie:variance_power=1.5',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '-i', '100',\n '--learning-rate', '0.5',\n '--learn-err-log', learn_error_path\n )\n execute_catboost_fit('CPU', cmd)\n\n return [local_canonical_file(learn_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\ndef test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = 
data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [\n local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(test_eval_path)\n ]\n\n\[email protected]('separator_type', SEPARATOR_TYPES)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n return [\n local_canonical_file(learn_error_path),\n local_canonical_file(test_error_path),\n local_canonical_file(test_eval_path)\n ]\n\n\[email protected]('grow_policy', GROW_POLICIES)\ndef test_shrink_model_with_text_features(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n loss_function = 'MultiClass'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n\n dictionaries = 
[{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--text-processing', json.dumps(text_processing),\n '--grow-policy', grow_policy,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'true',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss'])\ndef test_virtual_ensembles(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train_small')\n test_path = data_file('querywise', 'test') if loss_function in REGRESSION_LOSSES else data_file('adult', 'test_small')\n cd_path = data_file('querywise', 'train.cd') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train.cd')\n test_eval_path = yatest.common.test_output_path('test.eval')\n\n cmd = [\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '--posterior-sampling', 'true',\n '--eval-file', test_eval_path,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n ]\n if loss_function == 'RMSEWithUncertainty':\n cmd += ['--prediction-type', 'RMSEWithUncertainty']\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--virtual-ensembles-count', '1',\n '--prediction-type', 'VirtEnsembles',\n )\n yatest.common.execute(calc_cmd)\n assert compare_evals(test_eval_path, formula_predict_path, skip_header=True)\n\n\[email protected]('virtual_ensembles_count', ['1', '10'])\[email protected]('prediction_type', ['TotalUncertainty', 'VirtEnsembles'])\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MultiClass'])\ndef test_uncertainty_prediction(virtual_ensembles_count, prediction_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n pool_names = {\n 'RMSE' : 'querywise',\n 'RMSEWithUncertainty' : 'querywise',\n 'Logloss' : 'adult',\n 'MultiClass' : 'cloudness_small'\n }\n pool_name = pool_names[loss_function]\n train_path = data_file(pool_name, 'train') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train_small')\n test_path = data_file(pool_name, 'test') if 
loss_function in REGRESSION_LOSSES else data_file(pool_name, 'test_small')\n cd_path = data_file(pool_name, 'train.cd') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '--posterior-sampling', 'true',\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--virtual-ensembles-count', virtual_ensembles_count,\n '--prediction-type', prediction_type,\n )\n yatest.common.execute(calc_cmd)\n\n model = catboost.CatBoost()\n model.load_model(output_model_path)\n pool = catboost.Pool(test_path, column_description=cd_path)\n py_preds = model.virtual_ensembles_predict(\n pool,\n prediction_type=prediction_type,\n virtual_ensembles_count=int(virtual_ensembles_count))\n\n cli_preds = np.genfromtxt(\n formula_predict_path,\n delimiter='\\t',\n dtype=float,\n skip_header=True)\n assert(np.allclose(py_preds.reshape(-1,), cli_preds[:, 1:].reshape(-1,), rtol=1e-10))\n\n return local_canonical_file(formula_predict_path)\n\n\[email protected]('loss_function', ['RMSE', 'RMSEWithUncertainty'])\ndef test_uncertainty_prediction_requirements(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n train_path = data_file('querywise', 'train')\n test_path = data_file('querywise', 'test')\n cd_path = data_file('querywise', 'train.cd')\n cmd = (\n '--use-best-model', 'false',\n '-f', train_path,\n '-t', test_path,\n '--loss-function', loss_function,\n '--column-description', cd_path,\n '-i', '200',\n '-T', '4',\n '-m', output_model_path,\n )\n execute_catboost_fit('CPU', cmd)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', output_model_path,\n '--output-path', formula_predict_path,\n '--prediction-type', 'VirtEnsembles'\n )\n try:\n yatest.common.execute(calc_cmd)\n except:\n return\n # assert replaced to warning\n # assert False\n\n\nDICTIONARIES_OPTIONS = [\n {\n \"Simple\": \"token_level_type=Word:occurrence_lower_bound=50\"\n },\n {\n \"UniGramOccur5\": \"occurrence_lower_bound=5:token_level_type=Letter\",\n \"BiGramOccur2\": \"occurrence_lower_bound=2:gram_order=2:token_level_type=Letter\",\n \"WordDictOccur1\": \"occurrence_lower_bound=1:token_level_type=Word\",\n \"WordDictOccur2\": \"occurrence_lower_bound=2:token_level_type=Word\",\n \"WordDictOccur3\": \"occurrence_lower_bound=3:token_level_type=Word\"\n },\n {\n \"Unigram\": \"gram_order=1:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Bigram\": \"gram_order=2:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Trigram\": \"gram_order=3:token_level_type=Letter:occurrence_lower_bound=50\"\n },\n {\n \"Letter\": \"token_level_type=Letter:occurrence_lower_bound=50\",\n \"Word\": \"token_level_type=Word:occurrence_lower_bound=50\"\n }\n]\n\n\[email protected]('dictionaries', DICTIONARIES_OPTIONS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_text_processing_options(dictionaries, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = 
yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])\n feature_estimators = 'BM25,BoW,NaiveBayes'\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n cmd = (\n '--loss-function', loss_function,\n '--eval-metric', 'Accuracy',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--column-description', cd_file,\n '--dictionaries', dictionaries,\n '--feature-calcers', feature_estimators,\n '--boosting-type', 'Plain',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fit_with_per_feature_text_options(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n text_processing = {\n 'tokenizers': [\n {'tokenizer_id': 'Space', 'delimiter': ' '},\n {'tokenizer_id': 'Comma', 'delimiter': ','},\n ],\n 'dictionaries': [\n {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},\n ],\n 'feature_processing': {\n '0': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n '1': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},\n ],\n '2': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n }\n }\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', data_file(pool_name, 'train'),\n '-t', test_file,\n '--text-processing', json.dumps(text_processing),\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, 
output_columns=['RawFormulaVal'])\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_embeddings_train(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n cmd = (\n '--loss-function', 'Logloss',\n '--eval-metric', 'AUC',\n '-f', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n '-t', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n '--column-description', ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--eval-file', test_eval_path,\n '--output-columns', 'RawFormulaVal',\n '--use-best-model', 'false',\n )\n execute_catboost_fit('CPU', cmd)\n\n apply_catboost(\n output_model_path,\n ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,\n ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,\n calc_eval_path,\n output_columns=['RawFormulaVal']\n )\n assert filecmp.cmp(test_eval_path, calc_eval_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_dump_options():\n snapshot_path = yatest.common.test_output_path('snapshot.bin')\n key = 'summary'\n value = '{\"key1\":\"value1\", \"key2\":\"value2\"}'\n cmd = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '--snapshot-file', snapshot_path,\n '--use-best-model', 'false',\n '--set-metadata-from-freeargs', '--', key, value,\n )\n execute_catboost_fit('CPU', cmd)\n\n options_path = yatest.common.test_output_path('options.json')\n dump_options_cmd = (\n get_catboost_binary_path(),\n 'dump-options',\n '--input', snapshot_path,\n '--output', options_path\n )\n yatest.common.execute(dump_options_cmd)\n with open(options_path) as options:\n options_json = json.load(options)\n assert options_json['metadata'][key] == value\n\n\ndef prepare_pool_metainfo_with_feature_tags():\n pool_metainfo = {\n 'tags': {\n 'A': {\n 'features': [0, 1, 2, 3, 4, 5, 6, 7]\n },\n 'B': {\n 'features': [12, 13, 14, 15, 16]\n },\n 'C': {\n 'features': [5, 6, 7, 8, 9, 10, 11, 12, 13]\n }\n }\n }\n pool_metainfo_path = yatest.common.test_output_path('pool_metainfo.json')\n with open(pool_metainfo_path, 'w') as f:\n json.dump(pool_metainfo, f)\n\n return pool_metainfo, pool_metainfo_path\n\n\ndef test_feature_tags_in_ignore_features():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n )\n\n for ignored_tags in (['A'], ['A', 'B'], ['B', 'C']):\n output_eval_path_1 = yatest.common.test_output_path('1_test.eval')\n ignored_features = sum((pool_metainfo['tags'][tag]['features'] for tag in ignored_tags), [])\n cmd_1 = base_cmd + (\n '--eval-file', output_eval_path_1,\n '--ignore-features', ':'.join(map(str, ignored_features)),\n )\n\n output_eval_path_2 = 
yatest.common.test_output_path('2_test.eval')\n cmd_2 = base_cmd + (\n '--eval-file', output_eval_path_2,\n '--ignore-features', ':'.join('#{}'.format(tag) for tag in ignored_tags),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_eval_path_1, output_eval_path_2)\n\n\ndef test_feature_tags_in_features_for_select():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'select-features',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n '--num-features-to-select', '3',\n '--features-selection-algorithm', 'RecursiveByPredictionValuesChange',\n '--features-selection-steps', '2',\n '--train-final-model',\n )\n\n for selection_tags in (['A', 'B'], ['A', 'C'], ['B', 'C'], ['A', 'B', 'C']):\n output_summary_path_1 = yatest.common.test_output_path('1_summary.json')\n features_for_select = sum((pool_metainfo['tags'][tag]['features'] for tag in selection_tags), [])\n cmd_1 = base_cmd + (\n '--features-selection-result-path', output_summary_path_1,\n '--features-for-select', ','.join(map(str, features_for_select)),\n )\n\n output_summary_path_2 = yatest.common.test_output_path('2_summary.json')\n cmd_2 = base_cmd + (\n '--features-selection-result-path', output_summary_path_2,\n '--features-for-select', ','.join('#{}'.format(tag) for tag in selection_tags),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_summary_path_1, output_summary_path_2)\n\n\ndef test_feature_tags_in_features_to_evaluate():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n base_cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--feature-eval-mode', 'OneVsAll',\n '-i', '30',\n '-T', '4',\n '--fold-count', '2',\n '--fold-size-unit', 'Object',\n '--fold-size', '50'\n )\n\n features_to_evaluate_1 = []\n features_to_evaluate_2 = []\n for tags_set in (['A'], ['A', 'B'], ['B', 'C']):\n features_set = sum((pool_metainfo['tags'][tag]['features'] for tag in tags_set), [])\n features_to_evaluate_1.append(','.join(map(str, features_set)))\n features_to_evaluate_2.append(','.join('#{}'.format(tag) for tag in tags_set))\n\n output_eval_path_1 = yatest.common.test_output_path('1_feature.eval')\n cmd_1 = base_cmd + (\n '--feature-eval-output-file', output_eval_path_1,\n '--features-to-evaluate', ';'.join(map(str, features_to_evaluate_1)),\n )\n\n output_eval_path_2 = yatest.common.test_output_path('2_feature.eval')\n cmd_2 = base_cmd + (\n '--feature-eval-output-file', output_eval_path_2,\n '--features-to-evaluate', ';'.join(features_to_evaluate_2),\n '--pool-metainfo-path', pool_metainfo_path,\n )\n\n yatest.common.execute(cmd_1)\n yatest.common.execute(cmd_2)\n assert filecmp.cmp(output_eval_path_1, output_eval_path_2)\n\n\ndef test_feature_tags_in_options_file():\n pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()\n\n training_options_path = yatest.common.test_output_path('training_options.json')\n cmd = (\n CATBOOST_PATH,\n 'fit',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n 
'--column-description', data_file('adult', 'train.cd'),\n '-i', '50',\n '-T', '4',\n '--pool-metainfo-path', pool_metainfo_path,\n '--training-options-file', training_options_path,\n )\n yatest.common.execute(cmd)\n\n with open(training_options_path) as f:\n options = json.load(f)\n assert options['pool_metainfo_options'] == pool_metainfo\n"
] | [
[
"numpy.allclose",
"pandas.read_csv",
"numpy.savetxt",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.seed",
"numpy.float32",
"numpy.genfromtxt",
"numpy.random.RandomState",
"numpy.arange",
"numpy.hstack",
"numpy.all",
"numpy.log",
"numpy.random.random",
"numpy.loadtxt",
"numpy.array",
"numpy.dot",
"numpy.mean"
]
] |
hz512/Smart-Parking-Enforcement-System | [
"e990903de545693ad6e2536bf167c69ab672d16a"
] | [
"utils/callbacks.py"
] | [
"import tensorflow.keras as tfk\r\nimport tensorflow as tf\r\nimport tensorflow.keras.layers as layers\r\nimport json\r\nimport collections\r\nfrom datetime import datetime\r\nimport os\r\n\r\n\r\nclass LrStepDecay(tfk.callbacks.Callback):\r\n def __init__(self,\r\n decay_rate,\r\n decay_at):\r\n super(LrStepDecay, self).__init__()\r\n self.decay_rate = decay_rate\r\n self.decay_at = decay_at\r\n self.counter = 0\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n if self.counter >= len(self.decay_at):\r\n return\r\n\r\n if epoch >= self.decay_at[self.counter]:\r\n self.counter += 1\r\n new_lr = float(tfk.backend.get_value(self.model.optimizer.learning_rate)) * self.decay_rate\r\n tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)\r\n print(\"\\nEpoch %05d: Learning rate is %3.6f.\" % (epoch, new_lr))\r\n\r\n\r\nclass Logger(tfk.callbacks.Callback):\r\n\r\n def __init__(self,\r\n name,\r\n log_dir):\r\n super(Logger, self).__init__()\r\n self.name = name\r\n self.log_dir = log_dir\r\n self.log = collections.defaultdict(list)\r\n self.start_time = datetime.now()\r\n if not os.path.isdir(self.log_dir):\r\n os.mkdir(self.log_dir)\r\n\r\n def on_epoch_begin(self, epoch, logs=None):\r\n self.start_time = datetime.now()\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n file = open('{}/{}.json'.format(self.log_dir, self.name), 'w')\r\n for key in logs:\r\n self.log[key].append(logs[key])\r\n self.log['epoch'].append(epoch)\r\n self.log['walltime'].append((datetime.now() - self.start_time).seconds)\r\n json.dump(self.log, file)\r\n file.close()"
] | [
[
"tensorflow.keras.backend.get_value",
"tensorflow.keras.backend.set_value"
]
] |
CenIII/pose-ae-train | [
"8780ba9f3d80ca3a724bbee7b815073adc3d3e6e"
] | [
"data/coco_pose/ref.py"
] | [
"import numpy as np\nimport pickle\nimport h5py\nfrom scipy.misc import imread\nimport os \nfrom pycocotools.coco import COCO\nfrom pycocotools import mask \n\ndata_dir = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/images'\nann_path = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/annotations/person_keypoints_train2014.json'\n\nref_dir = os.path.dirname(__file__)\n\nassert os.path.exists(data_dir)\nassert os.path.exists(ann_path)\ncoco, img_ids, num_examples = None, None, None\n\nwith open(ref_dir + '/valid_id', 'r') as f:\n valid_id = list(map(lambda x:int(x.strip()), f.readlines()))\nvalid_id_set = set(valid_id)\n\ndef init():\n global coco, img_ids, num_examples\n ann_file = os.path.join(ann_path)\n coco = COCO(ann_file)\n img_ids = coco.getImgIds()\n num_examples = len(img_ids)\n\n# num_parts = 17\n# part_mask = np.array([0,0,0,0,0,0,0,1,1,1,1,0,0,1,1,1,1])\n# part_ref = {'ankle':[15,16],'knee':[13,14],'hip':[11,12],\n# 'wrist':[9,10],'elbow':[7,8],'shoulder':[5,6],\n# 'face':[0,1,2],'ears':[3,4]}\n# part_labels = ['nose','eye_l','eye_r','ear_l','ear_r',\n# 'sho_l','sho_r','elb_l','elb_r','wri_l','wri_r',\n# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']\n# basic_order = ['sho_l','sho_r', 'nose', 'eye_l','eye_r','ear_l',\n# 'ear_r','elb_l','elb_r','wri_l','wri_r',\n# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']\n# pairRef = [\n# [1,2],[2,3],[1,3],\n# [6,8],[8,10],[12,14],[14,16],\n# [7,9],[9,11],[13,15],[15,17],\n# [6,7],[12,13],[6,12],[7,13]\n# ]\n# pairRef = np.array(pairRef) - 1\n\nflipRef = [i-1 for i in [1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16] ]\n\n# part_idx = {b:a for a, b in enumerate(part_labels)}\n# basic_order = [part_idx[i] for i in basic_order]\n\n\ndef initialize(opt):\n return\n\ndef image_path(idx):\n img_info = coco.loadImgs(img_ids[idx])[0]\n path = img_info['file_name'].split('_')[1] + '/' + img_info['file_name']\n return os.path.join(data_dir, path)\n\ndef load_image(idx):\n return imread(image_path(idx),mode='RGB')\n\n\ndef num_objects(idx, anns=None, should_greater_than_1 = False):\n if anns is None: anns = get_anns(idx)\n return len(anns)\n\ndef setup_val_split(opt = None):\n if coco is None:\n return [], []\n\n tmp_idxs = []\n for i in range(num_examples):\n if num_objects(i, None) > 0:\n tmp_idxs += [i]\n ref_idxs = np.array(tmp_idxs,dtype=int) #39935 images that # of ppl > 0\n ### choose image_id from valid_id_set\n\n valid = {}\n train = []\n for i in ref_idxs:\n if img_ids[i] in valid_id_set:\n valid[ img_ids[i] ]=i\n else:\n train.append(i)\n return np.array(train), np.array([valid[i] for i in valid_id if i in valid])\n\ndef get_anns(idx):\n ann_ids = coco.getAnnIds(imgIds=img_ids[idx])\n tmp_ann = coco.loadAnns(ann_ids)\n # Filter tmp_ann for people with no keypoints annotated\n return [tmp_ann[i] for i in range(len(tmp_ann)) if tmp_ann[i]['num_keypoints'] > 0]\n\ndef get_mask(idx):\n ann_ids = coco.getAnnIds(imgIds=img_ids[idx])\n anns = coco.loadAnns(ann_ids)\n img = coco.loadImgs(img_ids[idx])[0]\n m = np.zeros((img['height'], img['width']))\n for j in anns:\n if j['iscrowd']:\n rle = mask.frPyObjects(j['segmentation'], img['height'], img['width'])\n m += mask.decode(rle)\n return m < 0.5\n\ndef get_keypoints(idx, anns=None):\n if anns is None: anns = get_anns(idx)\n num_people = num_objects(idx, anns)\n kps = np.zeros((num_people, 17, 3))\n for i in range(num_people):\n kps[i] = np.array(anns[i]['keypoints']).reshape([-1,3])\n return kps\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
Sujit-O/dyngem | [
"a879bf362d1e9409faa4e1186c345337ad6d0189"
] | [
"dynamicgem/test/test_dynRNN.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module is for testing dynRNN\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nfrom dynamicgem.embedding.dynRNN import DynRNN\nfrom dynamicgem.graph_generation import dynamic_SBM_graph as sbm\nfrom dynamicgem.visualization import plot_dynamic_sbm_embedding\nfrom time import time\n\n\ndef test_dynRNN():\n # Parameters for Stochastic block model graph\n # Todal of 1000 nodes\n node_num = 100\n # Test with two communities\n community_num = 2\n # At each iteration migrate 10 nodes from one community to the another\n node_change_num = 2\n # Length of total time steps the graph will dynamically change\n length = 7\n # output directory for result\n outdir = './output'\n intr = './intermediate'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n if not os.path.exists(intr):\n os.mkdir(intr)\n testDataType = 'sbm_cd'\n # Generate the dynamic graph\n dynamic_sbm_series = list(sbm.get_community_diminish_series_v2(node_num,\n community_num,\n length,\n 1, # comminity ID to perturb\n node_change_num))\n graphs = [g[0] for g in dynamic_sbm_series]\n # parameters for the dynamic embedding\n # dimension of the embedding\n dim_emb = 8\n lookback = 2\n\n # dynRNN\n embedding = DynRNN(d=dim_emb,\n beta=5,\n n_prev_graphs=lookback,\n nu1=1e-6,\n nu2=1e-6,\n n_enc_units=[500, 300],\n n_dec_units=[500, 300],\n rho=0.3,\n n_iter=2,\n xeta=1e-3,\n n_batch=100,\n modelfile=['./intermediate/enc_model_dynRNN.json',\n './intermediate/dec_model_dynRNN.json'],\n weightfile=['./intermediate/enc_weights_dynRNN.hdf5',\n './intermediate/dec_weights_dynRNN.hdf5'],\n savefilesuffix=\"testing\")\n embs = []\n t1 = time()\n for temp_var in range(lookback + 1, length + 1):\n emb, _ = embedding.learn_embeddings(graphs[:temp_var])\n embs.append(emb)\n print(embedding._method_name + ':\\n\\tTraining time: %f' % (time() - t1))\n plt.figure()\n plt.clf()\n plot_dynamic_sbm_embedding.plot_dynamic_sbm_embedding_v2(embs[-5:-1], dynamic_sbm_series[-5:])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf"
]
] |
kobeeraveendran/hackfsu5 | [
"5614d832423f56913bd35d96e2472068a106b376"
] | [
"fft_prototype.py"
] | [
"import matplotlib.pyplot as plt\r\nfrom scipy.io import wavfile # get the api\r\nfrom scipy.fftpack import fft\r\nfrom pylab import *\r\n\r\ndef f(filename):\r\n # song files are in ogg... we need it to be in wav.\r\n fs, data = wavfile.read(filename) \r\n \r\n # songs have multiple channels, but we only need one channel\r\n a = data.T[0]\r\n \r\n # this is 8-bit track, b is now normalized on [-1,1)\r\n #b=[(ele/2**16)*2-1 for ele in a] \r\n\r\n # create a list of complex number\r\n c = fft(a)\r\n\r\n # only need half of the fft list (because the internet says so)\r\n d = len(c)//2 \r\n\r\n #bam, it is plotted and saved. \r\n #plt.plot(abs(c[:(d-1)]),'r')\r\n #savefig(filename+'.png',bbox_inches='tight')\r\n\t\r\n return c\r\n\r\nguitar = f(\"auldlangguitar.wav\")\r\nviolin = f(\"auldlangviolin.wav\")\r\nharmon = f(\"auldlangharmonica.wav\")\r\ncombine= f(\"combined.wav\")\r\ncut = combine[:-14]\r\ncombined2 = guitar + violin\r\n\r\nplt.plot(np.abs(guitar), 'r')\r\n#plt.show()\r\nsavefig('guitarplot.png',bbox_inches='tight')\r\n\r\ngc = np.dot(guitar, combined2)\r\nvc = np.dot(violin, combined2)\r\nhc = np.dot(harmon, combined2)\r\n\r\nng = guitar #/ np.linalg.norm(guitar)\r\nnv = violin #/ np.linalg.norm(violin)\r\nnh = harmon #/ np.linalg.norm(harmon)\r\nnc = combined2 #/ np.linalg.norm(cut)\r\n\r\na = np.column_stack((ng, nv, nh))\r\n\r\nx, res, rank, s = np.linalg.lstsq(a, nc)\r\nplt.plot(np.abs(ng * x[0]), 'r')\r\n#plt.show()\r\nsavefig('decompguitarplot.png',bbox_inches='tight')\r\ndecompGuitar = np.fft.ifft(ng * 1 + nv *1)\r\nprint(\"X\\n\")\r\nprint(x)\r\n\r\n\r\nprint(\"decomp real\")\r\nprint(np.real(decompGuitar))\r\ntest = np.fft.ifft(guitar)\r\n\r\ndecompreal = (decompGuitar)\r\ndecompreal = decompreal #/ np.min(np.abs(decompreal[np.nonzero(decompreal)]))\r\n\r\n\r\norigfs, origdata = wavfile.read(\"auldlangguitar.wav\")\r\nb = np.column_stack((decompGuitar.astype(origdata.dtype), decompGuitar.astype(origdata.dtype)))\r\nwavfile.write(\"decompguitar.wav\", origfs, b)\r\nnp.savetxt(\"guitar.csv\", test.astype(uint8) , delimiter= \",\")\r\nnp.savetxt(\"combined.csv\", combine, delimiter= \",\")\r\nnp.savetxt(\"channel2.csv\", decompreal.astype(uint8), delimiter= \",\")\r\nprint(\"decomp orig\")\r\nprint(np.min(decompreal[np.nonzero(decompreal)]))\r\n"
] | [
[
"scipy.fftpack.fft",
"scipy.io.wavfile.write",
"scipy.io.wavfile.read"
]
] |
rathbird/Birds_of_Prey_CNN_Classifier | [
"13ceb78db2408709804263395175482cff6c6973"
] | [
"src/googlexfr.py"
] | [
"# import the necessary packages\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom minigooglenet import minigooglenet_functional\n\n#set param values\n#classes (eagles, vultures)\nn_categories = 2\ndir_train = '.'\ntrain_size = 2183\ntest_size = 501\nbatch_size = 16\nEPOCHS = 6\n\n#train data - 2 classes, 1000 per class\ndatagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n#test data, no transformation\ndatagen_validation = ImageDataGenerator(preprocessing_function=preprocess_input)\n\n#load images while model is running\ntrain_generator = datagen_train.flow_from_directory(\n directory='./data/train/', \n target_size=(100,100),\n color_mode='rgb',\n batch_size=32,\n class_mode='categorical',\n shuffle=True,\n seed=42)\n\nvalid_generator = datagen_validation.flow_from_directory(\n directory=\"./data/test/\",\n target_size=(100, 100),\n color_mode=\"rgb\",\n batch_size=1,\n class_mode=\"categorical\",\n shuffle=False,\n seed=42)\n\n#create model\ngoogle = minigooglenet_functional(100, 100, 3, n_categories)\n\n#compile model with very slow learning rate\ngoogle.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])\n\nhistory = google.fit(train_generator, steps_per_epoch=train_size//batch_size, epochs=1, validation_data=valid_generator, validation_steps= test_size//batch_size)\n\n#save model\ngoogle.save('models/googlexfr')\n\n#analyze results\n\n#Confution Matrix and Classification Report\nY_pred = google.predict(valid_generator, test_size // batch_size+1)\ny_pred = np.argmax(Y_pred, axis=1)\n\nprint('Confusion Matrix')\nprint(confusion_matrix(valid_generator.classes, y_pred))\nprint('Classification Report')\ntarget_names = ['eagle', 'vulture']\nprint(classification_report(valid_generator.classes, y_pred, target_names=target_names))\n\n\n\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"sklearn.metrics.classification_report",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.argmax",
"sklearn.metrics.confusion_matrix"
]
] |
az2104nas/sztnb302alsr2bs21on | [
"6084c82c59a4a89498a191d96c231f47df10317d"
] | [
"naslib/search_spaces/nasbench1shot1/search_spaces/search_space_1.py"
] | [
"import itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nasbench import api\n\nfrom naslib.search_spaces.nasbench1shot1.search_space import SearchSpace\nfrom naslib.search_spaces.nasbench1shot1.utils import upscale_to_nasbench_format, OUTPUT_NODE, INPUT, CONV1X1, OUTPUT\nfrom naslib.search_spaces.nasbench1shot1.wrappers import Model, Architecture, NasbenchWrapper\n\n\nclass SearchSpace1(SearchSpace):\n def __init__(self):\n super(SearchSpace1, self).__init__(search_space_number=1, num_intermediate_nodes=4)\n \"\"\"\n SEARCH SPACE 1\n \"\"\"\n self.num_parents_per_node = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 2,\n '4': 2,\n '5': 2\n }\n if sum(self.num_parents_per_node.values()) > 9:\n raise ValueError('Each nasbench cell has at most 9 edges.')\n\n self.test_min_error = 0.05448716878890991\n self.valid_min_error = 0.049278855323791504\n\n def create_nasbench_adjacency_matrix(self, parents, **kwargs):\n adjacency_matrix = self._create_adjacency_matrix(parents, adjacency_matrix=np.zeros([6, 6]),\n node=OUTPUT_NODE - 1)\n # Create nasbench compatible adjacency matrix\n return upscale_to_nasbench_format(adjacency_matrix)\n\n def create_nasbench_adjacency_matrix_with_loose_ends(self, parents):\n return upscale_to_nasbench_format(self._create_adjacency_matrix_with_loose_ends(parents))\n\n def generate_adjacency_matrix_without_loose_ends(self):\n for adjacency_matrix in self._generate_adjacency_matrix(adjacency_matrix=np.zeros([6, 6]),\n node=OUTPUT_NODE - 1):\n yield upscale_to_nasbench_format(adjacency_matrix)\n\n def objective_function(self, nasbench, config, budget=108):\n adjacency_matrix, node_list = super(SearchSpace1, self).convert_config_to_nasbench_format(config)\n # adjacency_matrix = upscale_to_nasbench_format(adjacency_matrix)\n node_list = [INPUT, *node_list, CONV1X1, OUTPUT]\n adjacency_list = adjacency_matrix.astype(np.int).tolist()\n model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)\n nasbench_data = nasbench.query(model_spec, epochs=budget)\n\n # record the data to history\n architecture = Model()\n arch = Architecture(adjacency_matrix=adjacency_matrix,\n node_list=node_list)\n architecture.update_data(arch, nasbench_data, budget)\n self.run_history.append(architecture)\n\n return nasbench_data['validation_accuracy'], nasbench_data['training_time']\n\n def generate_with_loose_ends(self):\n for _, parent_node_3, parent_node_4, output_parents in itertools.product(\n *[itertools.combinations(list(range(int(node))), num_parents) for node, num_parents in\n self.num_parents_per_node.items()][2:]):\n parents = {\n '0': [],\n '1': [0],\n '2': [0, 1],\n '3': parent_node_3,\n '4': parent_node_4,\n '5': output_parents\n }\n adjacency_matrix = self.create_nasbench_adjacency_matrix_with_loose_ends(parents)\n yield adjacency_matrix\n\n"
] | [
[
"numpy.zeros"
]
] |
apapaion/menpowidgets | [
"237a39ddf4e65c57e8165f8a87f25a25f34d4698"
] | [
"menpowidgets/base.py"
] | [
"from collections import Sized, OrderedDict\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as mc\nimport numpy as np\n\nimport ipywidgets\nimport IPython.display as ipydisplay\n\nfrom menpo.base import name_of_callable\nfrom menpo.image import MaskedImage, Image\nfrom menpo.image.base import _convert_patches_list_to_single_array\nfrom menpo.shape import TriMesh, ColouredTriMesh, TexturedTriMesh\nfrom menpo.visualize import print_dynamic\nfrom menpo.landmark import LandmarkManager\n\nfrom .options import (RendererOptionsWidget, TextPrintWidget,\n SaveMatplotlibFigureOptionsWidget, AnimationOptionsWidget,\n ImageOptionsWidget, LandmarkOptionsWidget,\n PlotMatplotlibOptionsWidget, PatchOptionsWidget,\n LinearModelParametersWidget, CameraSnapshotWidget,\n Shape2DOptionsWidget, Shape3DOptionsWidget,\n SaveMayaviFigureOptionsWidget, Mesh3DOptionsWidget)\nfrom .tools import LogoWidget, SwitchWidget\nfrom .utils import (extract_group_labels_from_landmarks,\n extract_groups_labels_from_image, render_image,\n render_patches)\nfrom .checks import check_n_parameters\nfrom .style import map_styles_to_hex_colours\n\n\ndef menpowidgets_src_dir_path():\n r\"\"\"\n The path to the top of the menpowidgets package.\n\n Useful for locating where the logos folder is stored.\n\n Returns\n -------\n path : ``pathlib.Path``\n The full path to the top of the Menpo package\n \"\"\"\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n return Path(os.path.abspath(__file__)).parent\n\n\ndef visualize_shapes_2d(shapes, figure_size=(7, 7), browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n 2D shapes. The supported objects are:\n\n ================================== =\n Object\n ================================== =\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n `menpo.shape.TriMesh`\n ================================== =\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n shapes : `list`\n The `list` of objects to be visualized. It can contain a combination of\n\n ================================== =\n Object\n ================================== =\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n `menpo.shape.TriMesh`\n ================================== =\n\n or subclasses of those.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 2D shape\n and returns a list of custom messages to be printed about it. 
Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that shapes is a list even with one member\n if not isinstance(shapes, Sized):\n shapes = [shapes]\n\n # Get the number of shapes\n n_shapes = len(shapes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected shape index\n i = shape_number_wid.selected_values if n_shapes > 1 else 0\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options['image_view'] = shape_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(shapes[i], 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n idx = shapes[i].labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_face_colour.append(options['marker_face_colour'][idx])\n marker_edge_colour.append(options['marker_edge_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = shapes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Update info text widget\n update_info(shapes[i], custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(shape, custom_info_callback=None):\n min_b, max_b = shape.bounds()\n rang = shape.range()\n cm = shape.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(shape)),\n \"> {} points\".format(shape.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H\".format(\n min_b[0], max_b[0], min_b[1], max_b[1]),\n \"> Range: {0:.1f}W, {1:.1f}H\".format(rang[0], rang[1]),\n \"> Centre of mass: ({0:.1f}, {1:.1f})\".format(cm[0], cm[1]),\n \"> Norm: {0:.2f}\".format(shape.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(shape):\n 
text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # If the object is a LabelledPointUndirectedGraph, grab the labels\n labels = None\n if hasattr(shapes[0], 'labels'):\n labels = shapes[0].labels\n\n # Create widgets\n shape_options_wid = Shape2DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=None, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_shapes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get current shape and check if it has labels\n i = change['new']\n labels = None\n if hasattr(shapes[i], 'labels'):\n labels = shapes[i].labels\n\n # Update shape options\n shape_options_wid.set_widget_state(labels=labels,\n allow_callback=True)\n\n # Shape selection slider\n index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}\n shape_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_shapes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_shapes_3d(shapes, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n 3D shapes. The supported objects are:\n\n ==================================\n Object\n ==================================\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n ==================================\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n shapes : `list`\n The `list` of objects to be visualized. 
It can contain a combination of\n\n ==================================\n Object\n ==================================\n `menpo.shape.PointCloud`\n `menpo.shape.PointUndirectedGraph`\n `menpo.shape.PointDirectedGraph`\n `menpo.shape.PointTree`\n `menpo.shape.LabelledPointGraph`\n ==================================\n\n or subclasses of those.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 2D shape\n and returns a list of custom messages to be printed about it. Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print_dynamic('Initializing...')\n\n # Make sure that shapes is a list even with one member\n if not isinstance(shapes, Sized):\n shapes = [shapes]\n\n # Get the number of shapes\n n_shapes = len(shapes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # Get selected shape index\n i = shape_number_wid.selected_values if n_shapes > 1 else 0\n\n # Update info text widget\n update_info(shapes[i], custom_info_callback=custom_info_callback)\n\n # Create options dictionary\n options = dict()\n if isinstance(shapes[i], TriMesh):\n # Note that 3D TriMesh has a totally different set of options\n # compared to any other PointCloud or PointGraph. However, in order\n # for visualize_shapes_3d to support TriMeshes, we simply use the\n # options that are common. 
This means that most of the widget's\n # options will have no effect on rendering...\n options['mesh_type'] = 'wireframe'\n if shape_options_wid.selected_values['markers']['render_markers']:\n options['mesh_type'] = 'fancymesh'\n options['line_width'] = \\\n shape_options_wid.selected_values['lines']['line_width']\n options['colour'] = \\\n shape_options_wid.selected_values['lines']['line_colour'][0]\n options['marker_style'] = \\\n shape_options_wid.selected_values['markers']['marker_style']\n options['marker_size'] = \\\n shape_options_wid.selected_values['markers']['marker_size']\n options['marker_resolution'] = \\\n shape_options_wid.selected_values['markers']['marker_resolution']\n options['step'] = \\\n shape_options_wid.selected_values['markers']['step']\n else:\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n\n # Correct options based on the type of the shape\n if hasattr(shapes[i], 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = shapes[i].labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Render shape with selected options\n save_figure_wid.renderer = shapes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n alpha=1.0, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(shape, custom_info_callback=None):\n min_b, max_b = shape.bounds()\n rang = shape.range()\n cm = shape.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(shape)),\n \"> {} points\".format(shape.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(min_b[0], max_b[0], min_b[1], max_b[1],\n min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(shape.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(shape):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # If the object is a LabelledPointUndirectedGraph, grab the labels\n labels = None\n if hasattr(shapes[0], 'labels'):\n labels = shapes[0].labels\n\n # Create widgets\n shape_options_wid = Shape3DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['numbering_mayavi'], labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_shapes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # 
Get current shape and check if it has labels\n i = change['new']\n labels = None\n if hasattr(shapes[i], 'labels'):\n labels = shapes[i].labels\n\n # Update shape options\n shape_options_wid.set_widget_state(labels=labels,\n allow_callback=True)\n\n # Shape selection slider\n index = {'min': 0, 'max': n_shapes-1, 'step': 1, 'index': 0}\n shape_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, shape_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n [info_wid, shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_shapes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef visualize_landmarks_2d(landmarks, figure_size=(7, 7),\n browser_style='buttons', custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n `menpo.landmark.LandmarkManager` (or subclass) objects. 
The landmark\n managers can have a combination of different attributes, e.g.\n landmark groups and labels etc.\n\n Parameters\n ----------\n landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass\n The `list` of landmark managers to be visualized.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts a landmark group and\n returns a list of custom messages to be printed per landmark group.\n Each custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that landmarks is a list even with one landmark manager member\n if isinstance(landmarks, LandmarkManager):\n landmarks = [landmarks]\n\n # Get the number of landmark managers\n n_landmarks = len(landmarks)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = landmark_number_wid.selected_values if n_landmarks > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n if landmark_options_wid.selected_values['landmarks']['render_landmarks']:\n # get shape\n shape = landmarks[i][g]\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options['image_view'] = landmark_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(shape, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n landmark_options_wid.selected_values['landmarks']['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = shape.labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = shape.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n 
figure_size=new_figure_size, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n else:\n ipydisplay.clear_output()\n\n # update info text widget\n update_info(landmarks[i], g, custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(landmarks, group, custom_info_callback=None):\n if group is not None:\n min_b, max_b = landmarks[group].bounds()\n rang = landmarks[group].range()\n cm = landmarks[group].centre()\n text_per_line = [\n \"> {} landmark points\".format(landmarks[group].n_points),\n \"> {}\".format(name_of_callable(landmarks[group])),\n \"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H\".format(\n min_b[0], max_b[0], min_b[1], max_b[1]),\n \"> Range: {0:.1f}W, {1:.1f}H\".format(rang[0], rang[1]),\n \"> Centre of mass: ({0:.1f}, {1:.1f})\".format(cm[0], cm[1]),\n \"> Norm: {0:.2f}\".format(landmarks[group].norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(landmarks[group]):\n text_per_line.append('> {}'.format(msg))\n else:\n text_per_line = [\"No landmarks available.\"]\n\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])\n first_label = labels_keys[0] if labels_keys else None\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=first_label, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_landmarks > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels\n i = landmark_number_wid.selected_values\n g_keys, l_keys = extract_group_labels_from_landmarks(\n landmarks[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=True)\n\n # Landmark selection slider\n index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}\n landmark_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, landmark_options_wid, renderer_options_wid,\n save_figure_wid])\n tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_landmarks > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n 
final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_landmarks_3d(landmarks, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of\n `menpo.landmark.LandmarkManager` (or subclass) objects. The landmark\n managers can have a combination of different attributes, e.g.\n landmark groups and labels etc.\n\n Parameters\n ----------\n landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass\n The `list` of landmark managers to be visualized.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts a landmark group and\n returns a list of custom messages to be printed per landmark group.\n Each custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that landmarks is a list even with one landmark manager member\n if not isinstance(landmarks, list):\n landmarks = [landmarks]\n\n # Get the number of landmark managers\n n_landmarks = len(landmarks)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = landmark_number_wid.selected_values if n_landmarks > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # update info text widget\n update_info(landmarks[i], g, custom_info_callback=custom_info_callback)\n\n if landmark_options_wid.selected_values['landmarks']['render_landmarks']:\n # get shape\n shape = landmarks[i][g]\n\n options = dict()\n if isinstance(shape, TriMesh):\n # Note that 3D TriMesh has a totally different set of options\n # compared to any other PointCloud or PointGraph. However, in\n # order for visualize_landmarks_3d to support TriMeshes, we\n # simply use the options that are common. 
This means that most\n # of the widget's options will have no effect on rendering...\n options['mesh_type'] = 'wireframe'\n if landmark_options_wid.selected_values['markers'][\n 'render_markers']:\n options['mesh_type'] = 'fancymesh'\n options['line_width'] = \\\n landmark_options_wid.selected_values['lines']['line_width']\n options['colour'] = \\\n landmark_options_wid.selected_values['lines']['line_colour'][0]\n options['marker_style'] = \\\n landmark_options_wid.selected_values['markers']['marker_style']\n options['marker_size'] = \\\n landmark_options_wid.selected_values['markers']['marker_size']\n options['marker_resolution'] = \\\n landmark_options_wid.selected_values['markers'][\n 'marker_resolution']\n options['step'] = \\\n landmark_options_wid.selected_values['markers']['step']\n else:\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n\n # Correct options based on the type of the shape\n if hasattr(shape, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n landmark_options_wid.selected_values['landmarks']['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = shape.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Render shape with selected options\n save_figure_wid.renderer = shape.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n alpha=1.0, **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n else:\n ipydisplay.clear_output()\n\n # Define function that updates the info text\n def update_info(landmarks, group, custom_info_callback=None):\n if group is not None:\n min_b, max_b = landmarks[group].bounds()\n rang = landmarks[group].range()\n cm = landmarks[group].centre()\n text_per_line = [\n \"> {} landmark points\".format(landmarks[group].n_points),\n \"> {}\".format(name_of_callable(landmarks[group])),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(\n min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(landmarks[group].norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(landmarks[group]):\n text_per_line.append('> {}'.format(msg))\n else:\n text_per_line = [\"No landmarks available.\"]\n\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])\n first_label = labels_keys[0] if labels_keys else None\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='3D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n 
options_tabs=['numbering_mayavi'], labels=first_label,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_landmarks > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels\n i = landmark_number_wid.selected_values\n g_keys, l_keys = extract_group_labels_from_landmarks(\n landmarks[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=True)\n\n # Landmark selection slider\n index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}\n landmark_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Shape', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, landmark_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, landmark_options_wid, renderer_options_wid,\n save_figure_wid])\n tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_landmarks > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef visualize_meshes_3d(meshes, browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of 3D meshes. The supported\n objects are:\n\n ==================================\n Object\n ==================================\n `menpo.shape.TriMesh`\n `menpo.shape.ColouredTriMesdh`\n `menpo.shape.TexturedTriMesh`\n ==================================\n\n Any instance of the above can be combined in the input `list`.\n\n Parameters\n ----------\n meshes : `list`\n The `list` of objects to be visualized. It can contain a combination of\n\n ==================================\n Object\n ==================================\n `menpo.shape.TriMesh`\n `menpo.shape.ColouredTriMesdh`\n `menpo.shape.TexturedTriMesh`\n ==================================\n\n or subclasses of those.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not ``None``, it should be a function that accepts a 3D mesh\n and returns a list of custom messages to be printed about it. 
Each\n custom message will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!!\n from menpowidgets.utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that meshes is a list even with one member\n if not isinstance(meshes, Sized):\n meshes = [meshes]\n\n # Get the number of meshes\n n_meshes = len(meshes)\n\n # Define the styling options\n main_style = 'warning'\n\n # Define render function\n def render_function(_):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected mesh index\n i = mesh_number_wid.selected_values if n_meshes > 1 else 0\n\n # Update info text widget\n update_info(meshes[i], custom_info_callback=custom_info_callback)\n\n # Render instance\n save_figure_wid.renderer = meshes[i].view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **mesh_options_wid.selected_values)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(mesh, custom_info_callback=None):\n min_b, max_b = mesh.bounds()\n rang = mesh.range()\n cm = mesh.centre()\n text_per_line = [\n \"> {}\".format(name_of_callable(mesh)),\n \"> {} points\".format(mesh.n_points),\n \"> Bounds: [{0:.1f}-{1:.1f}]X, [{2:.1f}-{3:.1f}]Y, \"\n \"[{4:.1f}-{5:.1f}]Z\".format(\n min_b[0], max_b[0], min_b[1], max_b[1], min_b[2], max_b[2]),\n \"> Range: {0:.1f}X, {1:.1f}Y, {2:.1f}Z\".format(rang[0], rang[1],\n rang[2]),\n \"> Centre of mass: ({0:.1f}X, {1:.1f}Y, {2:.1f}Z)\".format(\n cm[0], cm[1], cm[2]),\n \"> Norm: {0:.2f}\".format(mesh.norm())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(mesh):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n mesh_options_wid = Mesh3DOptionsWidget(\n textured=(isinstance(meshes[0], ColouredTriMesh) or\n isinstance(meshes[0], TexturedTriMesh)),\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_meshes > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n i = change['new']\n\n # Update shape options\n mesh_options_wid.set_widget_state(\n textured=(isinstance(meshes[i], ColouredTriMesh) or\n isinstance(meshes[i], TexturedTriMesh)),\n allow_callback=True)\n\n # selection slider\n index = {'min': 0, 'max': n_meshes-1, 'step': 1, 'index': 0}\n mesh_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Mesh', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, mesh_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab([info_wid, mesh_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Mesh', 'Export']\n for (k, tl) in enumerate(tab_titles):\n 
options_box.set_title(k, tl)\n if n_meshes > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n with out:\n print_dynamic('')\n\n\ndef visualize_images(images, figure_size=(7, 7), browser_style='buttons',\n custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of `menpo.image.Image` (or\n subclass) objects. The images can have a combination of different\n attributes, e.g. masked or not, landmarked or not, without multiple\n landmark groups and labels etc.\n\n Parameters\n ----------\n images : `list` of `menpo.image.Image` or subclass\n The `list` of images to be visualized.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts an image and returns\n a list of custom messages to be printed per image. Each custom message\n will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that images is a list even with one member\n if not isinstance(images, Sized):\n images = [images]\n\n # Get the number of images\n n_images = len(images)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index and selected group\n i = image_number_wid.selected_values if n_images > 1 else 0\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # check if image is masked\n image_is_masked = isinstance(images[i], MaskedImage)\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n options.update(renderer_options_wid.selected_values['legend'])\n options.update(image_options_wid.selected_values)\n options.update(landmark_options_wid.selected_values['landmarks'])\n\n # Correct options based on the type of the shape\n if (images[i].has_landmarks and\n hasattr(images[i].landmarks[g], 'labels')):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = images[i].landmarks[g].labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is 
PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = render_image(\n image=images[i], renderer=save_figure_wid.renderer,\n image_is_masked=image_is_masked, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(images[i], image_is_masked, g,\n custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(img, image_is_masked, group, custom_info_callback=None):\n # Prepare masked (or non-masked) string\n masked_str = 'Masked Image' if image_is_masked else 'Image'\n # Get image path, if available\n path_str = img.path if hasattr(img, 'path') else 'No path available'\n # Create text lines\n text_per_line = [\n \"> {} of size {} with {} channel{}\".format(\n masked_str, img._str_shape(), img.n_channels,\n 's' * (img.n_channels > 1)),\n \"> Path: '{}'\".format(path_str)]\n if image_is_masked:\n text_per_line.append(\n \"> {} masked pixels (attached mask {:.1%} true)\".format(\n img.n_true_pixels(), img.mask.proportion_true()))\n text_per_line.append(\"> min={:.3f}, max={:.3f}\".format(\n img.pixels.min(), img.pixels.max()))\n if img.has_landmarks:\n text_per_line.append(\"> {} landmark points\".format(\n img.landmarks[group].n_points))\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(img):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n groups_keys, labels_keys = extract_groups_labels_from_image(images[0])\n first_label = labels_keys[0] if labels_keys else None\n image_options_wid = ImageOptionsWidget(\n n_channels=images[0].n_channels,\n image_is_masked=isinstance(images[0], MaskedImage),\n render_function=render_function)\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=first_label, axes_x_limits=None, axes_y_limits=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_images > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Get new groups and labels, then update landmark options\n i = image_number_wid.selected_values\n g_keys, l_keys = extract_groups_labels_from_image(images[i])\n\n # Update landmarks options\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=images[i].n_channels,\n image_is_masked=isinstance(images[i], MaskedImage),\n allow_callback=True)\n\n # Image selection slider\n index = {'min': 0, 'max': n_images-1, 'step': 1, 'index': 0}\n image_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n 
interval=0.2, description='Image', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, image_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, image_options_wid, landmark_options_wid,\n renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Image', 'Landmarks', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_images > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_patches(patches, patch_centers, figure_size=(7, 7),\n browser_style='buttons', custom_info_callback=None):\n r\"\"\"\n Widget that allows browsing through a `list` of patch-based images.\n\n The patches argument can have any of the two formats that are returned from\n the `extract_patches()` and `extract_patches_around_landmarks()` methods\n of `menpo.image.Image`. Specifically it can be:\n\n 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`\n 2. `list` of ``n_center * n_offset`` `menpo.image.Image` objects\n\n The patches can have a combination of different attributes, e.g. number of\n centers, number of offsets, number of channels etc.\n\n Parameters\n ----------\n patches : `list`\n The `list` of patch-based images to be visualized. It can consist of\n objects with any of the two formats that are returned from the\n `extract_patches()` and `extract_patches_around_landmarks()` methods.\n Specifically, it can either be an\n ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a\n `list` of ``n_center * n_offset`` `menpo.image.Image` objects.\n patch_centers : `list` of `menpo.shape.PointCloud`\n The centers to set the patches around. If the `list` has only one\n `menpo.shape.PointCloud` then this will be used for all patches members.\n Otherwise, it needs to have the same length as patches.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n browser_style : ``{'buttons', 'slider'}``, optional\n It defines whether the selector of the objects will have the form of\n plus/minus buttons or a slider.\n custom_info_callback: `function` or ``None``, optional\n If not None, it should be a function that accepts an image and returns\n a list of custom messages to be printed per image. 
Each custom message\n will be printed in a separate line.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that patches is a list even with one member\n if (isinstance(patches, list) and isinstance(patches[0], Image)) or \\\n not isinstance(patches, list):\n patches = [patches]\n\n # Make sure that patch_centers is a list even with one shape\n if not isinstance(patch_centers, list):\n patch_centers = [patch_centers] * len(patches)\n elif isinstance(patch_centers, list) and len(patch_centers) == 1:\n patch_centers *= len(patches)\n\n # Make sure all patch-based images are in the single array format\n for i in range(len(patches)):\n if isinstance(patches[i], list):\n patches[i] = _convert_patches_list_to_single_array(\n patches[i], patch_centers[i].n_points)\n\n # Get the number of patch_based images\n n_patches = len(patches)\n\n # Define the styling options\n main_style = 'info'\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # get selected index\n i = image_number_wid.selected_values if n_patches > 1 else 0\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n image_options = dict(image_options_wid.selected_values)\n del image_options['masked_enabled']\n options.update(image_options)\n options.update(patch_options_wid.selected_values)\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render image with selected options\n save_figure_wid.renderer = render_patches(\n patches=patches[i], patch_centers=patch_centers[i],\n renderer=save_figure_wid.renderer, figure_size=new_figure_size,\n **options)\n\n # update info text widget\n update_info(patches[i], custom_info_callback=custom_info_callback)\n\n # Define function that updates the info text\n def update_info(ptchs, custom_info_callback=None):\n text_per_line = [\n \"> Patch-Based Image with {} patche{} and {} offset{}.\".format(\n ptchs.shape[0], 's' * (ptchs.shape[0] > 1), ptchs.shape[1],\n 's' * (ptchs.shape[1] > 1)),\n \"> Each patch has size {}H x {}W with {} channel{}.\".format(\n ptchs.shape[3], ptchs.shape[4], ptchs.shape[2],\n 's' * (ptchs.shape[2] > 1)),\n \"> min={:.3f}, max={:.3f}\".format(ptchs.min(), ptchs.max())]\n if custom_info_callback is not None:\n # iterate over the list of messages returned by the callback\n # function and append them in the text_per_line.\n for msg in custom_info_callback(ptchs):\n text_per_line.append('> {}'.format(msg))\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Create widgets\n shape_options_wid = Shape2DOptionsWidget(\n labels=None, render_function=None)\n shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False\n shape_options_wid.add_render_function(render_function)\n patch_options_wid 
= PatchOptionsWidget(\n n_patches=patches[0].shape[0], n_offsets=patches[0].shape[1],\n render_function=render_function)\n image_options_wid = ImageOptionsWidget(\n n_channels=patches[0].shape[2], image_is_masked=False,\n render_function=None)\n image_options_wid.interpolation_checkbox.button_wid.value = False\n image_options_wid.add_render_function(render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,\n axes_x_limits=None, axes_y_limits=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_patches > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n # Selected object\n i = image_number_wid.selected_values\n\n # Update patch options\n patch_options_wid.set_widget_state(\n n_patches=patches[i].shape[0], n_offsets=patches[i].shape[1],\n allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=patches[i].shape[2], image_is_masked=False,\n allow_callback=True)\n\n # Image selection slider\n index = {'min': 0, 'max': n_patches-1, 'step': 1, 'index': 0}\n image_number_wid = AnimationOptionsWidget(\n index, render_function=update_widgets, index_style=browser_style,\n interval=0.2, description='Image', loop_enabled=True,\n continuous_update=False)\n\n # Header widget\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n header_wid = ipywidgets.HBox([logo_wid, image_number_wid])\n header_wid.layout.align_items = 'center'\n header_wid.layout.margin = '0px 0px 10px 0px'\n else:\n # Header widget\n header_wid = LogoWidget(style=main_style)\n header_wid.layout.margin = '0px 10px 0px 0px'\n options_box = ipywidgets.Tab(\n children=[info_wid, patch_options_wid, image_options_wid,\n shape_options_wid, renderer_options_wid, save_figure_wid])\n tab_titles = ['Info', 'Patches', 'Image', 'Shape', 'Renderer', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n if n_patches > 1:\n wid = ipywidgets.VBox([header_wid, options_box])\n else:\n wid = ipywidgets.HBox([header_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef plot_graph(x_axis, y_axis, legend_entries=None, figure_size=(9, 5)):\n r\"\"\"\n Widget that allows plotting various curves in a graph.\n\n The widget has options tabs regarding the graph and the renderer (lines,\n markers, legend, figure, axes, grid) and saving the figure to file.\n\n Parameters\n ----------\n x_axis : `list` of `float`\n The values of the horizontal axis. Note that these values are common for\n all the curves.\n y_axis : `list` of `lists` of `float`\n A `list` that stores a `list` of values to be plotted for each curve.\n legend_entries : `list` or `str` or ``None``, optional\n The `list` of names that will appear on the legend for each curve. 
If\n ``None``, then the names format is ``curve {}.format(i)``.\n figure_size : (`int`, `int`), optional\n The initial size of the rendered figure.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n from menpo.visualize import plot_curve\n print('Initializing...')\n\n # Get number of curves to be plotted\n n_curves = len(y_axis)\n\n # Define the styling options\n main_style = 'danger'\n\n # Parse options\n if legend_entries is None:\n legend_entries = [\"curve {}\".format(i) for i in range(n_curves)]\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # plot with selected options\n opts = plot_wid.selected_values.copy()\n new_figure_size = (\n plot_wid.selected_values['zoom'][0] * figure_size[0],\n plot_wid.selected_values['zoom'][1] * figure_size[1])\n del opts['zoom']\n save_figure_wid.renderer = plot_curve(\n x_axis=x_axis, y_axis=y_axis, figure_size=new_figure_size,\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **opts)\n\n # show plot\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n plot_wid = PlotMatplotlibOptionsWidget(\n legend_entries=legend_entries, render_function=render_function)\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n logo = LogoWidget(style=main_style)\n logo.layout.margin = '0px 10px 0px 0px'\n tmp_children = list(plot_wid.tab_box.children)\n tmp_children.append(save_figure_wid)\n plot_wid.tab_box.children = tmp_children\n plot_wid.tab_box.set_title(0, 'Labels')\n plot_wid.tab_box.set_title(1, 'Style')\n plot_wid.tab_box.set_title(2, 'Legend')\n plot_wid.tab_box.set_title(3, 'Axes')\n plot_wid.tab_box.set_title(4, 'Zoom')\n plot_wid.tab_box.set_title(5, 'Grid')\n plot_wid.tab_box.set_title(6, 'Export')\n\n # Display final widget\n wid = ipywidgets.HBox([logo, plot_wid])\n wid.box_style = main_style\n wid.layout.border = '2px solid' + map_styles_to_hex_colours(main_style)\n plot_wid.container.border = '0px'\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef save_matplotlib_figure(renderer):\n r\"\"\"\n Widget that allows to save a figure, which was generated with Matplotlib,\n to file.\n\n Parameters\n ----------\n renderer : `menpo.visualize.viewmatplotlib.MatplotlibRenderer`\n The Matplotlib renderer object.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n # Create sub-widgets\n logo_wid = LogoWidget()\n logo_wid.layout.margin = '0px 10px 0px 0px'\n save_figure_wid = SaveMatplotlibFigureOptionsWidget(renderer,\n style='warning')\n wid = ipywidgets.HBox([logo_wid, save_figure_wid])\n\n # Display widget\n ipydisplay.display(wid)\n\n\ndef save_mayavi_figure(renderer):\n r\"\"\"\n Widget that allows to save a figure, which was generated with Mayavi,\n to file.\n\n Parameters\n ----------\n renderer : `menpo3d.visualize.viewmayavi.MayaviRenderer`\n The Mayavi renderer object.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n # Create sub-widgets\n logo_wid = LogoWidget()\n logo_wid.layout.margin = '0px 10px 0px 0px'\n save_figure_wid = 
SaveMayaviFigureOptionsWidget(renderer,\n style='warning')\n wid = ipywidgets.HBox([logo_wid, save_figure_wid])\n\n # Display widget\n ipydisplay.display(wid)\n\n\ndef visualize_shape_model_2d(shape_model, n_parameters=5, mode='multiple',\n parameters_bounds=(-3.0, 3.0), figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical 2D shape model.\n\n Parameters\n ----------\n shape_model : `list` of `menpo.shape.PCAModel` or `subclass`\n The multi-scale shape model to be visualized. Note that each level can\n have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n from menpo.visualize.viewmatplotlib import (_set_axes_options,\n _parse_axes_limits)\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that shape_model is a list even with one member\n if not isinstance(shape_model, list):\n shape_model = [shape_model]\n\n # Get the number of levels (i.e. 
number of shape models)\n n_levels = len(shape_model)\n\n # Define the styling options\n main_style = 'warning'\n\n # Get the maximum number of components per level\n max_n_params = [sp.n_active_components for sp in shape_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Compute weights\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n shape_model[level].eigenvalues[:len(parameters)] ** 0.5)\n\n # Get the mean\n mean = shape_model[level].mean()\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options['image_view'] = shape_options_wid.selected_values['image_view']\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n\n # Correct options based on the type of the shape\n if hasattr(mean, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use the legend options\n options.update(renderer_options_wid.selected_values['legend'])\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n idx = mean.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_face_colour.append(options['marker_face_colour'][idx])\n marker_edge_colour.append(options['marker_edge_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render with selected options\n if mode_wid.value == 1:\n # Deformation mode\n # Compute instance\n instance = shape_model[level].instance(weights)\n\n # Render mean shape\n if mean_wid.selected_values:\n save_figure_wid.renderer = mean.view(\n figure_id=save_figure_wid.renderer.figure_id,\n new_figure=False, figure_size=None,\n image_view=options['image_view'],\n render_lines=options['render_lines'],\n line_colour='yellow', line_style=options['line_style'],\n line_width=options['line_width'],\n render_markers=options['render_markers'],\n marker_style=options['marker_style'],\n marker_size=options['marker_size'],\n marker_face_colour='yellow', marker_edge_colour='yellow',\n marker_edge_width=options['marker_edge_width'])\n\n # Render instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Get instance range\n instance_range = instance.range()\n else:\n # 
Vectors mode\n # Compute instance\n instance_lower = shape_model[level].instance([-p for p in weights])\n instance_upper = shape_model[level].instance(weights)\n\n # Render mean shape\n save_figure_wid.renderer = mean.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size, **options)\n\n # Render vectors\n ax = plt.gca()\n x_min = np.Inf\n y_min = np.Inf\n x_max = -np.Inf\n y_max = -np.Inf\n for p in range(mean.n_points):\n xm = mean.points[p, 0]\n ym = mean.points[p, 1]\n xl = instance_lower.points[p, 0]\n yl = instance_lower.points[p, 1]\n xu = instance_upper.points[p, 0]\n yu = instance_upper.points[p, 1]\n if options['image_view']:\n # image mode\n lines = [[(ym, xm), (yl, xl)], [(ym, xm), (yu, xu)]]\n else:\n # point cloud mode\n lines = [[(xm, ym), (xl, yl)], [(xm, ym), (xu, yu)]]\n lc = mc.LineCollection(lines, colors=('g', 'b'),\n linestyles='solid', linewidths=2)\n # update min, max\n y_min = np.min([y_min, xl, xu])\n y_max = np.max([y_max, xl, xu])\n x_min = np.min([x_min, yl, yu])\n x_max = np.max([x_max, yl, yu])\n\n # add collection\n ax.add_collection(lc)\n\n # parse axes limits\n axes_x_limits, axes_y_limits = _parse_axes_limits(\n x_min, x_max, y_min, y_max, options['axes_x_limits'],\n options['axes_y_limits'])\n _set_axes_options(\n ax, render_axes=options['render_axes'],\n inverted_y_axis=options['image_view'],\n axes_font_name=options['axes_font_name'],\n axes_font_size=options['axes_font_size'],\n axes_font_style=options['axes_font_style'],\n axes_font_weight=options['axes_font_weight'],\n axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,\n axes_x_ticks=options['axes_x_ticks'],\n axes_y_ticks=options['axes_y_ticks'])\n\n # Get instance range\n instance_range = mean.range()\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Update info\n update_info(level, instance_range)\n\n # Define function that updates the info text\n def update_info(level, instance_range):\n text_per_line = [\n \"> Level {} out of {}\".format(level + 1, n_levels),\n \"> {} components in total\".format(shape_model[level].n_components),\n \"> {} active components\".format(\n shape_model[level].n_active_components),\n \"> {:.1f}% variance kept\".format(\n shape_model[level].variance_ratio() * 100),\n \"> Instance range: {:.1f} x {:.1f}\".format(instance_range[0],\n instance_range[1]),\n \"> {} landmark points, {} features\".format(\n shape_model[level].mean().n_points,\n shape_model[level].n_features)]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n plt.subplot(121)\n save_figure_wid.renderer = shape_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n shape_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n mode_dict = OrderedDict()\n mode_dict['Deformation'] = 1\n mode_dict['Vectors'] = 2\n mode_wid = ipywidgets.RadioButtons(\n options=mode_dict, 
description='Mode', value=1,\n layout=ipywidgets.Layout(width='6cm'))\n mode_wid.observe(render_function, names='value', type='change')\n mean_wid = SwitchWidget(\n selected_value=False, description='Render mean shape',\n description_location='right', switch_type='checkbox')\n mean_wid.observe(render_function, names='selected_values', type='change')\n\n # Function that controls mean shape checkbox visibility\n def mean_visible(change):\n if change['new'] == 1:\n mean_wid.button_wid.disabled = False\n else:\n mean_wid.button_wid.disabled = True\n mean_wid.set_widget_state(False, allow_callback=False)\n mode_wid.observe(mean_visible, names='value', type='change')\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n labels = None\n if hasattr(shape_model[0].mean(), 'labels'):\n labels = shape_model[0].mean().labels\n shape_options_wid = Shape2DOptionsWidget(\n labels=labels, render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n labels=None, axes_x_limits=0.1, axes_y_limits=0.1,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n model_parameters_wid.set_widget_state(\n n_parameters=n_parameters[change['new']],\n params_str='Parameter ', allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n radio_children = [level_wid, mode_wid, mean_wid]\n else:\n radio_children = [mode_wid, mean_wid]\n radio_wids = ipywidgets.VBox(radio_children)\n tmp_wid = ipywidgets.HBox([radio_wids, model_parameters_wid])\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_shape_model_3d(shape_model, n_parameters=5, mode='multiple',\n parameters_bounds=(-15.0, 15.0)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical 3D shape model.\n\n Parameters\n ----------\n shape_model : `list` of `menpo.shape.PCAModel` or `subclass`\n 
The multi-scale shape model to be visualized. Note that each level can\n have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n out = ipywidgets.Output()\n ipydisplay.display(out)\n with out:\n ipydisplay.clear_output(wait=True)\n print('Initializing...')\n\n # Make sure that shape_model is a list even with one member\n if not isinstance(shape_model, list):\n shape_model = [shape_model]\n\n # Get the number of levels (i.e. number of shape models)\n n_levels = len(shape_model)\n\n # Check if the model is TriMesh or any other 3D shape class\n is_trimesh = isinstance(shape_model[0].template_instance, TriMesh)\n\n # Define the styling options\n main_style = 'warning'\n\n # Get the maximum number of components per level\n max_n_params = [sp.n_active_components for sp in shape_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n save_figure_wid.renderer.clear_figure()\n with out:\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Compute weights\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n shape_model[level].eigenvalues[:len(parameters)] ** 0.5)\n\n # Compute instance\n instance = shape_model[level].instance(weights)\n\n # Create options dictionary\n options = dict()\n if is_trimesh:\n options.update(shape_options_wid.selected_values)\n else:\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_mayavi'])\n # Correct options based on the type of the shape\n if hasattr(instance, 'labels'):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...use with_labels\n options['with_labels'] = \\\n shape_options_wid.selected_values['with_labels']\n # ...correct colours\n line_colour = []\n marker_colour = []\n for lbl in options['with_labels']:\n idx = instance.labels.index(lbl)\n line_colour.append(options['line_colour'][idx])\n marker_colour.append(options['marker_colour'][idx])\n options['line_colour'] = line_colour\n options['marker_colour'] = marker_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_colour'] = options['marker_colour'][0]\n\n # Update info\n update_info(level, instance.range())\n\n # Render 
instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **options)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(level, instance_range):\n text_per_line = [\n \"> Level {} out of {}\".format(level + 1, n_levels),\n \"> {} components in total\".format(shape_model[level].n_components),\n \"> {} active components\".format(\n shape_model[level].n_active_components),\n \"> {:.1f}% variance kept\".format(\n shape_model[level].variance_ratio() * 100),\n \"> Instance range: {:.1f} x {:.1f}\".format(instance_range[0],\n instance_range[1]),\n \"> {} points\".format(\n shape_model[level].mean().n_points)]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n with out:\n ipydisplay.clear_output(wait=True)\n plt.subplot(121)\n shape_model[level].plot_eigenvalues_ratio()\n plt.subplot(122)\n shape_model[level].plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n if is_trimesh:\n shape_options_wid = Mesh3DOptionsWidget(textured=False,\n render_function=render_function)\n else:\n labels = None\n if hasattr(shape_model[0].mean(), 'labels'):\n labels = shape_model[0].mean().labels\n shape_options_wid = Shape3DOptionsWidget(labels=labels,\n render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['numbering_mayavi'], labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n model_parameters_wid.set_widget_state(\n n_parameters=n_parameters[change['new']],\n params_str='Parameter ', allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_wid = ipywidgets.HBox([level_wid, model_parameters_wid])\n else:\n tmp_wid = ipywidgets.HBox(children=[model_parameters_wid])\n if is_trimesh:\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, info_wid, save_figure_wid])\n tab_titles = ['Model', 'Mesh', 'Info', 'Export']\n else:\n options_box = ipywidgets.Tab(\n children=[tmp_wid, shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Shape', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = 
LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n with out:\n print_dynamic('')\n\n\ndef visualize_appearance_model(appearance_model, n_parameters=5,\n mode='multiple', parameters_bounds=(-3.0, 3.0),\n figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical appearance model.\n\n Parameters\n ----------\n appearance_model : `list` of `menpo.model.PCAModel` or subclass\n The multi-scale appearance model to be visualized. Note that each level\n can have different number of components.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that appearance_model is a list even with one member\n if not isinstance(appearance_model, list):\n appearance_model = [appearance_model]\n\n # Get the number of levels (i.e. 
number of appearance models)\n n_levels = len(appearance_model)\n\n # Define the styling options\n main_style = 'success'\n\n # Get the maximum number of components per level\n max_n_params = [ap.n_active_components for ap in appearance_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Compute weights and instance\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)\n instance = appearance_model[level].instance(weights)\n image_is_masked = isinstance(instance, MaskedImage)\n g = landmark_options_wid.selected_values['landmarks']['group']\n\n # Create options dictionary\n options = dict()\n options.update(landmark_options_wid.selected_values['lines'])\n options.update(landmark_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n options.update(renderer_options_wid.selected_values['legend'])\n options.update(image_options_wid.selected_values)\n options.update(landmark_options_wid.selected_values['landmarks'])\n\n # Correct options based on the type of the shape\n if (instance.has_landmarks and\n hasattr(instance.landmarks[g], 'labels')):\n # If the shape is a LabelledPointUndirectedGraph ...\n # ...correct colours\n line_colour = []\n marker_face_colour = []\n marker_edge_colour = []\n for lbl in options['with_labels']:\n id = instance.landmarks[g].labels.index(lbl)\n line_colour.append(options['line_colour'][id])\n marker_face_colour.append(options['marker_face_colour'][id])\n marker_edge_colour.append(options['marker_edge_colour'][id])\n options['line_colour'] = line_colour\n options['marker_face_colour'] = marker_face_colour\n options['marker_edge_colour'] = marker_edge_colour\n else:\n # If shape is PointCloud, TriMesh or PointGraph\n # ...correct colours\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] *\n figure_size[1])\n\n # Render shape with selected options\n save_figure_wid.renderer = render_image(\n image=instance, renderer=save_figure_wid.renderer,\n image_is_masked=image_is_masked, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(instance, level, g)\n\n # Define function that updates the info text\n def update_info(image, level, group):\n lvl_app_mod = appearance_model[level]\n lp = 0 if group is None else image.landmarks[group].n_points\n text_per_line = [\n \"> Level: {} out of {}.\".format(level + 1, n_levels),\n \"> {} components in total.\".format(lvl_app_mod.n_components),\n \"> {} active components.\".format(lvl_app_mod.n_active_components),\n \"> {:.1f}% variance kept.\".format(\n lvl_app_mod.variance_ratio() * 100),\n \"> Reference shape of size {} with {} channel{}.\".format(\n image._str_shape(),\n image.n_channels, 's' * 
(image.n_channels > 1)),\n \"> {} features.\".format(lvl_app_mod.n_features),\n \"> {} landmark points.\".format(lp),\n \"> Instance: min={:.3f}, max={:.3f}\".format(image.pixels.min(),\n image.pixels.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n plt.subplot(121)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n groups_keys, labels_keys = extract_groups_labels_from_image(\n appearance_model[0].mean())\n image_options_wid = ImageOptionsWidget(\n n_channels=appearance_model[0].mean().n_channels,\n image_is_masked=isinstance(appearance_model[0].mean(),\n MaskedImage),\n render_function=render_function)\n landmark_options_wid = LandmarkOptionsWidget(\n group_keys=groups_keys, labels_keys=labels_keys,\n type='2D', render_function=render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib', 'legend'],\n axes_x_limits=None, axes_y_limits=None, labels=None,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n tmp_children = [model_parameters_wid]\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n value = change['new']\n # Update model parameters widget\n model_parameters_wid.set_widget_state(\n n_parameters[value], params_str='Parameter ',\n allow_callback=False)\n\n # Update landmarks options\n g_keys, l_keys = extract_groups_labels_from_image(\n appearance_model[value].mean())\n landmark_options_wid.set_widget_state(\n group_keys=g_keys, labels_keys=l_keys, allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=appearance_model[value].mean().n_channels,\n image_is_masked=isinstance(\n appearance_model[value].mean(), MaskedImage),\n allow_callback=True)\n\n # Create pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_children.insert(0, 
level_wid)\n tmp_wid = ipywidgets.HBox(tmp_children)\n options_box = ipywidgets.Tab(\n children=[tmp_wid, image_options_wid, landmark_options_wid,\n renderer_options_wid, info_wid, save_figure_wid])\n tab_titles = ['Model', 'Image', 'Landmarks', 'Renderer', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_patch_appearance_model(appearance_model, centers,\n n_parameters=5, mode='multiple',\n parameters_bounds=(-3.0, 3.0),\n figure_size=(7, 7)):\n r\"\"\"\n Widget that allows the dynamic visualization of a multi-scale linear\n statistical patch-based appearance model.\n\n Parameters\n ----------\n appearance_model : `list` of `menpo.model.PCAModel` or subclass\n The multi-scale patch-based appearance model to be visualized. Note that\n each level can have different number of components.\n centers : `list` of `menpo.shape.PointCloud` or subclass\n The centers to set the patches around. If the `list` has only one\n `menpo.shape.PointCloud` then this will be used for all appearance model\n levels. Otherwise, it needs to have the same length as\n `appearance_model`.\n n_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n figure_size : (`int`, `int`), optional\n The size of the plotted figures.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print('Initializing...')\n\n # Make sure that appearance_model is a list even with one member\n if not isinstance(appearance_model, list):\n appearance_model = [appearance_model]\n\n # Get the number of levels (i.e. 
number of appearance models)\n n_levels = len(appearance_model)\n\n # Make sure that centers is a list even with one pointcloud\n if not isinstance(centers, list):\n centers = [centers] * n_levels\n elif isinstance(centers, list) and len(centers) == 1:\n centers *= n_levels\n\n # Define the styling options\n main_style = 'success'\n\n # Get the maximum number of components per level\n max_n_params = [ap.n_active_components for ap in appearance_model]\n\n # Check the given number of parameters (the returned n_parameters is a list\n # of len n_scales)\n n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)\n\n # Define render function\n def render_function(change):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = level_wid.value if n_levels > 1 else 0\n\n # Compute weights and instance\n parameters = model_parameters_wid.selected_values\n weights = (parameters *\n appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)\n instance = appearance_model[level].instance(weights)\n\n # Create options dictionary\n options = dict()\n options.update(shape_options_wid.selected_values['lines'])\n options.update(shape_options_wid.selected_values['markers'])\n options.update(\n renderer_options_wid.selected_values['numbering_matplotlib'])\n options.update(renderer_options_wid.selected_values['axes'])\n image_options = dict(image_options_wid.selected_values)\n del image_options['masked_enabled']\n options.update(image_options)\n options.update(patch_options_wid.selected_values)\n options['line_colour'] = options['line_colour'][0]\n options['marker_face_colour'] = options['marker_face_colour'][0]\n options['marker_edge_colour'] = options['marker_edge_colour'][0]\n\n # Get figure size\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * figure_size[0],\n renderer_options_wid.selected_values['zoom_one'] * figure_size[1])\n\n # Render image with selected options\n save_figure_wid.renderer = render_patches(\n patches=instance.pixels, patch_centers=centers[level],\n renderer=save_figure_wid.renderer, figure_size=new_figure_size,\n **options)\n\n # Update info\n update_info(instance, level)\n\n # Define function that updates the info text\n def update_info(image, level):\n lvl_app_mod = appearance_model[level]\n text_per_line = [\n \"> Level: {} out of {}.\".format(level + 1, n_levels),\n \"> {} components in total.\".format(lvl_app_mod.n_components),\n \"> {} active components.\".format(lvl_app_mod.n_active_components),\n \"> {:.1f}% variance kept.\".format(\n lvl_app_mod.variance_ratio() * 100),\n \"> Each patch has size {}H x {}W with {} channel{}.\".format(\n image.pixels.shape[3], image.pixels.shape[4],\n image.pixels.shape[2], 's' * (image.pixels.shape[2] > 1)),\n \"> {} features.\".format(lvl_app_mod.n_features),\n \"> {} landmark points.\".format(image.pixels.shape[0]),\n \"> Instance: min={:.3f}, max={:.3f}\".format(image.pixels.min(),\n image.pixels.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot variance function\n def plot_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Get selected level\n level = 0\n if n_levels > 1:\n level = level_wid.value\n\n # Render\n new_figure_size = (\n renderer_options_wid.selected_values['zoom_one'] * 10,\n renderer_options_wid.selected_values['zoom_one'] * 3)\n 
plt.subplot(121)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False)\n plt.subplot(122)\n save_figure_wid.renderer = \\\n appearance_model[level].plot_eigenvalues_cumulative_ratio(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n figure_size=new_figure_size)\n save_figure_wid.renderer.force_draw()\n\n # Create widgets\n model_parameters_wid = LinearModelParametersWidget(\n n_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_variance,\n animation_step=0.5, interval=0., loop_enabled=True,\n continuous_update=False)\n shape_options_wid = Shape2DOptionsWidget(\n labels=None, render_function=None)\n shape_options_wid.line_options_wid.render_lines_switch.button_wid.value = False\n shape_options_wid.add_render_function(render_function)\n patch_options_wid = PatchOptionsWidget(\n n_patches=appearance_model[0].mean().pixels.shape[0],\n n_offsets=appearance_model[0].mean().pixels.shape[1],\n render_function=render_function)\n image_options_wid = ImageOptionsWidget(\n n_channels=appearance_model[0].mean().pixels.shape[2],\n image_is_masked=isinstance(appearance_model[0].mean(), MaskedImage),\n render_function=None)\n image_options_wid.interpolation_checkbox.button_wid.value = False\n image_options_wid.add_render_function(render_function)\n renderer_options_wid = RendererOptionsWidget(\n options_tabs=['zoom_one', 'axes', 'numbering_matplotlib'], labels=None,\n axes_x_limits=None, axes_y_limits=None, render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMatplotlibFigureOptionsWidget()\n\n # Group widgets\n tmp_children = [model_parameters_wid]\n if n_levels > 1:\n # Define function that updates options' widgets state\n def update_widgets(change):\n value = change['new']\n # Update model parameters widget\n model_parameters_wid.set_widget_state(\n n_parameters[value], params_str='Parameter ',\n allow_callback=False)\n\n # Update patch options\n patch_options_wid.set_widget_state(\n n_patches=appearance_model[value].mean().pixels.shape[0],\n n_offsets=appearance_model[value].mean().pixels.shape[1],\n allow_callback=False)\n\n # Update channels options\n image_options_wid.set_widget_state(\n n_channels=appearance_model[value].mean().pixels.shape[2],\n image_is_masked=isinstance(appearance_model[value].mean(),\n MaskedImage),\n allow_callback=True)\n\n # Define pyramid radiobuttons\n radio_str = OrderedDict()\n for l in range(n_levels):\n if l == 0:\n radio_str[\"Level {} (low)\".format(l)] = l\n elif l == n_levels - 1:\n radio_str[\"Level {} (high)\".format(l)] = l\n else:\n radio_str[\"Level {}\".format(l)] = l\n level_wid = ipywidgets.RadioButtons(\n options=radio_str, description='Pyramid', value=n_levels-1,\n layout=ipywidgets.Layout(width='6cm'))\n level_wid.observe(update_widgets, names='value', type='change')\n level_wid.observe(render_function, names='value', type='change')\n tmp_children.insert(0, level_wid)\n tmp_wid = ipywidgets.HBox(tmp_children)\n options_box = ipywidgets.Tab(\n children=[tmp_wid, patch_options_wid, image_options_wid,\n shape_options_wid, renderer_options_wid, info_wid,\n save_figure_wid])\n tab_titles = ['Model', 'Patches', 'Channels', 'Shape', 'Renderer', 'Info',\n 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = 
LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n\n\ndef visualize_morphable_model(mm, n_shape_parameters=5, n_texture_parameters=5,\n mode='multiple', parameters_bounds=(-15.0, 15.0)):\n r\"\"\"\n Widget that allows the dynamic visualization of a 3D Morphable Model.\n\n Parameters\n ----------\n mm : `menpo3d.morhpablemodel.ColouredMorphableModel` or `subclass`\n The multi-scale 3D Morphable Model to be visualized.\n n_shape_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the shape parameters\n sliders. If `int`, then the number of sliders per level is the minimum\n between `n_parameters` and the number of active components per level.\n If `list` of `int`, then a number of sliders is defined per level.\n If ``None``, all the active components per level will have a slider.\n n_texture_parameters : `int` or `list` of `int` or ``None``, optional\n The number of principal components to be used for the tecture\n parameters sliders. If `int`, then the number of sliders per level is\n the minimum between `n_parameters` and the number of active components\n per level. If `list` of `int`, then a number of sliders is defined per\n level. If ``None``, all the active components per level will have a\n slider.\n mode : ``{'single', 'multiple'}``, optional\n If ``'single'``, then only a single slider is constructed along with a\n drop down menu. 
If ``'multiple'``, then a slider is constructed for each\n parameter.\n parameters_bounds : (`float`, `float`), optional\n The minimum and maximum bounds, in std units, for the sliders.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n print_dynamic('Initializing...')\n\n # Define the styling options\n main_style = 'info'\n\n # Check the given number of parameters\n n_shape_parameters = check_n_parameters(\n n_shape_parameters, 1, [mm.shape_model.n_active_components])\n n_texture_parameters = check_n_parameters(\n n_texture_parameters, 1, [mm.texture_model.n_active_components])\n\n # Define render function\n def render_function(change):\n # Clear current figure\n save_figure_wid.renderer.clear_figure()\n ipydisplay.clear_output(wait=True)\n\n # Compute weights\n shape_weights = shape_model_parameters_wid.selected_values\n shape_weights = (\n shape_weights *\n mm.shape_model.eigenvalues[:len(shape_weights)] ** 0.5)\n texture_weights = texture_model_parameters_wid.selected_values\n texture_weights = (\n texture_weights *\n mm.texture_model.eigenvalues[:len(texture_weights)] ** 0.5)\n instance = mm.instance(shape_weights=shape_weights,\n texture_weights=texture_weights)\n # TODO: Is this really needed?\n instance = instance.clip_texture()\n\n # Update info\n update_info(mm, instance)\n\n # Render instance\n save_figure_wid.renderer = instance.view(\n figure_id=save_figure_wid.renderer.figure_id, new_figure=False,\n **mesh_options_wid.selected_values)\n\n # Force rendering\n save_figure_wid.renderer.force_draw()\n\n # Define function that updates the info text\n def update_info(mm, instance):\n text_per_line = [\n \"> {} vertices, {} triangles\".format(mm.n_vertices,\n mm.n_triangles),\n \"> {} shape components ({:.2f}% of variance)\".format(\n mm.shape_model.n_components,\n mm.shape_model.variance_ratio() * 100),\n \"> {} texture channels\".format(mm.n_channels),\n \"> {} texture components ({:.2f}% of variance)\".format(\n mm.texture_model.n_components,\n mm.texture_model.variance_ratio() * 100),\n \"> Instance: min={:.3f} , max={:.3f}\".format(\n instance.colours.min(), instance.colours.max())]\n info_wid.set_widget_state(text_per_line=text_per_line)\n\n # Plot shape variance function\n def plot_shape_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Render\n plt.subplot(121)\n mm.shape_model.plot_eigenvalues_ratio()\n plt.subplot(122)\n mm.shape_model.plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Plot texture variance function\n def plot_texture_variance(name):\n # Clear current figure, but wait until the generation of the new data\n # that will be rendered\n ipydisplay.clear_output(wait=True)\n\n # Render\n plt.subplot(121)\n mm.texture_model.plot_eigenvalues_ratio()\n plt.subplot(122)\n mm.texture_model.plot_eigenvalues_cumulative_ratio()\n plt.show()\n\n # Create widgets\n shape_model_parameters_wid = LinearModelParametersWidget(\n n_shape_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_shape_variance,\n animation_step=0.5, interval=0., loop_enabled=True)\n texture_model_parameters_wid = LinearModelParametersWidget(\n n_texture_parameters[0], render_function, params_str='Parameter ',\n mode=mode, params_bounds=parameters_bounds, 
params_step=0.1,\n plot_variance_visible=True, plot_variance_function=plot_texture_variance,\n animation_step=0.5, interval=0., loop_enabled=True)\n mesh_options_wid = Mesh3DOptionsWidget(textured=True,\n render_function=render_function)\n info_wid = TextPrintWidget(text_per_line=[''])\n save_figure_wid = SaveMayaviFigureOptionsWidget()\n\n # Group widgets\n model_parameters_wid = ipywidgets.HBox(\n [ipywidgets.Tab([shape_model_parameters_wid,\n texture_model_parameters_wid])])\n model_parameters_wid.children[0].set_title(0, 'Shape')\n model_parameters_wid.children[0].set_title(1, 'Texture')\n options_box = ipywidgets.Tab([model_parameters_wid, mesh_options_wid,\n info_wid, save_figure_wid])\n tab_titles = ['Model', 'Mesh', 'Info', 'Export']\n for (k, tl) in enumerate(tab_titles):\n options_box.set_title(k, tl)\n logo_wid = LogoWidget(style=main_style)\n logo_wid.layout.margin = '0px 10px 0px 0px'\n wid = ipywidgets.HBox([logo_wid, options_box])\n\n # Set widget's style\n wid.box_style = main_style\n wid.layout.border = '2px solid ' + map_styles_to_hex_colours(main_style)\n\n # Display final widget\n final_box = ipywidgets.Box([wid])\n final_box.layout.display = 'flex'\n ipydisplay.display(final_box)\n\n # Trigger initial visualization\n render_function({})\n print_dynamic('')\n\n\ndef webcam_widget(canvas_width=640, hd=True, n_preview_windows=5):\n r\"\"\"\n Webcam widget for taking snapshots. The snapshots are dynamically previewed\n in a FIFO stack of thumbnails.\n\n Parameters\n ----------\n canvas_width : `int`, optional\n The initial width of the rendered canvas. Note that this doesn't actually\n change the webcam resolution. It simply rescales the rendered image, as\n well as the size of the returned screenshots.\n hd : `bool`, optional\n If ``True``, then the webcam will be set to high definition (HD), i.e.\n 720 x 1280. Otherwise the default resolution will be used.\n n_preview_windows : `int`, optional\n The number of preview thumbnails that will be used as a FIFO stack to\n show the captured screenshots. It must be at least 4.\n\n Returns\n -------\n snapshots : `list` of `menpo.image.Image`\n The list of captured images.\n \"\"\"\n # Ensure that the code is being run inside a Jupyter kernel!\n from .utils import verify_ipython_and_kernel\n verify_ipython_and_kernel()\n\n # Set update function\n images = []\n\n def update(_):\n images.append(wid.selected_values[-1])\n\n # Create widgets\n wid = CameraSnapshotWidget(\n canvas_width=canvas_width, hd=hd, n_preview_windows=n_preview_windows,\n preview_windows_margin=3, style='danger', preview_style='warning',\n render_function=update)\n wid.container.layout.border = (\n '2px solid' + map_styles_to_hex_colours('danger'))\n\n # Display widget\n ipydisplay.display(wid)\n\n # Return\n return images\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.min",
"matplotlib.collections.LineCollection"
]
] |
sumanthd17/mt5 | [
"c99b4e3ad1c69908c852c730a1323ccb52d48f58"
] | [
"multilingual_t5/baseline_mr/baseline_mr.py"
] | [
"\"\"\"baseline_mr dataset.\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\n\n# TODO(baseline_mr): Markdown description that will appear on the catalog page.\n_DESCRIPTION = \"\"\"\nDescription is **formatted** as markdown.\n\nIt should also contain any processing which has been applied (if any),\n(e.g. corrupted example skipped, images cropped,...):\n\"\"\"\n\n# TODO(baseline_mr): BibTeX citation\n_CITATION = \"\"\"\n\"\"\"\n\n\nclass BaselineMr(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for baseline_mr dataset.\"\"\"\n\n VERSION = tfds.core.Version('1.0.0')\n RELEASE_NOTES = {\n '1.0.0': 'Initial release.',\n }\n\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n # TODO(baseline_mr): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'source': tfds.features.Text(),\n 'target': tfds.features.Text(),\n }),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n # TODO(baseline_mr): Downloads the data and defines the splits\n path = dl_manager.download_and_extract('https://storage.googleapis.com/ai4b-anuvaad-nmt/baselines/mT5/baseline_mr/strict-en-mr.zip')\n\n # TODO(baseline_mr): Returns the Dict[split names, Iterator[Key, Example]]\n return {\n 'train': self._generate_examples(source=path/'en-mr/train/train.mr', target=path/'en-mr/train/train.en'),\n 'validation': self._generate_examples(source=path/'en-mr/dev/dev.mr', target=path/'en-mr/dev/dev.en')\n }\n\n def _generate_examples(self, source, target):\n \"\"\"Yields examples.\"\"\"\n # TODO(baseline_mr): Yields (key, example) tuples from the dataset\n src = tf.io.gfile.GFile(source, 'r').readlines()\n tgt = tf.io.gfile.GFile(target, 'r').readlines()\n for idx, row in enumerate(zip(src, tgt)):\n yield idx, {\n 'source': row[0],\n 'target': row[1],\n }\n"
] | [
[
"tensorflow.io.gfile.GFile"
]
] |
dixiak/gnes | [
"12513d29157a06bd22923717fd0c19a856f20193"
] | [
"gnes/preprocessor/video/shotdetect.py"
] | [
"# Tencent is pleased to support the open source community by making GNES available.\n#\n# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nimport numpy as np\n\nfrom ..base import BaseVideoPreprocessor\nfrom ..helper import compute_descriptor, compare_descriptor, detect_peak_boundary, compare_ecr\nfrom ..io_utils import video as video_util\nfrom ...proto import gnes_pb2, array2blob\n\n\nclass ShotDetectPreprocessor(BaseVideoPreprocessor):\n store_args_kwargs = True\n\n def __init__(self,\n frame_size: str = '192:168',\n descriptor: str = 'block_hsv_histogram',\n distance_metric: str = 'bhattacharya',\n detect_method: str = 'threshold',\n frame_rate: int = 10,\n frame_num: int = -1,\n *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.frame_size = frame_size\n self.descriptor = descriptor\n self.distance_metric = distance_metric\n self.detect_method = detect_method\n self.frame_rate = frame_rate\n self.frame_num = frame_num\n self._detector_kwargs = kwargs\n\n def detect_shots(self, frames: 'np.ndarray') -> List[List['np.ndarray']]:\n descriptors = []\n for frame in frames:\n descriptor = compute_descriptor(\n frame, method=self.descriptor, **self._detector_kwargs)\n descriptors.append(descriptor)\n\n # compute distances between frames\n if self.distance_metric == 'edge_change_ration':\n dists = compare_ecr(descriptors)\n else:\n dists = [\n compare_descriptor(pair[0], pair[1], self.distance_metric)\n for pair in zip(descriptors[:-1], descriptors[1:])\n ]\n\n shot_bounds = detect_peak_boundary(dists, self.detect_method)\n\n shots = []\n for ci in range(0, len(shot_bounds) - 1):\n shots.append(frames[shot_bounds[ci]:shot_bounds[ci + 1]])\n\n return shots\n\n def apply(self, doc: 'gnes_pb2.Document') -> None:\n super().apply(doc)\n\n if doc.raw_bytes:\n all_frames = video_util.capture_frames(\n input_data=doc.raw_bytes,\n scale=self.frame_size,\n fps=self.frame_rate,\n vframes=self.frame_num)\n num_frames = len(all_frames)\n assert num_frames > 0\n shots = self.detect_shots(all_frames)\n\n for ci, frames in enumerate(shots):\n c = doc.chunks.add()\n c.doc_id = doc.doc_id\n # chunk_data = np.concatenate(frames, axis=0)\n chunk_data = np.array(frames)\n c.blob.CopyFrom(array2blob(chunk_data))\n c.offset = ci\n c.weight = len(frames) / num_frames\n else:\n self.logger.error('bad document: \"raw_bytes\" is empty!')\n"
] | [
[
"numpy.array"
]
] |
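The shot detector above delegates the real work to GNES helpers (`compute_descriptor`, `compare_descriptor`, `detect_peak_boundary`). As a rough, self-contained illustration of the same pipeline idea — per-frame descriptors, distances between consecutive frames, then thresholding the distance series — here is a NumPy-only sketch; the grayscale-histogram descriptor and the 0.5 threshold are arbitrary choices for the toy data, not the library's defaults.

```python
import numpy as np

def toy_shot_boundaries(frames, threshold=0.5):
    """frames: uint8 array of shape (num_frames, H, W, 3)."""
    hists = []
    for frame in frames:
        h, _ = np.histogram(frame.mean(axis=-1), bins=32, range=(0, 255))
        hists.append(h / h.sum())  # normalized grayscale histogram as a crude descriptor
    # total-variation distance between consecutive descriptors, in [0, 1]
    dists = [0.5 * np.abs(a - b).sum() for a, b in zip(hists[:-1], hists[1:])]
    return [i + 1 for i, d in enumerate(dists) if d > threshold]

rng = np.random.default_rng(0)
dark = rng.integers(0, 60, size=(5, 32, 32, 3), dtype=np.uint8)
bright = rng.integers(180, 255, size=(5, 32, 32, 3), dtype=np.uint8)
print(toy_shot_boundaries(np.concatenate([dark, bright])))  # -> [5]
```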
chokyzhou/gym-flappy-bird | [
"ffe1089501f3e2e113a8868cd27480653dbe0ef7"
] | [
"src/flappy_bird_gym/envs/flappy_bird_env_simple.py"
] | [
"#\n# Copyright (c) 2020 Gabriel Nogueira (Talendar)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\n\"\"\" Implementation of a Flappy Bird OpenAI Gym environment that yields simple\nnumerical information about the game's state as observations.\n\"\"\"\n\nfrom typing import Dict, Tuple, Optional, Union\n\nimport gym\nimport numpy as np\nimport pygame\n\nfrom flappy_bird_gym.envs.game_logic import FlappyBirdLogic\nfrom flappy_bird_gym.envs.game_logic import PIPE_WIDTH, PIPE_HEIGHT\nfrom flappy_bird_gym.envs.game_logic import PLAYER_WIDTH, PLAYER_HEIGHT\nfrom flappy_bird_gym.envs.renderer import FlappyBirdRenderer\n\n\nclass FlappyBirdEnvSimple(gym.Env):\n \"\"\" Flappy Bird Gym environment that yields simple observations.\n\n The observations yielded by this environment are simple numerical\n information about the game's state. Specifically, the observations are:\n\n * Horizontal distance to the next pipe;\n * Difference between the player's y position and the next hole's y\n position.\n\n The reward received by the agent in each step is equal to the score obtained\n by the agent in that step. A score point is obtained every time the bird\n passes a pipe.\n\n Args:\n screen_size (Tuple[int, int]): The screen's width and height.\n normalize_obs (bool): If `True`, the observations will be normalized\n before being returned.\n pipe_gap (int): Space between a lower and an upper pipe.\n bird_color (str): Color of the flappy bird. The currently available\n colors are \"yellow\", \"blue\" and \"red\".\n pipe_color (str): Color of the pipes. The currently available colors are\n \"green\" and \"red\".\n background (Optional[str]): Type of background image. The currently\n available types are \"day\" and \"night\". 
If `None`, no background will\n be drawn.\n \"\"\"\n\n metadata = {'render.modes': ['human']}\n\n def __init__(self,\n screen_size: Tuple[int, int] = (288, 512),\n normalize_obs: bool = True,\n pipe_gap: int = 100,\n bird_color: str = \"yellow\",\n pipe_color: str = \"green\",\n background: Optional[str] = \"day\") -> None:\n self.action_space = gym.spaces.Discrete(2)\n self.observation_space = gym.spaces.Box(-np.inf, np.inf,\n shape=(3,),\n dtype=np.float32)\n self._screen_size = screen_size\n self._normalize_obs = normalize_obs\n self._pipe_gap = pipe_gap\n\n self._game = None\n self._renderer = None\n\n self._bird_color = bird_color\n self._pipe_color = pipe_color\n self._bg_type = background\n\n def _get_observation(self):\n up_pipe = low_pipe = None\n h_dist = 0\n for up_pipe, low_pipe in zip(self._game.upper_pipes,\n self._game.lower_pipes):\n h_dist = (low_pipe[\"x\"] + PIPE_WIDTH / 2\n - (self._game.player_x - PLAYER_WIDTH / 2))\n h_dist += 3 # extra distance to compensate for the buggy hit-box\n if h_dist >= 0:\n break\n\n upper_pipe_y = up_pipe[\"y\"] + PIPE_HEIGHT\n lower_pipe_y = low_pipe[\"y\"]\n player_y = self._game.player_y\n y_vel = self._game.player_vel_y\n\n v_dist = (upper_pipe_y + lower_pipe_y) / 2 - (player_y\n + PLAYER_HEIGHT/2)\n\n if self._normalize_obs:\n h_dist /= self._screen_size[0]\n v_dist /= self._screen_size[1]\n\n return np.array([\n h_dist,\n v_dist,\n y_vel,\n ])\n\n def step(self,\n action: Union[FlappyBirdLogic.Actions, int],\n ) -> Tuple[np.ndarray, float, bool, Dict]:\n \"\"\" Given an action, updates the game state.\n\n Args:\n action (Union[FlappyBirdLogic.Actions, int]): The action taken by\n the agent. Zero (0) means \"do nothing\" and one (1) means \"flap\".\n\n Returns:\n A tuple containing, respectively:\n\n * an observation (horizontal distance to the next pipe;\n difference between the player's y position and the next hole's\n y position);\n * a reward (always 1);\n * a status report (`True` if the game is over and `False`\n otherwise);\n * an info dictionary.\n \"\"\"\n alive = self._game.update_state(action)\n obs = self._get_observation()\n\n reward = 1\n\n done = not alive\n info = {\"score\": self._game.score}\n\n return obs, reward, done, info\n\n def reset(self):\n \"\"\" Resets the environment (starts a new game). \"\"\"\n self._game = FlappyBirdLogic(screen_size=self._screen_size,\n pipe_gap_size=self._pipe_gap)\n if self._renderer is not None:\n self._renderer.game = self._game\n\n return self._get_observation()\n\n def render(self, mode='human') -> None:\n \"\"\" Renders the next frame. \"\"\"\n if self._renderer is None:\n self._renderer = FlappyBirdRenderer(screen_size=self._screen_size,\n bird_color=self._bird_color,\n pipe_color=self._pipe_color,\n background=self._bg_type)\n self._renderer.game = self._game\n self._renderer.make_display()\n\n self._renderer.draw_surface(show_score=True)\n self._renderer.update_display()\n\n def close(self):\n \"\"\" Closes the environment. \"\"\"\n if self._renderer is not None:\n pygame.display.quit()\n self._renderer = None\n super().close()\n"
] | [
[
"numpy.array"
]
] |
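A short usage sketch for the environment above, following the standard Gym loop shown by its `reset`/`step` signatures (no `render()` call, so pygame never opens a window); the random policy is only for illustration.

```python
from flappy_bird_gym.envs.flappy_bird_env_simple import FlappyBirdEnvSimple

env = FlappyBirdEnvSimple()
obs = env.reset()                         # np.array([h_dist, v_dist, y_vel])
done, total_reward = False, 0.0
while not done:
    action = env.action_space.sample()    # 0 = do nothing, 1 = flap
    obs, reward, done, info = env.step(action)
    total_reward += reward
env.close()
print(total_reward, info['score'])
```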
mapattacker/flask-serverless | [
"9612b7cbc5157770d88f352e0676911658c4de9a"
] | [
"project/app.py"
] | [
"import pickle\nimport traceback\n\nimport numpy as np\nfrom flask import Flask, request\n\nfrom config import MODELPATH, DEBUG\n\n\napp = Flask(__name__)\nmodel = pickle.load(open(MODELPATH, 'rb'))\n\n\[email protected](\"/predict\", methods=[\"POST\"])\ndef predict():\n \"\"\"{\"input\": [5.8, 2.8, 5.1, 2.4]}\"\"\"\n try:\n content = request.json\n sample = content[\"input\"]\n\n sample = np.array(sample).reshape(1, -1)\n prediction = model.predict(sample).tolist()[0]\n\n return {\"prediction\": prediction}\n except Exception as e:\n tb = traceback.format_exc()\n return {\"errorMessages\": tb.replace(\"\\n\",\"\")}\n\n\nif __name__ == \"__main__\":\n app.run(debug=DEBUG)"
] | [
[
"numpy.array"
]
] |
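A client-side sketch for the `/predict` route above, assuming the app is running locally on Flask's default port; the payload matches the route's docstring, and the exact prediction depends on whatever model was pickled at `MODELPATH`.

```python
import requests

resp = requests.post(
    'http://127.0.0.1:5000/predict',
    json={'input': [5.8, 2.8, 5.1, 2.4]},   # same payload shown in the route's docstring
)
print(resp.status_code, resp.json())
```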
LeonardoSaccotelli/Numerical-Calculus-Project | [
"becb480a611c9a57416127f6b0289085fe180ee4"
] | [
"5_Quadrature Formulas/Algoritmi_Quadratura.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 7 17:58:09 2020\n\n@author: Leonardo Saccotelli\n\"\"\"\n\nimport numpy as np\n\n\"\"\"\nFORMULA DEI TRAPEZI\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n\"\"\"\ndef Trapezoid(f_x, a, b):\n #Calcolo l'integrale\n T = (b-a)*(f_x(a)+f_x(b))/2\n return T\n\n\"\"\"\nFORMULA DEI TRAPEZI COMPOSTI\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n - il numero di intervallini \n\"\"\"\ndef CompositeTrapezoid(f_x, a, b, N):\n #Estrpolo N+1 intervalli equidistanti da [a,b]\n z = np.linspace(a,b,N+1)\n \n #Calcolo f_x() in ogni punto di z\n fz = f_x(z)\n \n S = 0\n #Calcolo del trapezio composto\n for i in range(1,N):\n S = S + fz[i]\n\n TC = (fz[0] + 2*S + fz[N])*(b-a)/2/N\n \n return TC\n\n\"\"\"\nFORMULA DI SIMPSON\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n\"\"\"\ndef Simpson(f_x, a, b):\n #Calcolo l'integrale\n T = ((b-a)/6) * (f_x(a) +4 * f_x((b+a)/2) + f_x(b))\n return T\n\n\"\"\"\nFORMULA DI SIMPSON COMPOSTA\n Al metodo vengono passati:\n - la funzione integranda\n - l'estremo inferiore di integrazione\n - l'estremo superiore di integrazione\n - il numero di intervalli\n\"\"\"\ndef CompositeSimpson(f, a, b, N):\n #Genero n+1 intervallini in [a,b]\n z = np.linspace(a,b,N+1)\n #Calcolo f negli intervalli z\n fz = f(z)\n \n #Definisco le somme dispari e le somme pari\n S_d = 0\n S_p = 0\n \n #Definisco l'ampiezza dei singoli intervalli\n h = (b-a)/N\n \n #Calcolo le somme dispari\n for i in range(1,N,2):\n S_d = S_d + fz[i]\n #Calcolo le somme pari\n for i in range(2,N-1,2):\n S_p = S_p + fz[i]\n \n Tsc = (fz[0] + 4*S_d + 2*S_p + fz[N])*h/3\n \n return Tsc\n \n \n \n\n\n\n"
] | [
[
"numpy.linspace"
]
] |
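A quick numerical check of the four quadrature rules above on the integral of sin(x) over [0, pi], whose exact value is 2 (module name taken from the file path; run from the same directory).

```python
import numpy as np
from Algoritmi_Quadratura import Trapezoid, CompositeTrapezoid, Simpson, CompositeSimpson

a, b = 0.0, np.pi
print(Trapezoid(np.sin, a, b))               # 0.0 -- a single trapezoid fails badly here
print(Simpson(np.sin, a, b))                 # ~2.0944
print(CompositeTrapezoid(np.sin, a, b, 64))  # ~1.9994
print(CompositeSimpson(np.sin, a, b, 64))    # ~2.0 (error on the order of 1e-7)
```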
KristinaRay/english-arabic-nmt-bot | [
"1e0baddc81b829b3ee1abe95143cdef5c1206dd2"
] | [
"data/get_dataset.py"
] | [
"import os\nimport tqdm\nimport numpy as np\nimport requests\nimport youtokentome as yttm\nfrom argparse import ArgumentParser\nfrom zipfile import ZipFile\n\nfrom config import *\nfrom data.preprocessing import *\nfrom utils import *\n\nDATA_FILE_PATH = f'{DATA_PATH}/data.zip'\nDATA_URL = 'https://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/moses/ar-en.txt.zip'\nTRG_FILE_NAME = 'OpenSubtitles.ar-en.ar'\nSRC_FILE_NAME = 'OpenSubtitles.ar-en.en'\nTRG_SAMPLE_FILE_PATH = f'{DATA_PATH}/ar.txt'\nSRC_SAMPLE_FILE_PATH = f'{DATA_PATH}/en.txt'\nTRG_ORIG_FILE_PATH = f'{DATA_PATH}/{TRG_FILE_NAME}'\nSRC_ORIG_FILE_PATH = f'{DATA_PATH}/{SRC_FILE_NAME}'\n\ndef fetch_dataset(data_url, data_path, data_file_path):\n \n \"\"\" Download data \"\"\"\n \n if not os.path.exists(data_path):\n os.makedirs(data_path)\n print(\"Dataset not found, downloading...\")\n response = requests.get(data_url, stream=True)\n filename = data_url.split(\"/\")[-1]\n total_size_in_bytes= int(response.headers.get('content-length', 0))\n progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n\n with open(data_file_path, 'wb') as file:\n for data in response.iter_content(1024):\n progress_bar.update(len(data))\n file.write(data)\n progress_bar.close()\n \n log(\"Download complete\")\n log(\"Extracting...\")\n \n zip = ZipFile(DATA_FILE_PATH, \"r\")\n zip.extract(TRG_FILE_NAME, DATA_PATH)\n zip.extract(SRC_FILE_NAME, DATA_PATH)\n zip.close()\n log(\"Extracting complete\")\n \n num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH)) # number of lines in arabic file\n num_lines_en = sum(1 for line in open(SRC_ORIG_FILE_PATH)) # number of lines in english file\n \n assert num_lines_ar == num_lines_en, \"Lost some data\"\n assert os.path.exists(data_path)\n\n else:\n\n log('Datasets are found')\n\ndef create_sample(sample_size, max_text_len):\n \"\"\"\n Clean data sample and remove duplicates\n \"\"\"\n log('Creating txt files for both languages...')\n num_lines_ar = sum(1 for line in open(TRG_ORIG_FILE_PATH)) \n sample_data_size = 2 * sample_size \n chosen_lines = set(np.random.choice(np.arange(num_lines_ar), size=sample_data_size, replace=False))\n en_sub = open(SRC_ORIG_FILE_PATH, \"r\") \n ar_sub = open(TRG_ORIG_FILE_PATH, \"r\") \n unique_pairs = set()\n with open(SRC_TXT_FILE_PATH, \"a+\") as en, open(TRG_TXT_FILE_PATH, \"a+\") as ar:\n for idx, (en_line, ar_line) in enumerate(zip(en_sub, ar_sub)):\n if idx in chosen_lines:\n src = clean_en_text(en_line)\n trg = clean_ar_text(ar_line)\n if 2 < len(src) <= max_text_len and 2 < len(trg) < max_text_len:\n if ((src + trg) not in unique_pairs and (len(unique_pairs) < sample_size)): \n en.write(src)\n ar.write(trg)\n unique_pairs.add((src + trg))\n elif len(unique_pairs) >= sample_size: \n break\n assert len(unique_pairs) == sample_size, \"Not enough data\"\n en_sub.close()\n ar_sub.close()\n en.close()\n ar.close()\n log(\"Done\")\n log(f'Number of unique pairs of sentences: {len(unique_pairs)}')\n \n\ndef main(): \n fetch_dataset(DATA_URL, DATA_PATH, DATA_FILE_PATH)\n parser = ArgumentParser()\n parser.add_argument(\"--sample_size\", required=True, type=int, help='Number of the sentence pairs to prepare for the training')\n parser.add_argument(\"--max_text_len\", required=True, type=int, help='Max character length of the sentences')\n args = parser.parse_args()\n \n create_sample(args.sample_size, args.max_text_len)\n \n log('Training tokenizers...')\n \n yttm.BPE.train(data=TRG_TXT_FILE_PATH, vocab_size=TRG_VOCAB_SIZE, model=TRG_TOKENIZER_PATH)\n 
yttm.BPE.train(data=SRC_TXT_FILE_PATH, vocab_size=SRC_VOCAB_SIZE, model=SRC_TOKENIZER_PATH)\n \n log(\"Done\")\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.arange"
]
] |
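After `main()` has trained the two BPE models, they can be loaded for encoding/decoding with youtokentome roughly as below; the model path here is a placeholder, not the actual `SRC_TOKENIZER_PATH` value from `config`.

```python
import youtokentome as yttm

bpe = yttm.BPE(model='path/to/src_bpe.model')        # placeholder path
ids = bpe.encode(['how are you?'], output_type=yttm.OutputType.ID)
print(ids)                 # token ids depend on the trained vocabulary
print(bpe.decode(ids))     # should round-trip back to the input (up to unknown symbols)
```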
anmolmore/Chatbot-for-COVID-19-FAQ-using-Dialogflow | [
"f80670e9ee67e18c790da85d49e9c9617753c6f8"
] | [
"Model_codebase_2_flask.py"
] | [
"#11915010\tRaghu Punnamraju\n#11915043\tAnmol More\n#11915001\tSriganesh Balamurugan\n#11915052\tKapil Bindal\n\nimport pandas as pd\nfrom ast import literal_eval\n\nfrom cdqa.utils.filters import filter_paragraphs\nfrom cdqa.utils.download import download_model, download_bnpp_data\nfrom cdqa.pipeline.cdqa_sklearn import QAPipeline\n\n#read the cleaned dataset and just take question and context for our model\ndf = pd.read_csv('data/dataset_collected.csv', usecols=['question', 'context'])\n\n#convert paragraphs to a list\ndf['paragraphs'] = df[df.columns[1:]].apply(\n lambda x: x.dropna().values.tolist(),\n axis=1)\n\ndf.rename(columns={\"question\": \"title\"}, inplace=True)\ndf.drop(columns='context', inplace=True)\ndf.to_csv('df_corona.csv', index=False)\n\n#use a lighter pipleline model to build pipeline on top of it\ncdqa_pipeline = QAPipeline(reader='models/distilbert_qa.joblib')\ncdqa_pipeline.fit_retriever(df=df)\n\nprint('Welcome to Corona Chatbot ! How can I help you ? ')\nprint('Press enter twice to quit')\n\nwhile True:\n\tquery = input()\n\tprediction = cdqa_pipeline.predict(query=query)\n\tprint('Query : {}\\n'.format(query))\n\tprint('Reply from Bot: {}\\n'.format(prediction[0]))"
] | [
[
"pandas.read_csv"
]
] |
hellopikaqiu/AIchallenger_MachineReadingComprehension | [
"03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42",
"03c8d4ab60f6ac9c7f777fd2c932cc01300b5c42"
] | [
"best_single_model/focal_loss.py",
"baseline/config.py"
] | [
"\"\"\"\nAI Challenger观点型问题阅读理解\n\nfocal_loss.py\n\n@author: yuhaitao\n\"\"\"\n# -*- coding:utf-8 -*-\nimport tensorflow as tf\n\n\ndef sparse_focal_loss(logits, labels, gamma=2):\n \"\"\"\n Computer focal loss for multi classification\n Args:\n labels: A int32 tensor of shape [batch_size].\n logits: A float32 tensor of shape [batch_size,num_classes].\n gamma: A scalar for focal loss gamma hyper-parameter.\n Returns:\n A tensor of the same shape as `lables`\n \"\"\"\n with tf.name_scope(\"focal_loss\"):\n y_pred = tf.nn.softmax(logits, dim=-1) # [batch_size,num_classes]\n labels = tf.one_hot(labels, depth=y_pred.shape[1])\n L = -labels * ((1 - y_pred)**gamma) * tf.log(y_pred)\n L = tf.reduce_sum(L, axis=1)\n return L\n\n'''\nif __name__ == '__main__':\n labels = tf.constant([0, 1], name=\"labels\")\n logits = tf.constant([[0.7, 0.2, 0.1], [0.6, 0.1, 0.3]], name=\"logits\")\n a = tf.reduce_mean(sparse_focal_loss(logits, tf.stop_gradient(labels)))\n with tf.Session() as sess:\n print(sess.run(a))'''\n",
"\"\"\"\nAI Challenger观点型问题阅读理解\n\nconfig.py:配置文件,程序运行入口\n\n@author: yuhaitao\n\"\"\"\n# -*- coding:utf-8 -*-\nimport os\nimport tensorflow as tf\n\nimport data_process\nfrom main import train, test, dev\nfrom file_save import *\nfrom examine_dev import examine_dev\n\nflags = tf.flags\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\ntrain_file = os.path.join(\"file\", \"ai_challenger_oqmrc_trainingset.json\")\ndev_file = os.path.join(\"file\", \"ai_challenger_oqmrc_validationset.json\")\ntest_file = os.path.join(\"file\", \"ai_challenger_oqmrc_testa.json\")\n'''\ntrain_file = os.path.join(\"file\", \"train_demo.json\")\ndev_file = os.path.join(\"file\", \"val_demo.json\")\ntest_file = os.path.join(\"file\", \"test_demo.json\")'''\n\ntarget_dir = \"data\"\nlog_dir = \"log/event\"\nsave_dir = \"log/model\"\nprediction_dir = \"log/prediction\"\ntrain_record_file = os.path.join(target_dir, \"train.tfrecords\")\ndev_record_file = os.path.join(target_dir, \"dev.tfrecords\")\ntest_record_file = os.path.join(target_dir, \"test.tfrecords\")\nid2vec_file = os.path.join(target_dir, \"id2vec.json\") # id号->向量\nword2id_file = os.path.join(target_dir, \"word2id.json\") # 词->id号\ntrain_eval = os.path.join(target_dir, \"train_eval.json\")\ndev_eval = os.path.join(target_dir, \"dev_eval.json\")\ntest_eval = os.path.join(target_dir, \"test_eval.json\")\n\nif not os.path.exists(target_dir):\n os.makedirs(target_dir)\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\nif not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir)\n\nflags.DEFINE_string(\"mode\", \"train\", \"train/debug/test\")\nflags.DEFINE_string(\"gpu\", \"0\", \"0/1\")\nflags.DEFINE_string(\"experiment\", \"lalala\", \"每次存不同模型分不同的文件夹\")\nflags.DEFINE_string(\"model_name\", \"default\", \"选取不同的模型\")\n\nflags.DEFINE_string(\"target_dir\", target_dir, \"\")\nflags.DEFINE_string(\"log_dir\", log_dir, \"\")\nflags.DEFINE_string(\"save_dir\", save_dir, \"\")\nflags.DEFINE_string(\"prediction_dir\", prediction_dir, \"\")\nflags.DEFINE_string(\"train_file\", train_file, \"\")\nflags.DEFINE_string(\"dev_file\", dev_file, \"\")\nflags.DEFINE_string(\"test_file\", test_file, \"\")\n\nflags.DEFINE_string(\"train_record_file\", train_record_file, \"\")\nflags.DEFINE_string(\"dev_record_file\", dev_record_file, \"\")\nflags.DEFINE_string(\"test_record_file\", test_record_file, \"\")\nflags.DEFINE_string(\"train_eval_file\", train_eval, \"\")\nflags.DEFINE_string(\"dev_eval_file\", dev_eval, \"\")\nflags.DEFINE_string(\"test_eval_file\", test_eval, \"\")\nflags.DEFINE_string(\"word2id_file\", word2id_file, \"\")\nflags.DEFINE_string(\"id2vec_file\", id2vec_file, \"\")\n\nflags.DEFINE_integer(\"para_limit\", 150, \"Limit length for paragraph\")\nflags.DEFINE_integer(\"ques_limit\", 30, \"Limit length for question\")\nflags.DEFINE_integer(\"min_count\", 1, \"embedding 的最小出现次数\")\nflags.DEFINE_integer(\"embedding_size\", 300, \"the dimension of vector\")\n\nflags.DEFINE_integer(\"capacity\", 15000, \"Batch size of dataset shuffle\")\nflags.DEFINE_integer(\"num_threads\", 4, \"Number of threads in input pipeline\")\n# 使用cudnn训练,提升6倍速度\nflags.DEFINE_boolean(\"use_cudnn\", True, \"Whether to use cudnn (only for GPU)\")\nflags.DEFINE_boolean(\"is_bucket\", False, \"Whether to use bucketing\")\n\nflags.DEFINE_integer(\"batch_size\", 64, \"Batch size\")\nflags.DEFINE_integer(\"num_steps\", 250000, \"Number of steps\")\nflags.DEFINE_integer(\"checkpoint\", 1000, \"checkpoint for 
evaluation\")\nflags.DEFINE_integer(\"period\", 500, \"period to save batch loss\")\nflags.DEFINE_integer(\"val_num_batches\", 150, \"Num of batches for evaluation\")\nflags.DEFINE_float(\"init_learning_rate\", 0.001,\n \"Initial learning rate for Adam\")\nflags.DEFINE_float(\"init_emb_lr\", 0., \"\")\nflags.DEFINE_float(\"keep_prob\", 0.7, \"Keep prob in rnn\")\nflags.DEFINE_float(\"grad_clip\", 5.0, \"Global Norm gradient clipping rate\")\nflags.DEFINE_integer(\"hidden\", 60, \"Hidden size\") # best:128\nflags.DEFINE_integer(\"patience\", 5, \"Patience for learning rate decay\")\nflags.DEFINE_string(\"optimizer\", \"Adam\", \"\")\nflags.DEFINE_string(\"loss_function\", \"default\", \"\")\nflags.DEFINE_boolean(\"use_dropout\", True, \"\")\n\n\ndef main(_):\n config = flags.FLAGS\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu # 选择一块gpu\n if config.mode == \"train\":\n train(config)\n elif config.mode == \"prepro\":\n data_process.prepro(config)\n elif config.mode == \"debug\":\n config.num_steps = 2\n config.val_num_batches = 1\n config.checkpoint = 1\n config.period = 1\n train(config)\n elif config.mode == \"test\":\n test(config)\n elif config.mode == \"examine\":\n examine_dev(config)\n elif config.mode == \"save_dev\":\n save_dev(config)\n elif config.mode == \"save_test\":\n save_test(config)\n else:\n print(\"Unknown mode\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.name_scope",
"tensorflow.one_hot",
"tensorflow.log",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum"
],
[
"tensorflow.app.run"
]
] |
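A NumPy re-statement of the formula in `sparse_focal_loss` (L = -sum_c y_c * (1 - p_c)^gamma * log(p_c), with p = softmax(logits)) makes the gamma behaviour easy to check: with gamma = 0 it reduces to plain cross-entropy, while larger gamma down-weights well-classified examples. This is only a numerical illustration, not a replacement for the TensorFlow op.

```python
import numpy as np

def sparse_focal_loss_np(logits, labels, gamma=2):
    p = np.exp(logits - logits.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)                      # softmax
    onehot = np.eye(p.shape[1])[labels]
    return (-onehot * (1.0 - p) ** gamma * np.log(p)).sum(axis=1)

logits = np.array([[0.7, 0.2, 0.1], [0.6, 0.1, 0.3]])      # same toy values as the commented-out test
labels = np.array([0, 1])
print(sparse_focal_loss_np(logits, labels, gamma=0))       # plain cross-entropy per example
print(sparse_focal_loss_np(logits, labels, gamma=2))       # confident predictions contribute less
```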
muell-monster/google-research | [
"294a888bbb6678ac255c6422fd703c325cbb0772"
] | [
"flax_models/t5x/train.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script pre-trains or fine-tunes a Transformer using the T5 data pipeline.\"\"\"\nfrom concurrent.futures import thread\nimport functools\nimport importlib\nimport os\nfrom typing import Any, Mapping, Sequence, Tuple\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\n# Set Linen to add profiling information when constructing Modules.\n# Must be set before flax imports.\n# pylint:disable=g-import-not-at-top\nos.environ['FLAX_PROFILE'] = 'true'\nfrom flax import linen as nn\nfrom flax import optim\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nimport jax\nfrom jax import lax\nfrom jax import random\nfrom jax.interpreters.sharded_jit import sharded_jit\nimport jax.numpy as jnp\nimport ml_collections\nfrom ml_collections import config_flags\nimport numpy as np\nimport t5\nfrom t5x import checkpoint_importer\nfrom t5x import input_pipeline\nfrom t5x import models\nfrom t5x import partitions\nfrom t5x import train_lib\nimport tensorflow as tf\n\n# pylint:disable=g-long-lambda\n\n\nFLAGS = flags.FLAGS\nCFG = None\nPyTreeDef = type(jax.tree_structure(None))\nTransformerConfig = models.TransformerConfig\njax.config.parse_flags_with_absl()\n\nflags.DEFINE_string(\n 'model_dir', default=None, help='Directory to store model data.')\n\nflags.DEFINE_string(\n 'data_dir', default=None, help='Tensorflow datasets directory.')\n\nconfig_flags.DEFINE_config_file(\n name='config',\n default='configs/t5_small_glue.py',\n help_string='training config file.')\n\nConfigDict = ml_collections.ConfigDict\n\n\ndef get_configs(\n config\n):\n \"\"\"Get train, eval, and predict model configs.\n\n Args:\n config: The config dict for the experiment.\n\n Returns:\n A triple (train_config, eval_config, predict_config).\n \"\"\"\n train_config = TransformerConfig(\n vocab_size=config.vocab_size,\n output_vocab_size=config.vocab_size,\n share_embeddings=config.share_embeddings,\n logits_via_embedding=config.logits_via_embedding,\n dtype=jnp.bfloat16 if config.use_bfloat16 else jnp.float32,\n emb_dim=config.emb_dim,\n num_heads=config.num_heads,\n num_layers=config.num_layers,\n qkv_dim=config.qkv_dim,\n mlp_dim=config.mlp_dim,\n mlp_activations=config.mlp_activations,\n position_embeddings='relative',\n relative_attention_num_buckets=config.relative_attention_num_buckets,\n relative_attention_max_distance=config.relative_attention_max_distance,\n max_len=max(config.max_input_length, config.max_target_length,\n config.max_eval_input_length, config.max_eval_target_length),\n dropout_rate=config.dropout_rate,\n attention_dropout_rate=config.attention_dropout_rate,\n deterministic=False,\n decode=False,\n kernel_init=nn.initializers.xavier_uniform(),\n bias_init=nn.initializers.normal(stddev=1e-6))\n eval_config = train_config.replace(deterministic=True) # pytype: disable=attribute-error\n predict_config = 
train_config.replace( # pytype: disable=attribute-error\n deterministic=True,\n decode=True,\n max_decode_len=config.max_eval_target_length)\n\n return (train_config, eval_config, predict_config)\n\n\ndef get_initial_params(rng, config,\n transformer_config,\n optimizer_def):\n \"\"\"Get the initial parameter tree.\"\"\"\n input_shape = (config.batch_size, CFG.max_input_length)\n target_shape = (config.batch_size, CFG.max_target_length)\n initial_variables = models.Transformer(transformer_config).init(\n rng, jnp.ones(input_shape, jnp.float32),\n jnp.ones(target_shape, jnp.float32))\n # apply an optimizer to the parameters\n return optimizer_def.create(initial_variables['params'])\n\n\ndef main(argv):\n global CFG\n CFG = FLAGS.config\n\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Guarantee that the JAX bfloat16 extension is used rather than TF bfloat16.\n _ = np.array(jnp.array([1.0], dtype=jnp.bfloat16))\n\n # Use hardware RNG for bernoulli randoms in dropout mask creation.\n if CFG.hardware_rng:\n models.set_hardware_bernoulli()\n\n if 'module_import' in CFG and CFG.module_import:\n for module in CFG.module_import:\n importlib.import_module(module)\n\n if 'additional_task_cache_dirs' in CFG and CFG.additional_task_cache_dirs:\n t5.data.add_global_cache_dirs(CFG.additional_task_cache_dirs)\n\n num_partitions = CFG.num_partitions\n topology = train_lib.compute_multihost_topology(num_partitions)\n batch_size = CFG.batch_size\n eval_batch_size = CFG.eval_batch_size\n per_replica_set_eval_batch_size = eval_batch_size // topology.num_replica_sets\n if batch_size % topology.num_replicas:\n raise ValueError('Batch size must be divisible by the number of replicas.')\n\n steps_per_epoch = CFG.steps_per_epoch\n logging.info('steps per epoch: %d', steps_per_epoch)\n\n broadcast = functools.partial(\n train_lib.broadcast,\n num_replicas=topology.per_replica_set_num_replicas,\n num_partitions=topology.per_host_num_partitions,\n devices=topology.this_host_device_assignment)\n\n if jax.host_id() == 0:\n tf.io.gfile.makedirs(FLAGS.model_dir)\n tf.io.gfile.copy(FLAGS['config'].config_filename,\n os.path.join(FLAGS.model_dir, 'config.py'),\n overwrite=True)\n train_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'train'))\n eval_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'eval'))\n else:\n train_summary_writer = None\n eval_summary_writer = None\n\n # Write summaries in background thread to avoid blocking on device sync\n if CFG.infeed:\n # Infeed is currently synchronous, so do it in a background thread too\n infeed_pool = thread.ThreadPoolExecutor(jax.local_device_count(), 'infeed')\n\n (train_ds, eval_ds), eval_cache = input_pipeline.get_datasets_and_cache(\n CFG, topology.num_replica_sets, topology.replica_set_id,\n topology.per_replica_set_host_id)\n\n vocab = input_pipeline.get_vocabulary(CFG.mixture_or_task_name)\n encoder = vocab.tf_tokenizer\n eos_id = vocab.tokenizer.eos_id()\n\n def decode_tokens(toks,\n eos_id = eos_id,\n max_id = 32000):\n \"\"\"Decode tokens back to unicode.\"\"\"\n del eos_id\n # TODO(levskaya): T5 doesn't seem to emit EOS tokens? 
double check this\n # is the best decoding function or just switch to using tf_decode.\n # valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)\n valid_toks = toks.astype(np.int32)\n valid_toks[valid_toks >= max_id] = 3\n return encoder.detokenize(valid_toks).numpy().decode('utf-8')\n\n logging.info('Initializing model, optimizer, and step functions.')\n\n train_config, eval_config, predict_config = get_configs(CFG)\n\n rng = random.PRNGKey(CFG.random_seed)\n rng, init_rng = random.split(rng)\n # This is used for infeed conversion from feature dict <--> tuple\n train_keys = [\n 'inputs', 'targets', 'inputs_position', 'targets_position',\n 'inputs_segmentation', 'targets_segmentation'\n ]\n device_train_input_shape = tuple([\n (batch_size // topology.num_replicas,\n CFG.max_input_length if 'inputs' in k else CFG.max_target_length)\n for k in train_keys\n ])\n\n learning_rate_fn = train_lib.create_learning_rate_scheduler(\n factors=CFG.schedule,\n base_learning_rate=CFG.learning_rate,\n warmup_steps=CFG.warmup_steps)\n\n # First, we only abstractly initialize the optimizer and model parameters,\n # since the parameters may not even fit in device memory!\n # TODO(jekbradbury): make optimizer_defs compare by value so it can be created\n # in get_initial_params without causing pytree incompatibility\n optimizer_def = optim.Adafactor(\n CFG.learning_rate, decay_rate=0.8, step_offset=CFG.step_offset)\n initialize_params_fn = functools.partial(\n get_initial_params,\n config=CFG,\n transformer_config=eval_config,\n optimizer_def=optimizer_def)\n optimizer = jax.eval_shape(initialize_params_fn, init_rng)\n # tuple-like pytree leaves for global_arg_shapes\n optimizer_shapes = jax.tree_map(lambda x: partitions.Spec(*x.shape),\n optimizer)\n\n # Build parameter partition annotations for preserving partitions from train\n # to eval.\n if num_partitions > 1:\n optimizer_partitions = optimizer.restore_state(\n partitions.set_partitions(num_partitions, optimizer.state_dict()))\n per_host_optimizer_partitions = optimizer.restore_state(\n partitions.set_partitions(topology.per_host_num_partitions,\n optimizer.state_dict()))\n\n # Restore unreplicated optimizer + model state from last checkpoint.\n # TODO(jekbradbury,levskaya): implement sharded native checkpoint/restore\n existing_checkpoint_found = False\n if CFG.restore_checkpoints:\n existing_checkpoint_found = train_lib.checkpoint_exists(FLAGS.model_dir)\n optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)\n\n # Import a pretrained-T5 checkpoint only if we didn't import a local\n # \"native\" checkpoint (e.g. 
due to resuming a pre-empted finetuning run.)\n # TODO(jekbradbury,levskaya): implement sharded T5 checkpoint/restore\n if CFG.restore_t5_checkpoint and not existing_checkpoint_found:\n optimizer = checkpoint_importer.restore_from_t5_checkpoint(\n optimizer, CFG.restore_t5_checkpoint)\n\n if CFG.restore_t5_checkpoint or existing_checkpoint_found:\n if num_partitions > 1:\n # Until checkpoint/restore is sharded, the restored checkpoint is global\n # and we need to slice each sharded parameter into the chunk containing\n # only the partitions that are present on this host.\n def per_host_chunk(x, spec):\n if spec is None or spec is x: # unsharded or not a parameter\n return x\n if spec[0] == 1:\n dim_size = x.shape[1]\n elif spec[1] == 1:\n dim_size = x.shape[0]\n else:\n raise NotImplementedError()\n chunk_size = (\n dim_size * topology.per_host_num_partitions // num_partitions)\n lower = topology.per_replica_set_host_id * chunk_size\n upper = (topology.per_replica_set_host_id + 1) * chunk_size\n if spec[0] == 1:\n return x[:, lower:upper]\n else:\n return x[lower:upper]\n\n optimizer = jax.tree_multimap(per_host_chunk, optimizer,\n optimizer_partitions)\n else:\n # If pretraining and no checkpoint imported, we jit the (sharded-) init\n # function to minimize fragmentation. We use the same pmap(sharded_jit)\n # setup as the training step/loop to initialize everything \"in-place\" and\n # avoid communication or OOM.\n if num_partitions > 1:\n initialize_params_fn = sharded_jit(\n initialize_params_fn,\n in_parts=None,\n local_in_parts=None,\n out_parts=optimizer_partitions,\n local_out_parts=per_host_optimizer_partitions,\n # devices=one_replica_device_assignment,\n )\n initialize_params_fn = jax.pmap(\n initialize_params_fn,\n 'batch',\n in_axes=0,\n axis_size=topology.num_replicas,\n devices=topology.device_assignment)\n init_rng = broadcast(init_rng)\n optimizer = initialize_params_fn(init_rng)\n # We maintain the optimizer in unbroadcasted form (i.e. with no leading\n # replica axis). 
This is equivalent to the as-yet-nonexistent pmap kwarg\n # out_axes=None.\n optimizer = train_lib.unbroadcast(optimizer)\n else:\n optimizer = jax.jit(initialize_params_fn)(init_rng)\n\n # ---------------------------------------------------------------------------\n # Compile multidevice versions of train/eval/predict step and cache init fn.\n # ---------------------------------------------------------------------------\n\n # We can use either a single train-step for a host training loop:\n\n # train_step(optimizer, batch, prev_metrics, dropout_rng, **kwargs)\n # --> new_optimizer, metrics, new_dropout_rng\n def p_train_step(optimizer, batch,\n prev_metrics,\n dropout_rng):\n return train_lib.train_step(\n optimizer,\n batch,\n prev_metrics,\n dropout_rng,\n config=train_config,\n learning_rate_fn=learning_rate_fn,\n num_microbatches=CFG.microbatches,\n label_smoothing=CFG.label_smoothing,\n z_loss=CFG.z_loss,\n use_bfloat16=CFG.use_bfloat16)\n\n if num_partitions > 1:\n p_train_step = sharded_jit(\n p_train_step,\n in_parts=(optimizer_partitions, None, None, None),\n local_in_parts=(per_host_optimizer_partitions, None, None, None),\n out_parts=(optimizer_partitions, None, None),\n local_out_parts=(per_host_optimizer_partitions, None, None))\n # TODO(levskaya): the in_axes spec below might be wrong, double-check.\n p_train_step = jax.pmap(\n p_train_step,\n axis_name='batch',\n in_axes=(None, 0, 0, 0),\n donate_argnums=(0,),\n global_arg_shapes=(optimizer_shapes, None, None, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # OR, we use an on-device loop that feeds the training step via infeed queue.\n def device_train_loop_cond(\n args\n ):\n \"\"\"Stopping criterion for on-device loop.\"\"\"\n _, _, _, _, step, epoch = args\n return step // steps_per_epoch == epoch\n\n def device_train_loop_body(\n args\n ):\n \"\"\"On-device loop body.\"\"\"\n optimizer, dropout_rngs, metrics, token, step, epoch = args\n # Ordering input data from infeed requires threading a symbolic token\n # through the computation.\n input_data, token = lax.infeed(\n token,\n shape=tuple(\n [jax.ShapedArray(s, jnp.int32) for s in device_train_input_shape]))\n # Rebuild input dict from infeed data tuple.\n batch = {k: v for k, v in zip(train_keys, input_data)}\n # Run the train_step function and return the loop state.\n optimizer, metrics, dropout_rngs = train_lib.train_step(\n optimizer,\n batch,\n metrics,\n dropout_rngs,\n train_config,\n learning_rate_fn,\n num_microbatches=CFG.microbatches,\n label_smoothing=CFG.label_smoothing,\n z_loss=CFG.z_loss)\n step += 1\n return optimizer, dropout_rngs, metrics, token, step, epoch\n\n def device_train_loop(optimizer, dropout_rngs,\n metrics, step,\n epoch):\n # Create symbolic token for threading infeed data.\n token = lax.create_token(step)\n # Run on-device loop.\n optimizer, dropout_rngs, metrics, _, step, _ = lax.while_loop(\n device_train_loop_cond, device_train_loop_body,\n (optimizer, dropout_rngs, metrics, token, step, epoch))\n return optimizer, dropout_rngs, metrics, step\n\n if num_partitions > 1:\n device_train_loop = sharded_jit(\n device_train_loop,\n in_parts=(optimizer_partitions, None, None, None, None),\n local_in_parts=(per_host_optimizer_partitions, None, None, None, None),\n out_parts=(optimizer_partitions, None, None, None),\n local_out_parts=(per_host_optimizer_partitions, None, None, None))\n p_train_epoch = jax.pmap(\n device_train_loop,\n axis_name='batch',\n in_axes=(None, 0, 
0, None, None),\n donate_argnums=(0,),\n global_arg_shapes=(optimizer_shapes, None, None, None, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # Reduction psum for metric data.\n\n def p_allreduce_metrics(x):\n return lax.psum(x, axis_name='batch')\n\n if num_partitions > 1:\n p_allreduce_metrics = sharded_jit(\n p_allreduce_metrics,\n in_parts=None,\n local_in_parts=None,\n out_parts=None,\n local_out_parts=None,\n num_partitions=num_partitions,\n local_num_partitions=topology.per_host_num_partitions)\n p_allreduce_metrics = jax.pmap(\n p_allreduce_metrics,\n axis_name='batch',\n global_arg_shapes=None,\n axis_size=topology.num_replicas,\n devices=topology.device_assignment)\n\n # Training evaluation computation.\n\n # eval_step(params, batch, config, label_smoothing=0.0) --> metrics\n def p_eval_step(params, batch):\n return train_lib.eval_step(\n params, batch, config=eval_config, label_smoothing=CFG.label_smoothing)\n\n if num_partitions > 1:\n p_eval_step = sharded_jit(\n p_eval_step,\n in_parts=(optimizer_partitions.target, None),\n local_in_parts=(per_host_optimizer_partitions.target, None),\n out_parts=None,\n local_out_parts=None)\n p_eval_step = jax.pmap(\n p_eval_step,\n axis_name='batch',\n in_axes=(None, 0),\n global_arg_shapes=(optimizer_shapes.target, None),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # Fast autoregressive decoding loop.\n # For inference and model evaluation.\n\n # predict_step(inputs, params,\n # eos_id, max_decode_len, config, beam_size=4) --> beam_seqs\n def p_pred_step(inputs, params):\n return train_lib.predict_step(inputs, params, eos_id,\n CFG.max_eval_target_length, predict_config,\n CFG.beam_size)\n\n if num_partitions > 1:\n p_pred_step = sharded_jit(\n p_pred_step,\n in_parts=(None, optimizer_partitions.target),\n local_in_parts=(None, per_host_optimizer_partitions.target),\n out_parts=None,\n local_out_parts=None)\n p_pred_step = jax.pmap(\n p_pred_step,\n axis_name='batch',\n in_axes=(0, None),\n global_arg_shapes=(None, optimizer_shapes.target),\n axis_size=topology.num_replicas,\n devices=topology.device_assignment) # pytype: disable=wrong-arg-types\n\n # ---------------------------------------------------------------------------\n # Main Train Loop\n # ---------------------------------------------------------------------------\n\n # We init the first set of dropout PRNG keys, but update it afterwards inside\n # the main pmap'd training update for performance.\n # There should be a unique dropout key for each replica represented on this\n # host, but the key should be the same for the same replica on other hosts.\n # Again, this is what the replica set abstraction is for.\n dropout_rngs = random.split(\n random.fold_in(rng, topology.replica_set_id),\n topology.per_replica_set_num_replicas)\n # restore step from last checkpoint\n host_step = int(optimizer.state.step)\n empty_metrics = broadcast({\n 'loss': 0.0,\n 'accuracy': 0.0,\n 'learning_rate': 0.0,\n 'denominator': 0.0\n })\n if CFG.infeed:\n # TODO(jekbradbury): support something like this for the Python-loop case\n logging.info('Precompiling training loop and moving optimizer to device.')\n optimizer, _, metrics, _ = p_train_epoch(optimizer, dropout_rngs,\n empty_metrics,\n jnp.array(0, dtype=jnp.int32), 1)\n optimizer = train_lib.unbroadcast(optimizer)\n metrics['loss'].block_until_ready()\n\n logging.info('Starting training loop.')\n\n local_devices 
= jax.local_devices()\n device_step = broadcast(host_step)\n first_epoch = host_step // steps_per_epoch\n\n # Main Loop over \"epochs\".\n train_iter = train_ds.as_numpy_iterator()\n for epoch in range(first_epoch, first_epoch + CFG.num_epochs):\n metrics = empty_metrics\n\n # NOTE: 'optimizer' is unbroadcast by construction at initialization or\n # when loading a checkpoint. It is maintained in 'unbroadcast' state to\n # enable the XLA cross-replica sharding optimization. The broadcasting is\n # handled automatically by the pmap'd functions that use it.\n\n # Gather all task evaluation metrics.\n logging.info('Evaluating tasks.')\n if epoch == first_epoch + 1:\n train_lib.sync_devices()\n for task in eval_cache.tasks:\n logging.info('Evaluating task %s', task.name)\n all_predicted, all_bs = [], []\n for pred_batch in eval_cache.preprocessed_examples[task.name]:\n # Handle final odd-sized batch by padding instead of dropping it.\n input_batch, unpadded_batch_size = train_lib.pad_batch_to_size(\n pred_batch['inputs'], per_replica_set_eval_batch_size)\n all_bs.append(unpadded_batch_size)\n # Split batch dimensions for pmap.\n input_batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]),\n input_batch)\n # Run fast inference on batch.\n all_predicted.append(p_pred_step(input_batch, optimizer.target))\n\n # Pad out the number of batches so each host has the same number.\n max_host_batch_number = np.max(\n eval_cache.preprocessed_batch_sizes[task.name])\n batch_shortfall = max_host_batch_number - len(all_predicted)\n if batch_shortfall > 0:\n # TODO(levskaya): Fix for case of entirely empty all_predicted.\n # To make sure the cross-host barriers work, we run the program the same\n # number of times on all hosts. The results of this call is ignored, and\n # the predictions are populated with zeros instead.\n p_pred_step(input_batch, optimizer.target) # Dummy call.\n all_predicted.extend([jnp.zeros_like(all_predicted[0])] *\n batch_shortfall)\n all_bs.extend([0] * batch_shortfall)\n all_predicted = jnp.concatenate(all_predicted)\n all_bs = jnp.array(all_bs)\n\n # Collect all batches from across hosts and reverse sharding.\n all_predicted = train_lib.host_allgather(\n all_predicted, topology.num_replica_sets, topology.replica_set_id,\n topology.per_replica_set_host_id == 0)\n seqlength = all_predicted.shape[-1]\n total_examples = np.sum(\n train_lib.host_allgather(all_bs, topology.num_replica_sets,\n topology.replica_set_id,\n topology.per_replica_set_host_id == 0))\n del all_bs\n assert total_examples == len(eval_cache.examples[task.name]), (\n 'Total number of batches incorrect for task %s.' 
% task.name)\n # De-shard the collected predicted tokens and remove padding.\n all_predicted = np.transpose(all_predicted, (1, 2, 0, 3)).reshape(\n -1, seqlength)[:total_examples]\n\n # We now run the post-processing and metric-fns on a single host.\n if jax.host_id() == 0:\n assert eval_summary_writer\n raw_predictions = []\n for tokens in all_predicted:\n raw_predictions.append(decode_tokens(tokens))\n\n # post-process predictions for metric fns\n predictions = [\n task.postprocess_fn(p, example=ex)\n for p, ex in zip(raw_predictions, eval_cache.examples[task.name])\n ]\n\n for metric_fn in task.metric_fns:\n scores = metric_fn(eval_cache.targets[task.name], predictions)\n for metric_name, metric_value in scores.items():\n tag = f'eval/{task.name}/{metric_name}'\n eval_summary_writer.scalar(tag, metric_value, host_step)\n logging.info('EVAL %s at step %d: %.3f', tag, host_step,\n metric_value)\n eval_summary_writer.flush()\n\n # Save text samples for tensorboard.\n exemplars = ''\n for n in np.random.choice(np.arange(len(predictions)), 8):\n tgt_txt = tf.compat.as_text(\n eval_cache.examples[task.name][n]['targets_plaintext'])\n pred_txt = raw_predictions[n]\n exemplars += (f'{eval_cache.inputs[task.name][n]}\\n\\n'\n f'target: {tgt_txt}\\n\\n'\n f'prediction: {pred_txt}\\n\\n')\n eval_summary_writer.text(f'{task.name} samples', exemplars, host_step)\n eval_summary_writer.flush()\n\n # Take an Xprof trace after the first loop has compiled everything.\n if epoch == first_epoch + 1:\n train_lib.sync_devices()\n\n # For on-device loop, we launch the computation before feeding data.\n logging.info('BEGIN Train loop.')\n if CFG.infeed:\n optimizer, dropout_rngs, metrics, device_step = p_train_epoch(\n optimizer, dropout_rngs, metrics, train_lib.unbroadcast(device_step),\n epoch)\n optimizer = train_lib.unbroadcast(optimizer)\n\n # Epoch loop.\n while int(host_step // steps_per_epoch) == epoch:\n batch = next(train_iter)\n batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]), batch)\n # Feed the on-device training loop.\n if CFG.infeed:\n for i, device in enumerate(local_devices):\n # When using infeed to provide data to the computation, we're on our\n # own for feeding the right values to the right devices. 
Each device\n # should get the minibatch corresponding to its replica, a slice of\n # the larger batch corresponding to the host's replica set.\n if device.platform == 'tpu':\n device_coords = (*device.coords, device.id % 2)\n else:\n device_coords = (device.host_id, i)\n per_replica_set_device_coords = tuple(\n dc % prsm\n for dc, prsm in zip(device_coords, topology.per_replica_set_mesh))\n per_replica_set_replica_coords = tuple(\n prsdc // prm for prsdc, prm in zip(per_replica_set_device_coords,\n topology.per_replica_mesh))\n per_replica_set_replica_id = 0\n for prsm, prm, prsrc in zip(topology.per_replica_set_mesh,\n topology.per_replica_mesh,\n per_replica_set_replica_coords):\n per_replica_set_replica_id = (\n per_replica_set_replica_id * prsm // prm + prsrc)\n input_tuple = tuple(\n [batch[k][per_replica_set_replica_id] for k in train_keys])\n # Safety check: infeed does not check shape or types but requires\n # them to agree with on-device spec, otherwise the queue and program\n # stalls.\n tuple_shapes = jax.tree_map(jnp.shape, input_tuple)\n tuple_dtypes = jax.tree_map(lambda x: x.dtype, input_tuple)\n assert tuple_shapes == device_train_input_shape, (\n 'infeed shape error %s != %s' %\n (tuple_shapes, device_train_input_shape))\n assert tuple(set(tuple_dtypes)) == (jnp.int32,), \\\n ('infeed dtype error %s not all of type %s' % (\n tuple_dtypes, jnp.int32))\n infeed_pool.submit(\n functools.partial(device.transfer_to_infeed, input_tuple))\n # Host training loop.\n else:\n optimizer, metrics, dropout_rngs = p_train_step(optimizer, batch,\n metrics, dropout_rngs)\n optimizer = train_lib.unbroadcast(optimizer)\n host_step += 1\n logging.info('END Train loop.')\n\n # Maybe save a checkpoint on one host.\n if (CFG.save_checkpoints and\n epoch % CFG.checkpoint_freq == CFG.checkpoint_freq - 1 and\n jax.host_id() == 0):\n checkpoints.save_checkpoint(FLAGS.model_dir, optimizer, host_step)\n\n # Gather training metrics.\n metrics = p_allreduce_metrics(metrics)\n metrics = jax.tree_map(lambda x: jax.device_get(x[0]), metrics)\n denominator = metrics.pop('denominator')\n summary = jax.tree_map(lambda x: x / denominator, metrics) # pylint: disable=cell-var-from-loop\n logging.info('train in step: %s, %s', host_step, summary)\n if jax.host_id() == 0:\n assert train_summary_writer\n for key, val in summary.items():\n train_summary_writer.scalar(key, val, host_step)\n train_summary_writer.flush()\n\n # Gather training evaluation metrics.\n logging.info('Gathering training evaluation metrics.')\n eval_metrics = []\n eval_iter = eval_ds.as_numpy_iterator()\n for _, eval_batch in zip(range(CFG.num_eval_steps), eval_iter):\n eval_batch = jax.tree_map(\n lambda x: x.reshape(\n (topology.per_replica_set_num_replicas, -1) + x.shape[1:]),\n eval_batch)\n metrics = p_eval_step(optimizer.target, eval_batch)\n eval_metrics.append(metrics)\n # average metrics across devices\n eval_metrics = p_allreduce_metrics(eval_metrics)\n eval_metrics = common_utils.get_metrics(eval_metrics)\n # average metrics across steps\n eval_metrics = jax.tree_map(np.sum, eval_metrics)\n eval_denominator = eval_metrics.pop('denominator')\n eval_summary = jax.tree_map(\n lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop\n eval_metrics)\n logging.info('eval in step: %s, %s', host_step, eval_summary)\n if jax.host_id() == 0:\n assert eval_summary_writer\n for key, val in eval_summary.items():\n eval_summary_writer.scalar(key, val, host_step)\n eval_summary_writer.flush()\n\n # Wait until computations are done 
before exiting\n logging.info('Finished.')\n train_lib.sync_devices()\n # Shut down the infeed threadpool.\n if CFG.infeed:\n infeed_pool.shutdown()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.as_text",
"numpy.max",
"tensorflow.io.gfile.makedirs",
"numpy.transpose"
]
] |
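The metric aggregation in the training script above (`p_allreduce_metrics`) is an ordinary `pmap`/`psum` cross-replica sum, wrapped in `sharded_jit` only when `num_partitions > 1`. A stripped-down sketch of that pattern, without any partitioning:

```python
import jax
import jax.numpy as jnp
from jax import lax

p_allreduce = jax.pmap(lambda m: lax.psum(m, axis_name='batch'), axis_name='batch')

n = jax.local_device_count()
metrics = {'loss': jnp.arange(n, dtype=jnp.float32),   # one leading entry per local device
           'denominator': jnp.ones(n)}
summed = p_allreduce(metrics)
print(summed)   # every device's slot now holds the sum across devices
```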
davidinouye/destructive-deep-learning | [
"632add7a9731347e050d271ceebb24251e1d8e01"
] | [
"scripts/icml_2018_experiment.py"
] | [
"\"\"\"ICML 2018 experiment for MNIST and CIFAR-10.\"\"\"\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\nimport warnings\n\nimport numpy as np\nimport scipy.stats # Needed for standard error of the mean scipy.stats.sem\nfrom sklearn.base import clone\nfrom sklearn.decomposition import PCA\n\n# Add the directory of this script\nsys.path.append(os.path.dirname(os.path.realpath(__file__))) # noqa E402\n# Add directory for ddl library\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) # noqa E402\n\n# isort:imports-firstparty\nfrom ddl.base import CompositeDestructor\nfrom ddl.deep import DeepDestructorCV\nfrom ddl.externals.mlpack import MlpackDensityTreeEstimator\nfrom ddl.independent import IndependentDensity, IndependentDestructor, IndependentInverseCdf\nfrom ddl.linear import BestLinearReconstructionDestructor\nfrom ddl.local import FeatureGroupsDestructor, ImageFeaturePairs\nfrom ddl.tree import TreeDensity, TreeDestructor\nfrom ddl.univariate import HistogramUnivariateDensity, ScipyUnivariateDensity\nfrom maf_data import CIFAR10_ALPHA, MNIST_ALPHA, get_maf_data\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_experiment(data_name, model_name, model_kwargs=None):\n \"\"\"\n\n Parameters\n ----------\n data_name :\n model_name :\n model_kwargs :\n\n Returns\n -------\n\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n # Setup\n experiment_filename = model_kwargs['experiment_filename']\n experiment_label = model_kwargs['experiment_label']\n _setup_loggers(experiment_filename)\n try:\n git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']\n ).decode('ascii')[:-1]\n except subprocess.CalledProcessError:\n git_hash = 'unknown'\n logger.debug('Current git hash = %s' % git_hash)\n\n # Load data\n logger.debug('Loading data for %s' % experiment_label)\n data_dict = get_maf_data(data_name)\n X_train, X_validation, X_test = (\n data_dict['X_train'], data_dict['X_validation'], data_dict['X_test'])\n n_train, n_validation, n_test = (_X.shape[0] for _X in (X_train, X_validation, X_test))\n\n # Setup cv and refit parameters\n X_train_val = np.vstack((X_train, X_validation))\n model_kwargs['cv'] = [(np.arange(n_train), n_train + np.arange(n_validation))]\n model_kwargs['refit'] = False\n\n # Load model\n deep_destructor = _get_model(data_name, model_name, model_kwargs=model_kwargs)\n\n # Fit destructor\n logger.debug('Starting training for %s' % experiment_label)\n start_time = time.time()\n deep_destructor.fit(X_train_val, y=None, X_test=X_test)\n train_time = time.time() - start_time\n logger.debug('Finished training for %s' % experiment_label)\n logger.debug('%s: Time to train = %g s or %g minutes or %g hours'\n % (experiment_label, train_time, train_time / 60, train_time / 60 / 60))\n\n # Get test score\n start_time = time.time()\n test_scores = deep_destructor.score_samples(X_test)\n score_time = time.time() - start_time\n test_score = np.mean(test_scores)\n test_score_stderr = scipy.stats.sem(test_scores)\n logger.debug('%s: Final test score=%g with std_err=%g computed in %g s'\n % (experiment_label, float(test_score), test_score_stderr, score_time))\n date_time_completed = time.strftime(\"%Y_%m_%d-%H_%M_%S\")\n logger.debug('Date/time completed (just before saving): %s' % date_time_completed)\n\n # Prepare results in dictionary\n result_dict = dict(\n # Data statistics\n data_name=data_name, 
n_features=X_train.shape[1],\n n_train=n_train, n_validation=n_validation, n_test=n_test,\n # Model\n destructor=deep_destructor, model_name=model_name, model_kwargs=model_kwargs,\n # Time\n train_time=train_time, score_time=score_time, date_time_completed=date_time_completed,\n # Test scores\n test_score=test_score, test_score_stderr=test_score_stderr, test_scores=test_scores,\n git_hash=git_hash,\n )\n\n # Save results to pickle file\n with open(experiment_filename + '.pkl', 'wb') as f:\n pickle.dump(result_dict, f)\n logger.debug('%s: Saved results to file %s' % (experiment_label, experiment_filename))\n return result_dict\n\n\ndef load_experiment_results(data_name, model_name=None, model_kwargs=None, notebook=False):\n \"\"\"\n\n Parameters\n ----------\n data_name :\n model_name :\n model_kwargs :\n notebook :\n\n Returns\n -------\n\n \"\"\"\n experiment_filename, _ = _get_experiment_filename_and_label(data_name, model_name=model_name,\n model_kwargs=model_kwargs)\n if notebook:\n experiment_filename = os.path.join('..', experiment_filename)\n\n with open(experiment_filename + '.pkl', 'rb') as f:\n result_dict = pickle.load(file=f)\n logger.debug('Loaded results from file %s' % experiment_filename)\n return result_dict\n\n\ndef _get_model(data_name, model_name, model_kwargs):\n if 'is_test' not in model_kwargs:\n model_kwargs['is_test'] = False\n # Init destructor is shared with all models\n init_destructor = CompositeDestructor(\n destructors=[\n _get_inverse_logit_destructor(data_name),\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=HistogramUnivariateDensity(\n bins=256, bounds=[0, 1], alpha=1)\n )\n )\n ],\n random_state=0,\n )\n\n # Setup canonical destructor for various models\n if model_name == 'deep-copula':\n deep_stop_tol = 0.001\n canonical_destructor = _get_copula_destructor()\n else:\n deep_stop_tol = 0.0001\n n_jobs = model_kwargs['n_jobs']\n\n # Get pair estimators (i.e. 
pairs of pixels in a spiral pattern)\n pair_estimators = _get_pair_estimators(data_name, n_uniq_dir=8)\n\n # Setup the local/pair destructor\n pair_canonical_destructor = _get_pair_canonical_destructor(model_name)\n\n # Setup a list of canonical destructors that destroy in each pixel direction\n canonical_destructor = [\n FeatureGroupsDestructor(\n groups_estimator=pair_estimator,\n group_canonical_destructor=clone(pair_canonical_destructor),\n n_jobs=n_jobs\n )\n for pair_estimator in pair_estimators\n ]\n\n # Shared DeepDestructorCV\n return DeepDestructorCV(\n init_destructor=init_destructor,\n canonical_destructor=canonical_destructor,\n stop_tol=deep_stop_tol,\n # Either n_extend or max_canonical_destructors must be None\n n_extend=1,\n cv=model_kwargs['cv'],\n refit=model_kwargs['refit'],\n silent=False,\n log_prefix='',\n random_state=0,\n # Set maximum number of layers (None for infinite)\n max_canonical_destructors=None if not model_kwargs['is_test'] else 1,\n )\n\n\ndef _get_inverse_logit_destructor(data_name):\n if data_name == 'mnist':\n alpha = MNIST_ALPHA\n elif data_name == 'cifar10':\n alpha = CIFAR10_ALPHA\n else:\n raise ValueError('dataset should either be mnist or cifar10')\n inverse_logit = CompositeDestructor(\n destructors=[\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=ScipyUnivariateDensity(\n scipy_rv=scipy.stats.logistic,\n scipy_fit_kwargs=dict(floc=0, fscale=1)\n )\n )\n ),\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=ScipyUnivariateDensity(\n scipy_rv=scipy.stats.uniform,\n scipy_fit_kwargs=dict(floc=alpha, fscale=1 - 2 * alpha)\n )\n )\n )\n ]\n )\n return inverse_logit\n\n\ndef _get_copula_destructor(hist_kwargs=None):\n if hist_kwargs is None:\n hist_kwargs = dict(bins=40, bounds=[0, 1], alpha=100)\n return CompositeDestructor(\n destructors=[\n IndependentDestructor(\n independent_density=IndependentDensity(\n univariate_estimators=HistogramUnivariateDensity(**hist_kwargs)\n )\n ),\n IndependentInverseCdf(),\n BestLinearReconstructionDestructor(\n linear_estimator=PCA(),\n destructor=IndependentDestructor(),\n linear_projector_kwargs=dict(fit_bias=False),\n )\n ],\n random_state=0,\n )\n\n\ndef _get_pair_canonical_destructor(model_name):\n if model_name == 'image-pairs-tree':\n return TreeDestructor(\n tree_density=TreeDensity(\n tree_estimator=MlpackDensityTreeEstimator(\n max_depth=None,\n min_samples_leaf=100,\n max_leaf_nodes=50,\n ),\n get_tree=None,\n node_destructor=None,\n uniform_weight=0.5,\n )\n )\n elif model_name == 'image-pairs-copula':\n return _get_copula_destructor()\n else:\n raise ValueError('Invalid model name \"%s\"')\n\n\ndef _get_pair_estimators(data_name, n_uniq_dir):\n \"\"\"Returns `n_uniq_dir` pair estimators in a spiral pattern.\"\"\"\n\n def _generate_pixel_circle(radius=1):\n cur = radius * np.array([1, 1]) # Start in top right\n d = [cur]\n for step in np.array([[0, -1], [-1, 0], [0, 1], [1, 0]]):\n for i in range(2 * radius):\n cur = cur + step\n d.append(cur)\n d.pop(-1) # remove last that is a repeat\n\n def _rotate(a, n):\n return a[n:] + a[:n]\n\n return _rotate(d, radius) # Rotate to make directly east the first direction\n\n def _generate_pixel_spiral(n_spirals=2):\n d = []\n for i in range(n_spirals):\n d.extend(_generate_pixel_circle(radius=i + 1))\n return d\n\n directions = np.array(_generate_pixel_spiral(n_spirals=10))\n\n if data_name == 'mnist':\n directions = directions[:n_uniq_dir]\n return [\n ImageFeaturePairs(\n 
image_shape=(28, 28), relative_position=r,\n init_offset=(0, 0), step=(1, 0), wrap=True\n )\n for r in directions\n ]\n elif data_name == 'cifar10':\n # Make 3d coordinates\n directions = [(d2[0], d2[1], 0) for d2 in directions[:n_uniq_dir]]\n init_offset = [(0, 0, 0) for _ in directions]\n # Handle color channels\n directions.extend([(0, 0, 1), (0, 0, 1), (0, 0, 1)])\n init_offset.extend([(0, 0, 0), (0, 0, 1), (0, 0, 2)])\n return [\n ImageFeaturePairs(\n image_shape=(32, 32, 3), relative_position=r,\n init_offset=io, step=(1, 0, 0), wrap=True\n )\n for r, io in zip(directions, init_offset)\n ]\n else:\n raise RuntimeError('Only mnist and cifar10 are supported')\n\n\ndef _setup_loggers(experiment_filename):\n # Setup log file and console to have same format\n log_formatter = logging.Formatter(\n fmt='%(asctime)s:%(levelname)s:%(name)s:%(process)d: %(message)s')\n log_file = logging.FileHandler(experiment_filename + '.log')\n log_file.setFormatter(log_formatter)\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_formatter)\n\n # Add handlers to root logger\n root_logger = logging.getLogger()\n root_logger.addHandler(console_handler)\n root_logger.addHandler(log_file)\n\n # Adjust settings for loggers\n logging.captureWarnings(True)\n logging.getLogger().setLevel(logging.DEBUG)\n logging.getLogger('ddl').setLevel(logging.DEBUG)\n\n\ndef _get_experiment_filename_and_label(data_name, model_name=None, model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n data_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '..', 'data', 'results')\n try:\n os.makedirs(data_dir)\n except OSError:\n pass\n arg_str = '_'.join(['%s-%s' % (k, str(v)) for k, v in model_kwargs.items()])\n arg_str = arg_str.replace('.', '_')\n if len(arg_str) > 0:\n arg_str = '_' + arg_str\n filename = ('data-%s_model-%s%s'\n % (str(data_name), str(model_name), arg_str))\n pickle_filename = os.path.join(data_dir, filename)\n\n arg_str = ', '.join(['%s=%s' % (k, str(v)) for k, v in model_kwargs.items()])\n if len(arg_str) > 0:\n arg_str = ', ' + arg_str\n experiment_label = '(data=%s, model=%s%s)' % (data_name, str(model_name), arg_str)\n\n return pickle_filename, experiment_label\n\n\n# Add fast sanity-check tests for mnist dataset\ntry:\n # noinspection PyPackageRequirements\n import pytest\nexcept ImportError:\n pass\nelse:\n @pytest.mark.parametrize(\n 'model_name',\n # 'image-pairs-tree' not needed since covered by other tests\n ['deep-copula', 'image-pairs-copula']\n )\n def test_mnist_experiment(model_name):\n data_name = 'mnist'\n model_kwargs = dict(is_test=True, n_jobs=1)\n model_kwargs['experiment_filename'], model_kwargs[\n 'experiment_label'] = _get_experiment_filename_and_label(\n data_name, model_name=model_name, model_kwargs=model_kwargs)\n result_dict = run_experiment(data_name, model_name, model_kwargs=model_kwargs)\n\n # Check if test likelihood/score is as expected\n _model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']\n expected_test_scores = [-1.060270463188296844e+03, -1.155477974922050180e+03,\n -1.134326498390250208e+03]\n ind = _model_names.index(model_name)\n assert (np.abs(expected_test_scores[ind] - result_dict['test_score'])\n / np.abs(expected_test_scores[ind]) < 1e-15)\n\nif __name__ == '__main__':\n # Parse args\n all_data_names = ['mnist', 'cifar10']\n all_model_names = ['deep-copula', 'image-pairs-copula', 'image-pairs-tree']\n parser = argparse.ArgumentParser(description='Sets up and/or runs MAF 
experiments.')\n parser.add_argument(\n '--model_names', default=','.join(all_model_names),\n help='One or more model names separated by commas from the list %s' % str(all_model_names))\n parser.add_argument(\n '--data_names', default=','.join(all_data_names),\n help='One or more data names separated by commas from the list %s' % str(all_data_names))\n parser.add_argument(\n '--parallel_subprocesses', default=False, type=bool,\n help='Whether to use parallel subprocesses for each (model, data) experiment '\n 'pair or run directly (default is False).')\n parser.add_argument(\n '--n_jobs', default=1, type=int,\n help='Number of parallel jobs to use for image-pairs models (default is 1).')\n args = parser.parse_args()\n print('Parsed args = %s' % str(args))\n print('----------------------')\n\n # Run experiments\n _model_kwargs = vars(args).copy() # Extract model_kwargs as dictionary\n model_names = _model_kwargs.pop('model_names').split(',')\n data_names = _model_kwargs.pop('data_names').split(',')\n is_parallel = _model_kwargs.pop('parallel_subprocesses')\n processes = []\n for _data_name in data_names:\n # Make sure data has already been cached\n get_maf_data(_data_name)\n for _model_name in model_names:\n _model_kwargs['experiment_filename'], _model_kwargs[\n 'experiment_label'] = _get_experiment_filename_and_label(\n _data_name, model_name=_model_name, model_kwargs=_model_kwargs)\n if not is_parallel:\n # Just run the experiment directly\n try:\n run_experiment(_data_name, _model_name, _model_kwargs)\n except RuntimeError as e:\n if 'mlpack' not in str(e).lower():\n raise e\n else:\n warnings.warn('Skipping %s because of error \"%s\"' % (_model_name, str(e)))\n else:\n # Generate script to run experiment in parallel in separate subprocesses\n script_str = (\n 'import os\\n'\n 'os.chdir(\\'%s\\')\\n'\n 'from icml_2018_experiment import run_experiment\\n'\n 'run_experiment(\\'%s\\', \\'%s\\', model_kwargs=%s)\\n'\n ) % (\n os.path.dirname(os.path.realpath(__file__)),\n _data_name, _model_name, str(_model_kwargs)\n )\n echo_args = ['echo', '-e', script_str]\n\n # Launch subprocess which can run in parallel\n DEVNULL = open(os.devnull, 'w')\n echo = subprocess.Popen(['echo', '-e', script_str], stdout=subprocess.PIPE)\n python = subprocess.Popen(['python'], stdin=echo.stdout, stdout=DEVNULL)\n processes.append(echo)\n processes.append(python)\n print('Started subprocess for experiment %s' % _model_kwargs['experiment_label'])\n print(\n ' Appending to end of log file %s.log' % _model_kwargs['experiment_filename'])\n\n # Remove filenames and labels for next round\n _model_kwargs.pop('experiment_filename')\n _model_kwargs.pop('experiment_label')\n\n if is_parallel:\n # Wait for all processes to finish\n print('Waiting for all subprocesses to finish')\n for p in processes:\n p.wait()\n print('All subprocesses finished!')\n"
] | [
[
"numpy.vstack",
"sklearn.base.clone",
"sklearn.decomposition.PCA",
"numpy.abs",
"numpy.arange",
"numpy.array",
"numpy.mean"
]
] |
guitargeek/pandas | [
"a6c1f6cccee6bbccfb29488a94664ed07db024d9"
] | [
"pandas/tests/scalar/timestamp/test_timestamp.py"
] | [
"\"\"\" test the scalar Timestamp \"\"\"\n\nimport calendar\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport locale\nimport pickle\nimport unicodedata\n\nfrom dateutil.tz import tzutc\nimport numpy as np\nimport pytest\nimport pytz\nfrom pytz import (\n timezone,\n utc,\n)\n\nfrom pandas._libs.tslibs.timezones import (\n dateutil_gettz as gettz,\n get_timezone,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NaT,\n Timedelta,\n Timestamp,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries import offsets\n\n\nclass TestTimestampProperties:\n def test_freq_deprecation(self):\n # GH#41586\n msg = \"The 'freq' argument in Timestamp is deprecated\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # warning issued at construction\n ts = Timestamp(\"2021-06-01\", freq=\"D\")\n ts2 = Timestamp(\"2021-06-01\", freq=\"B\")\n\n msg = \"Timestamp.freq is deprecated\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # warning issued at attribute lookup\n ts.freq\n\n for per in [\"month\", \"quarter\", \"year\"]:\n for side in [\"start\", \"end\"]:\n attr = f\"is_{per}_{side}\"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n getattr(ts2, attr)\n\n # is_(month|quarter|year)_(start|end) does _not_ issue a warning\n # with freq=\"D\" bc the result will be unaffected by the deprecation\n with tm.assert_produces_warning(None):\n getattr(ts, attr)\n\n @pytest.mark.filterwarnings(\"ignore:The 'freq' argument:FutureWarning\")\n @pytest.mark.filterwarnings(\"ignore:Timestamp.freq is deprecated:FutureWarning\")\n def test_properties_business(self):\n ts = Timestamp(\"2017-10-01\", freq=\"B\")\n control = Timestamp(\"2017-10-01\")\n assert ts.dayofweek == 6\n assert ts.day_of_week == 6\n assert not ts.is_month_start # not a weekday\n assert not ts.freq.is_month_start(ts)\n assert ts.freq.is_month_start(ts + Timedelta(days=1))\n assert not ts.is_quarter_start # not a weekday\n assert not ts.freq.is_quarter_start(ts)\n assert ts.freq.is_quarter_start(ts + Timedelta(days=1))\n # Control case: non-business is month/qtr start\n assert control.is_month_start\n assert control.is_quarter_start\n\n ts = Timestamp(\"2017-09-30\", freq=\"B\")\n control = Timestamp(\"2017-09-30\")\n assert ts.dayofweek == 5\n assert ts.day_of_week == 5\n assert not ts.is_month_end # not a weekday\n assert not ts.freq.is_month_end(ts)\n assert ts.freq.is_month_end(ts - Timedelta(days=1))\n assert not ts.is_quarter_end # not a weekday\n assert not ts.freq.is_quarter_end(ts)\n assert ts.freq.is_quarter_end(ts - Timedelta(days=1))\n # Control case: non-business is month/qtr start\n assert control.is_month_end\n assert control.is_quarter_end\n\n @pytest.mark.parametrize(\n \"attr, expected\",\n [\n [\"year\", 2014],\n [\"month\", 12],\n [\"day\", 31],\n [\"hour\", 23],\n [\"minute\", 59],\n [\"second\", 0],\n [\"microsecond\", 0],\n [\"nanosecond\", 0],\n [\"dayofweek\", 2],\n [\"day_of_week\", 2],\n [\"quarter\", 4],\n [\"dayofyear\", 365],\n [\"day_of_year\", 365],\n [\"week\", 1],\n [\"daysinmonth\", 31],\n ],\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_fields(self, attr, expected, tz):\n # GH 10050\n # GH 13303\n ts = Timestamp(\"2014-12-31 23:59:00\", tz=tz)\n result = getattr(ts, attr)\n # that we are int like\n assert isinstance(result, int)\n assert result == expected\n\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_millisecond_raises(self, tz):\n ts = Timestamp(\"2014-12-31 23:59:00\", tz=tz)\n msg = 
\"'Timestamp' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n ts.millisecond\n\n @pytest.mark.parametrize(\n \"start\", [\"is_month_start\", \"is_quarter_start\", \"is_year_start\"]\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_is_start(self, start, tz):\n ts = Timestamp(\"2014-01-01 00:00:00\", tz=tz)\n assert getattr(ts, start)\n\n @pytest.mark.parametrize(\"end\", [\"is_month_end\", \"is_year_end\", \"is_quarter_end\"])\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_is_end(self, end, tz):\n ts = Timestamp(\"2014-12-31 23:59:59\", tz=tz)\n assert getattr(ts, end)\n\n # GH 12806\n @pytest.mark.parametrize(\n \"data\",\n [Timestamp(\"2017-08-28 23:00:00\"), Timestamp(\"2017-08-28 23:00:00\", tz=\"EST\")],\n )\n # error: Unsupported operand types for + (\"List[None]\" and \"List[str]\")\n @pytest.mark.parametrize(\n \"time_locale\", [None] + (tm.get_locales() or []) # type: ignore[operator]\n )\n def test_names(self, data, time_locale):\n # GH 17354\n # Test .day_name(), .month_name\n if time_locale is None:\n expected_day = \"Monday\"\n expected_month = \"August\"\n else:\n with tm.set_locale(time_locale, locale.LC_TIME):\n expected_day = calendar.day_name[0].capitalize()\n expected_month = calendar.month_name[8].capitalize()\n\n result_day = data.day_name(time_locale)\n result_month = data.month_name(time_locale)\n\n # Work around https://github.com/pandas-dev/pandas/issues/22342\n # different normalizations\n expected_day = unicodedata.normalize(\"NFD\", expected_day)\n expected_month = unicodedata.normalize(\"NFD\", expected_month)\n\n result_day = unicodedata.normalize(\"NFD\", result_day)\n result_month = unicodedata.normalize(\"NFD\", result_month)\n\n assert result_day == expected_day\n assert result_month == expected_month\n\n # Test NaT\n nan_ts = Timestamp(NaT)\n assert np.isnan(nan_ts.day_name(time_locale))\n assert np.isnan(nan_ts.month_name(time_locale))\n\n def test_is_leap_year(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH 13727\n dt = Timestamp(\"2000-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n assert isinstance(dt.is_leap_year, bool)\n\n dt = Timestamp(\"1999-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n dt = Timestamp(\"2004-01-01 00:00:00\", tz=tz)\n assert dt.is_leap_year\n\n dt = Timestamp(\"2100-01-01 00:00:00\", tz=tz)\n assert not dt.is_leap_year\n\n def test_woy_boundary(self):\n # make sure weeks at year boundaries are correct\n d = datetime(2013, 12, 31)\n result = Timestamp(d).week\n expected = 1 # ISO standard\n assert result == expected\n\n d = datetime(2008, 12, 28)\n result = Timestamp(d).week\n expected = 52 # ISO standard\n assert result == expected\n\n d = datetime(2009, 12, 31)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 1)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n d = datetime(2010, 1, 3)\n result = Timestamp(d).week\n expected = 53 # ISO standard\n assert result == expected\n\n result = np.array(\n [\n Timestamp(datetime(*args)).week\n for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]\n ]\n )\n assert (result == [52, 52, 53, 53]).all()\n\n def test_resolution(self):\n # GH#21336, GH#21365\n dt = Timestamp(\"2100-01-01 00:00:00\")\n assert dt.resolution == Timedelta(nanoseconds=1)\n\n # Check that the attribute is available on the class, mirroring\n # the stdlib datetime behavior\n assert 
Timestamp.resolution == Timedelta(nanoseconds=1)\n\n\nclass TestTimestamp:\n def test_tz(self):\n tstr = \"2014-02-01 09:00\"\n ts = Timestamp(tstr)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local == Timestamp(tstr, tz=\"Asia/Tokyo\")\n conv = local.tz_convert(\"US/Eastern\")\n assert conv == Timestamp(\"2014-01-31 19:00\", tz=\"US/Eastern\")\n assert conv.hour == 19\n\n # preserves nanosecond\n ts = Timestamp(tstr) + offsets.Nano(5)\n local = ts.tz_localize(\"Asia/Tokyo\")\n assert local.hour == 9\n assert local.nanosecond == 5\n conv = local.tz_convert(\"US/Eastern\")\n assert conv.nanosecond == 5\n assert conv.hour == 19\n\n def test_utc_z_designator(self):\n assert get_timezone(Timestamp(\"2014-11-02 01:00Z\").tzinfo) is utc\n\n def test_asm8(self):\n np.random.seed(7_960_929)\n ns = [Timestamp.min.value, Timestamp.max.value, 1000]\n\n for n in ns:\n assert (\n Timestamp(n).asm8.view(\"i8\") == np.datetime64(n, \"ns\").view(\"i8\") == n\n )\n\n assert Timestamp(\"nat\").asm8.view(\"i8\") == np.datetime64(\"nat\", \"ns\").view(\"i8\")\n\n def test_class_ops_pytz(self):\n def compare(x, y):\n assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(timezone(\"UTC\")))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n msg = \"timezone-aware Timestamp with UTC\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#22451\n ts_utc = Timestamp.utcfromtimestamp(current_time)\n compare(\n ts_utc,\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n compare(\n # Support tz kwarg in Timestamp.fromtimestamp\n Timestamp.fromtimestamp(current_time, \"UTC\"),\n datetime.fromtimestamp(current_time, utc),\n )\n compare(\n # Support tz kwarg in Timestamp.fromtimestamp\n Timestamp.fromtimestamp(current_time, tz=\"UTC\"),\n datetime.fromtimestamp(current_time, utc),\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_class_ops_dateutil(self):\n def compare(x, y):\n assert (\n int(\n np.round(Timestamp(x).value / 1e9)\n - np.round(Timestamp(y).value / 1e9)\n )\n == 0\n )\n\n compare(Timestamp.now(), datetime.now())\n compare(Timestamp.now(\"UTC\"), datetime.now(tzutc()))\n compare(Timestamp.utcnow(), datetime.utcnow())\n compare(Timestamp.today(), datetime.today())\n current_time = calendar.timegm(datetime.now().utctimetuple())\n\n msg = \"timezone-aware Timestamp with UTC\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n # GH#22451\n ts_utc = Timestamp.utcfromtimestamp(current_time)\n\n compare(\n ts_utc,\n datetime.utcfromtimestamp(current_time),\n )\n compare(\n Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)\n )\n\n date_component = datetime.utcnow()\n time_component = (date_component + timedelta(minutes=10)).time()\n compare(\n Timestamp.combine(date_component, time_component),\n datetime.combine(date_component, time_component),\n )\n\n def test_basics_nanos(self):\n val = np.int64(946_684_800_000_000_000).view(\"M8[ns]\")\n stamp = Timestamp(val.view(\"i8\") + 500)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert 
stamp.microsecond == 0\n assert stamp.nanosecond == 500\n\n # GH 14415\n val = np.iinfo(np.int64).min + 80_000_000_000_000\n stamp = Timestamp(val)\n assert stamp.year == 1677\n assert stamp.month == 9\n assert stamp.day == 21\n assert stamp.microsecond == 145224\n assert stamp.nanosecond == 192\n\n @pytest.mark.parametrize(\n \"value, check_kwargs\",\n [\n [946688461000000000, {}],\n [946688461000000000 / 1000, {\"unit\": \"us\"}],\n [946688461000000000 / 1_000_000, {\"unit\": \"ms\"}],\n [946688461000000000 / 1_000_000_000, {\"unit\": \"s\"}],\n [10957, {\"unit\": \"D\", \"h\": 0}],\n [\n (946688461000000000 + 500000) / 1000000000,\n {\"unit\": \"s\", \"us\": 499, \"ns\": 964},\n ],\n [\n (946688461000000000 + 500000000) / 1000000000,\n {\"unit\": \"s\", \"us\": 500000},\n ],\n [(946688461000000000 + 500000) / 1000000, {\"unit\": \"ms\", \"us\": 500}],\n [(946688461000000000 + 500000) / 1000, {\"unit\": \"us\", \"us\": 500}],\n [(946688461000000000 + 500000000) / 1000000, {\"unit\": \"ms\", \"us\": 500000}],\n [946688461000000000 / 1000.0 + 5, {\"unit\": \"us\", \"us\": 5}],\n [946688461000000000 / 1000.0 + 5000, {\"unit\": \"us\", \"us\": 5000}],\n [946688461000000000 / 1000000.0 + 0.5, {\"unit\": \"ms\", \"us\": 500}],\n [946688461000000000 / 1000000.0 + 0.005, {\"unit\": \"ms\", \"us\": 5, \"ns\": 5}],\n [946688461000000000 / 1000000000.0 + 0.5, {\"unit\": \"s\", \"us\": 500000}],\n [10957 + 0.5, {\"unit\": \"D\", \"h\": 12}],\n ],\n )\n def test_unit(self, value, check_kwargs):\n def check(value, unit=None, h=1, s=1, us=0, ns=0):\n stamp = Timestamp(value, unit=unit)\n assert stamp.year == 2000\n assert stamp.month == 1\n assert stamp.day == 1\n assert stamp.hour == h\n if unit != \"D\":\n assert stamp.minute == 1\n assert stamp.second == s\n assert stamp.microsecond == us\n else:\n assert stamp.minute == 0\n assert stamp.second == 0\n assert stamp.microsecond == 0\n assert stamp.nanosecond == ns\n\n check(value, **check_kwargs)\n\n def test_roundtrip(self):\n\n # test value to string and back conversions\n # further test accessors\n base = Timestamp(\"20140101 00:00:00\")\n\n result = Timestamp(base.value + Timedelta(\"5ms\").value)\n assert result == Timestamp(f\"{base}.005000\")\n assert result.microsecond == 5000\n\n result = Timestamp(base.value + Timedelta(\"5us\").value)\n assert result == Timestamp(f\"{base}.000005\")\n assert result.microsecond == 5\n\n result = Timestamp(base.value + Timedelta(\"5ns\").value)\n assert result == Timestamp(f\"{base}.000000005\")\n assert result.nanosecond == 5\n assert result.microsecond == 0\n\n result = Timestamp(base.value + Timedelta(\"6ms 5us\").value)\n assert result == Timestamp(f\"{base}.006005\")\n assert result.microsecond == 5 + 6 * 1000\n\n result = Timestamp(base.value + Timedelta(\"200ms 5us\").value)\n assert result == Timestamp(f\"{base}.200005\")\n assert result.microsecond == 5 + 200 * 1000\n\n def test_hash_equivalent(self):\n d = {datetime(2011, 1, 1): 5}\n stamp = Timestamp(datetime(2011, 1, 1))\n assert d[stamp] == 5\n\n @pytest.mark.parametrize(\n \"timezone, year, month, day, hour\",\n [[\"America/Chicago\", 2013, 11, 3, 1], [\"America/Santiago\", 2021, 4, 3, 23]],\n )\n def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):\n # see gh-33931\n test_timezone = gettz(timezone)\n transition_1 = Timestamp(\n year=year,\n month=month,\n day=day,\n hour=hour,\n minute=0,\n fold=0,\n tzinfo=test_timezone,\n )\n transition_2 = Timestamp(\n year=year,\n month=month,\n day=day,\n hour=hour,\n minute=0,\n 
fold=1,\n tzinfo=test_timezone,\n )\n assert hash(transition_1) == hash(transition_2)\n\n def test_tz_conversion_freq(self, tz_naive_fixture):\n # GH25241\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n t1 = Timestamp(\"2019-01-01 10:00\", freq=\"H\")\n assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n t2 = Timestamp(\"2019-01-02 12:00\", tz=\"UTC\", freq=\"T\")\n assert t2.tz_convert(tz=\"UTC\").freq == t2.freq\n\n def test_pickle_freq_no_warning(self):\n # GH#41949 we don't want a warning on unpickling\n with tm.assert_produces_warning(FutureWarning, match=\"freq\"):\n ts = Timestamp(\"2019-01-01 10:00\", freq=\"H\")\n\n out = pickle.dumps(ts)\n with tm.assert_produces_warning(None):\n res = pickle.loads(out)\n\n assert res._freq == ts._freq\n\n\nclass TestTimestampNsOperations:\n def test_nanosecond_string_parsing(self):\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\")\n # GH 7878\n expected_repr = \"2013-05-01 07:15:45.123456789\"\n expected_value = 1_367_392_545_123_456_789\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789+09:00\", tz=\"Asia/Tokyo\")\n assert ts.value == expected_value - 9 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", tz=\"UTC\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n ts = Timestamp(\"2013-05-01 07:15:45.123456789\", tz=\"US/Eastern\")\n assert ts.value == expected_value + 4 * 3600 * 1_000_000_000\n assert expected_repr in repr(ts)\n\n # GH 10041\n ts = Timestamp(\"20130501T071545.123456789\")\n assert ts.value == expected_value\n assert expected_repr in repr(ts)\n\n def test_nanosecond_timestamp(self):\n # GH 7610\n expected = 1_293_840_000_000_000_005\n t = Timestamp(\"2011-01-01\") + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n t = Timestamp(\"2011-01-01 00:00:00.000000005\")\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000005')\"\n assert t.value == expected\n assert t.nanosecond == 5\n\n expected = 1_293_840_000_000_000_010\n t = t + offsets.Nano(5)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(t)\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n t = Timestamp(\"2011-01-01 00:00:00.000000010\")\n assert repr(t) == \"Timestamp('2011-01-01 00:00:00.000000010')\"\n assert t.value == expected\n assert t.nanosecond == 10\n\n\nclass TestTimestampToJulianDate:\n def test_compare_1700(self):\n r = Timestamp(\"1700-06-23\").to_julian_date()\n assert r == 2_342_145.5\n\n def test_compare_2000(self):\n r = Timestamp(\"2000-04-12\").to_julian_date()\n assert r == 2_451_646.5\n\n def test_compare_2100(self):\n r = Timestamp(\"2100-08-12\").to_julian_date()\n assert r == 2_488_292.5\n\n def test_compare_hour01(self):\n r = Timestamp(\"2000-08-12T01:00:00\").to_julian_date()\n assert r == 2_451_768.5416666666666666\n\n def test_compare_hour13(self):\n r = Timestamp(\"2000-08-12T13:00:00\").to_julian_date()\n assert r == 2_451_769.0416666666666666\n\n\nclass TestTimestampConversion:\n def 
test_conversion(self):\n # GH#9255\n ts = Timestamp(\"2000-01-01\")\n\n result = ts.to_pydatetime()\n expected = datetime(2000, 1, 1)\n assert result == expected\n assert type(result) == type(expected)\n\n result = ts.to_datetime64()\n expected = np.datetime64(ts.value, \"ns\")\n assert result == expected\n assert type(result) == type(expected)\n assert result.dtype == expected.dtype\n\n def test_to_pydatetime_nonzero_nano(self):\n ts = Timestamp(\"2011-01-01 9:00:00.123456789\")\n\n # Warn the user of data loss (nanoseconds).\n with tm.assert_produces_warning(UserWarning):\n expected = datetime(2011, 1, 1, 9, 0, 0, 123456)\n result = ts.to_pydatetime()\n assert result == expected\n\n def test_timestamp_to_datetime(self):\n stamp = Timestamp(\"20090415\", tz=\"US/Eastern\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_dateutil(self):\n stamp = Timestamp(\"20090415\", tz=\"dateutil/US/Eastern\")\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_timestamp_to_datetime_explicit_pytz(self):\n stamp = Timestamp(\"20090415\", tz=pytz.timezone(\"US/Eastern\"))\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n @td.skip_if_windows\n def test_timestamp_to_datetime_explicit_dateutil(self):\n stamp = Timestamp(\"20090415\", tz=gettz(\"US/Eastern\"))\n dtval = stamp.to_pydatetime()\n assert stamp == dtval\n assert stamp.tzinfo == dtval.tzinfo\n\n def test_to_datetime_bijective(self):\n # Ensure that converting to datetime and back only loses precision\n # by going from nanoseconds to microseconds.\n exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning):\n pydt_max = Timestamp.max.to_pydatetime()\n\n assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000\n\n exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning\n with tm.assert_produces_warning(exp_warning):\n pydt_min = Timestamp.min.to_pydatetime()\n\n # The next assertion can be enabled once GH#39221 is merged\n # assert pydt_min < Timestamp.min # this is bc nanos are dropped\n tdus = timedelta(microseconds=1)\n assert pydt_min + tdus > Timestamp.min\n\n assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000\n\n def test_to_period_tz_warning(self):\n # GH#21333 make sure a warning is issued when timezone\n # info is lost\n ts = Timestamp(\"2009-04-15 16:17:18\", tz=\"US/Eastern\")\n with tm.assert_produces_warning(UserWarning):\n # warning that timezone info will be lost\n ts.to_period(\"D\")\n\n def test_to_numpy_alias(self):\n # GH 24653: alias .to_numpy() for scalars\n ts = Timestamp(datetime.now())\n assert ts.to_datetime64() == ts.to_numpy()\n\n # GH#44460\n msg = \"dtype and copy arguments are ignored\"\n with pytest.raises(ValueError, match=msg):\n ts.to_numpy(\"M8[s]\")\n with pytest.raises(ValueError, match=msg):\n ts.to_numpy(copy=True)\n\n\nclass SubDatetime(datetime):\n pass\n\n\[email protected](\n \"lh,rh\",\n [\n (SubDatetime(2000, 1, 1), Timedelta(hours=1)),\n (Timedelta(hours=1), SubDatetime(2000, 1, 1)),\n ],\n)\ndef test_dt_subclass_add_timedelta(lh, rh):\n # GH#25851\n # ensure that subclassed datetime works for\n # Timedelta operations\n result = lh + rh\n expected = SubDatetime(2000, 1, 1, 1)\n assert result == expected\n"
] | [
[
"pandas.tseries.offsets.Nano",
"numpy.random.seed",
"pandas.Timestamp.today",
"numpy.int64",
"numpy.datetime64",
"pandas.Timestamp.fromtimestamp",
"pandas._testing.assert_produces_warning",
"pandas.Timestamp.now",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas.Timestamp",
"pandas._testing.set_locale",
"pandas.Timedelta",
"pandas.Timestamp.utcfromtimestamp",
"pandas.Timestamp.combine",
"pandas.Timestamp.utcnow",
"numpy.iinfo",
"pandas.Timestamp.max.to_pydatetime",
"pandas._testing.get_locales",
"pandas.Timestamp.min.to_pydatetime"
]
] |
JeffreyDF/Lasagne | [
"6dd88f5fada20768087f29ae89cbd83980fe0a4e"
] | [
"lasagne/tests/layers/test_conv.py"
] | [
"import numpy as np\nimport pytest\nimport importlib\nimport theano\n\nimport lasagne\nfrom lasagne.utils import floatX, as_tuple\n\n\ndef conv2d(input, kernel, pad):\n \"\"\"Execute a 2D convolution.\n\n Parameters\n ----------\n input : numpy array\n kernel : numpy array\n pad : {0, 'valid', 'same', 'full'}\n\n Returns\n -------\n numpy array\n \"\"\"\n if pad not in ['valid', 'same', 'full']:\n pad = as_tuple(pad, 2, int)\n input = np.pad(input,\n ((0, 0), (0, 0), (pad[0], pad[0]), (pad[1], pad[1])),\n mode='constant')\n pad = 'valid'\n\n output = np.zeros((input.shape[0],\n kernel.shape[0],\n input.shape[2] + kernel.shape[2] - 1,\n input.shape[3] + kernel.shape[3] - 1,\n ))\n\n for i in range(kernel.shape[2]):\n for j in range(kernel.shape[3]):\n k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]\n output[:, :, i:i + input.shape[2],\n j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)\n\n if pad == 'valid':\n trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)\n output = output[:,\n :,\n trim[0]:-trim[0] or None,\n trim[1]:-trim[1] or None]\n\n elif pad == 'same':\n shift_x = (kernel.shape[2] - 1) // 2\n shift_y = (kernel.shape[3] - 1) // 2\n output = output[:, :, shift_x:input.shape[2] + shift_x,\n shift_y:input.shape[3] + shift_y]\n return output\n\n\ndef conv2d_test_sets():\n def _convert(input, kernel, output, kwargs):\n return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]\n\n for pad in [0, 'full', 'same']:\n for stride in [1, 2, 3]:\n for filter_size in [1, 3]:\n if stride > filter_size:\n continue\n input = np.random.random((3, 1, 16, 23))\n kernel = np.random.random((16, 1, filter_size, filter_size))\n output = conv2d(input, kernel, pad=pad)\n output = output[:, :, ::stride, ::stride]\n yield _convert(input, kernel, output, {'pad': pad,\n 'stride': stride\n })\n\n # bias-less case\n input = np.random.random((3, 1, 16, 23))\n kernel = np.random.random((16, 1, 3, 3))\n output = conv2d(input, kernel, pad='valid')\n yield _convert(input, kernel, output, {'b': None})\n # pad='valid' case\n yield _convert(input, kernel, output, {'pad': 'valid'})\n\n\ndef conv1d(input, kernel, pad):\n if pad not in ['valid', 'same', 'full']:\n input = np.pad(input,\n ((0, 0), (0, 0), (int(pad), int(pad))),\n mode='constant')\n pad = 'valid'\n\n output = []\n for b in input:\n temp = []\n for c in kernel:\n temp.append(\n np.convolve(b[0, :], c[0, :], mode=pad))\n output.append(temp)\n return np.array(output)\n\n\ndef conv1d_test_sets():\n def _convert(input, kernel, output, kwargs):\n return [theano.shared(floatX(input)), floatX(kernel), output, kwargs]\n\n for pad in [0, 1, 2, 'full', 'same']:\n for stride in [1, 2, 3]:\n for filter_size in [1, 3]:\n if stride > filter_size:\n continue\n input = np.random.random((3, 1, 23))\n kernel = np.random.random((16, 1, filter_size))\n output = conv1d(input, kernel, pad)\n output = output[:, :, ::stride]\n yield _convert(input, kernel, output, {'pad': pad,\n 'stride': stride,\n })\n\n # bias-less case\n input = np.random.random((3, 1, 23))\n kernel = np.random.random((16, 1, 3))\n output = conv1d(input, kernel, pad='valid')\n yield _convert(input, kernel, output, {'b': None})\n # pad='valid' case\n yield _convert(input, kernel, output, {'pad': 'valid'})\n\n\ndef test_conv_output_length():\n from lasagne.layers.conv import conv_output_length\n\n assert conv_output_length(13, 5, 3, 'valid') == 3\n assert conv_output_length(13, 5, 3, 0) == 3\n assert conv_output_length(13, 5, 3, 'full') == 6\n assert conv_output_length(13, 5, 3, 'same') == 
5\n assert conv_output_length(13, 5, 3, 2) == 5\n\n with pytest.raises(ValueError) as exc:\n conv_output_length(13, 5, 3, '_nonexistent_mode')\n assert \"Invalid pad: \" in exc.value.args[0]\n\n\[email protected]\ndef DummyInputLayer():\n def factory(shape):\n from lasagne.layers.input import InputLayer\n return InputLayer(shape)\n return factory\n\n\nclass TestConv1DLayer:\n\n @pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv1d_test_sets()))\n @pytest.mark.parametrize(\"extra_kwargs\", [\n {},\n {'untie_biases': True},\n ])\n def test_defaults(self, DummyInputLayer,\n input, kernel, output, kwargs, extra_kwargs):\n kwargs.update(extra_kwargs)\n b, c, w = input.shape.eval()\n input_layer = DummyInputLayer((b, c, w))\n try:\n from lasagne.layers.conv import Conv1DLayer\n layer = Conv1DLayer(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pass\n\n def test_init_none_nonlinearity_bias(self, DummyInputLayer):\n from lasagne.layers.conv import Conv1DLayer\n input_layer = DummyInputLayer((1, 2, 3))\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),\n nonlinearity=None, b=None)\n assert layer.nonlinearity == lasagne.nonlinearities.identity\n assert layer.b is None\n\n def test_invalid_pad(self, DummyInputLayer):\n from lasagne.layers.conv import Conv1DLayer\n input_layer = DummyInputLayer((1, 2, 3))\n with pytest.raises(TypeError) as exc:\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(3,),\n pad='_nonexistent_mode')\n assert \"iterable of int\" in exc.value.args[0]\n\n with pytest.raises(NotImplementedError) as exc:\n layer = Conv1DLayer(input_layer, num_filters=16, filter_size=(4,),\n pad='same')\n assert \"requires odd filter size\" in exc.value.args[0]\n\n\nclass TestConv2DLayerImplementations:\n\n @pytest.fixture(\n params=[\n ('lasagne.layers', 'Conv2DLayer', {}),\n ('lasagne.layers.cuda_convnet',\n 'Conv2DCCLayer',\n {'flip_filters': True}),\n ('lasagne.layers.corrmm', 'Conv2DMMLayer', {'flip_filters': True}),\n ('lasagne.layers.dnn', 'Conv2DDNNLayer', {'flip_filters': True}),\n ],\n )\n def Conv2DImpl(self, request):\n impl_module_name, impl_name, impl_default_kwargs = request.param\n try:\n mod = importlib.import_module(impl_module_name)\n except ImportError:\n pytest.skip(\"{} not available\".format(impl_module_name))\n\n impl = getattr(mod, impl_name)\n\n def wrapper(*args, **kwargs):\n kwargs2 = impl_default_kwargs.copy()\n kwargs2.update(kwargs)\n return impl(*args, **kwargs2)\n\n wrapper.__name__ = impl_name\n return wrapper\n\n @pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv2d_test_sets()))\n @pytest.mark.parametrize(\"extra_kwargs\", [\n {},\n {'untie_biases': True},\n ])\n def test_defaults(self, Conv2DImpl, DummyInputLayer,\n input, kernel, output, kwargs, extra_kwargs):\n kwargs.update(extra_kwargs)\n b, c, h, w = input.shape.eval()\n input_layer = DummyInputLayer((b, c, h, w))\n try:\n layer = Conv2DImpl(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2:],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pytest.skip()\n\n 
@pytest.mark.parametrize(\n \"input, kernel, output, kwargs\", list(conv2d_test_sets()))\n def test_with_nones(self, Conv2DImpl, DummyInputLayer,\n input, kernel, output, kwargs):\n b, c, h, w = input.shape.eval()\n input_layer = DummyInputLayer((None, c, None, None))\n try:\n layer = Conv2DImpl(\n input_layer,\n num_filters=kernel.shape[0],\n filter_size=kernel.shape[2:],\n W=kernel,\n **kwargs\n )\n actual = layer.get_output_for(input).eval()\n\n assert layer.output_shape == (None,\n kernel.shape[0],\n None,\n None)\n assert actual.shape == output.shape\n assert np.allclose(actual, output)\n\n except NotImplementedError:\n pytest.skip()\n\n def test_init_none_nonlinearity_bias(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((1, 2, 3, 3))\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),\n nonlinearity=None, b=None)\n assert layer.nonlinearity == lasagne.nonlinearities.identity\n assert layer.b is None\n\n def test_invalid_pad(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((1, 2, 3))\n with pytest.raises(TypeError) as exc:\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3),\n pad='_nonexistent_mode')\n assert \"iterable of int\" in exc.value.args[0]\n\n with pytest.raises(NotImplementedError) as exc:\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(4, 4),\n pad='same')\n assert \"requires odd filter size\" in exc.value.args[0]\n\n def test_get_params(self, Conv2DImpl, DummyInputLayer):\n input_layer = DummyInputLayer((128, 3, 32, 32))\n layer = Conv2DImpl(input_layer, num_filters=16, filter_size=(3, 3))\n assert layer.get_params() == [layer.W, layer.b]\n assert layer.get_params(regularizable=False) == [layer.b]\n assert layer.get_params(regularizable=True) == [layer.W]\n assert layer.get_params(trainable=True) == [layer.W, layer.b]\n assert layer.get_params(trainable=False) == []\n assert layer.get_params(_nonexistent_tag=True) == []\n assert layer.get_params(_nonexistent_tag=False) == [layer.W, layer.b]\n\n\nclass TestConv2DDNNLayer:\n def test_import_without_gpu_or_cudnn_raises(self):\n from theano.sandbox.cuda import dnn\n if theano.config.device.startswith(\"gpu\") and dnn.dnn_available():\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.dnn\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.dnn import Conv2DDNNLayer\n except ImportError:\n pytest.skip(\"dnn not available\")\n\n input_layer = DummyInputLayer((1, 2, 3, 3))\n\n layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (1, 4, 7, 7)\n\n\nclass TestConv2DMMLayer:\n def test_import_without_gpu_raises(self):\n if theano.config.device.startswith(\"gpu\"):\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.corrmm\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.corrmm import Conv2DMMLayer\n except ImportError:\n pytest.skip(\"corrmm not available\")\n\n input_layer = DummyInputLayer((1, 2, 3, 3))\n\n layer = Conv2DMMLayer(input_layer, num_filters=4, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (1, 4, 7, 7)\n\n\nclass TestConv2DCCLayer:\n def test_import_without_gpu_raises(self):\n if theano.config.device.startswith(\"gpu\"):\n pytest.skip()\n else:\n with pytest.raises(ImportError):\n import lasagne.layers.cuda_convnet\n\n def test_unsupported_settings(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except 
ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((128, 3, 32, 32))\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 5))\n assert (\"Conv2DCCLayer only supports square filters\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3), stride=(1, 2))\n assert (\"Conv2DCCLayer only supports square strides\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=15,\n filter_size=(3, 3))\n assert (\"Conv2DCCLayer requires num_filters to be a multiple of 16\" in\n exc.value.args[0])\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3), pad=(1, 2))\n assert (\"Conv2DCCLayer only supports square padding\" in\n exc.value.args[0])\n\n input_layer = DummyInputLayer((128, 7, 32, 32))\n\n with pytest.raises(RuntimeError) as exc:\n layer = Conv2DCCLayer(input_layer, num_filters=16,\n filter_size=(3, 3))\n assert (\"Conv2DCCLayer requires the number of input channels to be \"\n \"1, 2, 3 or a multiple of 4\" in exc.value.args[0])\n\n def test_pad(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((128, 3, 32, 32))\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n pad=(3, 3))\n assert layer.output_shape == (128, 16, 36, 36)\n\n def test_dimshuffle_false_shapes(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = DummyInputLayer((4, 32, 32, 128)) # c01b instead of bc01\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False)\n assert layer.W.get_value().shape == (4, 3, 3, 16)\n assert layer.b.get_value().shape == (16,)\n\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False, untie_biases=True)\n assert layer.W.get_value().shape == (4, 3, 3, 16)\n assert layer.b.get_value().shape == (16, 30, 30)\n\n def test_dimshuffle_false_get_output_for(self, DummyInputLayer):\n try:\n from lasagne.layers.cuda_convnet import Conv2DCCLayer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n # this implementation is tested against FilterActs instead of\n # theano.tensor.nnet.conv.conv2d because using the latter leads to\n # numerical precision errors.\n from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs\n filter_acts = FilterActs(stride=1, pad=0, partial_sum=1)\n\n input = theano.shared(floatX(np.random.random((4, 5, 5, 8))))\n kernel = theano.shared(floatX(np.random.random((4, 3, 3, 16))))\n\n input_layer = DummyInputLayer((4, 5, 5, 8)) # c01b instead of bc01\n layer = Conv2DCCLayer(input_layer, num_filters=16, filter_size=(3, 3),\n dimshuffle=False, W=kernel, b=None,\n nonlinearity=None)\n\n output = np.array(filter_acts(input, kernel).eval())\n\n actual = layer.get_output_for(input).eval()\n actual = np.array(actual)\n assert actual.shape == output.shape\n assert actual.shape == layer.output_shape\n assert np.allclose(actual, output)\n\n\nclass TestShuffleLayers:\n def test_bc01_to_c01b(self):\n from lasagne.layers.input import InputLayer\n try:\n from lasagne.layers.cuda_convnet import ShuffleBC01ToC01BLayer\n 
except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = InputLayer((1, 2, 3, 4))\n layer = ShuffleBC01ToC01BLayer(input_layer)\n assert layer.output_shape == (2, 3, 4, 1)\n\n input = floatX(np.random.random((1, 2, 3, 4)))\n output = input.transpose(1, 2, 3, 0)\n actual = layer.get_output_for(theano.shared(input)).eval()\n assert np.allclose(output, actual)\n\n def test_c01b_to_bc01(self):\n from lasagne.layers.input import InputLayer\n try:\n from lasagne.layers.cuda_convnet import ShuffleC01BToBC01Layer\n except ImportError:\n pytest.skip(\"cuda_convnet not available\")\n\n input_layer = InputLayer((1, 2, 3, 4))\n layer = ShuffleC01BToBC01Layer(input_layer)\n assert layer.output_shape == (4, 1, 2, 3)\n\n input = floatX(np.random.random((1, 2, 3, 4)))\n output = input.transpose(3, 0, 1, 2)\n actual = layer.get_output_for(theano.shared(input)).eval()\n assert np.allclose(output, actual)\n"
] | [
[
"numpy.allclose",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.pad",
"numpy.convolve"
]
] |
antoszy/RoboND-perception-exercises | [
"8c725e77316162ae485ccee94085fd2314be7ae0"
] | [
"Exercise-3/sensor_stick/src/sensor_stick/features.py"
] | [
"import matplotlib.colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pcl_helper import *\nnbinscol = 32\nnbinsnor = 20\n\ndef rgb_to_hsv(rgb_list):\n rgb_normalized = [1.0*rgb_list[0]/255, 1.0*rgb_list[1]/255, 1.0*rgb_list[2]/255]\n hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]\n return hsv_normalized\n\n\ndef compute_color_histograms(cloud, using_hsv=False):\n\t# Compute histograms for the clusters\n\tpoint_colors_list = []\n\n\t# Step through each point in the point cloud\n\tfor point in pc2.read_points(cloud, skip_nans=True):\n\t\trgb_list = float_to_rgb(point[3])\n\t\tif using_hsv:\n\t\t point_colors_list.append(rgb_to_hsv(rgb_list) * 255)\n\t\telse:\n\t\t point_colors_list.append(rgb_list)\n\n\t# Populate lists with color values\n\tchannel_1_vals = []\n\tchannel_2_vals = []\n\tchannel_3_vals = []\n\n\tfor color in point_colors_list:\n\t\tchannel_1_vals.append(color[0])\n\t\tchannel_2_vals.append(color[1])\n\t\tchannel_3_vals.append(color[2])\n\n\t# TODO: Compute histograms\n\n\thist_1 = np.histogram(channel_1_vals, bins = nbinscol, range = (0, 256))\n\thist_2 = np.histogram(channel_2_vals, bins = nbinscol, range = (0, 256))\n\thist_3 = np.histogram(channel_3_vals, bins = nbinscol, range = (0, 256))\n\n\t# TODO: Concatenate and normalize the histograms\n\tfeatures = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)\n\tnormed_features = features/np.sum(features)\n\n\treturn normed_features \n\n\ndef compute_normal_histograms(normal_cloud):\n\tnorm_x_vals = []\n\tnorm_y_vals = []\n\tnorm_z_vals = []\n\n\tfor norm_component in pc2.read_points(normal_cloud,\n\t\t field_names = ('normal_x', 'normal_y', 'normal_z'),\n\t\t skip_nans=True):\n\t\tnorm_x_vals.append(norm_component[0])\n\t\tnorm_y_vals.append(norm_component[1])\n\t\tnorm_z_vals.append(norm_component[2])\n\n\t# TODO: Compute histograms of normal values (just like with color)\n\thist_1 = np.histogram(norm_x_vals, bins = nbinsnor, range = (0, 256))\n\thist_2 = np.histogram(norm_y_vals, bins = nbinsnor, range = (0, 256))\n\thist_3 = np.histogram(norm_z_vals, bins = nbinsnor, range = (0, 256))\n\n\t# TODO: Concatenate and normalize the histograms\n\tfeatures = np.concatenate((hist_1[0],hist_2[0],hist_3[0])).astype(np.float64)\n\tnormed_features = features/np.sum(features)\n\n\treturn normed_features\n"
] | [
[
"numpy.histogram",
"numpy.sum",
"numpy.concatenate"
]
] |
hrutkabence/tutorials | [
"bd76294860804aee8ecda5e1445464506bf02ee0"
] | [
"english/data_processing/lessons/code/vslide1.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom math import hypot, atan2, sin, cos, pi, degrees\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef vplain(x1, y1, x2, y2):\n \"\"\" set up line equation\n vp[0] * x + vp[1] * y + vp[2] = 0\n\n x1, y1 - horizontal coordinates of the start point of the section\n x2, y2 - horizontal coordinates of the end point of the section\n returns a numpy array with coefficients of the vertical plane\n \"\"\"\n\n vp = np.zeros((3,))\n vp[0] = y1 - y2\n vp[1] = x2 - x1\n vp[2] = x1 * y2 - x2 * y1\n vp = vp / hypot(vp[0], vp[1]) # normalize\n return vp\n\ndef section(pc, x1, y1, x2, y2, tol):\n \"\"\" Select point from a point cloud near to a line\n\n pc - point cloud in a numpy array\n x1, y1 - horizontal coordinates of the start point of the section\n x2, y2 - horizontal coordinates of the end point of the section\n tol - tolerance distance from the section\n returns a numpy array with points near to the section\n \"\"\"\n pc1 = pc.copy()\n pc1[:, 2] = 1 # change to homogenous coordinates\n vp = vplain(x1, y1, x2, y2) # equation of vertical plain\n sec = pc[np.abs(np.dot(pc1, vp)) < tol] # select points close to the section\n\n return sec\n\ndef tr(e1, n1, e2, n2):\n \"\"\" set up transformation matrix for homogenous coordinates\n\n Parameters:\n e1, n1 - start point of the section line\n e2, n2 - end point of the section section line\n returns the transformation matrix\n \"\"\"\n de = e2 - e1\n dn = n2 - n1\n\n a = atan2(dn, de)\n ca = cos(a)\n sa = sin(a)\n return np.dot(np.array([[1, 0, 0], [0, 1, 0], [-e1, -n1, 1]]),\n np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1]]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 7:\n pc = np.loadtxt('lidar.txt', delimiter=',') ;# load point cloud\n x1 = 548060.0\n y1 = 5129130.0\n x2 = 549850.0\n y2 = 5129030.0\n #x1 = 549400\n #y1 = 5128900\n #x2 = 549200\n #y2 = 5129300\n tol = 1.0\n else:\n pc = np.loadtxt(sys.argv[1], delimiter=',') ;# load point cloud\n x1 = float(sys.argv[2])\n y1 = float(sys.argv[3])\n x2 = float(sys.argv[4])\n y2 = float(sys.argv[5])\n tol = float(sys.argv[6])\n # set up equation for vertical plain a * x + b * y + c = 0\n vp = vplain(x1, y1, x2, y2)\n sec = section(pc,x1,y1,x2,y2,tol) \n # transformation matrix\n trm = tr(x1, y1, x2, y2)\n if abs(np.dot(np.array([x1, y1, 1]), trm)[1]) > 1e-5 or \\\n abs(np.dot(np.array([x2, y2, 1]), trm)[1]) > 1e-5:\n print(\"tr error\")\n # make a copy of section points for homogenous transformation\n pc1 = sec.copy()\n pc1[:, 2] = 1\n pc1 = np.dot(pc1, trm) # rotate points into the section plain\n pc1[:, 2] = sec[:, 2] # copy back elevations to transformed points\n\n plt.plot(pc1[:,0], pc1[:,2], 'o')\n plt.xlabel('chainage (m)')\n plt.ylabel('elevation (m)')\n plt.axis('equal')\n plt.grid('on')\n plt.show() \n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.dot",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt"
]
] |
MrJohnsson77/bat-country | [
"c0d29a0b32c196ca3d4c40fbaf960432b507e8bb"
] | [
"demo_guided.py"
] | [
"# USAGE\n# python demo_guided.py --base-model $CAFFE_ROOT/models/bvlc_googlenet \\\n#\t--image initial_images/clouds.jpg \\\n#\t--guide-image initial_images/seed_images/starry_night.jpg \\\n#\t--output examples/output/seeded/clouds_and_starry_night.jpg\n\n# import the necessary packages\nfrom batcountry import BatCountry\nfrom PIL import Image\nimport numpy as np\nimport argparse\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-b\", \"--base-model\", required=True, help=\"base model path\")\nap.add_argument(\"-l\", \"--layer\", type=str, default=\"inception_4c/output\",\n\thelp=\"layer of CNN to use\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to base image\")\nap.add_argument(\"-g\", \"--guide-image\", required=True, help=\"path to guide image\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"path to output image\")\nargs = ap.parse_args()\n\n# we can't stop here...\nbc = BatCountry(args.base_model)\nfeatures = bc.prepare_guide(Image.open(args.guide_image), end=args.layer)\nimage = bc.dream(np.float32(Image.open(args.image)), end=args.layer,\n\titer_n=20, objective_fn=BatCountry.guided_objective,\n\tobjective_features=features,)\nbc.cleanup()\n\n# write the output image to file\nresult = Image.fromarray(np.uint8(image))\nresult.save(args.output)"
] | [
[
"numpy.uint8"
]
] |