Dataset columns:
repo_name: string (lengths 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
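The rows that follow pair one repository name with parallel, per-file lists for the remaining columns. As a minimal illustrative sketch (not part of the dataset itself), the snippet below reconstructs the first record as a plain Python dict, with the "code" strings abbreviated, and walks the parallel lists; field names mirror the column schema above.

# Illustrative sketch only: one row of this dataset as a plain Python dict,
# using (abbreviated) values from the first record below (jbeezley/SMQTK).
row = {
    "repo_name": "jbeezley/SMQTK",
    "hexsha": [
        "fc9404b69150ef44f24423844bc80735c0c2b669",
        "fc9404b69150ef44f24423844bc80735c0c2b669",
    ],
    "file_path": [
        "python/smqtk/bin/make_train_test_sets.py",
        "python/smqtk/bin/minibatch_kmeans_clusters.py",
    ],
    # In the real data each "code" entry is the complete file source as one string.
    "code": [
        "#!/usr/bin/env python\n...",
        '"""\nScript for generating clusters from descriptors...\n"""\n...',
    ],
    "apis": [
        ["sklearn.model_selection.StratifiedShuffleSplit", "numpy.array"],
        ["sklearn.cluster.MiniBatchKMeans", "numpy.save"],
    ],
    "possible_versions": [
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
    ],
}

# The per-file lists are parallel: hexsha[i], file_path[i], code[i] and apis[i]
# all describe the same file within the repository.
for path, api_list in zip(row["file_path"], row["apis"]):
    print(path, "->", api_list)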
jbeezley/SMQTK
[ "fc9404b69150ef44f24423844bc80735c0c2b669", "fc9404b69150ef44f24423844bc80735c0c2b669" ]
[ "python/smqtk/bin/make_train_test_sets.py", "python/smqtk/bin/minibatch_kmeans_clusters.py" ]
[ "#!/usr/bin/env python\nimport argparse\nimport csv\nimport itertools\nimport os\nimport re\n\nimport numpy\nimport six\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\nclass KeyToFilepath(argparse.Action):\n \"\"\"\n Custom argparse action for parsing out positional class-to-filepath\n arguments.\n \"\"\"\n re_key2path = re.compile('(\\w+)=(.+)', flags=re.UNICODE)\n\n # noinspection PyUnusedLocal,PyShadowingBuiltins\n def __init__(self, option_strings, dest, nargs=None, const=None,\n default=None, type=None, choices=None, required=False,\n help=None, metavar=None):\n \"\"\"\n Custom constructor to enforce that `nargs` is always `+`.\n \"\"\"\n super(KeyToFilepath, self).__init__(option_strings,\n dest, \"+\",\n const, default, type,\n choices, required,\n help, metavar)\n\n # noinspection PyShadowingNames\n def __call__(self, parser, namespace, values, option_string=None):\n d = dict()\n for a in values:\n m = self.re_key2path.match(a)\n if not m:\n raise ValueError(\"Invalid argument syntax: '%s'\" % a)\n cls_name = m.group(1)\n filepath = m.group(2)\n if not os.path.isfile(filepath):\n raise ValueError(\n \"Invalid filepath '%s' given for argument: '%s'\"\n % (filepath, a)\n )\n # Read in UIDs from lines in CSV file\n d[cls_name] = filepath\n setattr(namespace, self.dest, d)\n\n\ndef cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('cls_to_cmdProcessedCsv',\n nargs='+',\n help=\"Series of `label=filepath` arguments where we \"\n \"interpret the string value before the `=` sign \"\n \"as the class label and the value after to be the \"\n \"path to the `compute_many_descriptors` output \"\n \"CSV of files successfully processed.\",\n action=KeyToFilepath)\n parser.add_argument('-t', '--test-percent',\n type=float,\n default=0.3,\n help=\"Percentage of images per class to split for \"\n \"testing. Should be in the [0,1] range. Selects \"\n \"~30%% by default.\")\n parser.add_argument('--rand-state', type=int, default=0,\n help='Random state initialization integer. Default is '\n '0.')\n parser.add_argument('--output-base',\n default=\"classifier\",\n help=\"String base to output files. We will generate 3 \"\n \"files: '<>.all_uuids.csv', '<>.train_uuids.csv' \"\n \"and '<>.test_uuids.csv'. 
\"\n \"Default is 'classifier'.\")\n return parser\n\n\ndef main():\n args = cli_parser().parse_args()\n\n TEST_PERCENT = args.test_percent\n RAND_STATE = args.rand_state\n OUTPUT_BASE = args.output_base\n CLS_TO_FILEPATH = args.cls_to_cmdProcessedCsv\n\n # Parse CSV files associated to classes\n cls_uuids = {}\n for cls, filepath in six.iteritems(CLS_TO_FILEPATH):\n cls_uuids[cls] = sorted({r[1] for r in csv.reader(open(filepath))})\n\n cls_list = sorted(cls_uuids)\n all_label, all_uuids = \\\n zip(*[(cls_name, uuid)\n for cls_name in cls_list\n for uuid in cls_uuids[cls_name]])\n # Transform into numpy array for multi-index access later\n all_label = numpy.array(all_label)\n all_uuids = numpy.array(all_uuids)\n\n # ``n_splits=1`` -- Only make one train/test split\n sss = StratifiedShuffleSplit(n_splits=1, test_size=TEST_PERCENT,\n random_state=RAND_STATE)\n\n # Get array of index position values of ``all_uuids`` of uuids to use for\n # train and test sets, respectively.\n train_index, test_index = \\\n iter(sss.split(numpy.zeros(len(all_label)), all_label)).next()\n uuids_train, uuids_test = all_uuids[train_index], all_uuids[test_index]\n label_train, label_test = all_label[train_index], all_label[test_index]\n\n print(\"Train:\")\n for cls_label in cls_list:\n cnt = label_train.tolist().count(cls_label)\n print(\"- %s:\\t%d\\t(~%.2f %% of total class examples)\"\n % (cls_label, cnt, float(cnt) / len(cls_uuids[cls_label]) * 100))\n print(\"Test:\")\n for cls_label in cls_list:\n cnt = label_test.tolist().count(cls_label)\n print(\"- %s:\\t%d\\t(~%.2f %% of total class examples)\"\n % (cls_label, cnt, float(cnt) / len(cls_uuids[cls_label]) * 100))\n\n # Save out files for use with ``classifier_model_validation``\n with open('%s.all_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(all_uuids, all_label):\n w.writerow([uuid, label])\n\n with open('%s.train_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(uuids_train, label_train):\n w.writerow([uuid, label])\n\n with open('%s.test_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(uuids_test, label_test):\n w.writerow([uuid, label])\n\n\nif __name__ == '__main__':\n main()\n", "\"\"\"\nScript for generating clusters from descriptors in a given index using the\nmini-batch KMeans implementation from Scikit-learn\n(http://scikit-learn.org/stable/modules/generated/sklearn.cluster.MiniBatchKMeans.html).\n\nBy the nature of Scikit-learn's MiniBatchKMeans implementation, euclidean\ndistance is used to measure distance between descriptors.\n\"\"\"\nimport logging\nimport os\n\nimport numpy\nfrom six.moves import cPickle\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom smqtk.compute_functions import mb_kmeans_build_apply\nfrom smqtk.representation.descriptor_index import get_descriptor_index_impls\nfrom smqtk.utils import Configurable\nfrom smqtk.utils.bin_utils import utility_main_helper, basic_cli_parser\nfrom smqtk.utils.file_utils import safe_create_dir\nfrom smqtk.utils.plugin import make_config, from_plugin_config\n\n\ndef default_config():\n\n # Trick for mixing in our Configurable class API on top of scikit-learn's\n # MiniBatchKMeans class in order to introspect construction parameters.\n # We never construct this class so we do not need to implement \"pure\n # virtual\" instance methods.\n # noinspection PyAbstractClass\n class MBKTemp (MiniBatchKMeans, Configurable):\n pass\n\n c = {\n 
\"minibatch_kmeans_params\": MBKTemp.get_default_config(),\n \"descriptor_index\": make_config(get_descriptor_index_impls()),\n # Number of descriptors to run an initial fit with. This brings the\n # advantage of choosing a best initialization point from multiple.\n \"initial_fit_size\": 0,\n # Path to save generated KMeans centroids\n \"centroids_output_filepath_npy\": \"centroids.npy\"\n }\n\n # Change/Remove some KMeans params for more appropriate defaults\n del c['minibatch_kmeans_params']['compute_labels']\n del c['minibatch_kmeans_params']['verbose']\n c['minibatch_kmeans_params']['random_state'] = 0\n\n return c\n\n\ndef cli_parser():\n p = basic_cli_parser(__doc__)\n\n g_output = p.add_argument_group(\"output\")\n g_output.add_argument('-o', '--output-map',\n metavar=\"PATH\",\n help=\"Path to output the clustering class mapping \"\n \"to. Saved as a pickle file with -1 format.\")\n\n return p\n\n\ndef main():\n args = cli_parser().parse_args()\n config = utility_main_helper(default_config, args)\n log = logging.getLogger(__name__)\n\n output_filepath = args.output_map\n if not output_filepath:\n raise ValueError(\"No path given for output map file (pickle).\")\n\n #: :type: smqtk.representation.DescriptorIndex\n index = from_plugin_config(config['descriptor_index'],\n get_descriptor_index_impls())\n mbkm = MiniBatchKMeans(verbose=args.verbose,\n compute_labels=False,\n **config['minibatch_kmeans_params'])\n initial_fit_size = int(config['initial_fit_size'])\n\n d_classes = mb_kmeans_build_apply(index, mbkm, initial_fit_size)\n\n log.info(\"Saving KMeans centroids to: %s\",\n config['centroids_output_filepath_npy'])\n numpy.save(config['centroids_output_filepath_npy'], mbkm.cluster_centers_)\n\n log.info(\"Saving result classification map to: %s\", output_filepath)\n safe_create_dir(os.path.dirname(output_filepath))\n with open(output_filepath, 'w') as f:\n cPickle.dump(d_classes, f, -1)\n\n log.info(\"Done\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.model_selection.StratifiedShuffleSplit", "numpy.array" ], [ "sklearn.cluster.MiniBatchKMeans", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeoscienceAustralia/geophys_utils
[ "e5580f582f3e42f347d71b140dd9213f784e2fde", "e5580f582f3e42f347d71b140dd9213f784e2fde" ]
[ "geophys_utils/netcdf_converter/aseg_gdf_utils.py", "geophys_utils/test/test_netcdf_point_utils.py" ]
[ "'''\nFunctions to work with ASEG-GDF format string\nRefer to https://www.aseg.org.au/sites/default/files/pdf/ASEG-GDF2-REV4.pdf for further information\n\nCreated on 19 Jun. 2018\n\n@author: u76345\n'''\n\nimport re\nimport numpy as np\nfrom collections import OrderedDict\nfrom math import ceil, log10\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO) # Logging level for this module\n\ndef dfrexp(f):\n '''\n Decimal version of frexp or np.frexp function to return mantissa & exponent\n @param f: Floating point scalar or array\n @return fman: Scalar or array decimal mantissa between 0.0 and 1.0 \n @return fexp: Scalar or array decimal exponent\n '''\n # Compute decimal exponent\n if type(f) == np.ndarray:\n fexp = np.zeros(shape=f.shape, dtype='int32')\n fexp[f != 0] = np.ceil(np.log10(np.abs(f[f != 0]))).astype('int32')\n else: # Scalar\n fexp = int(ceil(log10(abs(f)))) if f != 0 else 0\n \n # Compute decimal mantissa between 0.0 and 1.0\n fman = f/10.0**fexp\n \n logger.debug('fman: {}'.format(fman))\n logger.debug('fexp: {}'.format(fexp))\n \n return fman, fexp\n\n\n# Approximate maximum number of significant decimal figures for each signed datatype\nSIG_FIGS = OrderedDict([('uint8', 4), # 128\n ('uint16', 10), # 32768\n ('uint32', 19), # 2147483648 - should be 9, but made 10 because int64 is unsupported\n ('uint64', 30), # 9223372036854775808 - Not supported in netCDF3 or netCDF4-Classic\n ('int8', 2), # 128\n ('int16', 4), # 32768\n ('int32', 10), # 2147483648 - should be 9, but made 10 because int64 is unsupported\n ('int64', 19), # 9223372036854775808 - Not supported in netCDF3 or netCDF4-Classic\n # https://en.wikipedia.org/wiki/Floating-point_arithmetic#IEEE_754:_floating_point_in_modern_computers\n ('float32', 7), # 7.2\n ('float64', 35) # 15.9 - should be 16, but made 35 to support unrealistic precision specifications\n ]\n )\n\nDTYPE_REDUCTION_LISTS = [['int64', 'int32', 'int16', 'int8'], # Integer dtypes\n ['uint64', 'uint32', 'uint16', 'uint8'], # Unsigned integer dtypes\n ['float64', 'float32'] #, 'int16', 'int8'] # Floating point dtypes - do NOT try casting to integer types\n ]\n \nASEG_DTYPE_CODE_MAPPING = {'uint8': 'I',\n 'uint16': 'I',\n 'uint32': 'I',\n 'uint64': 'I',\n 'int8': 'I',\n 'int16': 'I',\n 'int32': 'I',\n 'int64': 'I',\n 'float32': 'E', # real in exponent form\n 'float64': 'D', # double precision real in exponent form\n 'str': 'A'\n }\n\ndef decode_aseg_gdf_format(aseg_gdf_format):\n '''\n Function to decode ASEG-GDF format string\n @param aseg_gdf_format: ASEG-GDF format string\n\n @return columns: Number of columns (i.e. 1 for 1D data, or read from format string for 2D data)\n @return aseg_dtype_code: ASEG-GDF data type character, e.g. 
\"F\" or \"I\"\n @return width_specifier: Width of field in number of characters read from format string\n @return decimal_places: Number of fractional digits read from format string \n '''\n if not aseg_gdf_format:\n raise BaseException('No ASEG-GDF format string to decode') \n\n match = re.match('(\\d+)*(\\w)(\\d+)\\.*(\\d+)*', aseg_gdf_format)\n \n if not match:\n raise BaseException('Invalid ASEG-GDF format string {}'.format(aseg_gdf_format)) \n \n columns = int(match.group(1)) if match.group(1) is not None else 1\n aseg_dtype_code = match.group(2).upper()\n width_specifier = int(match.group(3))\n decimal_places = int(match.group(4)) if match.group(4) is not None else 0\n \n logger.debug('aseg_gdf_format: {}, columns: {}, aseg_dtype_code: {}, width_specifier: {}, decimal_places: {}'.format(aseg_gdf_format, \n columns, \n aseg_dtype_code, \n width_specifier, \n decimal_places\n )\n ) \n return columns, aseg_dtype_code, width_specifier, decimal_places \n\ndef aseg_gdf_format2dtype(aseg_gdf_format):\n '''\n Function to return Python data type string and other precision information from ASEG-GDF format string\n @param aseg_gdf_format: ASEG-GDF format string\n\n @return dtype: Data type string, e.g. int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or read from format string for 2D data)\n @return width_specifier: Width of field in number of characters read from format string\n @return decimal_places: Number of fractional digits read from format string \n '''\n columns, aseg_dtype_code, width_specifier, decimal_places = decode_aseg_gdf_format(aseg_gdf_format)\n dtype = None # Initially unknown datatype\n \n # Determine type and size for required significant figures\n # Integer type - N.B: Only signed types available\n if aseg_dtype_code == 'I':\n assert not decimal_places, 'Integer format cannot be defined with fractional digits'\n for test_dtype, sig_figs in SIG_FIGS.items():\n if test_dtype.startswith('int') and sig_figs >= width_specifier:\n dtype = test_dtype\n break\n assert dtype, 'Invalid width_specifier of {}'.format(width_specifier) \n \n # Floating point type - use approximate sig. figs. to determine length\n #TODO: Remove 'A' after string field handling has been properly implemented\n elif aseg_dtype_code in ['D', 'E', 'F']: # Floating point\n for test_dtype, sig_figs in SIG_FIGS.items():\n if test_dtype.startswith('float') and sig_figs >= width_specifier-2: # Allow for sign and decimal place\n dtype = test_dtype\n break\n assert dtype, 'Invalid floating point format of {}.{}'.format(width_specifier, decimal_places) \n \n elif aseg_dtype_code == 'A':\n assert not decimal_places, 'String format cannot be defined with fractional digits'\n dtype = '<U{}'.format(width_specifier) # Numpy fixed-length string type\n \n else:\n raise BaseException('Unhandled ASEG-GDF dtype code {}'.format(aseg_dtype_code))\n \n logger.debug('aseg_dtype_code: {}, columns: {}, width_specifier: {}, decimal_places: {}'.format(dtype, \n columns, \n width_specifier, \n decimal_places\n )\n ) \n return dtype, columns, width_specifier, decimal_places\n\n\ndef variable2aseg_gdf_format(array_variable, decimal_places=None):\n '''\n Function to return ASEG-GDF format string and other info from data array or netCDF array variable\n @param array_variable: data array or netCDF array variable\n @param decimal_places: Number of decimal places to respect, or None for value derived from datatype and values\n \n @return aseg_gdf_format: ASEG-GDF format string\n @return dtype: Data type string, e.g. 
int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or second dimension size for 2D data)\n @return width_specifier: Width of field in number of characters\n @return decimal_places: Number of fractional digits (derived from datatype sig. figs - width_specifier)\n @param python_format: Python Formatter string for fixed-width output\n '''\n if len(array_variable.shape) <= 1: # 1D variable or scalar\n columns = 1\n elif len(array_variable.shape) == 2: # 2D variable\n columns = array_variable.shape[1]\n else:\n raise BaseException('Unable to handle arrays with dimensionality > 2')\n \n data_array = array_variable[:]\n \n # Try to determine the dtype string from the variable and data_array type\n if not len(array_variable.shape): # Scalar\n dtype = type(data_array).__name__\n if dtype == 'str':\n width_specifier = len(data_array) + 1\n decimal_places = 0 \n elif dtype == 'ndarray': # Single-element array\n dtype = str(array_variable.dtype)\n data = np.asscalar(data_array)\n\n sig_figs = SIG_FIGS[dtype] + 1 # Look up approximate significant figures and add 1\n sign_width = 1 if data < 0 else 0\n integer_digits = ceil(log10(np.abs(data) + 1.0))\n else: # Array\n dtype = str(array_variable.dtype)\n if dtype in ['str', \"<class 'str'>\"]: # String array or string array variable\n dtype = 'str'\n width_specifier = max([len(string.strip()) for string in data_array]) + 1\n decimal_places = 0\n \n else: # Numeric datatype array\n # Include fill value if required\n if type(data_array) == np.ma.core.MaskedArray:\n logger.debug('Array is masked. Including fill value.')\n data_array = data_array.data\n \n sig_figs = SIG_FIGS[dtype] + 1 # Look up approximate significant figures and add 1\n sign_width = 1 if np.nanmin(data_array) < 0 else 0\n integer_digits = ceil(log10(np.nanmax(np.abs(data_array)) + 1.0))\n \n aseg_dtype_code = ASEG_DTYPE_CODE_MAPPING.get(dtype)\n assert aseg_dtype_code, 'Unhandled dtype {}'.format(dtype)\n \n if aseg_dtype_code == 'I': # Integer\n decimal_places = 0\n width_specifier = integer_digits + sign_width + 1\n aseg_gdf_format = 'I{}'.format(width_specifier)\n python_format = '{' + ':>{:d}.{:d}f'.format(width_specifier, decimal_places) + '}'\n\n elif aseg_dtype_code in ['F', 'D', 'E']: # Floating point\n # If array_variable is a netCDF variable with a \"format\" attribute, use stored format string to determine decimal_places\n if decimal_places is not None:\n decimal_places = min(decimal_places, abs(sig_figs-integer_digits))\n logger.debug('decimal_places set to {} from decimal_places {}'.format(decimal_places, decimal_places))\n elif hasattr(array_variable, 'aseg_gdf_format'): \n _columns, _aseg_dtype_code, _integer_digits, decimal_places = decode_aseg_gdf_format(array_variable.aseg_gdf_format)\n decimal_places = min(decimal_places, abs(sig_figs-integer_digits))\n logger.debug('decimal_places set to {} from variable attribute aseg_gdf_format {}'.format(decimal_places, array_variable.aseg_gdf_format))\n else: # No aseg_gdf_format variable attribute\n decimal_places = abs(sig_figs-integer_digits) # Allow for full precision of datatype\n logger.debug('decimal_places set to {} from sig_figs {} and integer_digits {}'.format(decimal_places, sig_figs, integer_digits))\n \n width_specifier = min(sign_width + integer_digits + decimal_places + 2,\n sign_width + sig_figs + 2\n )\n \n aseg_gdf_format = '{}{}.{}'.format(aseg_dtype_code, width_specifier, decimal_places)\n if aseg_dtype_code == 'F': # Floating point notation\n python_format = '{' + 
':>{:d}.{:d}f'.format(width_specifier, decimal_places) + '}' # Add 1 to width for decimal point\n else: # Exponential notation for 'D' or 'E'\n python_format = '{' + ':>{:d}.{:d}E'.format(width_specifier, decimal_places) + '}' # Add 1 to width for decimal point\n\n elif aseg_dtype_code == 'A': # String\n if hasattr(array_variable, 'aseg_gdf_format'):\n _columns, _aseg_dtype_code, width_specifier, decimal_places = decode_aseg_gdf_format(array_variable.aseg_gdf_format)\n aseg_gdf_format = array_variable.aseg_gdf_format\n else:\n aseg_gdf_format = 'A{}'.format(width_specifier)\n \n python_format = '{' + ':>{:d}s'.format(width_specifier) + '}'\n else:\n raise BaseException('Unhandled ASEG-GDF dtype code {}'.format(aseg_dtype_code))\n \n # Pre-pend column count to start of aseg_gdf_format\n if columns > 1:\n aseg_gdf_format = '{}{}'.format(columns, aseg_gdf_format)\n \n return aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format\n\n\ndef fix_field_precision(data_array, current_dtype, decimal_places, no_data_mask=[], fill_value=None):\n '''\n Function to return revised ASEG-GDF format string and other info from data array or netCDF array variable\n after correcting datatype for excessive precision specification, or None if there is no precision change.\n Arrays are copied to smaller representations and then the difference with the original is checked to\n ensure that any difference is less than precision of the specified number of fractional digits.\n Note that fill_value is also considered but potentially modified only if data precision is changed\n @param data_array: data array - assumed to be of dtype float64 for raw data\n @param current_dtype: Current data type string, e.g. int8 or float32\n @param decimal_places: Number of fractional digits for precision checking\n @param fill_value: fill value or None\n \n Returns None if no precision change required.\n @return aseg_gdf_format: ASEG-GDF format string\n @return dtype: Data type string, e.g. int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or second dimension size for 2D data)\n @return width_specifier: Width of field in number of characters\n @return decimal_places: Number of fractional digits (derived from datatype sig. 
figs - width_specifier)\n @return python_format: Python Formatter string for fixed-width output\n @return fill_value: Potentially modified fill value\n '''\n logger.debug('data_array: {}, current_dtype: {}, decimal_places: {}'.format(data_array, current_dtype, decimal_places))\n \n try:\n data_mantissa, data_exponent = dfrexp(data_array)\n except:\n logger.debug('Unable to compute data_mantissa & data_exponent')\n return\n \n for dtype_reduction_list in DTYPE_REDUCTION_LISTS:\n try:\n current_dtype_index = dtype_reduction_list.index(current_dtype)\n\n # Try types from smallest to largest\n for smaller_dtype in dtype_reduction_list[:current_dtype_index:-1]: \n smaller_array = data_array.astype(smaller_dtype)\n difference_array = data_array - smaller_array\n logger.debug('current_dtype: {}\\nsmaller_dtype: {}\\narray_variable\\n{}\\nsmaller_array\\n{}\\n\\\ndifference_array\\n{}\\ndecimal_places: {}\\ndifference count: {}\\ndifference values: '.format(current_dtype, \n smaller_dtype, \n data_array, \n smaller_array, \n difference_array, \n decimal_places, \n np.count_nonzero(difference_array >= pow(10, -decimal_places)), \n difference_array[difference_array != 0]\n )\n )\n logger.debug('Maximum error converting from {} to {}: {}'.format(current_dtype,\n smaller_dtype,\n np.nanmax(np.abs(difference_array))))\n smaller_mantissa, smaller_exponent = dfrexp(smaller_array)\n if np.any(np.logical_or((smaller_exponent != data_exponent), \n (np.abs(data_mantissa - smaller_mantissa) >= pow(10, -decimal_places)))):\n # Differences found - try larger datatype\n continue\n else:\n logger.debug('Maximum mantissa difference: {}'.format(np.nanmax(np.abs(data_mantissa - smaller_mantissa))))\n logger.debug('Maximum exponent difference: {}'.format(np.nanmax(np.abs(smaller_exponent - data_exponent))))\n aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format = variable2aseg_gdf_format(smaller_array, decimal_places)\n\n if fill_value is not None:\n # Use reduced precision fill_value if available and unambiguous\n if np.any(no_data_mask):\n reduced_precision_fill_value = data_array[no_data_mask][0] \n \n # Check for ambiguity introduced by reduced precision\n if np.any(data_array[~no_data_mask] == reduced_precision_fill_value):\n logger.debug('Reduced precision fill value of {} is ambiguous'.format(reduced_precision_fill_value))\n continue # Can't use this datatype - try the next bigger one\n elif fill_value != reduced_precision_fill_value:\n logger.debug('fill_value precision reduced from {} to {}'.format(fill_value, \n reduced_precision_fill_value)\n )\n fill_value = reduced_precision_fill_value\n \n fill_value = truncate(fill_value, data_array, no_data_mask, width_specifier, decimal_places)\n \n return aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format, fill_value\n\n \n except ValueError: # current_dtype not in dtype_reduction_list\n continue\n\n\ndef truncate(fill_value, data_array, no_data_mask, width_specifier, decimal_places):\n '''\n Function to truncate fill_value to <width_specifier>.<decimal_places> rather than rounding for neater output later on\n\n @param fill_value: Original fill value\n @param data_array: Array containing data\n @param no_data_mask: Boolean mask array (true for valid data)\n @param width_specifier: Width of field in number of characters\n @param decimal_places: Number of fractional digits for precision checking\n \n @return fill_value: Potentially modified fill value\n '''\n try:\n truncated_fill_value = None\n \n integer_digits 
= width_specifier - decimal_places\n if fill_value < 0:\n integer_digits -= 1 # Allow for sign\n if decimal_places > 0:\n integer_digits -= 1 # Allow for decimal point\n \n fill_value_str = (fill_value)\n assert 'e' not in fill_value_str.lower(), 'Unable to truncate value in exponential notation'\n pattern = re.compile('(-?)\\d*?(\\d{0,' + '{}'.format(integer_digits) + '}\\.\\d{0,' + '{}'.format(decimal_places) + '})')\n search = re.search(pattern, fill_value_str)\n truncated_fill_value = float(search.group(1)+search.group(2))\n # Check for any ambiguity introduced by truncation\n assert not np.any(data_array[~no_data_mask] == truncated_fill_value), 'Truncated fill value of {} is ambiguous'.format(truncated_fill_value)\n if fill_value != truncated_fill_value:\n logger.debug('fill_value truncated from {} to {}'.format(fill_value, truncated_fill_value))\n return truncated_fill_value\n except Exception as e:\n logger.debug('Unable to truncate fill value from {} to {} ({}). Keeping original value.'.format(fill_value, truncated_fill_value, e))\n return fill_value\n", "#!/usr/bin/env python\n\n#===============================================================================\n# Copyright 2017 Geoscience Australia\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\"\"\"\nUnit tests for geophys_utils._netcdf_point_utils against a NetCDF line data file\n\nCreated on 15/11/2016\n\n@author: Alex Ip\n\"\"\"\nimport unittest\nimport os\nimport re\nimport netCDF4\nimport numpy as np\nfrom geophys_utils._netcdf_point_utils import NetCDFPointUtils\n\nnetcdf_point_utils = None\n\n#NC_PATH = '/g/data2/uc0/rr2_dev/axi547/GSSA_P1255MAG_Marree.nc'\nNC_PATH = 'http://dapds00.nci.org.au/thredds/dodsC/uc0/rr2_dev/rcb547/AWAGS_Levelled_Line_Databases/mag_database_reformat_2016_adjusted/netcdf/GSSA_P1255MAG_Marree.nc'\nNC_TITLE = 'Marree Airborne Magnetic & Radiometric Survey, SA, 2012'\n#NC_PATH = 'test_line.nc'\n#NC_PATH = 'http://dapds00.nci.org.au/thredds/dodsC/uc0/rr2_dev/rcb547/AWAGS_Levelled_Line_Databases/mag_database_reformat_2016_adjusted/netcdf/GSSA_P1255MAG_Marree.nc'\n\nTEST_BOUNDS = (137, -29, 138, -28)\nGRID_RESOLUTION = 0.001\nRESAMPLING_METHOD = 'linear' # 'linear', 'nearest' or 'cubic'. 
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html\n\nMAX_BYTES = 1600\nMAX_ERROR = 0.000001\nTEST_COORDS = (148.213, -36.015)\nTEST_INDICES = [1, 1]\nTEST_FRACTIONAL_INDICES = [1.25, 1.25]\nTEST_VALUE = 0.0\nTEST_INTERPOLATED_VALUE = -99997.6171875\nSPATIAL_MASK_COUNT = 4613089\n\n\nTEST_GRID_RESULTS = (('GEOGCS[\"GDA94\",DATUM[\"Geocentric_Datum_of_Australia_1994\",SPHEROID[\"GRS 1980\",6378137,298.257222101,AUTHORITY[\"EPSG\",\"7019\"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY[\"EPSG\",\"6283\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4283\"]]',\n [136.4805, 0.001, 0, -27.988500000000002, 0, -0.001],\n (1778, 3047)\n ),\n ('GEOGCS[\"GDA94\",DATUM[\"Geocentric_Datum_of_Australia_1994\",SPHEROID[\"GRS 1980\",6378137,298.257222101,AUTHORITY[\"EPSG\",\"7019\"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY[\"EPSG\",\"6283\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4283\"]]',\n [136.9995, 0.001, 0, -27.9995, 0, -0.001],\n (1001, 1001)\n )\n )\n\n \nclass TestNetCDFPointUtilsConstructor(unittest.TestCase):\n \"\"\"Unit tests for TestNetCDFPointUtils Constructor.\n N.B: This should be run first\"\"\"\n \n def test_netcdf_point_utils_constructor(self):\n print('Testing NetCDFPointUtils constructor')\n global netcdf_point_utils\n \n if re.match('^http.*', NC_PATH):\n nc_path = NC_PATH\n else:\n nc_path = os.path.join(os.path.dirname(__file__), NC_PATH)\n print(nc_path) \n nc_dataset = netCDF4.Dataset(nc_path)\n netcdf_point_utils = NetCDFPointUtils(nc_dataset)\n \n #print(netcdf_point_utils.__dict__)\n assert nc_dataset.title == NC_TITLE, 'Invalid dataset title: \"{}\" != \"{}\"'.format(nc_dataset.title, NC_TITLE)\n \nclass TestNetCDFPointUtilsFunctions1(unittest.TestCase):\n \"\"\"Unit tests for geophys_utils._netcdf_point_utils functions\"\"\"\n \n def test_get_polygon(self):\n print('Testing get_polygon function')\n polygon = netcdf_point_utils.get_polygon()\n assert polygon is None, 'This is just plain messed up'\n\n def test_get_spatial_mask(self):\n print('Testing get_spatial_mask function')\n spatial_mask = netcdf_point_utils.get_spatial_mask(TEST_BOUNDS)\n #print(spatial_mask)\n assert np.count_nonzero(spatial_mask) == SPATIAL_MASK_COUNT, 'Unexpected spatial mask count'\n\n def test_concave_hull(self):\n print('Testing concave hull')\n raise ValueError(netcdf_point_utils.get_concave_hull())\n\n\nclass TestNetCDFPointUtilsGridFunctions(unittest.TestCase):\n \"\"\"Unit tests for geophys_utils._netcdf_point_utils functions\"\"\"\n \n def test_grid_points(self):\n print('Testing grid_points function')\n grids, crs, geotransform = netcdf_point_utils.grid_points(grid_resolution=GRID_RESOLUTION, \n variables='mag_awags',\n point_step = 100)\n assert (crs, geotransform, grids.shape) == TEST_GRID_RESULTS[0], 'Invalid grid results: {} != {}'.format((crs, geotransform, grids.shape), TEST_GRID_RESULTS[0])\n\n print('Testing bounded grid_points function')\n grids, crs, geotransform = netcdf_point_utils.grid_points(grid_resolution=GRID_RESOLUTION, \n variables='mag_awags',\n native_grid_bounds=TEST_BOUNDS,\n point_step = 100)\n assert (crs, geotransform, grids.shape) == TEST_GRID_RESULTS[1], 'Invalid grid results: {} != {}'.format((crs, geotransform, grids.shape), TEST_GRID_RESULTS[1])\n\n\n\n# Define test suites\ndef test_suite():\n \"\"\"Returns a test suite of all the tests in this 
module.\"\"\"\n\n test_classes = [TestNetCDFPointUtilsConstructor,\n TestNetCDFPointUtilsFunctions1,\n TestNetCDFPointUtilsGridFunctions\n ]\n\n suite_list = map(unittest.defaultTestLoader.loadTestsFromTestCase,\n test_classes)\n\n suite = unittest.TestSuite(suite_list)\n\n return suite\n\n\n# Define main function\ndef main():\n unittest.TextTestRunner(verbosity=2).run(test_suite())\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.asscalar", "numpy.abs", "numpy.nanmin", "numpy.any", "numpy.zeros" ], [ "numpy.count_nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saurabhclusterone/deepchem
[ "29bcf0fbf29a74c264a553237627ad3573a4b09d" ]
[ "deepchem/utils/save.py" ]
[ "\"\"\"\nSimple utils to save and load from disk.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n# TODO(rbharath): Use standard joblib once old-data has been regenerated.\nimport joblib\nfrom sklearn.externals import joblib as old_joblib\nimport gzip\nimport json\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport os\nimport deepchem\nfrom rdkit import Chem\nimport warnings\nfrom deepchem.utils.genomics import encode_bio_sequence as encode_sequence, encode_fasta_sequence as fasta_sequence, seq_one_hot_encode as seq_one_hotencode\n\n\ndef log(string, verbose=True):\n \"\"\"Print string if verbose.\"\"\"\n if verbose:\n print(string)\n\n\ndef save_to_disk(dataset, filename, compress=3):\n \"\"\"Save a dataset to file.\"\"\"\n joblib.dump(dataset, filename, compress=compress)\n\n\ndef get_input_type(input_file):\n \"\"\"Get type of input file. Must be csv/pkl.gz/sdf file.\"\"\"\n filename, file_extension = os.path.splitext(input_file)\n # If gzipped, need to compute extension again\n if file_extension == \".gz\":\n filename, file_extension = os.path.splitext(filename)\n if file_extension == \".csv\":\n return \"csv\"\n elif file_extension == \".pkl\":\n return \"pandas-pickle\"\n elif file_extension == \".joblib\":\n return \"pandas-joblib\"\n elif file_extension == \".sdf\":\n return \"sdf\"\n else:\n raise ValueError(\"Unrecognized extension %s\" % file_extension)\n\n\ndef load_data(input_files, shard_size=None, verbose=True):\n \"\"\"Loads data from disk.\n\n For CSV files, supports sharded loading for large files.\n \"\"\"\n if not len(input_files):\n return\n input_type = get_input_type(input_files[0])\n if input_type == \"sdf\":\n if shard_size is not None:\n log(\"Ignoring shard_size for sdf input.\", verbose)\n for value in load_sdf_files(input_files):\n yield value\n elif input_type == \"csv\":\n for value in load_csv_files(input_files, shard_size, verbose=verbose):\n yield value\n elif input_type == \"pandas-pickle\":\n for input_file in input_files:\n yield load_pickle_from_disk(input_file)\n\n\ndef load_sdf_files(input_files, clean_mols):\n \"\"\"Load SDF file into dataframe.\"\"\"\n dataframes = []\n for input_file in input_files:\n # Tasks are stored in .sdf.csv file\n raw_df = next(load_csv_files([input_file + \".csv\"], shard_size=None))\n # Structures are stored in .sdf file\n print(\"Reading structures from %s.\" % input_file)\n suppl = Chem.SDMolSupplier(str(input_file), clean_mols, False, False)\n df_rows = []\n for ind, mol in enumerate(suppl):\n if mol is not None:\n smiles = Chem.MolToSmiles(mol)\n df_rows.append([ind, smiles, mol])\n mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))\n dataframes.append(pd.concat([mol_df, raw_df], axis=1, join='inner'))\n return dataframes\n\n\ndef load_csv_files(filenames, shard_size=None, verbose=True):\n \"\"\"Load data as pandas dataframe.\"\"\"\n # First line of user-specified CSV *must* be header.\n shard_num = 1\n for filename in filenames:\n if shard_size is None:\n yield pd.read_csv(filename)\n else:\n log(\"About to start loading CSV from %s\" % filename, verbose)\n for df in pd.read_csv(filename, chunksize=shard_size):\n log(\"Loading shard %d of size %s.\" % (shard_num, str(shard_size)),\n verbose)\n df = df.replace(np.nan, str(\"\"), regex=True)\n shard_num += 1\n yield df\n\n\ndef seq_one_hot_encode(sequences, letters='ATCGN'):\n \"\"\"One hot encodes list of genomic sequences.\n\n Sequences encoded have shape 
(N_sequences, N_letters, sequence_length, 1).\n These sequences will be processed as images with one color channel.\n\n Parameters\n ----------\n sequences: np.ndarray\n Array of genetic sequences\n letters: str\n String with the set of possible letters in the sequences.\n\n Raises\n ------\n ValueError:\n If sequences are of different lengths.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, N_letters, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics \",\n DeprecationWarning)\n return seq_one_hotencode(sequences, letters=letters)\n\n\ndef encode_fasta_sequence(fname):\n \"\"\"\n Loads fasta file and returns an array of one-hot sequences.\n\n Parameters\n ----------\n fname: str\n Filename of fasta file.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, 5, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics\",\n DeprecationWarning)\n\n return fasta_sequence(fname)\n\n\ndef encode_bio_sequence(fname, file_type=\"fasta\", letters=\"ATCGN\"):\n \"\"\"\n Loads a sequence file and returns an array of one-hot sequences.\n\n Parameters\n ----------\n fname: str\n Filename of fasta file.\n file_type: str\n The type of file encoding to process, e.g. fasta or fastq, this\n is passed to Biopython.SeqIO.parse.\n letters: str\n The set of letters that the sequences consist of, e.g. ATCG.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, N_letters, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics \",\n DeprecationWarning)\n return encode_sequence(fname, file_type=file_type, letters=letters)\n\n\ndef save_metadata(tasks, metadata_df, data_dir):\n \"\"\"\n Saves the metadata for a DiskDataset\n Parameters\n ----------\n tasks: list of str\n Tasks of DiskDataset\n metadata_df: pd.DataFrame\n data_dir: str\n Directory to store metadata\n Returns\n -------\n \"\"\"\n if isinstance(tasks, np.ndarray):\n tasks = tasks.tolist()\n metadata_filename = os.path.join(data_dir, \"metadata.csv.gzip\")\n tasks_filename = os.path.join(data_dir, \"tasks.json\")\n with open(tasks_filename, 'w') as fout:\n json.dump(tasks, fout)\n metadata_df.to_csv(metadata_filename, index=False, compression='gzip')\n\n\ndef load_from_disk(filename):\n \"\"\"Load a dataset from file.\"\"\"\n name = filename\n if os.path.splitext(name)[1] == \".gz\":\n name = os.path.splitext(name)[0]\n if os.path.splitext(name)[1] == \".pkl\":\n return load_pickle_from_disk(filename)\n elif os.path.splitext(name)[1] == \".joblib\":\n try:\n return joblib.load(filename)\n except KeyError:\n # Try older joblib version for legacy files.\n return old_joblib.load(filename)\n except ValueError:\n return old_joblib.load(filename)\n elif os.path.splitext(name)[1] == \".csv\":\n # First line of user-specified CSV *must* be header.\n df = pd.read_csv(filename, header=0)\n df = df.replace(np.nan, str(\"\"), regex=True)\n return df\n else:\n raise ValueError(\"Unrecognized filetype for %s\" % filename)\n\n\ndef load_sharded_csv(filenames):\n \"\"\"Load a dataset from multiple files. 
Each file MUST have same column headers\"\"\"\n dataframes = []\n for name in filenames:\n placeholder_name = name\n if os.path.splitext(name)[1] == \".gz\":\n name = os.path.splitext(name)[0]\n if os.path.splitext(name)[1] == \".csv\":\n # First line of user-specified CSV *must* be header.\n df = pd.read_csv(placeholder_name, header=0)\n df = df.replace(np.nan, str(\"\"), regex=True)\n dataframes.append(df)\n else:\n raise ValueError(\"Unrecognized filetype for %s\" % filename)\n\n # combine dataframes\n combined_df = dataframes[0]\n for i in range(0, len(dataframes) - 1):\n combined_df = combined_df.append(dataframes[i + 1])\n combined_df = combined_df.reset_index(drop=True)\n return combined_df\n\n\ndef load_pickle_from_disk(filename):\n \"\"\"Load dataset from pickle file.\"\"\"\n if \".gz\" in filename:\n with gzip.open(filename, \"rb\") as f:\n df = pickle.load(f)\n else:\n with open(filename, \"rb\") as f:\n df = pickle.load(f)\n return df\n\n\ndef load_dataset_from_disk(save_dir):\n \"\"\"\n Parameters\n ----------\n save_dir: str\n\n Returns\n -------\n loaded: bool\n Whether the load succeeded\n all_dataset: (dc.data.Dataset, dc.data.Dataset, dc.data.Dataset)\n The train, valid, test datasets\n transformers: list of dc.trans.Transformer\n The transformers used for this dataset\n\n \"\"\"\n\n train_dir = os.path.join(save_dir, \"train_dir\")\n valid_dir = os.path.join(save_dir, \"valid_dir\")\n test_dir = os.path.join(save_dir, \"test_dir\")\n if not os.path.exists(train_dir) or not os.path.exists(\n valid_dir) or not os.path.exists(test_dir):\n return False, None, list()\n loaded = True\n train = deepchem.data.DiskDataset(train_dir)\n valid = deepchem.data.DiskDataset(valid_dir)\n test = deepchem.data.DiskDataset(test_dir)\n all_dataset = (train, valid, test)\n with open(os.path.join(save_dir, \"transformers.pkl\"), 'rb') as f:\n transformers = pickle.load(f)\n return loaded, all_dataset, transformers\n\n\ndef save_dataset_to_disk(save_dir, train, valid, test, transformers):\n train_dir = os.path.join(save_dir, \"train_dir\")\n valid_dir = os.path.join(save_dir, \"valid_dir\")\n test_dir = os.path.join(save_dir, \"test_dir\")\n train.move(train_dir)\n valid.move(valid_dir)\n test.move(test_dir)\n with open(os.path.join(save_dir, \"transformers.pkl\"), 'wb') as f:\n pickle.dump(transformers, f)\n return None\n" ]
[ [ "pandas.concat", "pandas.read_csv", "sklearn.externals.joblib.load", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
HUuxiaobin/Face-Super-Resolution-Guided-by-3D-Facial-Priors
[ "987e7c74d33d26cc5e9d1c0e395a06519a31792f" ]
[ "3Dface_priors/facial_landmark.py" ]
[ "# import the necessary packages\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\n#python facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --image test1.jpg\ndef rect_to_bb(rect):\n# take a bounding predicted by dlib and convert it\n# to the format (x, y, w, h) as we would normally do\n# with OpenCV\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n # return a tuple of (x, y, w, h)\n return (x, y, w, h)\n\ndef shape_to_np(shape, dtype=\"int\"):\n# initialize the list of (x, y)-coordinates\n coords = np.zeros((68, 2), dtype=dtype)\n# loop over the 68 facial landmarks and convert them\n# to a 2-tuple of (x, y)-coordinates\n for i in range(0, 68):\n coords[i] = (shape.part(i).x, shape.part(i).y)\n# return the list of (x, y)-coordinates\n return coords\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--shape-predictor\", required=True, help=\"path to facial landmark predictor\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\nimage = cv2.imread(args[\"image\"])\nimage = imutils.resize(image, width=500)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# detect faces in the grayscale image\nrects = detector(gray, 1)\n\n# loop over the face detections\nfor (i, rect) in enumerate(rects):\n# determine the facial landmarks for the face region, then\n# convert the facial landmark (x, y)-coordinates to a NumPy\n# array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n# convert dlib's rectangle to a OpenCV-style bounding box\n# [i.e., (x, y, w, h)], then draw the face bounding box\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # show the face number\n cv2.putText(image, \"Face #{}\".format(i + 1), (x - 10, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n# loop over the (x, y)-coordinates for the facial landmarks\n# and draw them on the image\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n# show the output image with the face detections + facial landmarks\ncv2.imshow(\"Output\", image)\ncv2.waitKey(0)" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
koskotG/ebonite
[ "b01b662c43709d152940f488574d78ff25f89ecf", "9f9ae016b70fb24865d5edc99142afb8ab4ddc59", "9f9ae016b70fb24865d5edc99142afb8ab4ddc59" ]
[ "tests/ext/sklearn/test_model.py", "src/ebonite/ext/ext_loader.py", "tests/runtime/test_model_interface.py" ]
[ "import numpy as np\nimport pytest\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\n\nfrom ebonite.core.analyzer.model import ModelAnalyzer\nfrom ebonite.ext.sklearn import SklearnModelWrapper\n\n\[email protected]\ndef inp_data():\n return [[1, 2, 3], [3, 2, 1]]\n\n\[email protected]\ndef out_data():\n return [1, 2]\n\n\[email protected]\ndef classifier(inp_data, out_data):\n lr = LogisticRegression()\n lr.fit(inp_data, out_data)\n return lr\n\n\[email protected]\ndef regressor(inp_data, out_data):\n lr = LinearRegression()\n lr.fit(inp_data, out_data)\n return lr\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_hook(model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n assert isinstance(wrapper, SklearnModelWrapper)\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_wrapper__predict(model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n np.testing.assert_array_almost_equal(model.predict(inp_data), wrapper.call_method('predict', inp_data))\n\n\ndef test_wrapper__clf_predict_proba(classifier, inp_data):\n wrapper = ModelAnalyzer.analyze(classifier, input_data=inp_data)\n\n np.testing.assert_array_almost_equal(classifier.predict_proba(inp_data),\n wrapper.call_method('predict_proba', inp_data))\n\n\ndef test_wrapper__reg_predict_proba(regressor, inp_data):\n wrapper = ModelAnalyzer.analyze(regressor, input_data=inp_data)\n\n with pytest.raises(ValueError):\n wrapper.call_method('predict_proba', inp_data)\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_wrapper__dump_load(tmpdir, model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n expected_requirements = {'sklearn', 'numpy'}\n assert set(wrapper.requirements.modules) == expected_requirements\n\n with wrapper.dump() as d:\n d.materialize(tmpdir)\n wrapper.unbind()\n with pytest.raises(ValueError):\n wrapper.call_method('predict', inp_data)\n\n wrapper.load(tmpdir)\n np.testing.assert_array_almost_equal(model.predict(inp_data), wrapper.call_method('predict', inp_data))\n assert set(wrapper.requirements.modules) == expected_requirements\n", "import importlib\nimport sys\nfrom types import ModuleType\nfrom typing import Dict, List, Union\n\nfrom ebonite.config import Core\nfrom ebonite.utils.classproperty import classproperty\nfrom ebonite.utils.importing import import_module, module_importable, module_imported\nfrom ebonite.utils.log import logger\n\n\nclass Extension:\n \"\"\"\n Extension descriptor\n\n :param module: main extension module\n :param reqs: list of extension dependencies\n :param force: if True, disable lazy loading for this extension\n :param validator: boolean predicate which should evaluate to True for this extension to be loaded\n \"\"\"\n\n def __init__(self, module, reqs: List[str], force=True, validator=None):\n self.force = force\n self.reqs = reqs\n self.module = module\n self.validator = validator\n\n def __str__(self):\n return f'<Extension {self.module}>'\n\n def __repr__(self):\n return str(self)\n\n def __eq__(self, other):\n return self.module == other.module\n\n def __hash__(self):\n return hash(self.module)\n\n\nclass ExtensionDict(dict):\n \"\"\"\n :class:`_Extension` container\n \"\"\"\n\n def __init__(self, *extensions: Extension):\n super().__init__()\n for e in extensions:\n 
self[e.module] = e\n\n\ndef __tensorflow_major_version():\n import tensorflow as tf\n return tf.__version__.split('.')[0]\n\n\nis_tf_v1, is_tf_v2 = lambda: __tensorflow_major_version() == '1', lambda: __tensorflow_major_version() == '2'\n\n\nclass ExtensionLoader:\n \"\"\"\n Class that tracks and loads extensions.\n\n \"\"\"\n builtin_extensions: Dict[str, Extension] = ExtensionDict(\n Extension('ebonite.ext.numpy', ['numpy'], False),\n Extension('ebonite.ext.pandas', ['pandas'], False),\n Extension('ebonite.ext.sklearn', ['sklearn'], False),\n Extension('ebonite.ext.tensorflow', ['tensorflow'], False, is_tf_v1),\n Extension('ebonite.ext.tensorflow_v2', ['tensorflow'], False, is_tf_v2),\n Extension('ebonite.ext.torch', ['torch'], False),\n Extension('ebonite.ext.catboost', ['catboost'], False),\n Extension('ebonite.ext.aiohttp', ['aiohttp', 'aiohttp_swagger']),\n Extension('ebonite.ext.flask', ['flask', 'flasgger']),\n Extension('ebonite.ext.sqlalchemy', ['sqlalchemy']),\n Extension('ebonite.ext.s3', ['boto3']),\n Extension('ebonite.ext.imageio', ['imageio']),\n Extension('ebonite.ext.lightgbm', ['lightgbm'], False),\n Extension('ebonite.ext.xgboost', ['xgboost'], False)\n )\n\n _loaded_extensions: Dict[Extension, ModuleType] = {}\n\n @classproperty\n def loaded_extensions(cls) -> Dict[Extension, ModuleType]:\n \"\"\"\n :return: List of loaded extensions\n \"\"\"\n return cls._loaded_extensions\n\n @classmethod\n def _setup_import_hook(cls, extensions: List[Extension]):\n \"\"\"\n Add import hook to sys.meta_path that will load extensions when their dependencies are imported\n\n :param extensions: list of :class:`.Extension`\n \"\"\"\n if len(extensions) == 0:\n return\n\n hook = _ImportLoadExtInterceptor(\n module_to_extension={req: e for e in extensions for req in e.reqs}\n )\n sys.meta_path.insert(0, hook)\n\n @classmethod\n def load_all(cls, try_lazy=True):\n \"\"\"\n Load all (builtin and additional) extensions\n\n :param try_lazy: if `False`, use force load for all builtin extensions\n \"\"\"\n for_hook = []\n for ext in cls.builtin_extensions.values():\n if not try_lazy or hasattr(sys, 'frozen') or ext.force:\n if all(module_importable(r) for r in ext.reqs):\n cls.load(ext)\n else:\n if all(module_imported(r) for r in ext.reqs):\n cls.load(ext)\n else:\n for_hook.append(ext)\n\n cls._setup_import_hook(for_hook)\n\n for mod in Core.ADDITIONAL_EXTENSIONS:\n cls.load(mod)\n\n @classmethod\n def load(cls, extension: Union[str, Extension]):\n \"\"\"\n Load single extension\n\n :param extension: str of :class:`.Extension` instance to load\n \"\"\"\n if isinstance(extension, str):\n extension = Extension(extension, [], force=True)\n if extension not in cls._loaded_extensions and not module_imported(extension.module) and \\\n (extension.validator is None or extension.validator()):\n logger.debug('Importing extension module %s', extension.module)\n cls._loaded_extensions[extension] = import_module(extension.module)\n\n\nclass _ImportLoadExtInterceptor(importlib.abc.Loader):\n \"\"\"\n Import hook implementation to load extensions on dependency import\n\n :param module_to_extension: dict requirement -> :class:`.Extension`\n \"\"\"\n\n def __init__(self, module_to_extension: Dict[str, Extension]):\n self.module_to_extension = module_to_extension\n\n def find_module(self, fullname, path=None):\n return self\n\n def load_module(self, fullname):\n sys.meta_path = [x for x in sys.meta_path if x is not self]\n try:\n module = importlib.import_module(fullname)\n finally:\n sys.meta_path = [self] + 
sys.meta_path\n extension = self.module_to_extension.get(fullname)\n if extension is None:\n return module\n\n if all(module_imported(m) for m in extension.reqs):\n ExtensionLoader.load(extension)\n\n return module\n\n\ndef load_extensions(*exts: str):\n \"\"\"\n Load extensions\n\n :param exts: list of extension main modules\n \"\"\"\n for ext in exts:\n ExtensionLoader.load(ext)\n", "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pyjackson import deserialize\n\nfrom ebonite.core.objects.core import Model\nfrom ebonite.core.objects.requirements import Requirements\nfrom ebonite.ext.sklearn import SklearnModelWrapper\nfrom ebonite.runtime.interface.ml_model import model_interface\n\n\nclass PandasModel:\n def __init__(self, prediction):\n self.prediction = prediction\n\n def predict(self, df: 'pd.DataFrame'):\n assert isinstance(df, pd.DataFrame)\n return self.prediction\n\n\[email protected]\ndef data():\n return pd.DataFrame([{'a': 1, 'b': 1}])\n\n\[email protected]\ndef prediction(data):\n return np.array([[.5 for _ in range(data.size)]])\n\n\[email protected]\ndef model():\n return Model('test model', SklearnModelWrapper(), requirements=Requirements([]))\n\n\[email protected]\ndef pd_model(model: Model, data, prediction):\n model.wrapper.bind_model(PandasModel(prediction), input_data=data)\n return model\n\n\ndef test_interface_types(pd_model: Model, data, prediction):\n interface = model_interface(pd_model)\n pred = interface.execute('predict', {'vector': data})\n assert (pred == prediction).all()\n\n\ndef test_with_serde(pd_model: Model):\n interface = model_interface(pd_model)\n\n obj = {'values': [{'a': 1, 'b': 1}]}\n\n data_type, _ = pd_model.wrapper.method_signature('predict')\n data = deserialize(obj, data_type)\n\n interface.execute('predict', {'vector': data})\n" ]
[ [ "sklearn.linear_model.LinearRegression", "sklearn.linear_model.LogisticRegression" ], [ "tensorflow.__version__.split" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
derEitel/patch_individual_filter_layer
[ "ecd7b3ace759e10ceda8c39ebe1190a7bc27f223" ]
[ "nitorch/nitorch/initialization.py" ]
[ "# Initialize weights\nfrom torch.nn import init, Conv3d, BatchNorm3d, Linear\n\n\ndef xavier(x):\n \"\"\"Wrapper for torch.nn.init.xavier method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.xavier_normal_(x)\n\n\ndef xavier_uniform(x):\n \"\"\"Wrapper for torch.nn.init.xavier_uniform method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.xavier_uniform_(x)\n\n\ndef he(x):\n \"\"\"Wrapper for torch.nn.init.kaiming_normal_ method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.kaiming_normal_(x)\n\n\ndef he_uniform(x):\n \"\"\"Wrapper for torch.nn.init.kaiming_uniform_ method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.kaiming_uniform_(x)\n\n\ndef weights_init(m, func=he_uniform):\n \"\"\"Performs weight initialization for a layer.\n\n Parameters\n ----------\n m\n The layer which weights should be initialized.\n func\n The function to use to initialize weights.\n\n Returns\n -------\n m\n Weight initialized layer.\n\n \"\"\"\n if isinstance(m, Conv3d):\n func(m.weight.data)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, BatchNorm3d):\n m.reset_parameters()\n elif isinstance(m, Linear):\n m.reset_parameters()\n" ]
[ [ "torch.nn.init.constant_", "torch.nn.init.xavier_normal_", "torch.nn.init.kaiming_uniform_", "torch.nn.init.xavier_uniform_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
piersharding/astropy
[ "9680cd546aa9063758f2c23c836ca79a7c8f1eb1" ]
[ "astropy/io/votable/tree.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# TODO: Test FITS parsing\n\n# STDLIB\nimport io\nimport re\nimport gzip\nimport base64\nimport codecs\nimport urllib.request\n\n# THIRD-PARTY\nimport numpy as np\nfrom numpy import ma\n\n# LOCAL\nfrom astropy.io import fits\nfrom astropy import __version__ as astropy_version\nfrom astropy.utils.collections import HomogeneousList\nfrom astropy.utils.xml.writer import XMLWriter\n\nfrom . import converters\nfrom .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,\n warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,\n W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,\n W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,\n W44, W45, W50, W52, W53, W54, E06, E08, E09, E10, E11,\n E12, E13, E15, E16, E17, E18, E19, E20, E21, E22, E23,\n E25)\nfrom . import ucd as ucd_mod\nfrom . import util\nfrom . import xmlutil\n\ntry:\n from . import tablewriter\n _has_c_tabledata_writer = True\nexcept ImportError:\n _has_c_tabledata_writer = False\n\n\n__all__ = [\n 'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys', 'TimeSys',\n 'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',\n 'VOTableFile'\n ]\n\n\n# The default number of rows to read in each chunk before converting\n# to an array.\nDEFAULT_CHUNK_SIZE = 256\nRESIZE_AMOUNT = 1.5\n\n######################################################################\n# FACTORY FUNCTIONS\n\n\ndef _resize(masked, new_size):\n \"\"\"\n Masked arrays can not be resized inplace, and `np.resize` and\n `ma.resize` are both incompatible with structured arrays.\n Therefore, we do all this.\n \"\"\"\n new_array = ma.zeros((new_size,), dtype=masked.dtype)\n length = min(len(masked), new_size)\n new_array[:length] = masked[:length]\n\n return new_array\n\n\ndef _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):\n \"\"\"\n Creates a function useful for looking up an element by a given\n attribute.\n\n Parameters\n ----------\n attr : str\n The attribute name\n\n unique : bool\n Should be `True` if the attribute is unique and therefore this\n should return only one value. Otherwise, returns a list of\n values.\n\n iterator : generator\n A generator that iterates over some arbitrary set of elements\n\n element_name : str\n The XML element name of the elements being iterated over (used\n for error messages only).\n\n doc : str\n A docstring to apply to the generated function.\n\n Returns\n -------\n factory : function\n A function that looks up an element by the given attribute.\n \"\"\"\n\n def lookup_by_attr(self, ref, before=None):\n \"\"\"\n Given a string *ref*, finds the first element in the iterator\n where the given attribute == *ref*. If *before* is provided,\n will stop searching at the object *before*. 
This is\n important, since \"forward references\" are not allowed in the\n VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if getattr(element, attr, None) == ref:\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if getattr(element, attr, None) == ref:\n yield element\n\n def lookup_by_attr_unique(self, ref, before=None):\n for element in lookup_by_attr(self, ref, before=before):\n return element\n raise KeyError(\n \"No {} with {} '{}' found before the referencing {}\".format(\n element_name, attr, ref, element_name))\n\n if unique:\n lookup_by_attr_unique.__doc__ = doc\n return lookup_by_attr_unique\n else:\n lookup_by_attr.__doc__ = doc\n return lookup_by_attr\n\n\ndef _lookup_by_id_or_name_factory(iterator, element_name, doc):\n \"\"\"\n Like `_lookup_by_attr_factory`, but looks in both the \"ID\" and\n \"name\" attributes.\n \"\"\"\n\n def lookup_by_id_or_name(self, ref, before=None):\n \"\"\"\n Given an key *ref*, finds the first element in the iterator\n with the attribute ID == *ref* or name == *ref*. If *before*\n is provided, will stop searching at the object *before*. This\n is important, since \"forward references\" are not allowed in\n the VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if ref in (element.ID, element.name):\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if ref in (element.ID, element.name):\n return element\n raise KeyError(\n \"No {} with ID or name '{}' found before the referencing {}\".format(\n element_name, ref, element_name))\n\n lookup_by_id_or_name.__doc__ = doc\n return lookup_by_id_or_name\n\n\ndef _get_default_unit_format(config):\n \"\"\"\n Get the default unit format as specified in the VOTable spec.\n \"\"\"\n # The unit format changed between VOTable versions 1.3 and 1.4,\n # see issue #10791.\n if config['version_1_4_or_later']:\n return 'vounit'\n else:\n return 'cds'\n\n\ndef _get_unit_format(config):\n \"\"\"\n Get the unit format based on the configuration.\n \"\"\"\n if config.get('unit_format') is None:\n format = _get_default_unit_format(config)\n else:\n format = config['unit_format']\n return format\n\n\n######################################################################\n# ATTRIBUTE CHECKERS\ndef check_astroyear(year, field, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *year* is not a valid astronomical year as defined by the VOTABLE\n standard.\n\n Parameters\n ----------\n year : str\n An astronomical year string\n\n field : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if (year is not None and\n re.match(r\"^[JB]?[0-9]+([.][0-9]*)?$\", year) is None):\n warn_or_raise(W07, W07, (field, year), config, pos)\n return False\n return True\n\n\ndef check_string(string, attr_name, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *string* is not a string or Unicode string.\n\n Parameters\n ----------\n string : str\n An astronomical year string\n\n attr_name : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if string is not None and not isinstance(string, str):\n warn_or_raise(W08, W08, attr_name, config, 
pos)\n return False\n return True\n\n\ndef resolve_id(ID, id, config=None, pos=None):\n if ID is None and id is not None:\n warn_or_raise(W09, W09, (), config, pos)\n return id\n return ID\n\n\ndef check_ucd(ucd, config=None, pos=None):\n \"\"\"\n Warns or raises a\n `~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not\n a valid `unified content descriptor`_ string as defined by the\n VOTABLE standard.\n\n Parameters\n ----------\n ucd : str\n A UCD string.\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if config is None:\n config = {}\n if config.get('version_1_1_or_later'):\n try:\n ucd_mod.parse_ucd(\n ucd,\n check_controlled_vocabulary=config.get(\n 'version_1_2_or_later', False),\n has_colon=config.get('version_1_2_or_later', False))\n except ValueError as e:\n # This weird construction is for Python 3 compatibility\n if config.get('verify', 'ignore') == 'exception':\n vo_raise(W06, (ucd, str(e)), config, pos)\n elif config.get('verify', 'ignore') == 'warn':\n vo_warn(W06, (ucd, str(e)), config, pos)\n return False\n else:\n return False\n return True\n\n\n######################################################################\n# PROPERTY MIXINS\nclass _IDProperty:\n @property\n def ID(self):\n \"\"\"\n The XML ID_ of the element. May be `None` or a string\n conforming to XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @ID.deleter\n def ID(self):\n self._ID = None\n\n\nclass _NameProperty:\n @property\n def name(self):\n \"\"\"An optional name for the element.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @name.deleter\n def name(self):\n self._name = None\n\n\nclass _XtypeProperty:\n @property\n def xtype(self):\n \"\"\"Extended data type information.\"\"\"\n return self._xtype\n\n @xtype.setter\n def xtype(self, xtype):\n if xtype is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(\n W28, W28, ('xtype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(xtype, 'xtype', self._config, self._pos)\n self._xtype = xtype\n\n @xtype.deleter\n def xtype(self):\n self._xtype = None\n\n\nclass _UtypeProperty:\n _utype_in_v1_2 = False\n\n @property\n def utype(self):\n \"\"\"The usage-specific or `unique type`_ of the element.\"\"\"\n return self._utype\n\n @utype.setter\n def utype(self, utype):\n if (self._utype_in_v1_2 and\n utype is not None and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('utype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(utype, 'utype', self._config, self._pos)\n self._utype = utype\n\n @utype.deleter\n def utype(self):\n self._utype = None\n\n\nclass _UcdProperty:\n _ucd_in_v1_2 = False\n\n @property\n def ucd(self):\n \"\"\"The `unified content descriptor`_ for the element.\"\"\"\n return self._ucd\n\n @ucd.setter\n def ucd(self, ucd):\n if ucd is not None and ucd.strip() == '':\n ucd = None\n if ucd is not None:\n if (self._ucd_in_v1_2 and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('ucd', self._element_name, '1.2'),\n self._config, self._pos)\n check_ucd(ucd, self._config, self._pos)\n self._ucd = ucd\n\n @ucd.deleter\n def ucd(self):\n self._ucd = None\n\n\nclass _DescriptionProperty:\n @property\n def description(self):\n \"\"\"\n An optional string describing the element. 
Corresponds to the\n DESCRIPTION_ element.\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n self._description = description\n\n @description.deleter\n def description(self):\n self._description = None\n\n\n######################################################################\n# ELEMENT CLASSES\nclass Element:\n \"\"\"\n A base class for all classes that represent XML elements in the\n VOTABLE file.\n \"\"\"\n _element_name = ''\n _attr_list = []\n\n def _add_unknown_tag(self, iterator, tag, data, config, pos):\n warn_or_raise(W10, W10, tag, config, pos)\n\n def _ignore_add(self, iterator, tag, data, config, pos):\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def _add_definitions(self, iterator, tag, data, config, pos):\n if config.get('version_1_1_or_later'):\n warn_or_raise(W22, W22, (), config, pos)\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def parse(self, iterator, config):\n \"\"\"\n For internal use. Parse the XML content of the children of the\n element.\n\n Parameters\n ----------\n iterator : xml iterator\n An iterator over XML elements as returned by\n `~astropy.utils.xml.iterparser.get_xml_iterator`.\n\n config : dict\n The configuration dictionary that affects how certain\n elements are read.\n\n Returns\n -------\n self : Element\n Returns self as a convenience.\n \"\"\"\n raise NotImplementedError()\n\n def to_xml(self, w, **kwargs):\n \"\"\"\n For internal use. Output the element to XML.\n\n Parameters\n ----------\n w : astropy.utils.xml.writer.XMLWriter object\n An XML writer to write to.\n\n kwargs : dict\n Any configuration parameters to control the output.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimpleElement(Element):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n Element.__init__(self)\n\n def __repr__(self):\n buff = io.StringIO()\n SimpleElement.to_xml(self, XMLWriter(buff))\n return buff.getvalue().strip()\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n\n\nclass SimpleElementWithContent(SimpleElement):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n SimpleElement.__init__(self)\n\n self._content = None\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n if data:\n self.content = data\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name, self._content,\n attrib=w.object_attrs(self, self._attr_list))\n\n @property\n def content(self):\n \"\"\"The content of the element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n\nclass Link(SimpleElement, _IDProperty):\n \"\"\"\n LINK_ elements: used to reference external documents and servers through a URI.\n\n 
The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',\n 'href', 'action']\n _element_name = 'LINK'\n\n def __init__(self, ID=None, title=None, value=None, href=None, action=None,\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n content_role = kwargs.get('content-role') or kwargs.get('content_role')\n content_type = kwargs.get('content-type') or kwargs.get('content_type')\n\n if 'gref' in kwargs:\n warn_or_raise(W11, W11, (), config, pos)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.content_role = content_role\n self.content_type = content_type\n self.title = title\n self.value = value\n self.href = href\n self.action = action\n\n warn_unknown_attrs(\n 'LINK', kwargs.keys(), config, pos,\n ['content-role', 'content_role', 'content-type', 'content_type',\n 'gref'])\n\n @property\n def content_role(self):\n \"\"\"\n Defines the MIME role of the referenced object. Must be one of:\n\n None, 'query', 'hints', 'doc', 'location' or 'type'\n \"\"\"\n return self._content_role\n\n @content_role.setter\n def content_role(self, content_role):\n if ((content_role == 'type' and\n not self._config['version_1_3_or_later']) or\n content_role not in\n (None, 'query', 'hints', 'doc', 'location')):\n vo_warn(W45, (content_role,), self._config, self._pos)\n self._content_role = content_role\n\n @content_role.deleter\n def content_role(self):\n self._content_role = None\n\n @property\n def content_type(self):\n \"\"\"Defines the MIME content type of the referenced object.\"\"\"\n return self._content_type\n\n @content_type.setter\n def content_type(self, content_type):\n xmlutil.check_mime_content_type(content_type, self._config, self._pos)\n self._content_type = content_type\n\n @content_type.deleter\n def content_type(self):\n self._content_type = None\n\n @property\n def href(self):\n \"\"\"\n A URI to an arbitrary protocol. 
The vo package only supports\n http and anonymous ftp.\n \"\"\"\n return self._href\n\n @href.setter\n def href(self, href):\n xmlutil.check_anyuri(href, self._config, self._pos)\n self._href = href\n\n @href.deleter\n def href(self):\n self._href = None\n\n def to_table_column(self, column):\n meta = {}\n for key in self._attr_list:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n column.meta.setdefault('links', [])\n column.meta['links'].append(meta)\n\n @classmethod\n def from_table_column(cls, d):\n return cls(**d)\n\n\nclass Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,\n _UtypeProperty):\n \"\"\"\n INFO_ elements: arbitrary key-value pairs for extensions to the standard.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _element_name = 'INFO'\n _attr_list_11 = ['ID', 'name', 'value']\n _attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']\n _utype_in_v1_2 = True\n\n def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,\n ref=None, unit=None, ucd=None, utype=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElementWithContent.__init__(self)\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n self.value = value\n self.xtype = xtype\n self.ref = ref\n self.unit = unit\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs('INFO', ['xtype'], config, pos)\n if ref is not None:\n warn_unknown_attrs('INFO', ['ref'], config, pos)\n if unit is not None:\n warn_unknown_attrs('INFO', ['unit'], config, pos)\n if ucd is not None:\n warn_unknown_attrs('INFO', ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs('INFO', ['utype'], config, pos)\n\n warn_unknown_attrs('INFO', extra.keys(), config, pos)\n\n @property\n def name(self):\n \"\"\"[*required*] The key of the key-value pair.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n if name is None:\n warn_or_raise(W35, W35, ('name'), self._config, self._pos)\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @property\n def value(self):\n \"\"\"\n [*required*] The value of the key-value pair. 
(Always stored\n as a string or unicode string).\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n warn_or_raise(W35, W35, ('value'), self._config, self._pos)\n check_string(value, 'value', self._config, self._pos)\n self._value = value\n\n @property\n def content(self):\n \"\"\"The content inside the INFO element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n @property\n def ref(self):\n \"\"\"\n Refer to another INFO_ element by ID_, defined previously in\n the document.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n if ref is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),\n self._config, self._pos)\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n # TODO: actually apply the reference\n # if ref is not None:\n # try:\n # other = self._votable.get_values_by_id(ref, before=self)\n # except KeyError:\n # vo_raise(\n # \"VALUES ref='%s', which has not already been defined.\" %\n # self.ref, self._config, self._pos, KeyError)\n # self.null = other.null\n # self.type = other.type\n # self.min = other.min\n # self.min_inclusive = other.min_inclusive\n # self.max = other.max\n # self.max_inclusive = other.max_inclusive\n # self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the INFO_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n if not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),\n self._config, self._pos)\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n w.element(self._element_name, self._content,\n attrib=attrib)\n\n\nclass Values(Element, _IDProperty):\n \"\"\"\n VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, field, ID=None, null=None, ref=None,\n type=\"legal\", id=None, config=None, pos=None, **extras):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n\n self._votable = votable\n self._field = field\n self.ID = resolve_id(ID, id, config, pos)\n self.null = null\n self._ref = ref\n self.type = type\n\n self.min = None\n self.max = None\n self.min_inclusive = True\n self.max_inclusive = True\n self._options = []\n\n warn_unknown_attrs('VALUES', extras.keys(), config, pos)\n\n def 
__repr__(self):\n buff = io.StringIO()\n self.to_xml(XMLWriter(buff))\n return buff.getvalue().strip()\n\n @property\n def null(self):\n \"\"\"\n For integral datatypes, *null* is used to define the value\n used for missing values.\n \"\"\"\n return self._null\n\n @null.setter\n def null(self, null):\n if null is not None and isinstance(null, str):\n try:\n null_val = self._field.converter.parse_scalar(\n null, self._config, self._pos)[0]\n except Exception:\n warn_or_raise(W36, W36, null, self._config, self._pos)\n null_val = self._field.converter.parse_scalar(\n '0', self._config, self._pos)[0]\n else:\n null_val = null\n self._null = null_val\n\n @null.deleter\n def null(self):\n self._null = None\n\n @property\n def type(self):\n \"\"\"\n [*required*] Defines the applicability of the domain defined\n by this VALUES_ element. Must be one of the following\n strings:\n\n - 'legal': The domain of this column applies in general to\n this datatype. (default)\n\n - 'actual': The domain of this column applies only to the\n data enclosed in the parent table.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('legal', 'actual'):\n vo_raise(E08, type, self._config, self._pos)\n self._type = type\n\n @property\n def ref(self):\n \"\"\"\n Refer to another VALUES_ element by ID_, defined previously in\n the document, for MIN/MAX/OPTION information.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n other = self._votable.get_values_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,\n self._pos)\n ref = None\n else:\n self.null = other.null\n self.type = other.type\n self.min = other.min\n self.min_inclusive = other.min_inclusive\n self.max = other.max\n self.max_inclusive = other.max_inclusive\n self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def min(self):\n \"\"\"\n The minimum value of the domain. See :attr:`min_inclusive`.\n \"\"\"\n return self._min\n\n @min.setter\n def min(self, min):\n if hasattr(self._field, 'converter') and min is not None:\n self._min = self._field.converter.parse(min)[0]\n else:\n self._min = min\n\n @min.deleter\n def min(self):\n self._min = None\n\n @property\n def min_inclusive(self):\n \"\"\"When `True`, the domain includes the minimum value.\"\"\"\n return self._min_inclusive\n\n @min_inclusive.setter\n def min_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._min_inclusive = True\n elif inclusive == 'no':\n self._min_inclusive = False\n else:\n self._min_inclusive = bool(inclusive)\n\n @min_inclusive.deleter\n def min_inclusive(self):\n self._min_inclusive = True\n\n @property\n def max(self):\n \"\"\"\n The maximum value of the domain. 
See :attr:`max_inclusive`.\n \"\"\"\n return self._max\n\n @max.setter\n def max(self, max):\n if hasattr(self._field, 'converter') and max is not None:\n self._max = self._field.converter.parse(max)[0]\n else:\n self._max = max\n\n @max.deleter\n def max(self):\n self._max = None\n\n @property\n def max_inclusive(self):\n \"\"\"When `True`, the domain includes the maximum value.\"\"\"\n return self._max_inclusive\n\n @max_inclusive.setter\n def max_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._max_inclusive = True\n elif inclusive == 'no':\n self._max_inclusive = False\n else:\n self._max_inclusive = bool(inclusive)\n\n @max_inclusive.deleter\n def max_inclusive(self):\n self._max_inclusive = True\n\n @property\n def options(self):\n \"\"\"\n A list of string key-value tuples defining other OPTION\n elements for the domain. All options are ignored -- they are\n stored for round-tripping purposes only.\n \"\"\"\n return self._options\n\n def parse(self, iterator, config):\n if self.ref is not None:\n for start, tag, data, pos in iterator:\n if start:\n warn_or_raise(W44, W44, tag, config, pos)\n else:\n if tag != 'VALUES':\n warn_or_raise(W44, W44, tag, config, pos)\n break\n else:\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'MIN':\n if 'value' not in data:\n vo_raise(E09, 'MIN', config, pos)\n self.min = data['value']\n self.min_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MIN', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'MAX':\n if 'value' not in data:\n vo_raise(E09, 'MAX', config, pos)\n self.max = data['value']\n self.max_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MAX', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'OPTION':\n if 'value' not in data:\n vo_raise(E09, 'OPTION', config, pos)\n xmlutil.check_token(\n data.get('name'), 'name', config, pos)\n self.options.append(\n (data.get('name'), data.get('value')))\n warn_unknown_attrs(\n 'OPTION', data.keys(), config, pos,\n ['value', 'name'])\n elif tag == 'VALUES':\n break\n\n return self\n\n def is_defaults(self):\n \"\"\"\n Are the settings on this ``VALUE`` element all the same as the\n XML defaults?\n \"\"\"\n # If there's nothing meaningful or non-default to write,\n # don't write anything.\n return (self.ref is None and self.null is None and self.ID is None and\n self.max is None and self.min is None and self.options == [])\n\n def to_xml(self, w, **kwargs):\n def yes_no(value):\n if value:\n return 'yes'\n return 'no'\n\n if self.is_defaults():\n return\n\n if self.ref is not None:\n w.element('VALUES', attrib=w.object_attrs(self, ['ref']))\n else:\n with w.tag('VALUES',\n attrib=w.object_attrs(\n self, ['ID', 'null', 'ref'])):\n if self.min is not None:\n w.element(\n 'MIN',\n value=self._field.converter.output(self.min, False),\n inclusive=yes_no(self.min_inclusive))\n if self.max is not None:\n w.element(\n 'MAX',\n value=self._field.converter.output(self.max, False),\n inclusive=yes_no(self.max_inclusive))\n for name, value in self.options:\n w.element(\n 'OPTION',\n name=name,\n value=value)\n\n def to_table_column(self, column):\n # Have the ref filled in here\n meta = {}\n for key in ['ID', 'null']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n if self.min is not None:\n meta['min'] = {\n 'value': self.min,\n 'inclusive': self.min_inclusive}\n if self.max is not None:\n meta['max'] = {\n 'value': self.max,\n 'inclusive': self.max_inclusive}\n if len(self.options):\n 
meta['options'] = dict(self.options)\n\n column.meta['values'] = meta\n\n def from_table_column(self, column):\n if column.info.meta is None or 'values' not in column.info.meta:\n return\n\n meta = column.info.meta['values']\n for key in ['ID', 'null']:\n val = meta.get(key, None)\n if val is not None:\n setattr(self, key, val)\n if 'min' in meta:\n self.min = meta['min']['value']\n self.min_inclusive = meta['min']['inclusive']\n if 'max' in meta:\n self.max = meta['max']['value']\n self.max_inclusive = meta['max']['inclusive']\n if 'options' in meta:\n self._options = list(meta['options'].items())\n\n\nclass Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,\n _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELD_ element: describes the datatype of a particular column of data.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n If *ID* is provided, it is used for the column name in the\n resulting recarray of the table. If no *ID* is provided, *name*\n is used instead. If neither is provided, an exception will be\n raised.\n \"\"\"\n _attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',\n 'unit', 'width', 'precision', 'utype', 'ref']\n _attr_list_12 = _attr_list_11 + ['xtype']\n _element_name = 'FIELD'\n\n def __init__(self, votable, ID=None, name=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, ref=None, type=None, id=None,\n xtype=None,\n config=None, pos=None, **extra):\n if config is None:\n if hasattr(votable, '_get_version_checks'):\n config = votable._get_version_checks()\n else:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs(self._element_name, ['xtype'], config, pos)\n\n # TODO: REMOVE ME ----------------------------------------\n # This is a terrible hack to support Simple Image Access\n # Protocol results from archive.noao.edu. It creates a field\n # for the coordinate projection type of type \"double\", which\n # actually contains character data. We have to hack the field\n # to store character data, or we can't read it in. 
A warning\n # will be raised when this happens.\n if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and\n ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and\n datatype == 'double'):\n datatype = 'char'\n arraysize = '3'\n vo_warn(W40, (), config, pos)\n # ----------------------------------------\n\n self.description = None\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n if name is None:\n if (self._element_name == 'PARAM' and\n not config.get('version_1_1_or_later')):\n pass\n else:\n warn_or_raise(W15, W15, self._element_name, config, pos)\n self.name = self.ID\n\n if self._ID is None and name is None:\n vo_raise(W12, self._element_name, config, pos)\n\n datatype_mapping = {\n 'string': 'char',\n 'unicodeString': 'unicodeChar',\n 'int16': 'short',\n 'int32': 'int',\n 'int64': 'long',\n 'float32': 'float',\n 'float64': 'double',\n # The following appear in some Vizier tables\n 'unsignedInt': 'long',\n 'unsignedShort': 'int'\n }\n\n datatype_mapping.update(config.get('datatype_mapping', {}))\n\n if datatype in datatype_mapping:\n warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),\n config, pos)\n datatype = datatype_mapping[datatype]\n\n self.ref = ref\n self.datatype = datatype\n self.arraysize = arraysize\n self.ucd = ucd\n self.unit = unit\n self.width = width\n self.precision = precision\n self.utype = utype\n self.type = type\n self._links = HomogeneousList(Link)\n self.title = self.name\n self.values = Values(self._votable, self)\n self.xtype = xtype\n\n self._setup(config, pos)\n\n warn_unknown_attrs(self._element_name, extra.keys(), config, pos)\n\n @classmethod\n def uniqify_names(cls, fields):\n \"\"\"\n Make sure that all names and titles in a list of fields are\n unique, by appending numbers if necessary.\n \"\"\"\n unique = {}\n for field in fields:\n i = 2\n new_id = field.ID\n while new_id in unique:\n new_id = field.ID + f\"_{i:d}\"\n i += 1\n if new_id != field.ID:\n vo_warn(W32, (field.ID, new_id), field._config, field._pos)\n field.ID = new_id\n unique[new_id] = field.ID\n\n for field in fields:\n i = 2\n if field.name is None:\n new_name = field.ID\n implicit = True\n else:\n new_name = field.name\n implicit = False\n if new_name != field.ID:\n while new_name in unique:\n new_name = field.name + f\" {i:d}\"\n i += 1\n\n if (not implicit and\n new_name != field.name):\n vo_warn(W33, (field.name, new_name), field._config, field._pos)\n field._unique_name = new_name\n unique[new_name] = field.name\n\n def _setup(self, config, pos):\n if self.values._ref is not None:\n self.values.ref = self.values._ref\n self.converter = converters.get_converter(self, config, pos)\n\n @property\n def datatype(self):\n \"\"\"\n [*required*] The datatype of the column. 
Valid values (as\n defined by the spec) are:\n\n 'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',\n 'char', 'unicodeChar', 'float', 'double', 'floatComplex', or\n 'doubleComplex'\n\n Many VOTABLE files in the wild use 'string' instead of 'char',\n so that is also a valid option, though 'string' will always be\n converted to 'char' when writing the file back out.\n \"\"\"\n return self._datatype\n\n @datatype.setter\n def datatype(self, datatype):\n if datatype is None:\n if self._config.get('version_1_1_or_later'):\n warn_or_raise(E10, E10, self._element_name, self._config,\n self._pos)\n datatype = 'char'\n if datatype not in converters.converter_mapping:\n vo_raise(E06, (datatype, self.ID), self._config, self._pos)\n self._datatype = datatype\n\n @property\n def precision(self):\n \"\"\"\n Along with :attr:`width`, defines the `numerical accuracy`_\n associated with the data. These values are used to limit the\n precision when writing floating point values back to the XML\n file. Otherwise, it is purely informational -- the Numpy\n recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._precision\n\n @precision.setter\n def precision(self, precision):\n if precision is not None and not re.match(r\"^[FE]?[0-9]+$\", precision):\n vo_raise(E11, precision, self._config, self._pos)\n self._precision = precision\n\n @precision.deleter\n def precision(self):\n self._precision = None\n\n @property\n def width(self):\n \"\"\"\n Along with :attr:`precision`, defines the `numerical\n accuracy`_ associated with the data. These values are used to\n limit the precision when writing floating point values back to\n the XML file. Otherwise, it is purely informational -- the\n Numpy recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n if width is not None:\n width = int(width)\n if width <= 0:\n vo_raise(E12, width, self._config, self._pos)\n self._width = width\n\n @width.deleter\n def width(self):\n self._width = None\n\n # ref on FIELD and PARAM behave differently than elsewhere -- here\n # they're just informational, such as to refer to a coordinate\n # system.\n @property\n def ref(self):\n \"\"\"\n On FIELD_ elements, ref is used only for informational\n purposes, for example to refer to a COOSYS_ or TIMESYS_ element.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the FIELD_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n @property\n def arraysize(self):\n \"\"\"\n Specifies the size of the multidimensional array if this\n FIELD_ contains more than a single value.\n\n See 
`multidimensional arrays`_.\n \"\"\"\n return self._arraysize\n\n @arraysize.setter\n def arraysize(self, arraysize):\n if (arraysize is not None and\n not re.match(r\"^([0-9]+x)*[0-9]*[*]?(s\\W)?$\", arraysize)):\n vo_raise(E13, arraysize, self._config, self._pos)\n self._arraysize = arraysize\n\n @arraysize.deleter\n def arraysize(self):\n self._arraysize = None\n\n @property\n def type(self):\n \"\"\"\n The type attribute on FIELD_ elements is reserved for future\n extensions.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n self._type = type\n\n @type.deleter\n def type(self):\n self._type = None\n\n @property\n def values(self):\n \"\"\"\n A :class:`Values` instance (or `None`) defining the domain\n of the column.\n \"\"\"\n return self._values\n\n @values.setter\n def values(self, values):\n assert values is None or isinstance(values, Values)\n self._values = values\n\n @values.deleter\n def values(self):\n self._values = None\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` instances used to reference more\n details about the meaning of the FIELD_. This is purely\n informational and is not used by the `astropy.io.votable`\n package.\n \"\"\"\n return self._links\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'VALUES':\n self.values.__init__(\n self._votable, self, config=config, pos=pos, **data)\n self.values.parse(iterator, config)\n elif tag == 'LINK':\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n elif tag == 'DESCRIPTION':\n warn_unknown_attrs(\n 'DESCRIPTION', data.keys(), config, pos)\n elif tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(\n W17, W17, self._element_name, config, pos)\n self.description = data or None\n elif tag == self._element_name:\n break\n\n if self.description is not None:\n self.title = \" \".join(x.strip() for x in\n self.description.splitlines())\n else:\n self.title = self.name\n\n self._setup(config, pos)\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n with w.tag(self._element_name, attrib=attrib):\n if self.description is not None:\n w.element('DESCRIPTION', self.description, wrap=True)\n if not self.values.is_defaults():\n self.values.to_xml(w, **kwargs)\n for link in self.links:\n link.to_xml(w, **kwargs)\n\n def to_table_column(self, column):\n \"\"\"\n Sets the attributes of a given `astropy.table.Column` instance\n to match the information in this `Field`.\n \"\"\"\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = getattr(self, key, None)\n if val is not None:\n column.meta[key] = val\n if not self.values.is_defaults():\n self.values.to_table_column(column)\n for link in self.links:\n link.to_table_column(column)\n if self.description is not None:\n column.description = self.description\n if self.unit is not None:\n # TODO: Use units framework when it's available\n column.unit = self.unit\n if (isinstance(self.converter, converters.FloatingPoint) and\n self.converter.output_format != '{!r:>}'):\n column.format = self.converter.output_format\n elif isinstance(self.converter, converters.Char):\n column.info.meta['_votable_string_dtype'] = 'char'\n elif isinstance(self.converter, converters.UnicodeChar):\n 
column.info.meta['_votable_string_dtype'] = 'unicodeChar'\n\n @classmethod\n def from_table_column(cls, votable, column):\n \"\"\"\n Restores a `Field` instance from a given\n `astropy.table.Column` instance.\n \"\"\"\n kwargs = {}\n meta = column.info.meta\n if meta:\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = meta.get(key, None)\n if val is not None:\n kwargs[key] = val\n # TODO: Use the unit framework when available\n if column.info.unit is not None:\n kwargs['unit'] = column.info.unit\n kwargs['name'] = column.info.name\n result = converters.table_column_to_votable_datatype(column)\n kwargs.update(result)\n\n field = cls(votable, **kwargs)\n\n if column.info.description is not None:\n field.description = column.info.description\n field.values.from_table_column(column)\n if meta and 'links' in meta:\n for link in meta['links']:\n field.links.append(Link.from_table_column(link))\n\n # TODO: Parse format into precision and width\n return field\n\n\nclass Param(Field):\n \"\"\"\n PARAM_ element: constant-valued columns in the data.\n\n :class:`Param` objects are a subclass of :class:`Field`, and have\n all of its methods and members. Additionally, it defines :attr:`value`.\n \"\"\"\n _attr_list_11 = Field._attr_list_11 + ['value']\n _attr_list_12 = Field._attr_list_12 + ['value']\n _element_name = 'PARAM'\n\n def __init__(self, votable, ID=None, name=None, value=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, type=None, id=None, config=None,\n pos=None, **extra):\n self._value = value\n Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,\n arraysize=arraysize, ucd=ucd, unit=unit,\n precision=precision, utype=utype, type=type,\n id=id, config=config, pos=pos, **extra)\n\n @property\n def value(self):\n \"\"\"\n [*required*] The constant value of the parameter. Its type is\n determined by the :attr:`~Field.datatype` member.\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n value = \"\"\n if isinstance(value, str):\n self._value = self.converter.parse(\n value, self._config, self._pos)[0]\n else:\n self._value = value\n\n def _setup(self, config, pos):\n Field._setup(self, config, pos)\n self.value = self._value\n\n def to_xml(self, w, **kwargs):\n tmp_value = self._value\n self._value = self.converter.output(tmp_value, False)\n # We must always have a value\n if self._value is None:\n self._value = \"\"\n Field.to_xml(self, w, **kwargs)\n self._value = tmp_value\n\n\nclass CooSys(SimpleElement):\n \"\"\"\n COOSYS_ element: defines a coordinate system.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'equinox', 'epoch', 'system']\n _element_name = 'COOSYS'\n\n def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n # COOSYS was deprecated in 1.2 but then re-instated in 1.3\n if (config.get('version_1_2_or_later') and\n not config.get('version_1_3_or_later')):\n warn_or_raise(W27, W27, (), config, pos)\n\n SimpleElement.__init__(self)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.equinox = equinox\n self.epoch = epoch\n self.system = system\n\n warn_unknown_attrs('COOSYS', extra.keys(), config, pos)\n\n @property\n def ID(self):\n \"\"\"\n [*required*] The XML ID of the COOSYS_ element, used for\n cross-referencing. 
May be `None` or a string conforming to\n XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n if self._config.get('version_1_1_or_later'):\n if ID is None:\n vo_raise(E15, (), self._config, self._pos)\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @property\n def system(self):\n \"\"\"\n Specifies the type of coordinate system. Valid choices are:\n\n 'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',\n 'supergalactic', 'xy', 'barycentric', or 'geo_app'\n \"\"\"\n return self._system\n\n @system.setter\n def system(self, system):\n if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',\n 'galactic', 'supergalactic', 'xy', 'barycentric',\n 'geo_app'):\n warn_or_raise(E16, E16, system, self._config, self._pos)\n self._system = system\n\n @system.deleter\n def system(self):\n self._system = None\n\n @property\n def equinox(self):\n \"\"\"\n A parameter required to fix the equatorial or ecliptic systems\n (as e.g. \"J2000\" as the default \"eq_FK5\" or \"B1950\" as the\n default \"eq_FK4\").\n \"\"\"\n return self._equinox\n\n @equinox.setter\n def equinox(self, equinox):\n check_astroyear(equinox, 'equinox', self._config, self._pos)\n self._equinox = equinox\n\n @equinox.deleter\n def equinox(self):\n self._equinox = None\n\n @property\n def epoch(self):\n \"\"\"\n Specifies the epoch of the positions. It must be a string\n specifying an astronomical year.\n \"\"\"\n return self._epoch\n\n @epoch.setter\n def epoch(self, epoch):\n check_astroyear(epoch, 'epoch', self._config, self._pos)\n self._epoch = epoch\n\n @epoch.deleter\n def epoch(self):\n self._epoch = None\n\n\nclass TimeSys(SimpleElement):\n \"\"\"\n TIMESYS_ element: defines a time system.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'timeorigin', 'timescale', 'refposition']\n _element_name = 'TIMESYS'\n\n def __init__(self, ID=None, timeorigin=None, timescale=None, refposition=None, id=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n # TIMESYS is supported starting in version 1.4\n if not config['version_1_4_or_later']:\n warn_or_raise(\n W54, W54, config['version'], config, pos)\n\n SimpleElement.__init__(self)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.timeorigin = timeorigin\n self.timescale = timescale\n self.refposition = refposition\n\n warn_unknown_attrs('TIMESYS', extra.keys(), config, pos,\n ['ID', 'timeorigin', 'timescale', 'refposition'])\n\n @property\n def ID(self):\n \"\"\"\n [*required*] The XML ID of the TIMESYS_ element, used for\n cross-referencing. Must be a string conforming to\n XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n if ID is None:\n vo_raise(E22, (), self._config, self._pos)\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @property\n def timeorigin(self):\n \"\"\"\n Specifies the time origin of the time coordinate,\n given as a Julian Date for the the time scale and\n reference point defined. It is usually given as a\n floating point literal; for convenience, the magic\n strings \"MJD-origin\" (standing for 2400000.5) and\n \"JD-origin\" (standing for 0) are also allowed.\n\n The timeorigin attribute MUST be given unless the\n time’s representation contains a year of a calendar\n era, in which case it MUST NOT be present. 
In VOTables,\n these representations currently are Gregorian calendar\n years with xtype=\"timestamp\", or years in the Julian\n or Besselian calendar when a column has yr, a, or Ba as\n its unit and no time origin is given.\n \"\"\"\n return self._timeorigin\n\n @timeorigin.setter\n def timeorigin(self, timeorigin):\n if (timeorigin is not None and\n timeorigin != 'MJD-origin' and timeorigin != 'JD-origin'):\n try:\n timeorigin = float(timeorigin)\n except ValueError:\n warn_or_raise(E23, E23, timeorigin, self._config, self._pos)\n self._timeorigin = timeorigin\n\n @timeorigin.deleter\n def timeorigin(self):\n self._timeorigin = None\n\n @property\n def timescale(self):\n \"\"\"\n [*required*] String specifying the time scale used. Values\n should be taken from the IVOA timescale vocabulary (documented\n at http://www.ivoa.net/rdf/timescale).\n \"\"\"\n return self._timescale\n\n @timescale.setter\n def timescale(self, timescale):\n self._timescale = timescale\n\n @timescale.deleter\n def timescale(self):\n self._timescale = None\n\n @property\n def refposition(self):\n \"\"\"\n [*required*] String specifying the reference position. Values\n should be taken from the IVOA refposition vocabulary (documented\n at http://www.ivoa.net/rdf/refposition).\n \"\"\"\n return self._refposition\n\n @refposition.setter\n def refposition(self, refposition):\n self._refposition = refposition\n\n @refposition.deleter\n def refposition(self):\n self._refposition = None\n\n\nclass FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"FIELDref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,\n **extra):\n \"\"\"\n *table* is the :class:`Table` object that this :class:`FieldRef`\n is a member of.\n\n *ref* is the ID to reference a :class:`Field` object defined\n elsewhere.\n \"\"\"\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the FIELD_ that this FIELDref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Field` instance that this :class:`FieldRef`\n references.\n \"\"\"\n for field in self._table._votable.iter_fields_and_params():\n if isinstance(field, Field) and field.ID == self.ref:\n return field\n vo_raise(\n f\"No field named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n It contains the following publicly-accessible members:\n\n *ref*: An XML ID referring to a <PARAM> 
element.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"PARAMref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):\n if config is None:\n config = {}\n\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the PARAM_ that this PARAMref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Param` instance that this :class:``PARAMref``\n references.\n \"\"\"\n for param in self._table._votable.iter_fields_and_params():\n if isinstance(param, Param) and param.ID == self.ref:\n return param\n vo_raise(\n f\"No params named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass Group(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _UcdProperty, _DescriptionProperty):\n \"\"\"\n GROUP_ element: groups FIELD_ and PARAM_ elements.\n\n This information is currently ignored by the vo package---that is\n the columns in the recarray are always flat---but the grouping\n information is stored so that it can be written out again to the\n XML file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, table, ID=None, name=None, ref=None, ucd=None,\n utype=None, id=None, config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n self.description = None\n\n self._entries = HomogeneousList(\n (FieldRef, ParamRef, Group, Param))\n\n warn_unknown_attrs('GROUP', extra.keys(), config, pos)\n\n def __repr__(self):\n return f'<GROUP>... {len(self._entries)} entries ...</GROUP>'\n\n @property\n def ref(self):\n \"\"\"\n Currently ignored, as it's not clear from the spec how this is\n meant to work.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def entries(self):\n \"\"\"\n [read-only] A list of members of the GROUP_. 
This list may\n only contain objects of type :class:`Param`, :class:`Group`,\n :class:`ParamRef` and :class:`FieldRef`.\n \"\"\"\n return self._entries\n\n def _add_fieldref(self, iterator, tag, data, config, pos):\n fieldref = FieldRef(self._table, config=config, pos=pos, **data)\n self.entries.append(fieldref)\n\n def _add_paramref(self, iterator, tag, data, config, pos):\n paramref = ParamRef(self._table, config=config, pos=pos, **data)\n self.entries.append(paramref)\n\n def _add_param(self, iterator, tag, data, config, pos):\n if isinstance(self._table, VOTableFile):\n votable = self._table\n else:\n votable = self._table._votable\n param = Param(votable, config=config, pos=pos, **data)\n self.entries.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self._table, config=config, pos=pos, **data)\n self.entries.append(group)\n group.parse(iterator, config)\n\n def parse(self, iterator, config):\n tag_mapping = {\n 'FIELDref': self._add_fieldref,\n 'PARAMref': self._add_paramref,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'GROUP', config, pos)\n self.description = data or None\n elif tag == 'GROUP':\n break\n return self\n\n def to_xml(self, w, **kwargs):\n with w.tag(\n 'GROUP',\n attrib=w.object_attrs(\n self, ['ID', 'name', 'ref', 'ucd', 'utype'])):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for entry in self.entries:\n entry.to_xml(w, **kwargs)\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all :class:`Param` elements in this\n :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Param):\n yield entry\n elif isinstance(entry, Group):\n for field in entry.iter_fields_and_params():\n yield field\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all sub-:class:`Group` instances in\n this :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Group):\n yield entry\n for group in entry.iter_groups():\n yield group\n\n\nclass Table(Element, _IDProperty, _NameProperty, _UcdProperty,\n _DescriptionProperty):\n \"\"\"\n TABLE_ element: optionally contains data.\n\n It contains the following publicly-accessible and mutable\n attribute:\n\n *array*: A Numpy masked array of the data itself, where each\n row is a row of votable data, and columns are named and typed\n based on the <FIELD> elements of the table. 
The mask is\n parallel to the data array, except for variable-length fields.\n For those fields, the numpy array's column type is \"object\"\n (``\"O\"``), and another masked array is stored there.\n\n If the Table contains no data, (for example, its enclosing\n :class:`Resource` has :attr:`~Resource.type` == 'meta') *array*\n will have zero-length.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,\n utype=None, nrows=None, id=None, config=None, pos=None,\n **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n self._empty = False\n\n Element.__init__(self)\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n xmlutil.check_id(ref, 'ref', config, pos)\n self._ref = ref\n self.ucd = ucd\n self.utype = utype\n if nrows is not None:\n nrows = int(nrows)\n if nrows < 0:\n raise ValueError(\"'nrows' cannot be negative.\")\n self._nrows = nrows\n self.description = None\n self.format = 'tabledata'\n\n self._fields = HomogeneousList(Field)\n self._params = HomogeneousList(Param)\n self._groups = HomogeneousList(Group)\n self._links = HomogeneousList(Link)\n self._infos = HomogeneousList(Info)\n\n self.array = ma.array([])\n\n warn_unknown_attrs('TABLE', extra.keys(), config, pos)\n\n def __repr__(self):\n return repr(self.to_table())\n\n def __bytes__(self):\n return bytes(self.to_table())\n\n def __str__(self):\n return str(self.to_table())\n\n @property\n def ref(self):\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n \"\"\"\n Refer to another TABLE, previously defined, by the *ref* ID_\n for all metadata (FIELD_, PARAM_ etc.) information.\n \"\"\"\n # When the ref changes, we want to verify that it will work\n # by actually going and looking for the referenced table.\n # If found, set a bunch of properties in this table based\n # on the other one.\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n table = self._votable.get_table_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(\n W43, W43, ('TABLE', self.ref), self._config, self._pos)\n ref = None\n else:\n self._fields = table.fields\n self._params = table.params\n self._groups = table.groups\n self._links = table.links\n else:\n del self._fields[:]\n del self._params[:]\n del self._groups[:]\n del self._links[:]\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def format(self):\n \"\"\"\n [*required*] The serialization format of the table. Must be\n one of:\n\n 'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)\n 'fits' (FITS_).\n\n Note that the 'fits' format, since it requires an external\n file, can not be written out. 
Any file read in with 'fits'\n format will be read out, by default, in 'tabledata' format.\n\n See :ref:`votable-serialization`.\n \"\"\"\n return self._format\n\n @format.setter\n def format(self, format):\n format = format.lower()\n if format == 'fits':\n vo_raise(\"fits format can not be written out, only read.\",\n self._config, self._pos, NotImplementedError)\n if format == 'binary2':\n if not self._config['version_1_3_or_later']:\n vo_raise(\n \"binary2 only supported in votable 1.3 or later\",\n self._config, self._pos)\n elif format not in ('tabledata', 'binary'):\n vo_raise(f\"Invalid format '{format}'\",\n self._config, self._pos)\n self._format = format\n\n @property\n def nrows(self):\n \"\"\"\n [*immutable*] The number of rows in the table, as specified in\n the XML file.\n \"\"\"\n return self._nrows\n\n @property\n def fields(self):\n \"\"\"\n A list of :class:`Field` objects describing the types of each\n of the data columns.\n \"\"\"\n return self._fields\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n table. Must contain only :class:`Param` objects.\n \"\"\"\n return self._params\n\n @property\n def groups(self):\n \"\"\"\n A list of :class:`Group` objects describing how the columns\n and parameters are grouped. Currently this information is\n only kept around for round-tripping and informational\n purposes.\n \"\"\"\n return self._groups\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` objects (pointers to other documents\n or servers through a URI) for the table.\n \"\"\"\n return self._links\n\n @property\n def infos(self):\n \"\"\"\n A list of :class:`Info` objects for the table. Allows for\n post-operational diagnostics.\n \"\"\"\n return self._infos\n\n def is_empty(self):\n \"\"\"\n Returns True if this table doesn't contain any real data\n because it was skipped over by the parser (through use of the\n ``table_number`` kwarg).\n \"\"\"\n return self._empty\n\n def create_arrays(self, nrows=0, config=None):\n \"\"\"\n Create a new array to hold the data based on the current set\n of fields, and store them in the *array* and member variable.\n Any data in the existing array will be lost.\n\n *nrows*, if provided, is the number of rows to allocate.\n \"\"\"\n if nrows is None:\n nrows = 0\n\n fields = self.fields\n\n if len(fields) == 0:\n array = np.recarray((nrows,), dtype='O')\n mask = np.zeros((nrows,), dtype='b')\n else:\n # for field in fields: field._setup(config)\n Field.uniqify_names(fields)\n\n dtype = []\n for x in fields:\n if x._unique_name == x.ID:\n id = x.ID\n else:\n id = (x._unique_name, x.ID)\n dtype.append((id, x.converter.format))\n\n array = np.recarray((nrows,), dtype=np.dtype(dtype))\n descr_mask = []\n for d in array.dtype.descr:\n new_type = (d[1][1] == 'O' and 'O') or 'bool'\n if len(d) == 2:\n descr_mask.append((d[0], new_type))\n elif len(d) == 3:\n descr_mask.append((d[0], new_type, d[2]))\n mask = np.zeros((nrows,), dtype=descr_mask)\n\n self.array = ma.array(array, mask=mask)\n\n def _resize_strategy(self, size):\n \"\"\"\n Return a new (larger) size based on size, used for\n reallocating an array when it fills up. 
This is in its own\n function so the resizing strategy can be easily replaced.\n \"\"\"\n # Once we go beyond 0, make a big step -- after that use a\n # factor of 1.5 to help keep memory usage compact\n if size == 0:\n return 512\n return int(np.ceil(size * RESIZE_AMOUNT))\n\n def _add_field(self, iterator, tag, data, config, pos):\n field = Field(self._votable, config=config, pos=pos, **data)\n self.fields.append(field)\n field.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def parse(self, iterator, config):\n columns = config.get('columns')\n\n # If we've requested to read in only a specific table, skip\n # all others\n table_number = config.get('table_number')\n current_table_number = config.get('_current_table_number')\n skip_table = False\n if current_table_number is not None:\n config['_current_table_number'] += 1\n if (table_number is not None and\n table_number != current_table_number):\n skip_table = True\n self._empty = True\n\n table_id = config.get('table_id')\n if table_id is not None:\n if table_id != self.ID:\n skip_table = True\n self._empty = True\n\n if self.ref is not None:\n # This table doesn't have its own datatype descriptors, it\n # just references those from another table.\n\n # This is to call the property setter to go and get the\n # referenced information\n self.ref = self.ref\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n else:\n if tag == 'TABLE':\n return self\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n else:\n tag_mapping = {\n 'FIELD': self._add_field,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'LINK': self._add_link,\n 'INFO': self._add_info,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n if len(self.fields) == 0:\n warn_or_raise(E25, E25, None, config, pos)\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'TABLE':\n # For error checking purposes\n Field.uniqify_names(self.fields)\n # We still need to create arrays, even if the file\n # contains no DATA section\n self.create_arrays(nrows=0, config=config)\n return self\n\n self.create_arrays(nrows=self._nrows, config=config)\n fields = self.fields\n names = [x.ID for x in fields]\n # Deal with a subset of the columns, if requested.\n if not columns:\n colnumbers = list(range(len(fields)))\n else:\n if 
isinstance(columns, str):\n columns = [columns]\n columns = np.asarray(columns)\n if issubclass(columns.dtype.type, np.integer):\n if np.any(columns < 0) or np.any(columns > len(fields)):\n raise ValueError(\n \"Some specified column numbers out of range\")\n colnumbers = columns\n elif issubclass(columns.dtype.type, np.character):\n try:\n colnumbers = [names.index(x) for x in columns]\n except ValueError:\n raise ValueError(\n f\"Columns '{columns}' not found in fields list\")\n else:\n raise TypeError(\"Invalid columns list\")\n\n if (not skip_table) and (len(fields) > 0):\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'TABLEDATA':\n warn_unknown_attrs(\n 'TABLEDATA', data.keys(), config, pos)\n self.array = self._parse_tabledata(\n iterator, colnumbers, fields, config)\n break\n elif tag == 'BINARY':\n warn_unknown_attrs(\n 'BINARY', data.keys(), config, pos)\n self.array = self._parse_binary(\n 1, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'BINARY2':\n if not config['version_1_3_or_later']:\n warn_or_raise(\n W52, W52, config['version'], config, pos)\n self.array = self._parse_binary(\n 2, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'FITS':\n warn_unknown_attrs(\n 'FITS', data.keys(), config, pos, ['extnum'])\n try:\n extnum = int(data.get('extnum', 0))\n if extnum < 0:\n raise ValueError(\"'extnum' cannot be negative.\")\n except ValueError:\n vo_raise(E17, (), config, pos)\n self.array = self._parse_fits(\n iterator, extnum, config)\n break\n else:\n warn_or_raise(W37, W37, tag, config, pos)\n break\n\n for start, tag, data, pos in iterator:\n if not start and tag == 'DATA':\n break\n\n for start, tag, data, pos in iterator:\n if start and tag == 'INFO':\n if not config.get('version_1_2_or_later'):\n warn_or_raise(\n W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n elif not start and tag == 'TABLE':\n break\n\n return self\n\n def _parse_tabledata(self, iterator, colnumbers, fields, config):\n # Since we don't know the number of rows up front, we'll\n # reallocate the record array to make room as we go. This\n # prevents the need to scan through the XML twice. 
The\n # allocation is by factors of 1.5.\n invalid = config.get('invalid', 'exception')\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n parsers = [field.converter.parse for field in fields]\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n colnumbers_bits = [i in colnumbers for i in range(len(fields))]\n row_default = [x.converter.default for x in fields]\n mask_default = [True] * len(fields)\n array_chunk = []\n mask_chunk = []\n chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)\n for start, tag, data, pos in iterator:\n if tag == 'TR':\n # Now parse one row\n row = row_default[:]\n row_mask = mask_default[:]\n i = 0\n for start, tag, data, pos in iterator:\n if start:\n binary = (data.get('encoding', None) == 'base64')\n warn_unknown_attrs(\n tag, data.keys(), config, pos, ['encoding'])\n else:\n if tag == 'TD':\n if i >= len(fields):\n vo_raise(E20, len(fields), config, pos)\n\n if colnumbers_bits[i]:\n try:\n if binary:\n rawdata = base64.b64decode(\n data.encode('ascii'))\n buf = io.BytesIO(rawdata)\n buf.seek(0)\n try:\n value, mask_value = binparsers[i](\n buf.read)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n else:\n try:\n value, mask_value = parsers[i](\n data, config, pos)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n except Exception as e:\n if invalid == 'exception':\n vo_reraise(e, config, pos)\n else:\n row[i] = value\n row_mask[i] = mask_value\n elif tag == 'TR':\n break\n else:\n self._add_unknown_tag(\n iterator, tag, data, config, pos)\n i += 1\n\n if i < len(fields):\n vo_raise(E21, (i, len(fields)), config, pos)\n\n array_chunk.append(tuple(row))\n mask_chunk.append(tuple(row_mask))\n\n if len(array_chunk) == chunk_size:\n while numrows + chunk_size > alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n if alloc_rows != len(array):\n array = _resize(array, alloc_rows)\n array[numrows:numrows + chunk_size] = array_chunk\n array.mask[numrows:numrows + chunk_size] = mask_chunk\n numrows += chunk_size\n array_chunk = []\n mask_chunk = []\n\n elif not start and tag == 'TABLEDATA':\n break\n\n # Now, resize the array to the exact number of rows we need and\n # put the last chunk values in there.\n alloc_rows = numrows + len(array_chunk)\n\n array = _resize(array, alloc_rows)\n array[numrows:] = array_chunk\n if alloc_rows != 0:\n array.mask[numrows:] = mask_chunk\n numrows += len(array_chunk)\n\n if (self.nrows is not None and\n self.nrows >= 0 and\n self.nrows != numrows):\n warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)\n self._nrows = numrows\n\n return array\n\n def _get_binary_data_stream(self, iterator, config):\n have_local_stream = False\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n if 'href' not in data:\n have_local_stream = True\n if data.get('encoding', None) != 'base64':\n warn_or_raise(\n W38, W38, data.get('encoding', None),\n config, pos)\n else:\n href = data['href']\n xmlutil.check_anyuri(href, config, pos)\n encoding = data.get('encoding', None)\n else:\n buffer = data\n break\n\n if have_local_stream:\n buffer = base64.b64decode(buffer.encode('ascii'))\n string_io = 
io.BytesIO(buffer)\n string_io.seek(0)\n read = string_io.read\n else:\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \" +\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'rb', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n read = fd.read\n\n def careful_read(length):\n result = read(length)\n if len(result) != length:\n raise EOFError\n return result\n\n return careful_read\n\n def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):\n fields = self.fields\n\n careful_read = self._get_binary_data_stream(iterator, config)\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n while True:\n # Resize result arrays if necessary\n if numrows >= alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n array = _resize(array, alloc_rows)\n\n row_data = []\n row_mask_data = []\n\n try:\n if mode == 2:\n mask_bits = careful_read(int((len(fields) + 7) / 8))\n row_mask_data = list(converters.bitarray_to_bool(\n mask_bits, len(fields)))\n\n # Ignore the mask for string columns (see issue 8995)\n for i, f in enumerate(fields):\n if row_mask_data[i] and (f.datatype == 'char' or f.datatype == 'unicodeChar'):\n row_mask_data[i] = False\n\n for i, binparse in enumerate(binparsers):\n try:\n value, value_mask = binparse(careful_read)\n except EOFError:\n raise\n except Exception as e:\n vo_reraise(\n e, config, pos, \"(in row {:d}, col '{}')\".format(\n numrows, fields[i].ID))\n row_data.append(value)\n if mode == 1:\n row_mask_data.append(value_mask)\n else:\n row_mask_data[i] = row_mask_data[i] or value_mask\n except EOFError:\n break\n\n row = [x.converter.default for x in fields]\n row_mask = [False] * len(fields)\n for i in colnumbers:\n row[i] = row_data[i]\n row_mask[i] = row_mask_data[i]\n\n array[numrows] = tuple(row)\n array.mask[numrows] = tuple(row_mask)\n numrows += 1\n\n array = _resize(array, numrows)\n\n return array\n\n def _parse_fits(self, iterator, extnum, config):\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n href = data['href']\n encoding = data.get('encoding', None)\n else:\n break\n\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \"\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'r', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n\n hdulist = fits.open(fd)\n\n array = hdulist[int(extnum)].data\n if array.dtype != self.array.dtype:\n warn_or_raise(W19, W19, (), self._config, self._pos)\n\n return array\n\n def to_xml(self, w, **kwargs):\n specified_format = kwargs.get('tabledata_format')\n if specified_format is not None:\n format = 
specified_format\n else:\n format = self.format\n if format == 'fits':\n format = 'tabledata'\n\n with w.tag(\n 'TABLE',\n attrib=w.object_attrs(\n self,\n ('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):\n\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n\n for element_set in (self.fields, self.params):\n for element in element_set:\n element._setup({}, None)\n\n if self.ref is None:\n for element_set in (self.fields, self.params, self.groups,\n self.links):\n for element in element_set:\n element.to_xml(w, **kwargs)\n elif kwargs['version_1_2_or_later']:\n index = list(self._votable.iter_tables()).index(self)\n group = Group(self, ID=f\"_g{index}\")\n group.to_xml(w, **kwargs)\n\n if len(self.array):\n with w.tag('DATA'):\n if format == 'tabledata':\n self._write_tabledata(w, **kwargs)\n elif format == 'binary':\n self._write_binary(1, w, **kwargs)\n elif format == 'binary2':\n self._write_binary(2, w, **kwargs)\n\n if kwargs['version_1_2_or_later']:\n for element in self._infos:\n element.to_xml(w, **kwargs)\n\n def _write_tabledata(self, w, **kwargs):\n fields = self.fields\n array = self.array\n\n with w.tag('TABLEDATA'):\n w._flush()\n if (_has_c_tabledata_writer and\n not kwargs.get('_debug_python_based_parser')):\n supports_empty_values = [\n field.converter.supports_empty_values(kwargs)\n for field in fields]\n fields = [field.converter.output for field in fields]\n indent = len(w._tags) - 1\n tablewriter.write_tabledata(\n w.write, array.data, array.mask, fields,\n supports_empty_values, indent, 1 << 8)\n else:\n write = w.write\n indent_spaces = w.get_indentation_spaces()\n tr_start = indent_spaces + \"<TR>\\n\"\n tr_end = indent_spaces + \"</TR>\\n\"\n td = indent_spaces + \" <TD>{}</TD>\\n\"\n td_empty = indent_spaces + \" <TD/>\\n\"\n fields = [(i, field.converter.output,\n field.converter.supports_empty_values(kwargs))\n for i, field in enumerate(fields)]\n for row in range(len(array)):\n write(tr_start)\n array_row = array.data[row]\n mask_row = array.mask[row]\n for i, output, supports_empty_values in fields:\n data = array_row[i]\n masked = mask_row[i]\n if supports_empty_values and np.all(masked):\n write(td_empty)\n else:\n try:\n val = output(data, masked)\n except Exception as e:\n vo_reraise(\n e,\n additional=\"(in row {:d}, col '{}')\".format(\n row, self.fields[i].ID))\n if len(val):\n write(td.format(val))\n else:\n write(td_empty)\n write(tr_end)\n\n def _write_binary(self, mode, w, **kwargs):\n fields = self.fields\n array = self.array\n if mode == 1:\n tag_name = 'BINARY'\n else:\n tag_name = 'BINARY2'\n\n with w.tag(tag_name):\n with w.tag('STREAM', encoding='base64'):\n fields_basic = [(i, field.converter.binoutput)\n for (i, field) in enumerate(fields)]\n\n data = io.BytesIO()\n for row in range(len(array)):\n array_row = array.data[row]\n array_mask = array.mask[row]\n\n if mode == 2:\n flattened = np.array([np.all(x) for x in array_mask])\n data.write(converters.bool_to_bitarray(flattened))\n\n for i, converter in fields_basic:\n try:\n chunk = converter(array_row[i], array_mask[i])\n assert type(chunk) == bytes\n except Exception as e:\n vo_reraise(\n e, additional=f\"(in row {row:d}, col '{fields[i].ID}')\")\n data.write(chunk)\n\n w._flush()\n w.write(base64.b64encode(data.getvalue()).decode('ascii'))\n\n def to_table(self, use_names_over_ids=False):\n \"\"\"\n Convert this VO Table to an `astropy.table.Table` instance.\n\n Parameters\n ----------\n use_names_over_ids : bool, optional\n When `True` use the 
``name`` attributes of columns as the\n names of columns in the `astropy.table.Table` instance.\n Since names are not guaranteed to be unique, this may cause\n some columns to be renamed by appending numbers to the end.\n Otherwise (default), use the ID attributes as the column\n names.\n\n .. warning::\n Variable-length array fields may not be restored\n identically when round-tripping through the\n `astropy.table.Table` instance.\n \"\"\"\n from astropy.table import Table\n\n meta = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n if use_names_over_ids:\n names = [field.name for field in self.fields]\n unique_names = []\n for i, name in enumerate(names):\n new_name = name\n i = 2\n while new_name in unique_names:\n new_name = f'{name}{i}'\n i += 1\n unique_names.append(new_name)\n names = unique_names\n else:\n names = [field.ID for field in self.fields]\n\n table = Table(self.array, names=names, meta=meta)\n\n for name, field in zip(names, self.fields):\n column = table[name]\n field.to_table_column(column)\n\n return table\n\n @classmethod\n def from_table(cls, votable, table):\n \"\"\"\n Create a `Table` instance from a given `astropy.table.Table`\n instance.\n \"\"\"\n kwargs = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype']:\n val = table.meta.get(key)\n if val is not None:\n kwargs[key] = val\n new_table = cls(votable, **kwargs)\n if 'description' in table.meta:\n new_table.description = table.meta['description']\n\n for colname in table.colnames:\n column = table[colname]\n new_table.fields.append(Field.from_table_column(votable, column))\n\n if table.mask is None:\n new_table.array = ma.array(np.asarray(table))\n else:\n new_table.array = ma.array(np.asarray(table),\n mask=np.asarray(table.mask))\n\n return new_table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD and PARAM elements in the\n TABLE.\n \"\"\"\n for param in self.params:\n yield param\n for field in self.fields:\n yield field\n for group in self.groups:\n for field in group.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID or name.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given utype and\n returns an iterator emitting all matches.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP elements in the TABLE.\n \"\"\"\n for group in self.groups:\n yield group\n for g in group.iter_groups():\n yield g\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given ID. 
Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_info(self):\n for info in self.infos:\n yield info\n\n\nclass Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _DescriptionProperty):\n \"\"\"\n RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, name=None, ID=None, utype=None, type='results',\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.name = name\n self.ID = resolve_id(ID, id, config, pos)\n self.utype = utype\n self.type = type\n self._extra_attributes = kwargs\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._time_systems = HomogeneousList(TimeSys)\n self._groups = HomogeneousList(Group)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._links = HomogeneousList(Link)\n self._tables = HomogeneousList(Table)\n self._resources = HomogeneousList(Resource)\n\n warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)\n\n def __repr__(self):\n buff = io.StringIO()\n w = XMLWriter(buff)\n w.element(\n self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n return buff.getvalue().strip()\n\n @property\n def type(self):\n \"\"\"\n [*required*] The type of the resource. Must be either:\n\n - 'results': This resource contains actual result values\n (default)\n\n - 'meta': This resource contains only datatype descriptions\n (FIELD_ elements), but no actual data.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('results', 'meta'):\n vo_raise(E18, type, self._config, self._pos)\n self._type = type\n\n @property\n def extra_attributes(self):\n \"\"\"\n A dictionary of string keys to string values containing any\n extra attributes of the RESOURCE_ element that are not defined\n in the specification. (The specification explicitly allows\n for extra attributes here, but nowhere else.)\n \"\"\"\n return self._extra_attributes\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system definitions (COOSYS_ elements) for\n the RESOURCE_. Must contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def time_systems(self):\n \"\"\"\n A list of time system definitions (TIMESYS_ elements) for\n the RESOURCE_. Must contain only `TimeSys` objects.\n \"\"\"\n return self._time_systems\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n resource. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def groups(self):\n \"\"\"\n A list of groups\n \"\"\"\n return self._groups\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n resource. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def links(self):\n \"\"\"\n A list of links (pointers to other documents or servers\n through a URI) for the resource. Must contain only `Link`\n objects.\n \"\"\"\n return self._links\n\n @property\n def tables(self):\n \"\"\"\n A list of tables in the resource. 
Must contain only\n `Table` objects.\n \"\"\"\n return self._tables\n\n @property\n def resources(self):\n \"\"\"\n A list of nested resources inside this resource. Must contain\n only `Resource` objects.\n \"\"\"\n return self._resources\n\n def _add_table(self, iterator, tag, data, config, pos):\n table = Table(self._votable, config=config, pos=pos, **data)\n self.tables.append(table)\n table.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_timesys(self, iterator, tag, data, config, pos):\n timesys = TimeSys(config=config, pos=pos, **data)\n self.time_systems.append(timesys)\n timesys.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self._votable, iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def parse(self, votable, iterator, config):\n self._votable = votable\n\n tag_mapping = {\n 'TABLE': self._add_table,\n 'INFO': self._add_info,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'COOSYS': self._add_coosys,\n 'TIMESYS': self._add_timesys,\n 'RESOURCE': self._add_resource,\n 'LINK': self._add_link,\n 'DESCRIPTION': self._ignore_add\n }\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'RESOURCE':\n break\n\n del self._votable\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrs = w.object_attrs(self, ('ID', 'type', 'utype'))\n attrs.update(self.extra_attributes)\n with w.tag('RESOURCE', attrib=attrs):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for element_set in (self.coordinate_systems, self.time_systems,\n self.params, self.infos, self.links,\n self.tables, self.resources):\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Recursively iterates over all tables in the resource and\n nested resources.\n \"\"\"\n for table in self.tables:\n yield table\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterates over all FIELD_ and PARAM_ elements in\n the resource, its tables and nested resources.\n \"\"\"\n for param in self.params:\n yield param\n for table in self.tables:\n for param in table.iter_fields_and_params():\n yield param\n for resource in self.resources:\n for param in resource.iter_fields_and_params():\n yield param\n\n def iter_coosys(self):\n \"\"\"\n Recursively 
iterates over all the COOSYS_ elements in the\n resource and nested resources.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n def iter_timesys(self):\n \"\"\"\n Recursively iterates over all the TIMESYS_ elements in the\n resource and nested resources.\n \"\"\"\n for timesys in self.time_systems:\n yield timesys\n for resource in self.resources:\n for timesys in resource.iter_timesys():\n yield timesys\n\n def iter_info(self):\n \"\"\"\n Recursively iterates over all the INFO_ elements in the\n resource and nested resources.\n \"\"\"\n for info in self.infos:\n yield info\n for table in self.tables:\n for info in table.iter_info():\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n\nclass VOTableFile(Element, _IDProperty, _DescriptionProperty):\n \"\"\"\n VOTABLE_ element: represents an entire file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n *version* is settable at construction time only, since conformance\n tests for building the rest of the structure depend on it.\n \"\"\"\n\n def __init__(self, ID=None, id=None, config=None, pos=None, version=\"1.4\"):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.ID = resolve_id(ID, id, config, pos)\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._time_systems = HomogeneousList(TimeSys)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._resources = HomogeneousList(Resource)\n self._groups = HomogeneousList(Group)\n\n version = str(version)\n if version not in (\"1.0\", \"1.1\", \"1.2\", \"1.3\", \"1.4\"):\n raise ValueError(\"'version' should be one of '1.0', '1.1', \"\n \"'1.2', '1.3', or '1.4'\")\n\n self._version = version\n\n def __repr__(self):\n n_tables = len(list(self.iter_tables()))\n return f'<VOTABLE>... {n_tables} tables ...</VOTABLE>'\n\n @property\n def version(self):\n \"\"\"\n The version of the VOTable specification that the file uses.\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n version = str(version)\n if version not in ('1.1', '1.2', '1.3', '1.4'):\n raise ValueError(\n \"astropy.io.votable only supports VOTable versions \"\n \"1.1, 1.2, 1.3, and 1.4\")\n self._version = version\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system descriptions for the file. Must\n contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def time_systems(self):\n \"\"\"\n A list of time system descriptions for the file. Must\n contain only `TimeSys` objects.\n \"\"\"\n return self._time_systems\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) that apply to\n the entire file. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n entire file. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def resources(self):\n \"\"\"\n A list of resources, in the order they appear in the file.\n Must only contain `Resource` objects.\n \"\"\"\n return self._resources\n\n @property\n def groups(self):\n \"\"\"\n A list of groups, in the order they appear in the file. 
Only\n supported as a child of the VOTABLE element in VOTable 1.2 or\n later.\n \"\"\"\n return self._groups\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self, iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_timesys(self, iterator, tag, data, config, pos):\n timesys = TimeSys(config=config, pos=pos, **data)\n self.time_systems.append(timesys)\n timesys.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _get_version_checks(self):\n config = {}\n config['version_1_1_or_later'] = \\\n util.version_compare(self.version, '1.1') >= 0\n config['version_1_2_or_later'] = \\\n util.version_compare(self.version, '1.2') >= 0\n config['version_1_3_or_later'] = \\\n util.version_compare(self.version, '1.3') >= 0\n config['version_1_4_or_later'] = \\\n util.version_compare(self.version, '1.4') >= 0\n return config\n\n def parse(self, iterator, config):\n config['_current_table_number'] = 0\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'xml':\n pass\n elif tag == 'VOTABLE':\n if 'version' not in data:\n warn_or_raise(W20, W20, self.version, config, pos)\n config['version'] = self.version\n else:\n config['version'] = self._version = data['version']\n if config['version'].lower().startswith('v'):\n warn_or_raise(\n W29, W29, config['version'], config, pos)\n self._version = config['version'] = \\\n config['version'][1:]\n if config['version'] not in ('1.1', '1.2', '1.3', '1.4'):\n vo_warn(W21, config['version'], config, pos)\n\n if 'xmlns' in data:\n # Starting with VOTable 1.3, namespace URIs stop\n # incrementing with minor version changes. 
See\n # this IVOA note for more info:\n # http://www.ivoa.net/documents/Notes/XMLVers/20180529/\n #\n # If this policy is in place for major version 2,\n # then this logic will need tweaking.\n if config['version'] in ('1.3', '1.4'):\n ns_version = '1.3'\n else:\n ns_version = config['version']\n correct_ns = f'http://www.ivoa.net/xml/VOTable/v{ns_version}'\n if data['xmlns'] != correct_ns:\n vo_warn(\n W41, (correct_ns, data['xmlns']), config, pos)\n else:\n vo_warn(W42, (), config, pos)\n\n break\n else:\n vo_raise(E19, (), config, pos)\n config.update(self._get_version_checks())\n\n tag_mapping = {\n 'PARAM': self._add_param,\n 'RESOURCE': self._add_resource,\n 'COOSYS': self._add_coosys,\n 'TIMESYS': self._add_timesys,\n 'INFO': self._add_info,\n 'DEFINITIONS': self._add_definitions,\n 'DESCRIPTION': self._ignore_add,\n 'GROUP': self._add_group}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'VOTABLE', config, pos)\n self.description = data or None\n\n if not len(self.resources) and config['version_1_2_or_later']:\n warn_or_raise(W53, W53, (), config, pos)\n\n return self\n\n def to_xml(self, fd, compressed=False, tabledata_format=None,\n _debug_python_based_parser=False, _astropy_version=None):\n \"\"\"\n Write to an XML file.\n\n Parameters\n ----------\n fd : str path or writable file-like object\n Where to write the file.\n\n compressed : bool, optional\n When `True`, write to a gzip-compressed file. (Default:\n `False`)\n\n tabledata_format : str, optional\n Override the format of the table(s) data to write. Must\n be one of ``tabledata`` (text representation), ``binary`` or\n ``binary2``. By default, use the format that was specified\n in each `Table` object as it was created or read in. 
See\n :ref:`votable-serialization`.\n \"\"\"\n if tabledata_format is not None:\n if tabledata_format.lower() not in (\n 'tabledata', 'binary', 'binary2'):\n raise ValueError(f\"Unknown format type '{format}'\")\n\n kwargs = {\n 'version': self.version,\n 'tabledata_format':\n tabledata_format,\n '_debug_python_based_parser': _debug_python_based_parser,\n '_group_number': 1}\n kwargs.update(self._get_version_checks())\n\n with util.convert_to_writable_filelike(\n fd, compressed=compressed) as fd:\n w = XMLWriter(fd)\n version = self.version\n if _astropy_version is None:\n lib_version = astropy_version\n else:\n lib_version = _astropy_version\n\n xml_header = \"\"\"\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Produced with astropy.io.votable version {lib_version}\n http://www.astropy.org/ -->\\n\"\"\"\n w.write(xml_header.lstrip().format(**locals()))\n\n with w.tag('VOTABLE',\n {'version': version,\n 'xmlns:xsi':\n \"http://www.w3.org/2001/XMLSchema-instance\",\n 'xsi:noNamespaceSchemaLocation':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\",\n 'xmlns':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\"}):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n element_sets = [self.coordinate_systems, self.time_systems,\n self.params, self.infos, self.resources]\n if kwargs['version_1_2_or_later']:\n element_sets[0] = self.groups\n for element_set in element_sets:\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Iterates over all tables in the VOTable file in a \"flat\" way,\n ignoring the nesting of resources etc.\n \"\"\"\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def get_first_table(self):\n \"\"\"\n Often, you know there is only one table in the file, and\n that's all you need. This method returns that first table.\n \"\"\"\n for table in self.iter_tables():\n if not table.is_empty():\n return table\n raise IndexError(\"No table found in VOTABLE file.\")\n\n get_table_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given ID. Used by the table\n \"ref\" attribute.\n \"\"\")\n\n get_tables_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given utype, and returns an\n iterator emitting all matches.\n \"\"\")\n\n def get_table_by_index(self, idx):\n \"\"\"\n Get a table by its ordinal position in the file.\n \"\"\"\n for i, table in enumerate(self.iter_tables()):\n if i == idx:\n return table\n raise IndexError(\n f\"No table at index {idx:d} found in VOTABLE file.\")\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD_ and PARAM_ elements in the\n VOTABLE_ file.\n \"\"\"\n for resource in self.resources:\n for field in resource.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_. 
Used by the field's\n \"ref\" attribute.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_ or name.\n \"\"\")\n\n def iter_values(self):\n \"\"\"\n Recursively iterate over all VALUES_ elements in the VOTABLE_\n file.\n \"\"\"\n for field in self.iter_fields_and_params():\n yield field.values\n\n get_values_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_values', 'VALUES',\n \"\"\"\n Looks up a VALUES_ element by the given ID. Used by the values\n \"ref\" attribute.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP_ elements in the VOTABLE_\n file.\n \"\"\"\n for table in self.iter_tables():\n for group in table.iter_groups():\n yield group\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given ID. Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_coosys(self):\n \"\"\"\n Recursively iterate over all COOSYS_ elements in the VOTABLE_\n file.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n get_coosys_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_coosys', 'COOSYS',\n \"\"\"Looks up a COOSYS_ element by the given ID.\"\"\")\n\n def iter_timesys(self):\n \"\"\"\n Recursively iterate over all TIMESYS_ elements in the VOTABLE_\n file.\n \"\"\"\n for timesys in self.time_systems:\n yield timesys\n for resource in self.resources:\n for timesys in resource.iter_timesys():\n yield timesys\n\n get_timesys_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_timesys', 'TIMESYS',\n \"\"\"Looks up a TIMESYS_ element by the given ID.\"\"\")\n\n def iter_info(self):\n \"\"\"\n Recursively iterate over all INFO_ elements in the VOTABLE_\n file.\n \"\"\"\n for info in self.infos:\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n get_info_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_info', 'INFO',\n \"\"\"Looks up a INFO element by the given ID.\"\"\")\n\n def set_all_tables_format(self, format):\n \"\"\"\n Set the output storage format of all tables in the file.\n \"\"\"\n for table in self.iter_tables():\n table.format = format\n\n @classmethod\n def from_table(cls, table, table_id=None):\n \"\"\"\n Create a `VOTableFile` instance from a given\n `astropy.table.Table` instance.\n\n Parameters\n ----------\n table_id : str, optional\n Set the given ID attribute on the returned Table instance.\n \"\"\"\n votable_file = cls()\n resource = Resource()\n votable = Table.from_table(votable_file, table)\n if table_id is not None:\n votable.ID = table_id\n resource.tables.append(votable)\n votable_file.resources.append(resource)\n return votable_file\n" ]
[ [ "numpy.asarray", "numpy.dtype", "numpy.all", "numpy.ceil", "numpy.any", "numpy.ma.array", "numpy.ma.zeros", "numpy.zeros", "numpy.recarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tsaodingtw/pttanal
[ "c1d786c04e2d6f1ce02f6886748b4dbec1f37676" ]
[ "tf.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom feature import Feature\nimport sqlite3\nimport pickle\n\ndb = sqlite3.connect('ptt.db')\ncur = db.execute('SELECT * FROM ARTICLES LIMIT 1000')\n\n\n# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3\nf = Feature()\npost_data = []\npush_data = []\nboo_data = []\nfor i in cur:\n post_data.append(f.features_for_tensorflow(i[5]))\n push_data.append(i[7])\n boo_data.append(i[8])\n\ny_data = np.array(push_data) - np.array(boo_data)\n\nx_data = np.array(post_data)\nx = tf.placeholder(tf.float32, shape=(300000, 1))\n# Try to find values for W and b that compute y_data = W * x_data + b\n# (We know that W should be 0.1 and b 0.3, but Tensorflow will\n# figure that out for us.)\nW = tf.Variable(tf.random_uniform([300000, 1], -1.0, 1.0), name=\"Weigh\")\nb = tf.Variable(tf.zeros([1]), name=\"Bias\")\ny = tf.add(tf.matmul(W, x, transpose_a=True), b)\n\n# Minimize the mean squared errors.\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.0000005)\ntrain = optimizer.minimize(loss)\n\n# Before starting, initialize the variables. We will 'run' this first.\ninit = tf.initialize_all_variables()\n\n# Launch the graph.\nsaver = tf.train.Saver([W, b])\nsess = tf.Session()\nsess.run(init)\n\n# Fit the line.\nfor step in range(20001):\n for data in x_data:\n sess.run(train, feed_dict={x: data})\n if step % 20 == 0:\n print(step, sess.run(W), sess.run(b))\n if step % 1000 == 0:\n # Append the step number to the checkpoint name:\n saver.save(sess, 'my-model', global_step=step)\n\n" ]
[ [ "tensorflow.matmul", "tensorflow.zeros", "tensorflow.placeholder", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Session", "tensorflow.square", "tensorflow.train.Saver", "numpy.array", "tensorflow.random_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
thorwhalen/ut
[ "353a4629c35a2cca76ef91a4d5209afe766433b4" ]
[ "sound/dacc/mg.py" ]
[ "__author__ = 'thor'\n\nimport os\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom pymongo.cursor import Cursor\n\n\nfrom ut.sound import util as sutil\nfrom ut.daf.manip import reorder_columns_as\nfrom ut.sound.util import Sound\nfrom ut.pstr.trans import str_to_utf8_or_bust\n\n\nclass MgDacc(object):\n def __init__(self, db, collection, root_folder, path_field='_id', mg_client_kwargs={}):\n self.mgc = MongoClient(**mg_client_kwargs)[db][collection]\n self.root_folder = root_folder\n self.path_field = path_field\n\n def filepath_of(self, path):\n return str_to_utf8_or_bust(os.path.join(self.root_folder, path))\n\n def get_wf_and_sr(self, path, **kwargs):\n return sutil.wf_and_sr_from_filepath(self.filepath_of(path), **kwargs)\n\n def get_sound(self, path_or_doc, **kwargs):\n if not isinstance(path_or_doc, str):\n path_or_doc = path_or_doc.copy()\n file_path = path_or_doc.pop(self.path_field)\n kwargs = dict(kwargs, **path_or_doc)\n path_or_doc = file_path\n name = kwargs.pop('name', os.path.splitext(os.path.basename(path_or_doc))[0])\n try:\n wf, sr = self.get_wf_and_sr(path_or_doc, **kwargs)\n except TypeError:\n kwargs.pop('channels')\n kwargs.pop('frames')\n wf, sr = self.get_wf_and_sr(path_or_doc, **kwargs)\n return Sound(wf=wf, sr=sr, name=name)\n\n def get_sound_iterator(self, find_args={}, find_kwargs={}):\n \"\"\"\n Util to flip through sounds.\n You can do, for example:\n sound_iterator = self.get_sound_iterator\n and then run the following several times:\n sound = sound_iterator.next(); sound.display_sound()\n \"\"\"\n if not find_args and not find_kwargs:\n cursor = self.mgc.find()\n else:\n cursor = self.mgc.find(*find_args, **find_kwargs)\n return map(lambda x: self.get_sound(path_or_doc=x[self.path_field]), cursor)\n\n\nclass SegmentDacc(MgDacc):\n def __init__(self, db, collection, root_folder, path_field='_id', mg_client_kwargs={},\n segment_field='segments', feat_field='fv', tag_field='tags', kv_tag_field='kv_tags'):\n super(SegmentDacc, self).__init__(db, collection, root_folder, path_field, mg_client_kwargs)\n self.segment_field = segment_field\n self.feat_field = feat_field\n self.tag_field = tag_field\n self.kv_tag_field = kv_tag_field\n\n def get_data_with_tags(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], Cursor):\n c = args[0]\n else:\n c = self.mgc.find(*args, **kwargs)\n d = list()\n for ci in c:\n for seg in ci['segments']:\n dd = {'path': ci[self.path_field], 'tags': ci[self.tag_field]}\n dd.update(seg['fv'])\n dd.update({'offset_s': seg['offset_s'], 'duration': seg['duration']})\n d += [dd]\n d = reorder_columns_as(pd.DataFrame(d), ['path', 'tags', 'offset_s', 'duration'])\n return d\n\n def get_data_with_kv_tags(self, *args, **kwargs):\n if 'kv_tag_keys' in list(kwargs.keys()):\n kv_tag_keys = kwargs.get('kv_tag_keys')\n kwargs.pop('kv_tag_keys')\n else:\n kv_tag_keys = ['move_direction', 'vehicle_type']\n\n if len(args) > 0 and isinstance(args[0], Cursor):\n c = args[0]\n else:\n c = self.mgc.find(*args, **kwargs)\n d = list()\n for ci in c:\n for seg in ci[self.segment_field]:\n dd = {'path': ci[self.path_field]}\n for tag_key in kv_tag_keys:\n dd.update({tag_key: ci[self.kv_tag_field].get(tag_key, None)})\n dd.update(seg['fv'])\n dd.update({'offset_s': seg['offset_s'], 'duration': seg['duration']})\n d += [dd]\n d = reorder_columns_as(pd.DataFrame(d), ['path'] + kv_tag_keys + ['offset_s', 'duration'])\n return d\n\n # def get_sound(self, *args, **kwargs):\n # # if len(args) > 0:\n # # kwargs['path_or_doc'] = args[0]\n # 
return super(SegmentDacc, self).get_sound(path_or_doc=, **kwargs)\n\n # return super(SegmentDacc, self).get_sound(args[0], **kwargs)\n # return super(SegmentDacc, self).get_sound(path_or_doc=kwargs['path'],\n # offset_s=kwargs['offset_s'],\n # duration=kwargs['duration'])\n\n def get_segment_iterator(self, only_segments=True, fields=None, *args, **kwargs):\n cursor = self.mgc.find(*args, **kwargs)\n\n def segment_iterator():\n for d in cursor:\n segments = d.pop(self.segment_field)\n if segments is not None:\n for dd in segments:\n if not only_segments:\n dd = dict(d, **dd)\n if fields is None:\n yield dd\n else:\n yield {k: v for k, v in dd.items() if k in fields}\n\n return segment_iterator()\n\n def get_sound_iterator(self, *args, **kwargs):\n \"\"\"\n Util to flip through sounds.\n You can do, for example:\n sound_iterator = self.get_sound_iterator\n and then run the following several times:\n sound = sound_iterator.next(); sound.display_sound()\n \"\"\"\n\n cursor = self.mgc.find(*args, **kwargs)\n return map(self.get_sound, cursor)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
MaksHess/napari
[ "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7", "64a144607342c02177fc62fa83a3442ace0a98e7" ]
[ "napari/utils/_dtype.py", "napari/_tests/test_draw.py", "napari/utils/colormaps/standardize_color.py", "examples/add_points.py", "napari/layers/labels/_tests/test_labels_pyramid.py", "napari/_qt/_tests/test_qt_viewer.py", "napari/utils/_tests/test_geometry.py" ]
[ "from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 16: np.float16,\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n # If we don't find one of the named dtypes, return the dtype_spec\n # unchanged. This allows NumPy big endian types to work. 
See\n # https://github.com/napari/napari/issues/3421\n else:\n return dtype_spec\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n", "import sys\n\nimport numpy as np\nimport pytest\n\nfrom napari._tests.utils import skip_local_popups\n\n\n@skip_local_popups\[email protected](\n sys.platform.startswith('win') or sys.platform.startswith('linux'),\n reason='Currently fails on certain CI due to error on canvas draw.',\n)\ndef test_canvas_drawing(make_napari_viewer):\n \"\"\"Test drawing before and after adding and then deleting a layer.\"\"\"\n viewer = make_napari_viewer(show=True)\n view = viewer.window._qt_viewer\n view.set_welcome_visible(False)\n\n assert len(viewer.layers) == 0\n\n # Check canvas context is not none before drawing, as currently on\n # some of our CI a proper canvas context is not made\n view.canvas.events.draw()\n\n # Add layer\n data = np.random.random((15, 10, 5))\n layer = viewer.add_image(data)\n assert len(viewer.layers) == 1\n view.canvas.events.draw()\n\n # Remove layer\n viewer.layers.remove(layer)\n assert len(viewer.layers) == 0\n view.canvas.events.draw()\n", "\"\"\"This module contains functions that 'standardize' the color handling\nof napari layers by supplying functions that are able to convert most\ncolor representation the user had in mind into a single representation -\na numpy Nx4 array of float32 values between 0 and 1 - that is used across\nthe codebase. The color is always in an RGBA format. To handle colors in\nHSV, for example, we should point users to skimage, matplotlib and others.\n\nThe main function of the module is \"transform_color\", which might call\na cascade of other, private, function in the module to do the hard work\nof converting the input. This function will either be called directly, or\nused by the function \"transform_color_with_defaults\", which is a helper\nfunction for the layer objects located in\n``layers.utils.color_transformations.py``.\n\nIn general, when handling colors we try to catch invalid color\nrepresentations, warn the users of their misbehaving and return a default\nwhite color array, since it seems unreasonable to crash the entire napari\nsession due to mis-represented colors.\n\"\"\"\n\nimport functools\nimport types\nimport warnings\nfrom typing import Any, Callable, Dict, Sequence\n\nimport numpy as np\nfrom vispy.color import ColorArray, get_color_dict, get_color_names\nfrom vispy.color.color_array import _string_to_rgb\n\nfrom ..translations import trans\n\n\ndef transform_color(colors: Any) -> np.ndarray:\n \"\"\"Transforms provided color(s) to an Nx4 array of RGBA np.float32\n values.\n\n N is the number of given colors. 
The function is designed to parse all\n valid color representations a user might have and convert them properly.\n That being said, combinations of different color representation in the\n same list of colors is prohibited, and will error. This means that a list\n of ['red', np.array([1, 0, 0])] cannot be parsed and has to be manually\n pre-processed by the user before sent to this function. In addition, the\n provided colors - if numeric - should already be in an RGB(A) format. To\n convert an existing numeric color array to RGBA format use skimage before\n calling this function.\n\n Parameters\n ----------\n colors : string and array-like.\n The color(s) to interpret and convert\n\n Returns\n -------\n colors : np.ndarray\n An instance of np.ndarray with a data type of float32, 4 columns in\n RGBA order and N rows, with N being the number of colors. The array\n will always be 2D even if a single color is passed.\n\n Raises\n ------\n ValueError, AttributeError, KeyError\n invalid inputs\n \"\"\"\n colortype = type(colors)\n return _color_switch[colortype](colors)\n\n\[email protected]_cache(maxsize=1024)\ndef _handle_str(color: str) -> np.ndarray:\n \"\"\"Creates an array from a color of type string.\n\n The function uses an LRU cache to enhance performance.\n\n Parameters\n ----------\n color : str\n A single string as an input color. Can be a color name or a\n hex representation of a color, with either 6 or 8 hex digits.\n\n Returns\n -------\n colorarray : np.ndarray\n 1x4 array\n\n \"\"\"\n if len(color) == 0:\n warnings.warn(\n trans._(\n \"Empty string detected. Returning black instead.\",\n deferred=True,\n )\n )\n return np.zeros((1, 4), dtype=np.float32)\n\n # This line will stay here until vispy adds a \"transparent\" key\n # to their color dictionary. A PR was sent and approved, currently\n # waiting to be merged.\n color = color.replace(\"transparent\", \"#00000000\")\n colorarray = np.atleast_2d(_string_to_rgb(color)).astype(np.float32)\n if colorarray.shape[1] == 3:\n colorarray = np.column_stack([colorarray, np.float32(1.0)])\n return colorarray\n\n\ndef _handle_list_like(colors: Sequence) -> np.ndarray:\n \"\"\"Parse a list-like container of colors into a numpy Nx4 array.\n\n Handles all list-like containers of colors using recursion (if necessary).\n The colors inside the container should all be represented in the same\n manner. This means that a list containing ['r', (1., 1., 1.)] will raise\n an error. Note that numpy arrays are handled in _handle_array. Lists which\n are known to contain strings will be parsed with _handle_str_list_like.\n Generators should first visit _handle_generator before arriving as input.\n\n Parameters\n ----------\n colors : Sequence\n A list-like container of colors. The colors inside should be homogeneous\n in their representation.\n\n Returns\n -------\n color_array : np.ndarray\n Nx4 numpy array, with N being the length of ``colors``.\n \"\"\"\n\n try:\n # The following conversion works for most cases, and so it's expected\n # that most valid inputs will pass this .asarray() call\n # with ease. 
Those who don't are usually too cryptic to decipher.\n # If only some of the colors are strings, explicitly provide an object\n # dtype to avoid the deprecated behavior described in:\n # https://github.com/napari/napari/issues/2791\n num_str = len([c for c in colors if isinstance(c, str)])\n dtype = 'O' if 0 < num_str < len(colors) else None\n color_array = np.atleast_2d(np.asarray(colors, dtype=dtype))\n except ValueError:\n warnings.warn(\n trans._(\n \"Couldn't convert input color array to a proper numpy array. Please make sure that your input data is in a parsable format. Converting input to a white color array.\",\n deferred=True,\n )\n )\n return np.ones((max(len(colors), 1), 4), dtype=np.float32)\n\n # Happy path - converted to a float\\integer array\n if color_array.dtype.kind in ['f', 'i']:\n return _handle_array(color_array)\n\n # User input was an iterable with strings\n if color_array.dtype.kind in ['U', 'O']:\n return _handle_str_list_like(color_array.ravel())\n\n\ndef _handle_generator(colors) -> np.ndarray:\n \"\"\"Generators are converted to lists since we need to know their\n length to instantiate a proper array.\n \"\"\"\n return _handle_list_like(list(colors))\n\n\ndef handle_nested_colors(colors) -> ColorArray:\n \"\"\"In case of an array-like container holding colors, unpack it.\"\"\"\n colors_as_rbga = np.ones((len(colors), 4), dtype=np.float32)\n for idx, color in enumerate(colors):\n colors_as_rbga[idx] = _color_switch[type(color)](color)\n return ColorArray(colors_as_rbga)\n\n\ndef _handle_array(colors: np.ndarray) -> np.ndarray:\n \"\"\"Converts the given array into an array in the right format.\"\"\"\n kind = colors.dtype.kind\n\n # Object arrays aren't handled by napari\n if kind == 'O':\n warnings.warn(\n trans._(\n \"An object array was passed as the color input. Please convert its datatype before sending it to napari. Converting input to a white color array.\",\n deferred=True,\n )\n )\n return np.ones((max(len(colors), 1), 4), dtype=np.float32)\n\n # An array of strings will be treated as a list if compatible\n elif kind == 'U':\n if colors.ndim == 1:\n return _handle_str_list_like(colors)\n else:\n warnings.warn(\n trans._(\n \"String color arrays should be one-dimensional. Converting input to a white color array.\",\n deferred=True,\n )\n )\n return np.ones((len(colors), 4), dtype=np.float32)\n\n # Test the dimensionality of the input array\n\n # Empty color array can be a way for the user to signal\n # that it wants the \"default\" colors of napari. We return\n # a single white color.\n if colors.shape[-1] == 0:\n warnings.warn(\n trans._(\n \"Given color input is empty. Converting input to a white color array.\",\n deferred=True,\n )\n )\n return np.ones((1, 4), dtype=np.float32)\n\n colors = np.atleast_2d(colors)\n\n # Arrays with more than two dimensions don't have a clear\n # conversion method to a color array and thus raise an error.\n if colors.ndim > 2:\n raise ValueError(\n trans._(\n \"Given colors input should contain one or two dimensions. Received array with {ndim} dimensions.\",\n deferred=True,\n ndim=colors.ndim,\n )\n )\n\n # User provided a list of numbers as color input. This input\n # cannot be coerced into something understandable and thus\n # will return an error.\n if colors.shape[0] == 1 and colors.shape[1] not in {3, 4}:\n raise ValueError(\n trans._(\n \"Given color array has an unsupported format. 
Received the following array:\\n{colors}\\nA proper color array should have 3-4 columns with a row per data entry.\",\n deferred=True,\n colors=colors,\n )\n )\n\n # The user gave a list of colors, but it contains a wrong number\n # of columns. This check will also drop Nx1 (2D) arrays, since\n # numpy has vectors, and representing colors in this way\n # (column vector-like) is redundant. However, this results in a\n # warning and not a ValueError since we know the number of colors\n # in this dataset, meaning we can save the napari session by\n # rendering the data in white, which better than crashing.\n if not 3 <= colors.shape[1] <= 4:\n warnings.warn(\n trans._(\n \"Given colors input should contain three or four columns. Received array with {shape} columns. Converting input to a white color array.\",\n deferred=True,\n shape=colors.shape[1],\n )\n )\n return np.ones((len(colors), 4), dtype=np.float32)\n\n # Arrays with floats and ints can be safely converted to the proper format\n if kind in ['f', 'i', 'u']:\n return _convert_array_to_correct_format(colors)\n\n else:\n raise ValueError(\n trans._(\n \"Data type of array ({color_dtype}) not supported.\",\n deferred=True,\n color_dtype=colors.dtype,\n )\n )\n\n\ndef _convert_array_to_correct_format(colors: np.ndarray) -> np.ndarray:\n \"\"\"Asserts shape, dtype and normalization of given color array.\n\n This function deals with arrays which are already 'well-behaved',\n i.e. have (almost) the correct number of columns and are able to represent\n colors correctly. It then it makes sure that the array indeed has exactly\n four columns and that its values are normalized between 0 and 1, with a\n data type of float32.\n\n Parameters\n ----------\n colors : np.ndarray\n Input color array, perhaps un-normalized and without the alpha channel.\n\n Returns\n -------\n colors : np.ndarray\n Nx4, float32 color array with values in the range [0, 1]\n \"\"\"\n if colors.shape[1] == 3:\n colors = np.column_stack(\n [colors, np.ones(len(colors), dtype=np.float32)]\n )\n\n if colors.min() < 0:\n raise ValueError(\n trans._(\n \"Colors input had negative values.\",\n deferred=True,\n )\n )\n\n if colors.max() > 1:\n warnings.warn(\n trans._(\n \"Colors with values larger than one detected. napari will normalize these colors for you. 
If you'd like to convert these yourself, please use the proper method from skimage.color.\",\n deferred=True,\n )\n )\n colors = _normalize_color_array(colors)\n return np.atleast_2d(np.asarray(colors, dtype=np.float32))\n\n\ndef _handle_str_list_like(colors: Sequence) -> np.ndarray:\n \"\"\"Converts lists or arrays filled with strings to the proper color array\n format.\n\n Parameters\n ----------\n colors : list-like\n A sequence of string colors\n\n Returns\n -------\n color_array : np.ndarray\n Nx4, float32 color array\n \"\"\"\n color_array = np.empty((len(colors), 4), dtype=np.float32)\n for idx, c in enumerate(colors):\n try:\n color_array[idx, :] = _color_switch[type(c)](c)\n except (ValueError, TypeError, KeyError):\n raise ValueError(\n trans._(\n \"Invalid color found: {color} at index {idx}.\",\n deferred=True,\n color=c,\n idx=idx,\n )\n )\n return color_array\n\n\ndef _handle_none(color) -> np.ndarray:\n \"\"\"Converts color given as None to black.\n\n Parameters\n ----------\n color : NoneType\n None value given as a color\n\n Returns\n -------\n arr : np.ndarray\n 1x4 numpy array of float32 zeros\n\n \"\"\"\n return np.zeros((1, 4), dtype=np.float32)\n\n\ndef _normalize_color_array(colors: np.ndarray) -> np.ndarray:\n \"\"\"Normalize all array values to the range [0, 1].\n\n The added complexity here stems from the fact that if a row in the given\n array contains four identical value a simple normalization might raise a\n division by zero exception.\n\n Parameters\n ----------\n colors : np.ndarray\n A numpy array with values possibly outside the range of [0, 1]\n\n Returns\n -------\n colors : np.ndarray\n Copy of input array with normalized values\n \"\"\"\n colors = colors.astype(np.float32, copy=True)\n out_of_bounds_idx = np.unique(np.where((colors > 1) | (colors < 0))[0])\n out_of_bounds = colors[out_of_bounds_idx]\n norm = np.linalg.norm(out_of_bounds, np.inf, axis=1)\n out_of_bounds = out_of_bounds / norm[:, np.newaxis]\n colors[out_of_bounds_idx] = out_of_bounds\n return colors.astype(np.float32)\n\n\n_color_switch: Dict[Any, Callable] = {\n str: _handle_str,\n np.str_: _handle_str,\n list: _handle_list_like,\n tuple: _handle_list_like,\n types.GeneratorType: _handle_generator,\n np.ndarray: _handle_array,\n type(None): _handle_none,\n}\n\n\ndef _create_hex_to_name_dict():\n \"\"\"Create a dictionary mapping hexadecimal RGB colors into their\n 'official' name.\n\n Returns\n -------\n hex_to_rgb : dict\n Mapping from hexadecimal RGB ('#ff0000') to name ('red').\n \"\"\"\n colordict = get_color_dict()\n hex_to_name = {f\"{v.lower()}ff\": k for k, v in colordict.items()}\n hex_to_name[\"#00000000\"] = \"transparent\"\n return hex_to_name\n\n\ndef get_color_namelist():\n \"\"\"A wrapper around vispy's get_color_names designed to add a\n \"transparent\" (alpha = 0) color to it.\n\n Once https://github.com/vispy/vispy/pull/1794 is merged this\n function is no longer necessary.\n\n Returns\n -------\n color_dict : list\n A list of all valid vispy color names plus \"transparent\".\n \"\"\"\n names = get_color_names()\n names.append('transparent')\n return names\n\n\nhex_to_name = _create_hex_to_name_dict()\n\n\ndef _check_color_dim(val):\n \"\"\"Ensures input is Nx4.\n\n Parameters\n ----------\n val : np.ndarray\n A color array of possibly less than 4 columns\n\n Returns\n -------\n val : np.ndarray\n A four columns version of the input array. 
If the original array\n was a missing the fourth channel, it's added as 1.0 values.\n \"\"\"\n val = np.atleast_2d(val)\n if val.shape[1] not in (3, 4):\n raise RuntimeError(\n trans._(\n 'Value must have second dimension of size 3 or 4',\n deferred=True,\n )\n )\n\n if val.shape[1] == 3:\n val = np.column_stack([val, np.float32(1.0)])\n return val\n\n\ndef rgb_to_hex(rgbs: Sequence) -> np.ndarray:\n \"\"\"Convert RGB to hex quadruplet.\n\n Taken from vispy with slight modifications.\n\n Parameters\n ----------\n rgbs : Sequence\n A list-like container of colors in RGBA format with values\n between [0, 1]\n\n Returns\n -------\n arr : np.ndarray\n An array of the hex representation of the input colors\n\n \"\"\"\n rgbs = _check_color_dim(rgbs)\n return np.array(\n [\n f'#{\"%02x\" * 4}' % tuple((255 * rgb).astype(np.uint8))\n for rgb in rgbs\n ],\n '|U9',\n )\n", "\"\"\"\nDisplay a points layer on top of an image layer using the add_points and\nadd_image APIs\n\"\"\"\n\nimport numpy as np\nfrom skimage import data\nfrom skimage.color import rgb2gray\nimport napari\n\n\n# add the image\nviewer = napari.view_image(rgb2gray(data.astronaut()))\n# add the points\npoints = np.array([[100, 100], [200, 200], [333, 111]])\nsize = np.array([10, 20, 20])\nviewer.add_points(points, size=size)\n\n# unselect the image layer\nviewer.layers.selection.discard(viewer.layers[0])\n\n# adjust some of the points layer properties\nlayer = viewer.layers[1]\n\n# change the layer name\nlayer.name = 'points'\n\n# change the layer visibility\nlayer.visible = False\nlayer.visible = True\n\n# select the layer\nviewer.layers.selection.add(layer)\n# deselect the layer\nviewer.layers.selection.remove(layer)\n# or: viewer.layers.selection.discard(layer)\n\n# change the layer opacity\nlayer.opacity = 0.9\n\n# change the layer point symbol using an alias\nlayer.symbol = '+'\n\n# change the layer point n_dimensional status\nlayer.n_dimensional = True\n\n# change the layer mode\nlayer.mode = 'add'\n\nnapari.run()\n", "import numpy as np\n\nfrom napari.layers import Labels\n\n\ndef test_random_multiscale():\n \"\"\"Test instantiating Labels layer with random 2D multiscale data.\"\"\"\n shapes = [(40, 20), (20, 10), (10, 5)]\n np.random.seed(0)\n data = [np.random.randint(20, size=s) for s in shapes]\n layer = Labels(data, multiscale=True)\n assert layer.data == data\n assert layer.multiscale is True\n assert layer.editable is False\n assert layer.ndim == len(shapes[0])\n np.testing.assert_array_equal(layer.extent.data[1], shapes[0])\n assert layer.rgb is False\n assert layer._data_view.ndim == 2\n\n\ndef test_infer_multiscale():\n \"\"\"Test instantiating Labels layer with random 2D multiscale data.\"\"\"\n shapes = [(40, 20), (20, 10), (10, 5)]\n np.random.seed(0)\n data = [np.random.randint(20, size=s) for s in shapes]\n layer = Labels(data)\n assert layer.data == data\n assert layer.multiscale is True\n assert layer.editable is False\n assert layer.ndim == len(shapes[0])\n np.testing.assert_array_equal(layer.extent.data[1], shapes[0])\n assert layer.rgb is False\n assert layer._data_view.ndim == 2\n\n\ndef test_3D_multiscale():\n \"\"\"Test instantiating Labels layer with 3D data.\"\"\"\n shapes = [(8, 40, 20), (4, 20, 10), (2, 10, 5)]\n np.random.seed(0)\n data = [np.random.randint(20, size=s) for s in shapes]\n layer = Labels(data, multiscale=True)\n assert layer.data == data\n assert layer.multiscale is True\n assert layer.editable is False\n assert layer.ndim == len(shapes[0])\n 
np.testing.assert_array_equal(layer.extent.data[1], shapes[0])\n assert layer.rgb is False\n assert layer._data_view.ndim == 2\n", "import gc\nimport os\nimport weakref\nfrom dataclasses import dataclass\nfrom typing import List\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom qtpy.QtGui import QGuiApplication\nfrom qtpy.QtWidgets import QMessageBox\n\nfrom napari._qt.dialogs._tests.test_reader_dialog_get_choice import (\n MockQtReaderDialog,\n)\nfrom napari._tests.utils import (\n add_layer_by_type,\n check_viewer_functioning,\n layer_test_data,\n restore_settings_on_exit,\n skip_local_popups,\n skip_on_win_ci,\n)\nfrom napari._vispy.utils.gl import fix_data_dtype\nfrom napari.settings import get_settings\nfrom napari.utils.interactions import mouse_press_callbacks\nfrom napari.utils.io import imread\nfrom napari.utils.theme import available_themes\n\ntry:\n import npe2 # noqa: F401\n\n BUILTINS_DISP = 'napari'\n BUILTINS_NAME = 'builtins'\nexcept ImportError:\n BUILTINS_DISP = BUILTINS_NAME = 'builtins'\n\n\ndef test_qt_viewer(make_napari_viewer):\n \"\"\"Test instantiating viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n assert viewer.title == 'napari'\n assert view.viewer == viewer\n\n assert len(viewer.layers) == 0\n assert view.layers.model().rowCount() == 0\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_qt_viewer_with_console(make_napari_viewer):\n \"\"\"Test instantiating console from viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n # Check console is created when requested\n assert view.console is not None\n assert view.dockConsole.widget() is view.console\n\n\ndef test_qt_viewer_toggle_console(make_napari_viewer):\n \"\"\"Test instantiating console from viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n # Check console has been created when it is supposed to be shown\n view.toggle_console_visibility(None)\n assert view._console is not None\n assert view.dockConsole.widget() is view.console\n\n\[email protected]('layer_class, data, ndim', layer_test_data)\ndef test_add_layer(make_napari_viewer, layer_class, data, ndim):\n\n viewer = make_napari_viewer(ndisplay=int(np.clip(ndim, 2, 3)))\n view = viewer.window._qt_viewer\n\n add_layer_by_type(viewer, layer_class, data)\n check_viewer_functioning(viewer, view, data, ndim)\n\n\ndef test_new_labels(make_napari_viewer):\n \"\"\"Test adding new labels layer.\"\"\"\n # Add labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n viewer._new_labels()\n assert np.max(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add labels with image already present\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer._new_labels()\n assert np.max(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_new_points(make_napari_viewer):\n \"\"\"Test adding new points layer.\"\"\"\n # Add 
labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n viewer.add_points()\n assert len(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add points with image already present\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer.add_points()\n assert len(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_new_shapes_empty_viewer(make_napari_viewer):\n \"\"\"Test adding new shapes layer.\"\"\"\n # Add labels to empty viewer\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n viewer.add_shapes()\n assert len(viewer.layers[0].data) == 0\n assert len(viewer.layers) == 1\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n # Add points with image already present\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n viewer.add_image(data)\n viewer.add_shapes()\n assert len(viewer.layers[1].data) == 0\n assert len(viewer.layers) == 2\n assert view.layers.model().rowCount() == len(viewer.layers)\n\n assert viewer.dims.ndim == 2\n assert view.dims.nsliders == viewer.dims.ndim\n assert np.sum(view.dims._displayed_sliders) == 0\n\n\ndef test_z_order_adding_removing_images(make_napari_viewer):\n \"\"\"Test z order is correct after adding/ removing images.\"\"\"\n data = np.ones((10, 10))\n\n viewer = make_napari_viewer()\n vis = viewer.window._qt_viewer.layer_to_visual\n viewer.add_image(data, colormap='red', name='red')\n viewer.add_image(data, colormap='green', name='green')\n viewer.add_image(data, colormap='blue', name='blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Remove and re-add image\n viewer.layers.remove('red')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n viewer.add_image(data, colormap='red', name='red')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Remove two other images\n viewer.layers.remove('green')\n viewer.layers.remove('blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n # Add two other layers back\n viewer.add_image(data, colormap='green', name='green')\n viewer.add_image(data, colormap='blue', name='blue')\n order = [vis[x].order for x in viewer.layers]\n np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))\n\n\n@skip_on_win_ci\ndef test_screenshot(make_napari_viewer):\n \"Test taking a screenshot\"\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n # Add image\n data = np.random.random((10, 15))\n viewer.add_image(data)\n\n # Add labels\n data = np.random.randint(20, size=(10, 15))\n viewer.add_labels(data)\n\n # Add points\n data = 20 * 
np.random.random((10, 2))\n viewer.add_points(data)\n\n # Add vectors\n data = 20 * np.random.random((10, 2, 2))\n viewer.add_vectors(data)\n\n # Add shapes\n data = 20 * np.random.random((10, 4, 2))\n viewer.add_shapes(data)\n\n # Take screenshot\n with pytest.warns(FutureWarning):\n screenshot = viewer.window.qt_viewer.screenshot(flash=False)\n screenshot = viewer.window.screenshot(flash=False, canvas_only=True)\n assert screenshot.ndim == 3\n\n\[email protected](\"new approach\")\ndef test_screenshot_dialog(make_napari_viewer, tmpdir):\n \"\"\"Test save screenshot functionality.\"\"\"\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n # Add image\n data = np.random.random((10, 15))\n viewer.add_image(data)\n\n # Add labels\n data = np.random.randint(20, size=(10, 15))\n viewer.add_labels(data)\n\n # Add points\n data = 20 * np.random.random((10, 2))\n viewer.add_points(data)\n\n # Add vectors\n data = 20 * np.random.random((10, 2, 2))\n viewer.add_vectors(data)\n\n # Add shapes\n data = 20 * np.random.random((10, 4, 2))\n viewer.add_shapes(data)\n\n # Save screenshot\n input_filepath = os.path.join(tmpdir, 'test-save-screenshot')\n mock_return = (input_filepath, '')\n with mock.patch('napari._qt._qt_viewer.QFileDialog') as mocker, mock.patch(\n 'napari._qt._qt_viewer.QMessageBox'\n ) as mocker2:\n mocker.getSaveFileName.return_value = mock_return\n mocker2.warning.return_value = QMessageBox.Yes\n viewer.window._qt_viewer._screenshot_dialog()\n # Assert behaviour is correct\n expected_filepath = input_filepath + '.png' # add default file extension\n assert os.path.exists(expected_filepath)\n output_data = imread(expected_filepath)\n expected_data = viewer.window._qt_viewer.screenshot(flash=False)\n assert np.allclose(output_data, expected_data)\n\n\[email protected](\n \"dtype\",\n [\n 'int8',\n 'uint8',\n 'int16',\n 'uint16',\n 'int32',\n 'float16',\n 'float32',\n 'float64',\n ],\n)\ndef test_qt_viewer_data_integrity(make_napari_viewer, dtype):\n \"\"\"Test that the viewer doesn't change the underlying array.\"\"\"\n image = np.random.rand(10, 32, 32)\n image *= 200 if dtype.endswith('8') else 2 ** 14\n image = image.astype(dtype)\n imean = image.mean()\n\n viewer = make_napari_viewer()\n layer = viewer.add_image(image.copy())\n data = layer.data\n\n datamean = np.mean(data)\n assert datamean == imean\n # toggle dimensions\n viewer.dims.ndisplay = 3\n datamean = np.mean(data)\n assert datamean == imean\n # back to 2D\n viewer.dims.ndisplay = 2\n datamean = np.mean(data)\n assert datamean == imean\n # also check that vispy gets (almost) the same data\n datamean = np.mean(fix_data_dtype(data))\n assert np.allclose(datamean, imean, rtol=5e-04)\n\n\ndef test_points_layer_display_correct_slice_on_scale(make_napari_viewer):\n viewer = make_napari_viewer()\n data = np.zeros((60, 60, 60))\n viewer.add_image(data, scale=[0.29, 0.26, 0.26])\n pts = viewer.add_points(name='test', size=1, ndim=3)\n pts.add((8.7, 0, 0))\n viewer.dims.set_point(0, 30 * 0.29) # middle plane\n layer = viewer.layers[1]\n indices, scale = layer._slice_data(layer._slice_indices)\n np.testing.assert_equal(indices, [0])\n\n\ndef test_qt_viewer_clipboard_with_flash(make_napari_viewer, qtbot):\n viewer = make_napari_viewer()\n # make sure clipboard is empty\n QGuiApplication.clipboard().clear()\n clipboard_image = QGuiApplication.clipboard().image()\n assert clipboard_image.isNull()\n\n # capture screenshot\n with pytest.warns(FutureWarning):\n viewer.window.qt_viewer.clipboard(flash=True)\n\n 
viewer.window.clipboard(flash=False, canvas_only=True)\n\n clipboard_image = QGuiApplication.clipboard().image()\n assert not clipboard_image.isNull()\n\n # ensure the flash effect is applied\n assert (\n viewer.window._qt_viewer._canvas_overlay.graphicsEffect() is not None\n )\n assert hasattr(\n viewer.window._qt_viewer._canvas_overlay, \"_flash_animation\"\n )\n qtbot.wait(500) # wait for the animation to finish\n assert viewer.window._qt_viewer._canvas_overlay.graphicsEffect() is None\n assert not hasattr(\n viewer.window._qt_viewer._canvas_overlay, \"_flash_animation\"\n )\n\n # clear clipboard and grab image from application view\n QGuiApplication.clipboard().clear()\n clipboard_image = QGuiApplication.clipboard().image()\n assert clipboard_image.isNull()\n\n # capture screenshot of the entire window\n viewer.window.clipboard(flash=True)\n clipboard_image = QGuiApplication.clipboard().image()\n assert not clipboard_image.isNull()\n\n # ensure the flash effect is applied\n assert viewer.window._qt_window.graphicsEffect() is not None\n assert hasattr(viewer.window._qt_window, \"_flash_animation\")\n qtbot.wait(500) # wait for the animation to finish\n assert viewer.window._qt_window.graphicsEffect() is None\n assert not hasattr(viewer.window._qt_window, \"_flash_animation\")\n\n\ndef test_qt_viewer_clipboard_without_flash(make_napari_viewer):\n viewer = make_napari_viewer()\n # make sure clipboard is empty\n QGuiApplication.clipboard().clear()\n clipboard_image = QGuiApplication.clipboard().image()\n assert clipboard_image.isNull()\n\n # capture screenshot\n with pytest.warns(FutureWarning):\n viewer.window.qt_viewer.clipboard(flash=False)\n\n viewer.window.clipboard(flash=False, canvas_only=True)\n\n clipboard_image = QGuiApplication.clipboard().image()\n assert not clipboard_image.isNull()\n\n # ensure the flash effect is not applied\n assert viewer.window._qt_viewer._canvas_overlay.graphicsEffect() is None\n assert not hasattr(\n viewer.window._qt_viewer._canvas_overlay, \"_flash_animation\"\n )\n\n # clear clipboard and grab image from application view\n QGuiApplication.clipboard().clear()\n clipboard_image = QGuiApplication.clipboard().image()\n assert clipboard_image.isNull()\n\n # capture screenshot of the entire window\n viewer.window.clipboard(flash=False)\n clipboard_image = QGuiApplication.clipboard().image()\n assert not clipboard_image.isNull()\n\n # ensure the flash effect is not applied\n assert viewer.window._qt_window.graphicsEffect() is None\n assert not hasattr(viewer.window._qt_window, \"_flash_animation\")\n\n\ndef test_active_keybindings(make_napari_viewer):\n \"\"\"Test instantiating viewer.\"\"\"\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n\n # Check only keybinding is Viewer\n assert len(view._key_map_handler.keymap_providers) == 1\n assert view._key_map_handler.keymap_providers[0] == viewer\n\n # Add a layer and check it is keybindings are active\n data = np.random.random((10, 15))\n layer_image = viewer.add_image(data)\n assert viewer.layers.selection.active == layer_image\n assert len(view._key_map_handler.keymap_providers) == 2\n assert view._key_map_handler.keymap_providers[0] == layer_image\n\n # Add a layer and check it is keybindings become active\n layer_image_2 = viewer.add_image(data)\n assert viewer.layers.selection.active == layer_image_2\n assert len(view._key_map_handler.keymap_providers) == 2\n assert view._key_map_handler.keymap_providers[0] == layer_image_2\n\n # Change active layer and check it is keybindings become 
active\n viewer.layers.selection.active = layer_image\n assert viewer.layers.selection.active == layer_image\n assert len(view._key_map_handler.keymap_providers) == 2\n assert view._key_map_handler.keymap_providers[0] == layer_image\n\n\n@dataclass\nclass MouseEvent:\n # mock mouse event class\n pos: List[int]\n\n\ndef test_process_mouse_event(make_napari_viewer):\n \"\"\"Test that the correct properties are added to the\n MouseEvent by _process_mouse_events.\n \"\"\"\n # make a mock mouse event\n new_pos = [25, 25]\n mouse_event = MouseEvent(\n pos=new_pos,\n )\n data = np.zeros((5, 20, 20, 20), dtype=int)\n data[1, 0:10, 0:10, 0:10] = 1\n\n viewer = make_napari_viewer()\n view = viewer.window._qt_viewer\n labels = viewer.add_labels(data, scale=(1, 2, 1, 1), translate=(5, 5, 5))\n\n @labels.mouse_drag_callbacks.append\n def on_click(layer, event):\n np.testing.assert_almost_equal(event.view_direction, [0, 1, 0, 0])\n np.testing.assert_array_equal(event.dims_displayed, [1, 2, 3])\n assert event.dims_point[0] == data.shape[0] // 2\n\n expected_position = view._map_canvas2world(new_pos)\n np.testing.assert_almost_equal(expected_position, list(event.position))\n\n viewer.dims.ndisplay = 3\n view._process_mouse_event(mouse_press_callbacks, mouse_event)\n\n\n@skip_local_popups\ndef test_memory_leaking(qtbot, make_napari_viewer):\n data = np.zeros((5, 20, 20, 20), dtype=int)\n data[1, 0:10, 0:10, 0:10] = 1\n viewer = make_napari_viewer()\n image = weakref.ref(viewer.add_image(data))\n labels = weakref.ref(viewer.add_labels(data))\n del viewer.layers[0]\n del viewer.layers[0]\n qtbot.wait(100)\n gc.collect()\n gc.collect()\n assert image() is None\n assert labels() is None\n\n\n@skip_local_popups\ndef test_leaks_image(qtbot, make_napari_viewer):\n\n viewer = make_napari_viewer(show=True)\n lr = weakref.ref(viewer.add_image(np.random.rand(10, 10)))\n dr = weakref.ref(lr().data)\n\n viewer.layers.clear()\n qtbot.wait(100)\n gc.collect()\n assert not gc.collect()\n assert not lr()\n assert not dr()\n\n\n@skip_local_popups\ndef test_leaks_labels(qtbot, make_napari_viewer):\n viewer = make_napari_viewer(show=True)\n lr = weakref.ref(\n viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))\n )\n dr = weakref.ref(lr().data)\n viewer.layers.clear()\n qtbot.wait(100)\n gc.collect()\n assert not gc.collect()\n assert not lr()\n assert not dr()\n\n\[email protected](\"theme\", available_themes())\ndef test_canvas_color(make_napari_viewer, theme):\n \"\"\"Test instantiating viewer with different themes.\n\n See: https://github.com/napari/napari/issues/3278\n \"\"\"\n # This test is to make sure the application starts with\n # with different themes\n get_settings().appearance.theme = theme\n viewer = make_napari_viewer()\n assert viewer.theme == theme\n\n\ndef test_remove_points(make_napari_viewer):\n viewer = make_napari_viewer()\n viewer.add_points([(1, 2), (2, 3)])\n del viewer.layers[0]\n viewer.add_points([(1, 2), (2, 3)])\n\n\ndef test_remove_image(make_napari_viewer):\n viewer = make_napari_viewer()\n viewer.add_image(np.random.rand(10, 10))\n del viewer.layers[0]\n viewer.add_image(np.random.rand(10, 10))\n\n\ndef test_remove_labels(make_napari_viewer):\n viewer = make_napari_viewer()\n viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))\n del viewer.layers[0]\n viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))\n\n\[email protected]('multiscale', [False, True])\ndef test_mixed_2d_and_3d_layers(make_napari_viewer, multiscale):\n \"\"\"Test bug in setting 
corner_pixels from qt_viewer.on_draw\"\"\"\n viewer = make_napari_viewer()\n\n img = np.ones((512, 256))\n # canvas size must be large enough that img fits in the canvas\n canvas_size = tuple(3 * s for s in img.shape)\n expected_corner_pixels = np.asarray([[0, 0], [img.shape[0], img.shape[1]]])\n\n vol = np.stack([img] * 8, axis=0)\n if multiscale:\n img = [img[::s, ::s] for s in (1, 2, 4)]\n viewer.add_image(img)\n img_multi_layer = viewer.layers[0]\n viewer.add_image(vol)\n\n viewer.dims.order = (0, 1, 2)\n viewer.window._qt_viewer.canvas.size = canvas_size\n viewer.window._qt_viewer.on_draw(None)\n assert np.all(img_multi_layer.corner_pixels == expected_corner_pixels)\n\n viewer.dims.order = (2, 0, 1)\n viewer.window._qt_viewer.on_draw(None)\n assert np.all(img_multi_layer.corner_pixels == expected_corner_pixels)\n\n viewer.dims.order = (1, 2, 0)\n viewer.window._qt_viewer.on_draw(None)\n assert np.all(img_multi_layer.corner_pixels == expected_corner_pixels)\n\n\ndef test_remove_add_image_3D(make_napari_viewer):\n \"\"\"\n Test that adding, removing and readding an image layer in 3D does not cause issues\n due to the vispy node change. See https://github.com/napari/napari/pull/3670\n \"\"\"\n viewer = make_napari_viewer(ndisplay=3)\n img = np.ones((10, 10, 10))\n\n layer = viewer.add_image(img)\n viewer.layers.remove(layer)\n viewer.layers.append(layer)\n\n\n@skip_on_win_ci\n@skip_local_popups\ndef test_qt_viewer_multscale_image_out_of_view(make_napari_viewer):\n \"\"\"Test out-of-view multiscale image viewing fix.\n\n Just verifies that no RuntimeError is raised in this scenario.\n\n see: https://github.com/napari/napari/issues/3863.\n \"\"\"\n # show=True required to test fix for OpenGL error\n viewer = make_napari_viewer(ndisplay=2, show=True)\n viewer.add_shapes(\n data=[\n np.array(\n [[1500, 4500], [4500, 4500], [4500, 1500], [1500, 1500]],\n dtype=float,\n )\n ],\n shape_type=['polygon'],\n )\n viewer.add_image([np.eye(1024), np.eye(512), np.eye(256)])\n\n\ndef test_surface_mixed_dim(make_napari_viewer):\n \"\"\"Test that adding a layer that changes the world ndim\n when ndisplay=3 before the mouse cursor has been updated\n doesn't raise an error.\n\n See PR: https://github.com/napari/napari/pull/3881\n \"\"\"\n viewer = make_napari_viewer(ndisplay=3)\n\n verts = np.array([[0, 0, 0], [0, 20, 10], [10, 0, -10], [10, 10, -10]])\n faces = np.array([[0, 1, 2], [1, 2, 3]])\n values = np.linspace(0, 1, len(verts))\n data = (verts, faces, values)\n viewer.add_surface(data)\n\n timeseries_values = np.vstack([values, values])\n timeseries_data = (verts, faces, timeseries_values)\n viewer.add_surface(timeseries_data)\n\n\ndef test_try_reader_from_settings(make_napari_viewer, tmpdir, layers):\n \"\"\"Test opening file with reader saved in settings\"\"\"\n viewer = make_napari_viewer()\n im_pth = os.path.join(tmpdir, 'layer.png')\n extension = '.png'\n layers[0].save(im_pth, plugin=BUILTINS_DISP)\n\n readers = {BUILTINS_DISP: BUILTINS_NAME}\n with restore_settings_on_exit():\n # read successfully with settings\n get_settings().plugins.extension2reader = {extension: BUILTINS_DISP}\n error_message = viewer.window._qt_viewer._try_reader_from_settings(\n readers, extension, im_pth\n )\n assert error_message is None\n assert len(viewer.layers) == 1\n assert viewer.layers[0].source.reader_plugin == BUILTINS_NAME\n\n # find plugin from settings but it fails to read\n os.remove(im_pth)\n error_message = viewer.window._qt_viewer._try_reader_from_settings(\n readers, extension, im_pth\n )\n assert 
error_message.startswith(\n f\"Tried to open file with {BUILTINS_DISP}, but reading failed\"\n )\n\n # fail to find plugin from settings\n with restore_settings_on_exit():\n get_settings().plugins.extension2reader = {\n extension: 'not-a-real-name'\n }\n error_message = viewer.window._qt_viewer._try_reader_from_settings(\n readers, extension, im_pth\n )\n assert error_message.startswith(\n \"Can't find not-a-real-name plugin associated with .png files.\"\n )\n\n\ndef test_get_and_try_preferred_reader(make_napari_viewer, tmpdir, layers):\n \"\"\"Test opening file with user preference and persisting preference\"\"\"\n viewer = make_napari_viewer()\n im_pth = os.path.join(tmpdir, 'layer.png')\n layers[0].save(im_pth, plugin=BUILTINS_DISP)\n error_message = 'Test error message'\n readers = {BUILTINS_DISP: BUILTINS_NAME, 'not-a-plugin': 'not-a-plugin'}\n\n # open successfully without persisting\n with restore_settings_on_exit():\n get_settings().plugins.extension2reader = {}\n reader_dialog = MockQtReaderDialog(\n im_pth, readers=readers, error_message=error_message\n )\n reader_dialog._set_plugin_choice(BUILTINS_DISP)\n reader_dialog._set_persist_choice(False)\n viewer.window._qt_viewer._get_and_try_preferred_reader(\n reader_dialog, readers, error_message\n )\n assert len(viewer.layers) == 1\n assert viewer.layers[0].source.reader_plugin == BUILTINS_NAME\n assert get_settings().plugins.extension2reader == {}\n\n # open successfully and persist choice\n with restore_settings_on_exit():\n get_settings().plugins.extension2reader = {}\n reader_dialog = MockQtReaderDialog(\n im_pth,\n readers=readers,\n error_message=error_message,\n extension='.png',\n )\n reader_dialog._set_plugin_choice(BUILTINS_DISP)\n reader_dialog._set_persist_choice(True)\n viewer.window._qt_viewer._get_and_try_preferred_reader(\n reader_dialog, readers, error_message\n )\n assert get_settings().plugins.extension2reader == {\n '.png': BUILTINS_DISP\n }\n", "import numpy as np\nimport pytest\n\nfrom napari.utils.geometry import (\n bounding_box_to_face_vertices,\n clamp_point_to_bounding_box,\n distance_between_point_and_line_3d,\n face_coordinate_from_bounding_box,\n find_front_back_face,\n inside_triangles,\n intersect_line_with_axis_aligned_bounding_box_3d,\n intersect_line_with_axis_aligned_plane,\n intersect_line_with_multiple_planes_3d,\n intersect_line_with_plane_3d,\n line_in_quadrilateral_3d,\n line_in_triangles_3d,\n point_in_quadrilateral_2d,\n project_points_onto_plane,\n rotation_matrix_from_vectors_2d,\n rotation_matrix_from_vectors_3d,\n)\n\nsingle_point = np.array([10, 10, 10])\nexpected_point_single = np.array([[10, 0, 10]])\nexpected_distance_single = np.array([10])\nmultiple_point = np.array(\n [[10, 10, 10], [20, 10, 30], [20, 40, 20], [10, -5, 30]]\n)\nexpected_multiple_point = np.array(\n [[10, 0, 10], [20, 0, 30], [20, 0, 20], [10, 0, 30]]\n)\nexpected_distance_multiple = np.array([10, 10, 40, -5])\n\n\[email protected](\n \"point,expected_projected_point,expected_distances\",\n [\n (single_point, expected_point_single, expected_distance_single),\n (multiple_point, expected_multiple_point, expected_distance_multiple),\n ],\n)\ndef test_project_point_to_plane(\n point, expected_projected_point, expected_distances\n):\n plane_point = np.array([20, 0, 0])\n plane_normal = np.array([0, 1, 0])\n projected_point, distance_to_plane = project_points_onto_plane(\n point, plane_point, plane_normal\n )\n\n np.testing.assert_allclose(projected_point, expected_projected_point)\n 
np.testing.assert_allclose(distance_to_plane, expected_distances)\n\n\[email protected](\n \"vec_1, vec_2\",\n [\n (np.array([10, 0]), np.array([0, 5])),\n (np.array([0, 5]), np.array([0, 5])),\n (np.array([0, 5]), np.array([0, -5])),\n ],\n)\ndef test_rotation_matrix_from_vectors_2d(vec_1, vec_2):\n\n rotation_matrix = rotation_matrix_from_vectors_2d(vec_1, vec_2)\n\n rotated_1 = rotation_matrix.dot(vec_1)\n unit_rotated_1 = rotated_1 / np.linalg.norm(rotated_1)\n\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n\n np.testing.assert_allclose(unit_rotated_1, unit_vec_2)\n\n\[email protected](\n \"vec_1, vec_2\",\n [\n (np.array([10, 0, 0]), np.array([0, 5, 0])),\n (np.array([0, 5, 0]), np.array([0, 5, 0])),\n (np.array([0, 5, 0]), np.array([0, -5, 0])),\n ],\n)\ndef test_rotation_matrix_from_vectors_3d(vec_1, vec_2):\n \"\"\"Test that calculated rotation matrices align vec1 to vec2.\"\"\"\n rotation_matrix = rotation_matrix_from_vectors_3d(vec_1, vec_2)\n\n rotated_1 = rotation_matrix.dot(vec_1)\n unit_rotated_1 = rotated_1 / np.linalg.norm(rotated_1)\n\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n\n np.testing.assert_allclose(unit_rotated_1, unit_vec_2)\n\n\[email protected](\n \"line_position, line_direction, plane_position, plane_normal, expected\",\n [\n ([0, 0, 1], [0, 0, -1], [0, 0, 0], [0, 0, 1], [0, 0, 0]),\n ([1, 1, 1], [-1, -1, -1], [0, 0, 0], [0, 0, 1], [0, 0, 0]),\n ([2, 2, 2], [-1, -1, -1], [1, 1, 1], [0, 0, 1], [1, 1, 1]),\n ],\n)\ndef test_intersect_line_with_plane_3d(\n line_position, line_direction, plane_position, plane_normal, expected\n):\n \"\"\"Test that arbitrary line-plane intersections are correctly calculated.\"\"\"\n intersection = intersect_line_with_plane_3d(\n line_position, line_direction, plane_position, plane_normal\n )\n np.testing.assert_allclose(expected, intersection)\n\n\ndef test_intersect_line_with_multiple_planes_3d():\n \"\"\"Test intersecting a ray with multiple planes and getting the intersection\n with each one.\n \"\"\"\n line_position = [0, 0, 1]\n line_direction = [0, 0, -1]\n plane_positions = [[0, 0, 0], [0, 0, 1]]\n plane_normals = [[0, 0, 1], [0, 0, 1]]\n intersections = intersect_line_with_multiple_planes_3d(\n line_position, line_direction, plane_positions, plane_normals\n )\n\n expected = np.array([[0, 0, 0], [0, 0, 1]])\n np.testing.assert_allclose(intersections, expected)\n\n\[email protected](\n \"point, bounding_box, expected\",\n [\n ([5, 5, 5], np.array([[0, 10], [0, 10], [0, 10]]), [5, 5, 5]),\n ([10, 10, 10], np.array([[0, 10], [0, 10], [0, 10]]), [9, 9, 9]),\n ([5, 5, 15], np.array([[0, 10], [0, 10], [0, 10]]), [5, 5, 9]),\n ],\n)\ndef test_clamp_point_to_bounding_box(point, bounding_box, expected):\n \"\"\"Test that points are correctly clamped to the limits of the data.\n Note: bounding boxes are calculated from layer extents, points are clamped\n to the range of valid indices into each dimension.\n\n e.g. 
for a shape (10,) array, data is clamped to the range (0, 9)\n \"\"\"\n clamped_point = clamp_point_to_bounding_box(point, bounding_box)\n np.testing.assert_allclose(expected, clamped_point)\n\n\ndef test_clamp_multiple_points_to_bounding_box():\n \"\"\"test that an array of points can be clamped to the bbox\"\"\"\n points = np.array([[10, 10, 10], [0, 5, 0], [20, 0, 20]])\n bbox = np.array([[0, 25], [0, 10], [3, 25]])\n expected_points = np.array([[10, 9, 10], [0, 5, 3], [20, 0, 20]])\n clamped_points = clamp_point_to_bounding_box(points, bbox)\n np.testing.assert_array_equal(clamped_points, expected_points)\n\n\[email protected](\n 'bounding_box, face_normal, expected',\n [\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([1, 0, 0]), 10),\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([-1, 0, 0]), 5),\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([0, 1, 0]), 20),\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([0, -1, 0]), 10),\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([0, 0, 1]), 30),\n (np.array([[5, 10], [10, 20], [20, 30]]), np.array([0, 0, -1]), 20),\n ],\n)\ndef test_face_coordinate_from_bounding_box(\n bounding_box, face_normal, expected\n):\n \"\"\"Test that the correct face coordinate is calculated.\n\n Face coordinate is a float which is the value where a face of a bounding box,\n defined by a face normal, intersects the axis the normal vector is aligned with.\n \"\"\"\n face_coordinate = face_coordinate_from_bounding_box(\n bounding_box, face_normal\n )\n np.testing.assert_allclose(expected, face_coordinate)\n\n\[email protected](\n 'plane_intercept, plane_normal, line_start, line_direction, expected',\n [\n (\n 0,\n np.array([0, 0, 1]),\n np.array([0, 0, 1]),\n np.array([0, 0, 1]),\n [0, 0, 0],\n ),\n (\n 10,\n np.array([0, 0, 1]),\n np.array([0, 0, 0]),\n np.array([0, 0, 1]),\n [0, 0, 10],\n ),\n (\n 10,\n np.array([0, 1, 0]),\n np.array([0, 1, 0]),\n np.array([0, 1, 0]),\n [0, 10, 0],\n ),\n (\n 10,\n np.array([1, 0, 0]),\n np.array([1, 0, 0]),\n np.array([1, 0, 0]),\n [10, 0, 0],\n ),\n ],\n)\ndef test_line_with_axis_aligned_plane(\n plane_intercept, plane_normal, line_start, line_direction, expected\n):\n \"\"\"Test that intersections between line and axis aligned plane are\n calculated correctly.\n \"\"\"\n intersection = intersect_line_with_axis_aligned_plane(\n plane_intercept, plane_normal, line_start, line_direction\n )\n np.testing.assert_allclose(expected, intersection)\n\n\ndef test_bounding_box_to_face_vertices_3d():\n \"\"\"Test that bounding_box_to_face_vertices returns a dictionary of vertices\n for each face of an axis aligned 3D bounding box.\n \"\"\"\n bounding_box = np.array([[5, 10], [15, 20], [25, 30]])\n face_vertices = bounding_box_to_face_vertices(bounding_box)\n expected = {\n 'x_pos': np.array(\n [[5, 15, 30], [5, 20, 30], [10, 20, 30], [10, 15, 30]]\n ),\n 'x_neg': np.array(\n [[5, 15, 25], [5, 20, 25], [10, 20, 25], [10, 15, 25]]\n ),\n 'y_pos': np.array(\n [[5, 20, 25], [5, 20, 30], [10, 20, 30], [10, 20, 25]]\n ),\n 'y_neg': np.array(\n [[5, 15, 25], [5, 15, 30], [10, 15, 30], [10, 15, 25]]\n ),\n 'z_pos': np.array(\n [[10, 15, 25], [10, 15, 30], [10, 20, 30], [10, 20, 25]]\n ),\n 'z_neg': np.array(\n [[5, 15, 25], [5, 15, 30], [5, 20, 30], [5, 20, 25]]\n ),\n }\n for k in face_vertices:\n np.testing.assert_allclose(expected[k], face_vertices[k])\n\n\ndef test_bounding_box_to_face_vertices_nd():\n \"\"\"Test that bounding_box_to_face_vertices returns a dictionary of vertices\n for each face of an axis aligned nD 
bounding box.\n \"\"\"\n bounding_box = np.array([[0, 0], [0, 0], [5, 10], [15, 20], [25, 30]])\n face_vertices = bounding_box_to_face_vertices(bounding_box)\n expected = {\n 'x_pos': np.array(\n [[5, 15, 30], [5, 20, 30], [10, 20, 30], [10, 15, 30]]\n ),\n 'x_neg': np.array(\n [[5, 15, 25], [5, 20, 25], [10, 20, 25], [10, 15, 25]]\n ),\n 'y_pos': np.array(\n [[5, 20, 25], [5, 20, 30], [10, 20, 30], [10, 20, 25]]\n ),\n 'y_neg': np.array(\n [[5, 15, 25], [5, 15, 30], [10, 15, 30], [10, 15, 25]]\n ),\n 'z_pos': np.array(\n [[10, 15, 25], [10, 15, 30], [10, 20, 30], [10, 20, 25]]\n ),\n 'z_neg': np.array(\n [[5, 15, 25], [5, 15, 30], [5, 20, 30], [5, 20, 25]]\n ),\n }\n for k in face_vertices:\n np.testing.assert_allclose(expected[k], face_vertices[k])\n\n\[email protected](\n 'triangle, expected',\n [\n (np.array([[[-1, -1], [-1, 1], [1, 0]]]), True),\n (np.array([[[1, 1], [2, 1], [1.5, 2]]]), False),\n ],\n)\ndef test_inside_triangles(triangle, expected):\n \"\"\"Test that inside triangles returns an array of True for triangles which\n contain the origin, False otherwise.\n \"\"\"\n inside = np.all(inside_triangles(triangle))\n assert inside == expected\n\n\[email protected](\n 'point, quadrilateral, expected',\n [\n (\n np.array([0.5, 0.5]),\n np.array([[0, 0], [0, 1], [1, 1], [0, 1]]),\n True,\n ),\n (np.array([2, 2]), np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), False),\n ],\n)\ndef test_point_in_quadrilateral_2d(point, quadrilateral, expected):\n \"\"\"Test that point_in_quadrilateral_2d determines whether a point\n is inside a quadrilateral.\n \"\"\"\n inside = point_in_quadrilateral_2d(point, quadrilateral)\n assert inside == expected\n\n\[email protected](\n 'click_position, quadrilateral, view_dir, expected',\n [\n (\n np.array([0, 0, 0]),\n np.array([[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]]),\n np.array([0, 0, 1]),\n True,\n ),\n (\n np.array([0, 0, 5]),\n np.array([[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]]),\n np.array([0, 0, 1]),\n True,\n ),\n (\n np.array([0, 5, 0]),\n np.array([[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]]),\n np.array([0, 0, 1]),\n False,\n ),\n ],\n)\ndef test_click_in_quadrilateral_3d(\n click_position, quadrilateral, view_dir, expected\n):\n \"\"\"Test that click in quadrilateral 3d determines whether the projection\n of a 3D point onto a plane falls within a 3d quadrilateral projected\n onto the same plane\n \"\"\"\n in_quadrilateral = line_in_quadrilateral_3d(\n click_position, view_dir, quadrilateral\n )\n assert in_quadrilateral == expected\n\n\[email protected](\n 'click_position, bounding_box, view_dir, expected',\n [\n (\n np.array([5, 5, 5]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 0, 1]),\n ([0, 0, -1], [0, 0, 1]),\n ),\n (\n np.array([-5, -5, -5]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 0, 1]),\n (None, None),\n ),\n (\n np.array([5, 5, 5]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 1, 0]),\n ([0, -1, 0], [0, 1, 0]),\n ),\n (\n np.array([5, 5, 5]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([1, 0, 0]),\n ([-1, 0, 0], [1, 0, 0]),\n ),\n ],\n)\ndef test_find_front_back_face(\n click_position, bounding_box, view_dir, expected\n):\n \"\"\"Test that find front_back face finds the faces of an axis aligned\n bounding box that a ray intersects with.\n \"\"\"\n result = find_front_back_face(click_position, bounding_box, view_dir)\n for idx, item in enumerate(result):\n if item is not None:\n np.testing.assert_allclose(item, expected[idx])\n else:\n assert item == 
expected[idx]\n\n\[email protected](\n 'line_position, line_direction, bounding_box, face_normal, expected',\n [\n (\n np.array([5, 5, 5]),\n np.array([0, 0, 1]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 0, 1]),\n np.array([5, 5, 10]),\n ),\n (\n np.array([5, 5, 5]),\n np.array([0, 0, 1]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 0, -1]),\n np.array([5, 5, 0]),\n ),\n (\n np.array([5, 5, 5]),\n np.array([0, 1, 0]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 1, 0]),\n np.array([5, 10, 5]),\n ),\n (\n np.array([5, 5, 5]),\n np.array([0, 1, 0]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([0, 1, 0]),\n np.array([5, 10, 5]),\n ),\n (\n np.array([5, 5, 5]),\n np.array([1, 0, 0]),\n np.array([[0, 10], [0, 10], [0, 10]]),\n np.array([1, 0, 0]),\n np.array([10, 5, 5]),\n ),\n ],\n)\ndef test_intersect_line_with_axis_aligned_bounding_box_3d(\n line_position, line_direction, bounding_box, face_normal, expected\n):\n \"\"\"Test that intersections between lines and axis aligned\n bounding boxes are correctly computed.\n \"\"\"\n result = intersect_line_with_axis_aligned_bounding_box_3d(\n line_position, line_direction, bounding_box, face_normal\n )\n np.testing.assert_allclose(expected, result)\n\n\ndef test_distance_between_point_and_line_3d():\n \"\"\"Test that distance between points and lines are correctly computed.\"\"\"\n line_position = np.random.random(size=3)\n line_direction = np.array([0, 0, 1])\n\n # find a point a random distance away on the line\n point_on_line = line_position + np.random.random(1) * line_direction\n\n # find a point a fixed distance from the point in a direction perpendicular\n # to the line direction.\n expected_distance = np.random.random(1)\n point = point_on_line + expected_distance * np.array([0, 1, 0])\n\n # calculate distance and check that it is correct\n distance = distance_between_point_and_line_3d(\n point, line_position, line_direction\n )\n\n np.testing.assert_allclose(distance, expected_distance)\n\n\ndef test_line_in_triangles_3d():\n line_point = np.array([0, 5, 5])\n line_direction = np.array([1, 0, 0])\n\n triangles = np.array(\n [\n [[10, 0, 0], [19, 10, 5], [5, 5, 10]],\n [[10, 4, 4], [10, 0, 0], [10, 4, 0]],\n ]\n )\n in_triangle = line_in_triangles_3d(line_point, line_direction, triangles)\n np.testing.assert_array_equal(in_triangle, [True, False])\n" ]
[ [ "numpy.issubdtype", "numpy.iinfo", "numpy.dtype", "numpy.finfo" ], [ "numpy.random.random" ], [ "numpy.asarray", "numpy.linalg.norm", "numpy.ones", "numpy.atleast_2d", "numpy.float32", "numpy.zeros", "numpy.where" ], [ "numpy.array" ], [ "numpy.testing.assert_array_equal", "numpy.random.seed", "numpy.random.randint" ], [ "numpy.asarray", "numpy.all", "numpy.max", "numpy.mean", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.allclose", "numpy.clip", "numpy.eye", "numpy.stack", "numpy.testing.assert_almost_equal", "numpy.zeros", "numpy.random.rand", "numpy.array", "numpy.sum", "numpy.random.random", "numpy.random.seed", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.vstack" ], [ "numpy.random.random", "numpy.linalg.norm", "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
romenr/bachelorthesis
[ "1f4325d5f10274597efb81194b6869768cc38659" ]
[ "controller/model.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nfrom parameters import *\nfrom snn import TargetFollowingSNN, ObstacleAvoidanceSNN, nest_simulate\n\n\nclass Model:\n\n\tdef __init__(self):\n\t\tself.snn_tf = TargetFollowingSNN()\n\t\tself.snn_oa = ObstacleAvoidanceSNN()\n\t\tself.turn_pre = 0.0\n\t\tself.angle_pre = 0.0\n\t\tself.weights_tf = []\n\t\tself.weights_oa = []\n\n\tdef reset(self):\n\t\tself.turn_pre = 0.0\n\t\tself.angle_pre = 0.0\n\n\tdef simulate(self, state):\n\t\tself.snn_tf.set_input(state)\n\t\tself.snn_oa.set_input(state)\n\n\t\t# Simulate both networks\n\t\tnest_simulate()\n\n\t\toutput, self.weights_tf = self.snn_tf.get_results()\n\t\toutput_p, self.weights_oa = self.snn_oa.get_results()\n\n\t\tangle = self.get_turning_angle(output)\n\t\tangle_oa = self.get_obstacle_avoidance_angle(output_p)\n\n\t\tif np.any(state[\"prox\"][1:] > 0.25) and not (\n\t\t\t\t\t\tabs(angle) > abs(angle_oa) and np.sign(angle) == np.sign(angle_oa)):\n\t\t\tangle = angle_oa\n\t\treturn angle\n\n\tdef get_turning_angle(self, snn_output):\n\t\t# Snake turning model\n\t\tm_l = snn_output[left_neuron]\n\t\tm_r = snn_output[right_neuron]\n\t\tangle = a_max * (m_l - m_r)\n\t\tc = math.sqrt((m_l**2 + m_r**2)/2.0)\n\t\tself.turn_pre = c * angle + (1 - c) * self.turn_pre\n\t\treturn self.turn_pre\n\n\tdef get_obstacle_avoidance_angle(self, snn_output):\n\t\tm_l = snn_output[left_neuron]\n\t\tm_r = snn_output[right_neuron]\n\t\tangle = a_avoidance_max * (m_l - m_r)\n\t\treturn angle\n\n\tdef get_turning_radius(self, n_l, n_r):\n\t\t# Snake turning model\n\t\tm_l = n_l/n_max\n\t\tm_r = n_r/n_max\n\t\ta = m_l - m_r\n\t\tc = math.sqrt((m_l**2 + m_r**2)/2.0)\n\t\tself.turn_pre = c*0.5*a + (1-c)*self.turn_pre\n\t\tif abs(self.turn_pre) < 0.001:\n\t\t\tradius = 0\n\t\telse:\n\t\t\tradius = r_min/self.turn_pre\n\t\treturn radius\n" ]
[ [ "numpy.sign", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rraymondhp/dybm
[ "3d618874a2f8838eaeca17ce40649a3789e9f140" ]
[ "src/pydybm/arraymath/dycupy/random.py" ]
[ "\"\"\"``cupy``-based implementation of the random module\n\"\"\"\n\n__author__ = \"Taro Sekiyama\"\n__copyright__ = \"(C) Copyright IBM Corp. 2016\"\n\n\nimport numpy.random as r\nimport cupy as cp\n\n\ndef _to_gpu(a):\n arr = cp.empty_like(a)\n arr.set(a)\n return arr\n\n\nclass RandomState:\n def __init__(self, seed):\n self._random = r.RandomState(seed)\n\n def uniform(self, low=0.0, high=1.0, size=None):\n return _to_gpu(self._random.uniform(low=low, high=high, size=size))\n\n def normal(self, loc=0.0, scale=1.0, size=None):\n return _to_gpu(self._random.normal(loc=loc, scale=scale, size=size))\n\n def get_state(self):\n return self._random.get_state()\n\n def set_state(self, *args):\n return self._random.set_state(*args)\n\n def rand(self, *args):\n return _to_gpu(self._random.rand(*args))\n\n\nseed = r.seed\n\n\ndef normal(loc=0.0, scale=1.0, size=None):\n return _to_gpu(r.normal(loc=loc, scale=scale, size=size))\n\n\ndef uniform(low=0.0, high=1.0, size=None):\n return _to_gpu(r.uniform(low=low, high=high, size=size))\n\n\ndef rand(*args):\n return _to_gpu(r.rand(*args))\n\n\ndef randn(*args):\n return _to_gpu(r.randn(*args))\n\n\ndef random(size=None):\n return _to_gpu(r.random(size=size))\n" ]
[ [ "numpy.random.random", "numpy.random.normal", "numpy.random.randn", "numpy.random.rand", "numpy.random.uniform", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mosvlad/tumor_mask_rcnn
[ "16d6b20431553e6e1cf1594686a1f503171d5f8d" ]
[ "inference_2.py" ]
[ "import os\nimport cv2\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport skimage\nimport glob\n\nROOT_DIR = os.getcwd()\n\nsys.path.append(ROOT_DIR)\nfrom Mask_RCNN.mrcnn import utils\nfrom Mask_RCNN.mrcnn import visualize\nfrom Mask_RCNN.mrcnn.visualize import display_images\nimport Mask_RCNN.mrcnn.model as modellib\nfrom Mask_RCNN.mrcnn.model import log\n\nfrom train import TrainConfig\nfrom train import TumorDataset\n\n\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\nprint(os.getcwd())\ncustom_WEIGHTS_PATH = \"Mask_RCNN/logs/tumor_detect20211207T1827/mask_rcnn_tumor_detect_0100.h5\"\n\n\nclass InferenceConfig(TrainConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\ndef get_ax(rows=1, cols=1, size=7):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax\n\ninference_config = InferenceConfig()\n\nDATASET_DIR = './brain-tumor-segmentation/brain_tumor_data/'\ndataset_val = TumorDataset()\ndataset_val.load_brain_tumor_images(DATASET_DIR, 'val')\ndataset_val.prepare()\n\nwith tf.device(\"/cpu:0\"):\n model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR,\n config=inference_config)\n\nprint(\"Loading weights \", custom_WEIGHTS_PATH)\nmodel.load_weights(custom_WEIGHTS_PATH, by_name=True)\n\nfrom importlib import reload\nreload(visualize)\n\nimage_id = 3\nimage, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config, image_id, use_mini_mask=False)\ninfo = dataset_val.image_info[image_id]\nprint(\"image ID: {}.{} ({}) {}\".format(info[\"source\"], info[\"id\"], image_id,\n dataset_val.image_reference(image_id)))\n\n# Run object detection\nresults = model.detect([image], verbose=1)\nr = results[0]\nprint(r)\n\nvisualize.display_differences(\n image,\n gt_bbox, gt_class_id, gt_mask,\n r['rois'], r['class_ids'], r['scores'], r['masks'],\n class_names=['tumor'], title=\"\", ax=get_ax(),\n show_mask=True, show_box=True)\nplt.show()" ]
[ [ "tensorflow.device", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
huxian123/mask_detecting
[ "a9564d595edaff9317378fbe682cad4400760bff" ]
[ "yolo.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/logs/trained_weights_final.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/my_class.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), 
self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n print(out_classes)\n print(self.class_names)\n\n font = ImageFont.truetype(font='font/simsun.ttc',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(50) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\nif __name__ == '__main__':\n yolo = YOLO()\n data_path = 'test_data/b025.jpg'\n video_path = 'test_data/Aha.mp4'\n detect_video(yolo, video_path)\n try:\n image = Image.open(data_path)\n except:\n print(\"Open error! Try again\")\n else:\n r_image = yolo.detect_image(image)\n r_image.show()\n yolo.close_session()\n" ]
[ [ "numpy.expand_dims", "numpy.random.seed", "numpy.asarray", "numpy.random.shuffle", "numpy.floor", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ParhamYZ/MusicSourceSeparation
[ "26a42fbebdf50d2ae2ef674ef64f4c88cbe7e8e3" ]
[ "tests/test_transforms.py" ]
[ "import pytest\r\nimport numpy as np\r\nimport torch\r\nfrom openunmix import transforms\r\n\r\n\r\[email protected](params=[4096, 44100])\r\ndef nb_timesteps(request):\r\n return int(request.param)\r\n\r\n\r\[email protected](params=[1, 2])\r\ndef nb_channels(request):\r\n return request.param\r\n\r\n\r\[email protected](params=[1, 2])\r\ndef nb_samples(request):\r\n return request.param\r\n\r\n\r\[email protected](params=[1024, 2048, 4096])\r\ndef nfft(request):\r\n return int(request.param)\r\n\r\n\r\[email protected](params=[2, 4])\r\ndef hop(request, nfft):\r\n return nfft // request.param\r\n\r\n\r\[email protected](params=[\"torch\", \"asteroid\"])\r\ndef method(request):\r\n return request.param\r\n\r\n\r\[email protected]\r\ndef audio(request, nb_samples, nb_channels, nb_timesteps):\r\n return torch.rand((nb_samples, nb_channels, nb_timesteps))\r\n\r\n\r\ndef test_stft(audio, nfft, hop, method):\r\n # we should only test for center=True as\r\n # False doesn't pass COLA\r\n # https://github.com/pytorch/audio/issues/500\r\n stft, istft = transforms.make_filterbanks(n_fft=nfft, n_hop=hop, center=True, method=method)\r\n\r\n X = stft(audio)\r\n X = X.detach()\r\n out = istft(X, length=audio.shape[-1])\r\n assert np.sqrt(np.mean((audio.detach().numpy() - out.detach().numpy()) ** 2)) < 1e-6\r\n" ]
[ [ "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CsabaWirnhardt/cbm
[ "1822addd72881057af34ac6a7c2a1f02ea511225", "1822addd72881057af34ac6a7c2a1f02ea511225", "1822addd72881057af34ac6a7c2a1f02ea511225" ]
[ "scripts/extraction/postgisC6Extract.py", "cbm/datas/api.py", "cbm/extract/pgS1bsExtract.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Guido Lemoine\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n# Version : \n\nimport time\nimport sys\nimport os\nimport io\nimport json\n\nimport psycopg2\nimport psycopg2.extras\nimport rasterio\nimport pandas as pd\nfrom rasterstats import zonal_stats\nfrom datetime import datetime\n\nimport download_with_boto3 as dwb\n\nstart = time.time()\n\n# Rev 1.1. configuration parsing from json\nwith open('s3_config.json', 'r') as f:\n s3config = json.load(f)\ns3config = s3config['s3']\n\nwith open('db_config_c6.json', 'r') as f:\n dbconfig = json.load(f)\ndbconfig = dbconfig['database']\n\n# Input data base is postgis\nconnString = \"host={} dbname={} user={} port={} password={}\".format(\n dbconfig['connection']['host'], dbconfig['connection']['dbname'],\n dbconfig['connection']['dbuser'], dbconfig['connection']['port'],\n dbconfig['connection']['dbpasswd'])\n\ninconn = psycopg2.connect(connString)\nif not inconn:\n print(\"No in connection established\")\n sys.exit(1)\n\nincurs = inconn.cursor()\n\nsrid = -1\n\nsridSql = \"select srid from geometry_columns where f_table_name = '{}';\"\n\ntry:\n incurs.execute(sridSql.format(dbconfig['tables']['parcel_table']))\n result = incurs.fetchone()\n if not result:\n print(\"{} does not exist or is not a spatial table\")\n else:\n srid = result[0]\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n sys.exit(1)\n\nprint(\"Parcel srid = \", srid)\n\n# Get the first image records that is not yet processed\nimagesql = \"\"\"\n SELECT id, reference, obstime from dias_catalogue, {}\n WHERE footprint && wkb_geometry and {} = '{}'\n And obstime between '{}' and '{}'\n And status ='ingested'\n And card='c6' order by obstime asc limit 1\"\"\"\n\nupdateSql = \"\"\"update dias_catalogue set status='{}' where id = {} and status = '{}'\"\"\"\n\ntry:\n incurs.execute(imagesql.format(\n dbconfig['tables']['aoi_table'], dbconfig['args']['aoi_field'],\n dbconfig['args']['name'], dbconfig['args']['startdate'],\n dbconfig['args']['enddate']))\n result = incurs.fetchone()\n if not result:\n print(\"No images with status 'ingested' found\")\n inconn.close()\n sys.exit(1)\n else:\n oid = result[0]\n reference = result[1]\n obstime = result[2]\n # Fails if this record is changed in the meantime\n incurs.execute(updateSql.format('inprogress', oid, 'ingested'))\n inconn.commit()\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n sys.exit(1)\n\n# Count parcels inside this image footprint\nparcelcountsql = \"\"\"\n SELECT count(es.ogc_fid)\n FROM {} es, dias_catalogue dias, {} aoi\n WHERE es.wkb_geometry && st_transform(dias.footprint, {})\n And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography, 1000)::geometry, {})\n And st_area(es.wkb_geometry) > 3000.0\n And aoi.{} = '{}' And dias.id = {}\n -- and es.ogc_fid not in (select distinct pid from {} where obsid = {})\n \"\"\"\n\nincurs.execute(parcelcountsql.format(\n dbconfig['tables']['parcel_table'],\n dbconfig['tables']['aoi_table'], srid, srid,\n dbconfig['args']['aoi_field'], dbconfig['args']['name'],\n oid, dbconfig['tables']['results_table'], oid))\n\nnrecs = incurs.fetchone()\n\n# If no parcels inside, we can stop\nif nrecs[0] == 0:\n print(\"No parcels inside image bounds\")\n incurs.execute(updateSql.format('No_parcels', oid, 'inprogress'))\n 
inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n# Copy input data from S3 to local disk\n# CREODIAS\ns3path = \"Sentinel-1/SAR/CARD-COH6/{}/{}/{}.tif\".format(\n datetime.strftime(obstime, '%Y/%m/%d'), reference, reference)\n\n# SOBLOO\n# s3path = \"{}/SLC/{}/\".format(reference.split('_')[0], reference)\n\nflist = dwb.listFileFromS3(s3path)\n\nif not flist:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('C6_nopath', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\ns3path = flist[0]\n\nfpath = 'data/{}'.format(s3path.split('/')[-1])\n\noutsrid = -1\n\nif dwb.getFileFromS3(s3path, fpath) == 0:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 C6 img', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\nelse:\n # Only if the header file is present can we open the image to\n # check its projection\n with rasterio.open(fpath) as src:\n outsrid = src.crs.to_epsg()\n\nprint('Out SRID: ', outsrid)\n\n# Open a connection to save results\noutconn = psycopg2.connect(connString)\nif not outconn:\n print(\"No out connection established\")\n incurs.execute(updateSql.format('no_out_conn', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n# Get the parcel polygon in this image' footprint\n\nincurs.close()\n# Open a named cursor\nincurs = inconn.cursor(name='fetch_image_coverage',\n cursor_factory=psycopg2.extras.DictCursor)\n\nparcelsql = \"\"\"\n SELECT es.ogc_fid, ST_AsGeoJSON(st_transform(es.wkb_geometry, {}))::json\n FROM {} es, dias_catalogue dias, {} aoi\n WHERE es.wkb_geometry && st_transform(dias.footprint, {})\n And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography,\n 1000)::geometry, {})\n And st_area(es.wkb_geometry) > 3000.0\n And aoi.{} = '{}'\n And dias.id = {}\n -- and es.ogc_fid not in (select distinct pid from {} where obsid = {})\n \"\"\"\n\nincurs.execute(parcelsql.format(\n outsrid, dbconfig['tables']['parcel_table'],\n dbconfig['tables']['aoi_table'], srid, srid,\n dbconfig['args']['aoi_field'], dbconfig['args']['name'],\n oid, dbconfig['tables']['results_table'], oid))\n\nsqlload = time.time() - start\nprint(\"Images loaded and {} features selected from database in {} seconds\".format(\n nrecs[0], sqlload))\n\nnrows = {}\nnrows['VV'] = 0\nnrows['VH'] = 0\n\naffine = {}\narray = {}\n\nbands = ['VV', 'VH']\n\nwith rasterio.open(fpath) as src:\n for b in bands:\n affine[b] = src.transform\n array[b] = src.read(bands.index(b) + 1)\n\n\nwhile True: # nrows['VV'] < 2:\n rowset = incurs.fetchmany(size=2000)\n\n if not rowset:\n break\n\n features = {\"type\": \"FeatureCollection\",\n \"features\": [{\"type\": \"feature\", \"geometry\": f[1],\n \"properties\": {\"pid\": int(f[0])}} for f in rowset]}\n\n for b in bands:\n\n zs = zonal_stats(features, array[b], affine=affine[b], stats=[\n \"count\", \"mean\", \"std\", \"min\", \"max\",\n \"percentile_25\", \"percentile_50\", \"percentile_75\"], prefix=\"\",\n nodata=0, geojson_out=True)\n\n df = pd.DataFrame(zs)\n\n df = pd.DataFrame.from_dict(df.properties.to_dict(), orient='index')\n\n df['obsid'] = oid\n df['band'] = b\n\n df.rename(index=str, columns={\n \"percentile_25\": \"p25\", \"percentile_50\": \"p50\",\n \"percentile_75\": \"p75\"}, inplace=True)\n\n nrows[b] = nrows[b] + len(df)\n # df is the dataframe\n if len(df) > 0:\n df.dropna(inplace=True)\n if 
len(df.values) > 0:\n df_columns = list(df)\n s_buf = io.StringIO()\n df.to_csv(s_buf, header=False, index=False, sep=',')\n s_buf.seek(0)\n outcurs = outconn.cursor()\n # print(tuple(df_columns))\n try:\n #psycopg2.extras.execute_batch(outcurs, insert_stmt, df.values)\n outcurs.copy_from(\n s_buf, dbconfig['tables']['results_table'],\n columns=tuple(df_columns), sep=',')\n outconn.commit()\n except psycopg2.IntegrityError as e:\n print(\"insert statement {} contains duplicate index\".format(\n insert_stmt))\n # except Error as e:\n # print(e)\n finally:\n outcurs.close()\n else:\n print(\"No valid data in block {}\".format(nrows[b]))\n\noutconn.close()\n\nincurs.close()\n\nincurs = inconn.cursor()\n\ntry:\n incurs.execute(updateSql.format('extracted', oid, 'inprogress'))\n inconn.commit()\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n if outconn:\n outconn.close()\n\nincurs.close()\ninconn.close()\n\nif os.path.exists(fpath):\n os.remove(fpath)\n\nprint(\"Total time required for {} features and {} bands: {} seconds\".format(\n nrows['VV'], len(bands), time.time() - start))\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Guido Lemoine, Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\nimport os\nimport os.path\nimport requests\nfrom os.path import join, normpath, isfile\n\nfrom cbm.utils import config\n\n\ndef parcel_by_loc(aoi, year, lon, lat, ptype=None,\n geom=False, wgs84=False, debug=False):\n\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/parcelByLocation?aoi={}&year={}&lon={}&lat={}\"\"\"\n if geom is True:\n requrl = f\"{requrl}&withGeometry=True\"\n if ptype not in [None, '']:\n requrl = f\"{requrl}&ptype={ptype}\"\n if wgs84 is True:\n requrl = f\"{requrl}&wgs84={wgs84}\"\n # print(requrl.format(api_url, aoi, year, lon, lat))\n response = requests.get(requrl.format(api_url, aoi, year, lon, lat),\n auth=(api_user, api_pass))\n if debug:\n print(requrl.format(api_url, aoi, year, lon, lat), response)\n return response.content\n\n\ndef parcel_by_id(aoi, year, pid, ptype=None, geom=False,\n wgs84=False, debug=False):\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/parcelById?aoi={}&year={}&pid={}\"\"\"\n if geom is True:\n requrl = f\"{requrl}&withGeometry=True\"\n if ptype not in [None, '']:\n requrl = f\"{requrl}&ptype={ptype}\"\n if wgs84 is True:\n requrl = f\"{requrl}&wgs84={wgs84}\"\n # print(requrl.format(api_url, aoi, year, pid))\n response = requests.get(requrl.format(api_url, aoi, year, pid),\n auth=(api_user, api_pass))\n if debug:\n print(requrl.format(api_url, aoi, year, pid), response)\n return response.content\n\n\ndef parcel_by_polygon(aoi, year, polygon, ptype=None, geom=False,\n wgs84=False, only_ids=True, debug=False):\n\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/parcelsByPolygon?aoi={}&year={}&polygon={}\"\"\"\n if geom is True:\n requrl = f\"{requrl}&withGeometry=True\"\n if only_ids is True:\n requrl = f\"{requrl}&only_ids=True\"\n if ptype not in [None, '']:\n requrl = f\"{requrl}&ptype={ptype}\"\n if wgs84 is True:\n requrl = f\"{requrl}&wgs84={wgs84}\"\n response = requests.get(requrl.format(api_url, aoi, year, polygon),\n auth=(api_user, api_pass))\n if debug:\n print(requrl.format(api_url, aoi, year, polygon), response)\n return 
response.content\n\n\ndef parcel_ts(aoi, year, pid, tstype='s2', ptype=None, band='', debug=False):\n\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/parcelTimeSeries?aoi={}&year={}&pid={}&tstype={}\"\"\"\n if ptype not in [None, '']:\n requrl = f\"{requrl}&ptype={ptype}\"\n if band not in [None, '']:\n requrl = f\"{requrl}&band={band}\"\n response = requests.get(requrl.format(api_url, aoi, year,\n pid, tstype, band),\n auth=(api_user, api_pass))\n if debug:\n print(requrl.format(api_url, aoi, year, pid, tstype, band), response)\n return response.content\n\n\ndef cbl(lon, lat, start_date, end_date, bands=None, lut=None, chipsize=None):\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/chipsByLocation?lon={}&lat={}&start_date={}&end_date={}\"\"\"\n band = '_'.join(bands)\n if band is not None:\n requrl = f\"{requrl}&band={band}\"\n if chipsize is not None:\n requrl = f\"{requrl}&chipsize={chipsize}\"\n if lut != '':\n requrl = f\"{requrl}&lut={lut}\"\n # print(requrl.format(api_url, lon, lat, start_date, end_date))\n response = requests.get(requrl.format(api_url, lon, lat,\n start_date, end_date),\n auth=(api_user, api_pass))\n return response\n\n\ndef rcbl(parcel, start_date, end_date, bands, chipsize, filespath,\n quiet=True):\n \"\"\"Get parcel raw chip images from RESTful API by location\"\"\"\n import os\n import os.path\n import pandas as pd\n from osgeo import osr, ogr\n import time\n start = time.time()\n api_url, api_user, api_pass = config.credentials('api')\n\n for band in bands:\n requrl = \"\"\"{}/query/rawChipByLocation?lon={}&lat={}&start_date={}&end_date={}\"\"\"\n if band is not None:\n requrl = f\"{requrl}&band={band}\"\n if chipsize is not None:\n requrl = f\"{requrl}&chipsize={chipsize}\"\n\n # Create a valid geometry from the returned JSON withGeometry\n geom = ogr.CreateGeometryFromJson(parcel.get('geom')[0])\n source = osr.SpatialReference()\n source.ImportFromEPSG(parcel.get('srid')[0])\n\n # Assign this projection to the geometry\n geom.AssignSpatialReference(source)\n target = osr.SpatialReference()\n target.ImportFromEPSG(4326)\n transform = osr.CoordinateTransformation(source, target)\n\n # And get the lon, lat for its centroid, so that we can center the chips\n # on the parcel\n centroid = geom.Centroid()\n centroid.Transform(transform)\n\n # Use pid for next request\n # pid = parcel['pid'][0]\n # cropname = parcel['cropname'][0]\n\n # Set up the rawChip request\n cen_x, cen_y = str(centroid.GetX()), str(centroid.GetY())\n\n response = requests.get(requrl.format(api_url, cen_y, cen_x, start_date,\n end_date, band, chipsize),\n auth=(api_user, api_pass))\n if not quiet:\n print(\"Request url:\", requrl.format(\n api_url, cen_y, cen_x, start_date, end_date, band, chipsize))\n print(\"Geom:\", geom)\n print(\"Source:\", source, \", Target:\", target)\n print(\"Centroid\", centroid)\n print(\"Response:\", response)\n # Directly create a pandas DataFrame from the json response\n df = pd.read_json(response.content)\n os.makedirs(filespath, exist_ok=True)\n df_file = normpath(join(filespath, f'images_list.{band}.csv'))\n df.to_csv(df_file, index=True, header=True)\n # print(f\"The response table is saved to: {df_file}\")\n\n # Download the GeoTIFFs that were just created in the user cache\n for c in df.chips:\n url = f\"{api_url}{c}\"\n outf = normpath(join(filespath, c.split('/')[-1]))\n if not isfile(outf):\n res = requests.get(url, stream=True)\n if not quiet:\n print(f\"Downloading 
{c.split('/')[-1]}\")\n with open(outf, \"wb\") as handle:\n for chunk in res.iter_content(chunk_size=512):\n if chunk: # filter out keep-alive new chunks\n handle.write(chunk)\n if not quiet:\n print(\n f\"Images for band '{band}', for the selected dates are downloaded.\")\n\n if not quiet:\n print(\"\\n------Total time------\")\n print(\n f\"Total time required for {len(bands)} bands: {time.time() - start} seconds.\")\n\n\ndef clouds(geom):\n import glob\n import json\n import rasterio\n from osgeo import osr\n from rasterstats import zonal_stats\n # Check whether our parcel is cloud free\n\n # We should have a list of GeoTIFFs ending with .SCL.tif\n tiflist = glob.glob('*.SCL.tif')\n\n for t in tiflist:\n with rasterio.open(t) as src:\n affine = src.transform\n CRS = src.crs\n data = src.read(1)\n\n # Reproject the parcel geometry in the image crs\n imageCRS = int(str(CRS).split(':')[-1])\n\n # Cross check with the projection of the geometry\n # This needs to be done for each image, because the parcel could be in\n # a straddle between (UTM) zones\n geomCRS = int(geom.GetSpatialReference().GetAuthorityCode(None))\n\n if geomCRS != imageCRS:\n target = osr.SpatialReference()\n target.ImportFromEPSG(imageCRS)\n source = osr.SpatialReference()\n source.ImportFromEPSG(geomCRS)\n transform = osr.CoordinateTransformation(source, target)\n geom.Transform(transform)\n\n # Format as a feature collection (with only 1 feature)\n # and extract the histogram\n features = {\"type\": \"FeatureCollection\",\n \"features\": [{\"type\": \"feature\",\n \"geometry\": json.loads(geom.ExportToJson()),\n \"properties\": {\"pid\": pid}}]}\n zs = zonal_stats(features, data, affine=affine, prefix=\"\",\n nodata=0, categorical=True, geojson_out=True)\n\n # This has only one record\n properties = zs[0].get('properties')\n\n # pid was used as a dummy key to make sure the histogram\n # values are in 'properties'\n del properties['pid']\n\n histogram = {int(float(k)): v for k, v in properties.items()}\n # print(t, histogram)\n\n\ndef get_options():\n api_url, api_user, api_pass = config.credentials('api')\n requrl = \"\"\"{}/query/options\"\"\"\n response = requests.get(requrl.format(api_url),\n auth=(api_user, api_pass))\n return response.content\n\n\ndef background(lon, lat, chipsize=512, extend=512, tms='Google',\n bg_path='', debug=False):\n # aoi='undefined', year='', pid='0000', quiet=True):\n \"\"\"Download the background image.\n\n Examples:\n background(lon, lat, 512, 512, 'Google', 'temp/test.tif', True)\n\n Arguments:\n lon, lat, longitude and latitude in decimal degrees (float).\n chipsize, size of the chip in pixels (int).\n extend, size of the chip in meters (float).\n tms, tile map server Google or Bing (str).\n bk_file, the name of the output file (str).\n \"\"\"\n\n # Get the api credentials\n api_url, api_user, api_pass = config.credentials('api')\n\n # The url to get the background image\n requrl = f\"lon={lon}&lat={lat}&chipsize={chipsize}&extend={extend}\"\n # print(f\"{api_url}/query/backgroundByLocation?{requrl}&tms={tms}&iformat=tif\")\n response = requests.get(\n f\"{api_url}/query/backgroundByLocation?{requrl}&tms={tms}&iformat=tif\",\n auth=(api_user, api_pass))\n # print(response)\n\n # Try to get the image link from the html response\n try:\n img_url = response.content.decode(\"utf-8\")\n # print(type(img_url), img_url)\n if img_url == '{}':\n if debug:\n print(\"Image not found...\")\n print(\n f\"{api_url}/query/backgroundByLocation?{requrl}&tms={tms}&iformat=tif\", response)\n return 
response\n else:\n if debug:\n print(\n f\"{api_url}/query/backgroundByLocation?{requrl}&tms={tms}&iformat=tif\", response)\n res = requests.get(img_url, stream=True)\n image_name = img_url.split('/')[-1].lower()\n bg_file = normpath(join(bg_path, image_name))\n\n with open(bg_file, \"wb\") as handle:\n for chunk in res.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n handle.write(chunk)\n\n return bg_file\n except AttributeError as err:\n return err\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Guido Lemoine\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nimport time\nimport sys\nimport os\nimport io\nimport json\nimport psycopg2\nimport psycopg2.extras\nimport rasterio\nfrom rasterstats import zonal_stats\nfrom datetime import datetime\nimport pandas as pd\n\nfrom cbm.utils import config\nfrom cbm.datas import db, object_storage\n\ndef extractS1bs(startdate, enddate):\n start = time.time()\n frootpath = 'tmp'\n\n values = config.read()\n dsc = values['set']['dataset']\n dias_catalogue = values['dataset'][dsc]['tables']['dias_catalog']\n parcels_table = values['dataset'][dsc]['tables']['parcels']\n results_table = values['dataset'][dsc]['tables']['s1']\n\n inconn = db.connection()\n if not inconn:\n print(\"No in connection established\")\n sys.exit(1)\n\n incurs = inconn.cursor()\n srid = -1\n sridSql = \"select srid from geometry_columns where f_table_name = '{}';\"\n\n try:\n incurs.execute(sridSql.format(parcels_table))\n result = incurs.fetchone()\n if not result:\n print(\"{} does not exist or is not a spatial table\")\n else:\n srid = result[0]\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n sys.exit(1)\n\n print(\"Parcel srid = \", srid)\n\n # Get the first image record that is not yet processed\n imagesql = f\"\"\"\n SELECT id, reference, obstime FROM {dias_catalogue}\n WHERE obstime between '{startdate}' And '{enddate}'\n And status ='ingested' And card = 's2'\n ORDER by obstime asc LIMIT 1\n \"\"\"\n updateSql = \"\"\"\n UPDATE {} SET status='{}'\n WHERE id = {} And status = '{}'\n \"\"\"\n\n with inconn:\n with inconn.cursor() as trans_cur:\n trans_cur.execute(imagesql)\n result = trans_cur.fetchone()\n if not result:\n print(\"All signatures for the given dates have been extracted.\")\n inconn.close()\n sys.exit(1)\n else:\n oid = result[0]\n reference = result[1]\n obstime = result[2]\n # Fails if this record is changed in the meantime\n trans_cur.execute(updateSql.format(\n dias_catalogue, 'inprogress', oid, 'ingested'))\n inconn.commit()\n\n print(reference)\n obstime = reference.split('_')[2][0:8]\n print(obstime)\n obs_path = \"{}/{}/{}\".format(obstime[0:4], obstime[4:6], obstime[6:8])\n print(obs_path)\n\n mgrs_tile = reference.split('_')[5]\n full_tstamp = reference.split('_')[2]\n\n # Copy input data from S3 to local disk\n dias = values['s3']['dias']\n if dias in ['EOSC', 'CREODIAS']:\n rootpath = 'Sentinel-1/SAR/CARD-BS'\n print(datetime.strptime(obstime, '%Y/%m/%d'), reference)\n s3path = '{}/{}/{}/{}.data/Gamma0_VV.img'.format(\n rootpath, datetime.strftime(obstime, '%Y/%m/%d'), reference, reference)\n elif dias == 'SOBLOO':\n s3path = '{}/GRD/{}/{}.data/Gamma0_VV.img'.format(\n reference.split('_')[0], reference, reference)\n\n fpath = f'{frootpath}/{reference}_VV.img'\n outsrid = -1\n\n if object_storage.get_file(s3path, fpath) == 0:\n 
print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 VV img', oid,'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n s3path = s3path.replace('.img', '.hdr')\n fpath = f'{frootpath}/{reference}_VV.hdr'\n\n if object_storage.get_file(s3path, fpath) == 0:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 VV hdr', oid,'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n else:\n # Only if the header file is present can we open the image to check its projection\n with rasterio.open(fpath.replace('hdr', 'img')) as src:\n outsrid = cbm.crs.to_epsg()\n\n print('Out SRID: ', outsrid)\n\n if dias in ['EOSC', 'CREODIAS']:\n rootpath = 'Sentinel-1/SAR/CARD-BS'\n s3path = '{}/{}/{}/{}.data/Gamma0_VH.img'.format(\n rootpath, datetime.strftime(obstime, '%Y/%m/%d'), reference, reference)\n elif dias == 'SOBLOO':\n s3path = '{}/GRD/{}/{}.data/Gamma0_VH.img'.format(\n reference.split('_')[0], reference, reference)\n fpath = f'{frootpath}/{reference}_VH.img'\n\n if object_storage.get_file(s3path, fpath) == 0:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 VH img', oid,'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n s3path = s3path.replace('.img', '.hdr')\n fpath = f'{frootpath}/{reference}_VH.hdr'\n\n if object_storage.get_file(s3path, fpath) == 0:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 VH hdr', oid,'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n\n # Open a connection to save results\n outconn = psycopg2.connect(connString)\n if not outconn:\n print(\"No out connection established\")\n incurs.execute(updateSql.format('no_out_conn', oid,'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n # Get the parcel polygon in this image' footprint\n\n incurs.close()\n # Open a named cursor\n incurs = inconn.cursor(name='fetch_image_coverage', cursor_factory=psycopg2.extras.DictCursor)\n\n dataset = config.get_value(['set', 'dataset'])\n pid_column = config.get_value(['dataset', dataset, 'columns', 'parcels_id'])\n\n parcelsql = f\"\"\"\n SELECT p.{pid_column}, ST_AsGeoJSON(st_transform(p.wkb_geometry,\n {outsrid}))::json\n FROM {parcels_table} p, {dias_catalogue} dc\n WHERE p.wkb_geometry && st_transform(dc.footprint, {srid})\n And st_area(p.wkb_geometry) > 3000.0\n And dc.id = {oid}\n -- And p.{pid_column} not in (SELECT distinct pid\n -- FROM {results_table} where obsid = {oid})\n \"\"\"\n incurs.execute(parcelsql)\n\n sqlload = time.time() - start\n print(\"Images loaded and nrecs[0] features selected from database in {} seconds\".format(sqlload))\n\n nrows = {}\n nrows['VV']=0\n nrows['VH']=0\n\n affine = {}\n array = {}\n\n bands = ['VV', 'VH']\n\n for b in bands:\n with rasterio.open(f'{frootpath}/{reference}_{b}.img') as src:\n affine[b] = cbm.transform\n array[b] = cbm.read(1)\n\n while True:\n rowset = incurs.fetchmany(size=2000)\n\n if not rowset:\n break\n\n features = { \"type\": \"FeatureCollection\",\n \"features\": [{\"type\": \"feature\", \"geometry\": f[1], \"properties\": {\"pid\": int(f[0])}} for f in rowset]}\n\n for b in bands:\n\n zs = zonal_stats(features, array[b], affine=affine[b], stats=[\"count\", \"mean\", \"std\", \"min\", \"max\", \"percentile_25\", 
\"percentile_50\", \"percentile_75\"], prefix=\"\", nodata=0, geojson_out=True)\n\n df = pd.DataFrame(zs)\n\n df = pd.DataFrame.from_dict(df.properties.to_dict(), orient='index')\n\n df['obsid'] = oid\n df['band'] = b\n\n df.rename(index=str, columns={\"percentile_25\": \"p25\", \"percentile_50\": \"p50\",\"percentile_75\": \"p75\"}, inplace=True)\n\n nrows[b] = nrows[b] + len(df)\n # df is the dataframe\n if len(df) > 0:\n df.dropna(inplace=True)\n if len(df.values) > 0:\n df_columns = list(df)\n s_buf = io.StringIO()\n df.to_csv(s_buf, header=False, index=False, sep=',')\n s_buf.seek(0)\n outcurs = outconn.cursor()\n #print(tuple(df_columns))\n try:\n #psycopg2.extras.execute_batch(outcurs, insert_stmt, df.values)\n outcurs.copy_from(s_buf, dbconfig['tables']['results_table'], columns = tuple(df_columns), sep = ',')\n outconn.commit()\n except psycopg2.IntegrityError as e:\n print(\"insert statement {} contains duplicate index\".format(insert_stmt))\n #except Error as e:\n # print(e)\n finally:\n outcurs.close()\n else:\n print(\"No valid data in block {}\".format(nrows[b]))\n\n outconn.close()\n\n incurs.close()\n\n incurs = inconn.cursor()\n\n try:\n incurs.execute(updateSql.format('extracted', oid, 'inprogress'))\n inconn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n if outconn:\n outconn.close()\n\n incurs.close()\n inconn.close()\n\n fpath = f'{frootpath}/{reference}_VV.img'\n\n if os.path.exists(fpath):\n os.remove(fpath)\n os.remove(fpath.replace('.img', '.hdr'))\n\n fpath = f'{frootpath}/{reference}_VH.img'\n\n if os.path.exists(fpath):\n os.remove(fpath)\n os.remove(fpath.replace('.img', '.hdr'))\n\n print(\"Total time required for {} features and {} bands: {} seconds\".format(nrows['VV'], len(bands), time.time() - start))\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.read_json" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sabuj7177/CovidProject
[ "b4b7bcfa5ace165520507f489dc74da7b695e2f0", "b4b7bcfa5ace165520507f489dc74da7b695e2f0", "b4b7bcfa5ace165520507f489dc74da7b695e2f0" ]
[ "coronet/main2_worker_gradient_quantization.py", "coronet/util_tf_v2_migrated.py", "coronet/main2_worker_gradient_quantization_with_gps.py" ]
[ "import time\n\nfrom numpy.random import seed\nseed(8) #1\n\nimport tensorflow\ntensorflow.random.set_seed(7)\n# tensorflow.random.set_random_seed(7)\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport os\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model ,load_model\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout\nfrom tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications.vgg16 import decode_predictions\nfrom keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport socket\n\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import layers\n\nfrom tensorflow.keras import optimizers\n\nfrom os import listdir\nimport util\n\nBASE_PATH = 'coronet_org_data/four_classes'\n# data_list = listdir('/content/covid-19/four_classes/train')\ndata_list = listdir(BASE_PATH + '/train2')\n\n# Delete some classes that may interfere\n\nprint(len(data_list))\n\nstart_time = time.time()\n\nDATASET_PATH = BASE_PATH + '/train2'\nVALIDATION_PATH = BASE_PATH + '/val'\ntest_dir = BASE_PATH + '/test'\nIMAGE_SIZE = (150, 150)\nNUM_CLASSES = len(data_list)\nBATCH_SIZE = 10 # try reducing batch size or freeze more layers if your GPU runs out of memory\nNUM_EPOCHS = 10\nLEARNING_RATE = 0.0001\nTCP_IP = '127.0.0.1'\nport = 17001\n\n\ndef safe_recv(size, server_socket):\n data = bytearray()\n while 1:\n try:\n temp = server_socket.recv(size - len(data))\n data.extend(temp)\n recv_size = len(data)\n if recv_size >= size:\n break\n except:\n print(\"Error\")\n data = bytes(data)\n return data\n\n\n# Train datagen here is a preprocessor\ntrain_datagen = ImageDataGenerator(rescale=1. 
/ 255,\n rotation_range=50,\n featurewise_center=True,\n featurewise_std_normalization=True,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.25,\n zoom_range=0.1,\n zca_whitening=True,\n channel_shift_range=20,\n horizontal_flip=True,\n vertical_flip=True,\n validation_split=0.2,\n fill_mode='constant')\n\n# For multiclass use categorical n for binary use binary\ntrain_batches = train_datagen.flow_from_directory(DATASET_PATH,\n target_size=IMAGE_SIZE,\n shuffle=True,\n batch_size=BATCH_SIZE,\n seed=42,\n class_mode=\"categorical\"\n # For multiclass use categorical n for binary use binary\n )\n\nvalid_batches = train_datagen.flow_from_directory(VALIDATION_PATH,\n target_size=IMAGE_SIZE,\n shuffle=True,\n batch_size=BATCH_SIZE,\n seed=42,\n class_mode=\"categorical\"\n # For multiclass use categorical n for binary use binary\n\n )\n\nfrom tensorflow.keras.applications import Xception\n\nconv_base = Xception(weights='imagenet',\n include_top=False,\n input_shape=(150, 150, 3))\n\nconv_base.trainable = True\n\nmodel = models.Sequential()\nmodel.add(conv_base)\n\nmodel.add(layers.Flatten())\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(256, activation='relu'))\nmodel.add(layers.Dense(4, activation='softmax'))\n\n# model.compile(loss='categorical_crossentropy', # for multiclass use categorical_crossentropy\n#\n# optimizer=optimizers.Adam(lr=LEARNING_RATE),\n# metrics=['acc'])\n\nprint(\"Batch len\")\nprint(len(train_batches))\nprint(len(valid_batches))\n\naccuracy = tf.keras.metrics.CategoricalAccuracy()\naccuracy_val = tf.keras.metrics.CategoricalAccuracy()\nloss_fn = tf.keras.losses.CategoricalCrossentropy()\noptimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)\n\n# STEP_SIZE_TRAIN=train_batches.n//train_batches.batch_size\n# STEP_SIZE_VALID=valid_batches.n//valid_batches.batch_size\nSTEP_SIZE_TRAIN = len(train_batches) - 1\nSTEP_SIZE_VALID = len(valid_batches) - 1\nprint(\"Step size len\")\nprint(STEP_SIZE_TRAIN)\nprint(STEP_SIZE_VALID)\n\n# result=model.fit_generator(train_batches,\n# steps_per_epoch =STEP_SIZE_TRAIN,\n# validation_data = valid_batches,\n# validation_steps = STEP_SIZE_VALID,\n# epochs= NUM_EPOCHS,\n# )\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((TCP_IP, port))\n\ntotal_gradient_size = 0\ntotal_weight_size = 0\ntotal_training_computation = 0\ntotal_training_communication = 0\n\nfor epoch in range(NUM_EPOCHS):\n print(\"###############################################\")\n # Iterate over the batches of a dataset.\n for step, (x, y) in enumerate(train_batches):\n train_comp = time.time()\n with tf.GradientTape() as tape:\n logits = model(x)\n # Compute the loss value for this batch.\n loss_value = loss_fn(y, logits)\n\n # Update the state of the `accuracy` metric.\n accuracy.update_state(y, logits)\n\n # Update the weights of the model to minimize the loss value.\n gradients = tape.gradient(loss_value, model.trainable_weights)\n mean_scaler, quantized_gradient, grad_shape = util.get_ternarized_gradients(gradients)\n # print(grad_shape)\n\n # only_grads_val = pickle.dumps(gradients, pickle.HIGHEST_PROTOCOL)\n # gradients_size = len(only_grads_val)\n # total_gradient_size += gradients_size\n # gradients_size = pickle.dumps(gradients_size, pickle.HIGHEST_PROTOCOL)\n # total_training_computation += time.time() - train_comp\n # train_comm = time.time()\n # # print(\"Size of gradients size; {}\", len(gradients_size))\n # s.sendall(gradients_size)\n # s.sendall(only_grads_val)\n\n mean_scaler = 
pickle.dumps(mean_scaler, pickle.HIGHEST_PROTOCOL)\n mean_scaler_size = len(mean_scaler)\n total_gradient_size += mean_scaler_size\n mean_scaler_size = pickle.dumps(mean_scaler_size, pickle.HIGHEST_PROTOCOL)\n\n quantized_gradient = pickle.dumps(quantized_gradient, pickle.HIGHEST_PROTOCOL)\n quantized_gradient_size = len(quantized_gradient)\n total_gradient_size += quantized_gradient_size\n quantized_gradient_size = pickle.dumps(quantized_gradient_size, pickle.HIGHEST_PROTOCOL)\n total_training_computation += time.time() - train_comp\n train_comm = time.time()\n\n s.sendall(quantized_gradient_size)\n s.sendall(quantized_gradient)\n\n s.sendall(mean_scaler_size)\n s.sendall(mean_scaler)\n\n\n recv_size = safe_recv(17, s)\n recv_size = pickle.loads(recv_size)\n total_weight_size += recv_size\n recv_data = safe_recv(recv_size, s)\n total_training_communication += time.time() - train_comm\n train_comp = time.time()\n weight_list = pickle.loads(recv_data)\n\n\n\n # print(gradients)\n # optimizer.apply_gradients(zip(gradients, model.trainable_weights))\n \n # weight_list = []\n # for w in model.trainable_weights:\n # weight_list.append(w.numpy())\n i =0\n for w in model.trainable_weights:\n w.assign(weight_list[i])\n i += 1\n\n # Logging the current accuracy value so far.\n if step % 20 == 0:\n print(\"Epoch:\", epoch, \"Step:\", step, \"Loss value:\", loss_value.numpy())\n print(\"Total running accuracy so far: %.3f\" % accuracy.result())\n total_training_computation += time.time() - train_comp\n if step >= STEP_SIZE_TRAIN:\n break\n\n # Reset the metric's state at the end of an epoch\n accuracy.reset_states()\n\n\n total_val_loss = 0\n for step, (x, y) in enumerate(valid_batches):\n with tf.GradientTape() as tape:\n logits = model(x)\n # Compute the loss value for this batch.\n loss_value = loss_fn(y, logits)\n\n # Update the state of the `accuracy` metric.\n accuracy_val.update_state(y, logits)\n total_val_loss += loss_value.numpy()\n if step >= STEP_SIZE_VALID:\n break\n\n # Logging the current accuracy value so far.\n print(\"Validation Loss value:\", total_val_loss/STEP_SIZE_VALID)\n print(\"Total validation accuracy so far: %.3f\" % (accuracy_val.result()))\n # Reset the metric's state at the end of an epoch\n accuracy_val.reset_states()\n\nelapsed_time = time.time()-start_time\nprint('Total time in {:.0f}m {:.0f}s'.format(\n elapsed_time // 60, elapsed_time % 60))\nprint('Training computation time in {:.0f}m {:.0f}s'.format(\n total_training_computation // 60, total_training_computation % 60))\nprint('Training communication time in {:.0f}m {:.0f}s'.format(\n total_training_communication // 60, total_training_communication % 60))\nprint(\"Total gradient size: \"+str(total_gradient_size))\nprint(\"Total weight size: \"+str(total_weight_size))\n\nimport matplotlib.pyplot as plt\n\n\ndef plot_acc_loss(result, epochs):\n acc = result.history['acc']\n loss = result.history['loss']\n val_acc = result.history['val_acc']\n val_loss = result.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(121)\n plt.plot(range(1, epochs), acc[1:], label='Train_acc')\n plt.plot(range(1, epochs), val_acc[1:], label='Val_acc')\n plt.title('Accuracy over ' + str(epochs) + ' Epochs', size=15)\n plt.legend()\n plt.grid(True)\n plt.subplot(122)\n plt.plot(range(1, epochs), loss[1:], label='Train_loss')\n plt.plot(range(1, epochs), val_loss[1:], label='Val_loss')\n plt.title('Loss over ' + str(epochs) + ' Epochs', size=15)\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\n# plot_acc_loss(result, 80)\n\n# 
%%\n\n# Save the trained model and copy to drive\n\nmodel.save('4-class-Covid19-Mod-Xception.h5')\n# !cp /content/\"4-class-Covid19-Mod-Xception.h5\" /content/drive/\"My Drive\"/\"Colab Notebooks\"\n\n\n# %% md\n\n# ** Evaluate\n# using\n# evaluate\n# Generator **\n\n# %%\n\n# Create evaluate data generator from test set\n# Dont forget shuffle false\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n# test_dir = '/content/COVID-19 Radiography Database'\neval_generator = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SIZE, batch_size=1,\n shuffle=False, seed=42, class_mode=\"categorical\")\neval_generator.reset()\n\n# %%\n\n# Evalute the trained model on evaluate generator\neval_generator.reset()\nx = model.evaluate_generator(eval_generator,\n steps=np.ceil(len(eval_generator)),\n use_multiprocessing=False,\n verbose=1,\n workers=1,\n )\n\nprint('Test loss:', x[0])\nprint('Test accuracy:', x[1])\n\n# Poor test accuracy due to the small dataset size\n\n# %% md\n\n# ** Create\n# DataGen\n# on\n# single\n# folder /\n#\n#\n# class and predict ! **\n\n# %%\n\n\n# IMAGE_SIZE = (150, 150)\n# test_datagen = ImageDataGenerator(rescale=1. / 255)\n# test_dir = 'data/COVID-19 Radiography Database'\n# pred_generator = test_datagen.flow_from_directory(\n# test_dir, target_size=IMAGE_SIZE,\n# batch_size=1,\n# shuffle=False,\n#\n# seed=42,\n#\n# class_mode=\"categorical\")\n# pred_generator.reset()\n#\n# count = [0, 0, 0, 0]\n#\n# files = pred_generator.filenames\n#\n# for i in range(len(files)):\n# x, y = pred_generator.next()\n# img = x\n# predict = model.predict(img)\n#\n# p = np.argmax(predict, axis=-1)\n# print(str(p[0]) + \" \" + files[pred_generator.batch_index - 1])\n# # print(predict)\n# # p=model.predict_classes(img)\n# count[p[0]] += 1\n#\n# # print(str(p[0])+\" \"+files[i])\n# print(count)\n#\n# # %% md\n#\n# ### **`Predict Results using predict generator and evaluate the accuracy and Confusion matrix `**\n#\n# # %%\n#\n# from sklearn.metrics import confusion_matrix\n# from sklearn.metrics import plot_confusion_matrix\n# from sklearn.metrics import classification_report\n#\n# filenames = eval_generator.filenames\n# nb_samples = len(filenames)\n# eval_generator.reset()\n# predict = model.predict_generator(eval_generator, steps=np.ceil(len(eval_generator)))\n# pp = predict\n# predict = np.argmax(predict, axis=-1)\n# classes = eval_generator.classes[eval_generator.index_array]\n# acc = sum(predict == classes) / len(predict)\n# names = [\"covid\", \"normal\", \"pneumonia_bac\", \"pneumonia_vir\"]\n# # print(confusion_matrix(classes,predict))\n#\n# font = {\n# 'family': 'Times New Roman',\n# 'size': 12\n# }\n# plt.rc('font', **font)\n# cm = confusion_matrix(classes, predict)\n# print(cm)\n# print(classification_report(classes, predict))\n# plt.imshow(cm, cmap=plt.cm.Blues)\n# plt.xlabel('Predicted labels \\nAccuracy: {:0.2f}'.format(acc * 100))\n# plt.ylabel(\"True labels\")\n# plt.xticks(classes, [])\n# plt.yticks(classes, [])\n# plt.title('Confusion matrix ')\n# plt.colorbar()\n# plt.show()\n\n# %% md\n\n# ** Test\n# Single\n# image **\n\n# %%\n\n# import cv2\n# from skimage import transform\n#\n# img_r = cv2.imread('/content/test/x.jpg')\n#\n# img1 = np.array(img_r).astype('float32') / 255\n# img2 = transform.resize(img1, (150, 150, 3))\n#\n# img = np.expand_dims(img2, axis=0)\n#\n# r = model.predict(img)\n#\n# names = dict((v, k) for k, v in labels.items())\n# index = np.argmax(r)\n# name = names.get(index, \"Unknown\")\n#\n# p = round(r.max() * 100, 3) # to find maximum 
score\n#\n# scores = r\n# print(scores)\n#\n# font = {\n# 'family': 'Times New Roman',\n# 'size': 9,\n#\n# }\n# plt.rc('font', **font)\n#\n# # plt.title(name +\" (\"+ str(p)+\")\")\n# plt.title(names[0] + \" \" + str(round(scores[0][0] * 100, 1)) + \"%\" + \"\\n\" + names[1] + \" \" + str(\n# round(scores[0][1] * 100, 1)) + \"%\" + \"\\n\" + names[2] + \" \" + str(round(scores[0][2] * 100, 1)) + \"%\" + \"\\n\" +\n# names[3] + \" \" + str(round(scores[0][3] * 100, 1)) + \"%\")\n#\n# plt.imshow(img2)\n#\n# # %% md\n#\n# # ** Test\n# # Whole\n# # Folder **\n#\n# # %%\n#\n# import cv2\n# from skimage import transform\n#\n# count = [0, 0, 0, 0]\n# folder_name = \"/content/drive/My Drive/Datasets/covid-19/covidnew/covid\"\n# files = os.listdir(folder_name)\n# for i in range(len(files)):\n# img_r = cv2.imread(folder_name + \"/\" + files[i])\n#\n# img = np.array(img_r).astype('float32') / 255\n#\n# img = transform.resize(img, (150, 150, 3))\n# img = np.expand_dims(img, axis=0)\n#\n# predict = model.predict(img)\n# p = np.argmax(predict, axis=-1)\n# # p=model.predict_classes(img)\n# count[p[0]] += 1\n# print(str(p[0]) + \" \" + files[i])\n#\n# print()\n#\n# print(count)", "from numpy import linalg as LA\nimport numpy as np\nimport bingrad_common_updated\nimport tensorflow as tf\n\n\ndef ternary_encoder(input_data):\n \"\"\"Encoding and compressing the signs \"\"\"\n a = tf.sign(input_data) # -1, 0, 1\n a = tf.add(a, 1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)\n a = tf.reshape(a, [-1])\n pad_size = 4 - tf.compat.v1.mod(tf.size(input=a), 4)\n pad = tf.range(0.0, pad_size)\n a = tf.concat([a, pad], 0)\n a_split1, a_split2, a_split3, a_split4 = tf.split(a, 4) # assume the size is dividable by 4\n\n # encode 4 grads into 1 Byte\n sum_1 = tf.add(a_split1, a_split2 * 4)\n sum_2 = tf.add(a_split3 * 16, a_split4 * 64)\n sum_all = tf.add(sum_1, sum_2)\n encoded = tf.cast(sum_all, tf.uint8)\n # print(\"encoded: \")\n # print(encoded)\n return encoded\n\n\ndef ternary_decoder(encoded_data, scaler, shape):\n \"\"\"Decoding the signs to float format \"\"\"\n a = tf.cast(encoded_data, tf.int32)\n a_split1 = tf.compat.v1.mod(a, 4)\n a_split2 = tf.compat.v1.to_int32(tf.compat.v1.mod(a / 4, 4))\n a_split3 = tf.compat.v1.to_int32(tf.compat.v1.mod(a / 16, 4))\n a_split4 = tf.compat.v1.to_int32(tf.compat.v1.mod(a / 64, 4))\n a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)\n real_size = tf.reduce_prod(input_tensor=shape)\n a = tf.compat.v1.to_float(a)\n a = tf.gather(a, tf.range(0, real_size))\n\n a = tf.reshape(a, shape)\n a = tf.subtract(a, 1)\n decoded = a * scaler\n # print(\"decoded: \")\n # print(decoded)\n return decoded\n\n\ndef clip_gradients_by_stddev_2(gradients, clip_factor=2.5):\n \"\"\" Clip gradients to [-clip_factor*stddev, clip_factor*stddev].\"\"\"\n # gradients, variables = zip(*grads_and_vars)\n clipped_gradients = []\n for gradient in gradients:\n if gradient is None:\n clipped_gradients.append(None)\n continue\n\n mean_gradient = tf.reduce_mean(gradient)\n stddev_gradient = tf.sqrt(tf.reduce_mean(tf.square(gradient - mean_gradient)))\n # clipped_gradient = tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient)\n clipped_gradient = tf.cond(tf.size(gradient) < 1,\n lambda: gradient,\n lambda: tf.clip_by_value(gradient, -clip_factor * stddev_gradient,\n clip_factor * stddev_gradient))\n\n clipped_gradients.append(clipped_gradient)\n return list(clipped_gradients)\n\n\ndef gradient_binarizing_scalers_2(gradients, clip_factor):\n \"\"\" Get the 
scalers.\"\"\"\n # gradients, variables = zip(*grads_and_vars)\n scalers = []\n for gradient in gradients:\n if gradient is None:\n scalers.append(None)\n continue\n\n if (clip_factor > 1.0e-5):\n mean_gradient = tf.reduce_mean(input_tensor=gradient)\n stddev_gradient = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(gradient - mean_gradient)))\n scalers.append(clip_factor * stddev_gradient)\n else:\n scalers.append(tf.reduce_max(input_tensor=tf.abs(gradient)))\n\n return list(scalers)\n\n\ndef max_scalers_2(tower_scalers):\n \"\"\"Calculate the max scalers for gradients across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_scalers: List of lists of (scaler, variable) tuples. The outer list\n is over individual scaler. The inner list is over the scaler\n calculation for each tower.\n Returns:\n List of pairs of scaler where the scaler is the max one\n across all towers.\n \"\"\"\n max_scalers = []\n # for scale_and_vars in zip(*tower_scalers):\n # Note that each scale_and_vars looks like the following:\n # ((scale0_gpu0, var0_gpu0), ... , (scale0_gpuN, var0_gpuN))\n scalers = []\n for s in tower_scalers:\n # Add 0 dimension to the scalers to represent the tower.\n expanded_s = tf.expand_dims(s, 0)\n\n # Append on a 'tower' dimension which we get the max over below.\n scalers.append(expanded_s)\n\n # Get the max over the 'tower' dimension.\n scaler = tf.concat(scalers, 0)\n scaler = tf.reduce_max(input_tensor=scaler, axis=0)\n\n max_scalers.append(scaler)\n return max_scalers\n\n\ndef average_gradients_v2(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n\n tower_len = len(tower_grads)\n tower_elem_len = len(tower_grads[0])\n overall_gradient = []\n for i in range(tower_elem_len):\n per_layer_gradient = []\n for j in range(tower_len):\n per_layer_gradient.append(tower_grads[j][i])\n overall_gradient.append(per_layer_gradient)\n\n average_grads = []\n # for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n for overall_grads in overall_gradient:\n grads = []\n for g in overall_grads:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n # v = grad_and_vars[0][1]\n # grad_and_var = (grad, v)\n # average_grads.append(grad_and_var)\n average_grads.append(grad)\n return average_grads\n\n\ndef average_scalers_2(tower_scalers):\n \"\"\"Calculate the average scalers for gradients across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_scalers: List of lists of (scaler, variable) tuples. The outer list\n is over individual scaler. 
The inner list is over the scaler\n calculation for each tower.\n Returns:\n List of pairs of scaler where the scaler has been averaged\n across all towers.\n \"\"\"\n # # average_scalers = []\n # # for scale_and_vars in zip(*tower_scalers):\n # # Note that each scale_and_vars looks like the following:\n # # ((scale0_gpu0, var0_gpu0), ... , (scale0_gpuN, var0_gpuN))\n # scalers = []\n # for s in tower_scalers:\n # # Add 0 dimension to the scalers to represent the tower.\n # expanded_s = tf.expand_dims(s, 0)\n #\n # # Append on a 'tower' dimension which we will average over below.\n # scalers.append(expanded_s)\n #\n # # Average over the 'tower' dimension.\n # scaler = tf.concat(scalers, 0)\n # scaler = tf.reduce_mean(scaler, 0)\n #\n # # average_scalers.append(scaler)\n # # return average_scalers\n # return scaler\n\n tower_len = len(tower_scalers)\n tower_elem_len = len(tower_scalers[0])\n overall_scaler = []\n for i in range(tower_elem_len):\n per_layer_gradient = []\n for j in range(tower_len):\n per_layer_gradient.append(tower_scalers[j][i])\n overall_scaler.append(per_layer_gradient)\n\n average_scalers = []\n # for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n for overall_scals in overall_scaler:\n grads = []\n for g in overall_scals:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n # v = grad_and_vars[0][1]\n # grad_and_var = (grad, v)\n # average_grads.append(grad_and_var)\n average_scalers.append(grad)\n return average_scalers\n\n\ndef stochastical_binarize_gradients_2(gradients, scalers):\n \"\"\"Stochastically binarize gradients.\"\"\"\n # gradients, variables = zip(*grads_and_vars)\n binarized_gradients = []\n # for gradient, scaler in zip(gradients, scalers):\n i = 0\n for i in range(len(gradients)):\n gradient = gradients[i]\n if gradient is None:\n binarized_gradients.append(None)\n continue\n if isinstance(gradient, tf.IndexedSlices):\n gradient_shape = gradient.dense_shape\n else:\n gradient_shape = gradient.get_shape()\n\n zeros = tf.zeros(gradient_shape)\n abs_gradient = tf.abs(gradient)\n sign_gradient = tf.sign(gradient)\n rnd_sample = tf.random.uniform(gradient_shape, 0, scalers[i])\n where_cond = tf.less(rnd_sample, abs_gradient)\n binarized_gradient = tf.cond(pred=tf.size(input=gradient) < 1,\n true_fn=lambda: gradient,\n false_fn=lambda: tf.compat.v1.where(where_cond, sign_gradient * scalers[i], zeros))\n\n binarized_gradients.append(binarized_gradient)\n i += 1\n # return list(zip(binarized_gradients, variables))\n return list(binarized_gradients)\n\n\ndef encode_to_ternary_gradients_2(gradients, get_shape=False):\n \"\"\"Encode each gradient tensor.\"\"\"\n with tf.compat.v1.name_scope('ternary_encoder'):\n # gradients, variables = zip(*grads_and_vars)\n ternary_gradients = []\n gradient_shapes = []\n for gradient in gradients:\n if gradient is None:\n ternary_gradients.append(None)\n if get_shape:\n gradient_shapes.append(None)\n continue\n\n if get_shape:\n if isinstance(gradient, tf.IndexedSlices):\n gradient_shape = gradient.dense_shape\n 
else:\n gradient_shape = gradient.get_shape().as_list()\n gradient_shapes.append(gradient_shape)\n\n ternary_gradient = tf.cond(pred=tf.size(input=gradient) < 1,\n true_fn=lambda: tf.bitcast(gradient, type=tf.uint8),\n false_fn=lambda: ternary_encoder(gradient))\n ternary_gradients.append(ternary_gradient)\n\n # if get_shape:\n # return list(zip(ternary_gradients, variables)), gradient_shapes\n # else:\n # return list(zip(ternary_gradients, variables))\n\n # print(\"Yooo Gradient shape\")\n # print(gradient_shapes)\n if get_shape:\n return list(ternary_gradients), gradient_shapes\n else:\n return list(ternary_gradients)\n\[email protected](autograph=False)\ndef get_ternarized_gradients_2(gradient_tensor_list):\n gradient_tensor_list = clip_gradients_by_stddev_2(gradient_tensor_list)\n scalers = gradient_binarizing_scalers_2(gradient_tensor_list, 0)\n mean_scalers = max_scalers_2([scalers])\n ternarized_gradient = stochastical_binarize_gradients_2(gradient_tensor_list, mean_scalers[0])\n ternarized_gradient, grad_shape = encode_to_ternary_gradients_2(ternarized_gradient, get_shape=True)\n # print(mean_scalers)\n # print(ternarized_gradient)\n # mean_scaler_np_list = []\n # ternarized_gradient_np_list = []\n # for s in mean_scalers[0]:\n # mean_scaler_np_list.append(s.numpy())\n # for t in ternarized_gradient:\n # ternarized_gradient_np_list.append(t.numpy())\n # return mean_scaler_np_list, ternarized_gradient_np_list, grad_shape\n return mean_scalers, ternarized_gradient, grad_shape\n\n\[email protected](autograph=False)\ndef get_ternarized_gradients_3(gradient_tensor_list):\n scalers = gradient_binarizing_scalers_2(gradient_tensor_list, 0)\n mean_scalers = max_scalers_2([scalers])\n ternarized_gradient = stochastical_binarize_gradients_2(gradient_tensor_list, mean_scalers[0])\n ternarized_gradient, grad_shape = encode_to_ternary_gradients_2(ternarized_gradient, get_shape=True)\n # print(mean_scalers)\n # print(ternarized_gradient)\n # mean_scaler_np_list = []\n # ternarized_gradient_np_list = []\n # for s in mean_scalers[0]:\n # mean_scaler_np_list.append(s.numpy())\n # for t in ternarized_gradient:\n # ternarized_gradient_np_list.append(t.numpy())\n # return mean_scaler_np_list, ternarized_gradient_np_list, grad_shape\n return mean_scalers, ternarized_gradient, grad_shape\n\n\ndef decode_from_ternary_gradients_2(gradients, scalers, shapes):\n # variable = tf.Variable(tf.constant([1.5, 2.5]), name='adfsf')\n \"\"\"Decode each gradient tensor.\"\"\"\n with tf.compat.v1.name_scope('ternary_decoder'):\n # gradients, variables = zip(*grads_and_vars)\n floating_gradients = []\n i = 0\n # for gradient, scaler, shape in zip(gradients, scalers, shapes):\n for gradient in gradients:\n if gradient is None:\n floating_gradients.append(None)\n # gradient is encoded, so we use variable to check its size\n # We also assume dtype of variable and gradient is the same\n # floating_gradient = tf.cond(lambda: tf.bitcast(gradient, variable.dtype),\n # lambda: ternary_decoder(gradient, scaler, shape))\n floating_gradient = ternary_decoder(gradient, scalers[i], shapes[i])\n floating_gradients.append(floating_gradient)\n i += 1\n\n return list(floating_gradients)\n\n\[email protected]\ndef get_full_gradient_from_ternarized(mean_scaler_tensor_list, ternarized_gradient_tensor_list, grad_shape_list):\n # print(\"Yo 3\")\n local_worker_gradients = decode_from_ternary_gradients_2(\n ternarized_gradient_tensor_list, mean_scaler_tensor_list, grad_shape_list)\n # print(\"Yo 4\")\n # local_worker_gradients_np_list = []\n 
# for gradient in local_worker_gradients:\n # local_worker_gradients_np_list.append(gradient.numpy())\n # return local_worker_gradients_np_list\n return local_worker_gradients\n\n\[email protected]\ndef get_mean_ternarized_gradients_scalers(scaler_tensor_list, ternarized_gradient_tensor_list):\n mean_scaler = average_scalers_2(scaler_tensor_list)\n mean_gradients = average_gradients_v2(ternarized_gradient_tensor_list)\n return mean_scaler, mean_gradients\n", "import time\r\n\r\nfrom numpy.random import seed\r\n\r\nseed(8)\r\n\r\nimport tensorflow\r\ntensorflow.random.set_seed(7)\r\n\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nimport tensorflow as tf\r\nimport pickle\r\nimport socket\r\nimport argparse\r\nfrom tensorflow.python.keras import models\r\nfrom tensorflow.python.keras import layers\r\nfrom tensorflow.keras.applications import Xception\r\n\r\nimport util\r\nimport util_gps\r\nimport os\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom tensorflow.python.ops import math_ops\r\nimport numpy as np\r\nimport pandas as pd\r\nimport util_tf_v2_migrated\r\n\r\n\r\ndef safe_recv(size, server_socket):\r\n data = bytearray()\r\n while 1:\r\n try:\r\n temp = server_socket.recv(size - len(data))\r\n data.extend(temp)\r\n recv_size = len(data)\r\n if recv_size >= size:\r\n break\r\n except:\r\n print(\"Error\")\r\n data = bytes(data)\r\n return data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--base_path\", type=str, default='coronet_org_data/four_classes')\r\n parser.add_argument(\"--image_dim\", type=int, default=150)\r\n parser.add_argument(\"--bs\", type=int, default=10)\r\n parser.add_argument(\"--epochs\", type=int, default=80)\r\n parser.add_argument(\"--lr\", type=float, default=0.0001)\r\n parser.add_argument(\"--worker_id\", type=int, default=0)\r\n parser.add_argument(\"--lps_id\", type=int, default=1)\r\n parser.add_argument(\"--lps_ip\", type=str, default='127.0.0.1')\r\n parser.add_argument(\"--lps_port\", type=int, default=17001)\r\n args = parser.parse_args()\r\n\r\n base_log_name = \"coronet_worker_\" + str(args.lps_id) + \"_\" + str(args.worker_id) + \"_train_test_val_\" + str(args.image_dim) + \"_\" + str(args.epochs) + \"_\" + str(args.bs) + \"_\" + str(args.lr)\r\n log = open(base_log_name + \".txt\", 'a', buffering=1)\r\n BASE_PATH = args.base_path\r\n\r\n DATASET_PATH = BASE_PATH + '_' + str(args.lps_id) + '_' + str(args.worker_id) + '/train2'\r\n VALIDATION_PATH = BASE_PATH + '_' + str(args.lps_id) + '_' + str(args.worker_id) + '/val'\r\n test_dir = BASE_PATH + '_' + str(args.lps_id) + '_' + str(args.worker_id) + '/test'\r\n IMAGE_SIZE = (args.image_dim, args.image_dim)\r\n BATCH_SIZE = args.bs\r\n NUM_EPOCHS = args.epochs\r\n LEARNING_RATE = args.lr\r\n TCP_IP = args.lps_ip\r\n port = args.lps_port\r\n grad_shape = [[3, 3, 3, 32], [32], [32], [3, 3, 32, 64], [64], [64], [3, 3, 64, 1], [1, 1, 64, 128], [128], [128], [3, 3, 128, 1], [1, 1, 128, 128], [128], [128], [1, 1, 64, 128], [128], [128], [3, 3, 128, 1], [1, 1, 128, 256], [256], [256], [3, 3, 256, 1], [1, 1, 256, 256], [256], [256], [1, 1, 128, 256], [256], [256], [3, 3, 256, 1], [1, 1, 256, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [1, 1, 256, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 
1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 1024], [1024], [1024], [1, 1, 728, 1024], [1024], [1024], [3, 3, 1024, 1], [1, 1, 1024, 1536], [1536], [1536], [3, 3, 1536, 1], [1, 1, 1536, 2048], [2048], [2048], [51200, 256], [256], [256, 4], [4]]\r\n\r\n train_datagen = ImageDataGenerator(rescale=1. / 255,\r\n rotation_range=50,\r\n featurewise_center=True,\r\n featurewise_std_normalization=True,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n shear_range=0.25,\r\n zoom_range=0.1,\r\n zca_whitening=True,\r\n channel_shift_range=20,\r\n horizontal_flip=True,\r\n vertical_flip=True,\r\n validation_split=0.2,\r\n fill_mode='constant')\r\n\r\n train_batches = train_datagen.flow_from_directory(DATASET_PATH,\r\n target_size=IMAGE_SIZE,\r\n shuffle=True,\r\n batch_size=BATCH_SIZE,\r\n seed=42,\r\n class_mode=\"categorical\")\r\n\r\n valid_batches = train_datagen.flow_from_directory(VALIDATION_PATH,\r\n target_size=IMAGE_SIZE,\r\n shuffle=True,\r\n batch_size=BATCH_SIZE,\r\n seed=42,\r\n class_mode=\"categorical\")\r\n\r\n conv_base = Xception(weights='imagenet',\r\n include_top=False,\r\n input_shape=(args.image_dim, args.image_dim, 3))\r\n conv_base.trainable = True\r\n model = models.Sequential()\r\n model.add(conv_base)\r\n model.add(layers.Flatten())\r\n model.add(layers.Dropout(0.5))\r\n model.add(layers.Dense(256, activation='relu'))\r\n model.add(layers.Dense(4, activation='softmax'))\r\n\r\n train_batch_len = len(train_batches)\r\n validation_batch_len = len(valid_batches)\r\n\r\n accuracy = tf.keras.metrics.CategoricalAccuracy()\r\n accuracy_val = tf.keras.metrics.CategoricalAccuracy()\r\n loss_fn = tf.keras.losses.CategoricalCrossentropy()\r\n target_names = os.listdir(test_dir)\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((TCP_IP, port))\r\n\r\n recv_size = safe_recv(17, s)\r\n recv_size = pickle.loads(recv_size)\r\n recv_data = safe_recv(recv_size, s)\r\n weight_list = pickle.loads(recv_data)\r\n i = 0\r\n for w in model.trainable_weights:\r\n w.assign(weight_list[i])\r\n i += 1\r\n print(\"Received initial parameters\")\r\n\r\n total_org_gradient_size = 0\r\n total_gradient_size = 0\r\n total_weight_size = 0\r\n total_training_computation = 0\r\n total_validation_computation = 0\r\n MOMENTUM = 0.9\r\n\r\n current_momentum = []\r\n for shape in grad_shape:\r\n current_momentum.append(np.zeros(shape))\r\n\r\n total_start_time = time.time()\r\n for epoch in range(NUM_EPOCHS):\r\n total_train_loss = 0\r\n for step, (x, y) in enumerate(train_batches):\r\n train_comp = time.time()\r\n with 
tf.GradientTape() as tape:\r\n logits = model(x)\r\n loss_value = loss_fn(y, logits)\r\n accuracy.update_state(y, logits)\r\n total_train_loss += loss_value.numpy()\r\n gradients = tape.gradient(loss_value, model.trainable_weights)\r\n total_org_gradient_size += len(pickle.dumps(gradients, pickle.HIGHEST_PROTOCOL))\r\n quantization_start = time.time()\r\n gradient_tensor_list = []\r\n for gradient in gradients:\r\n gradient_tensor_list.append(tf.convert_to_tensor(gradient, dtype=tf.float32))\r\n just_q = time.time()\r\n mean_scaler_tensor, quantized_gradient_tensor, grad_shape_tensor = util_tf_v2_migrated.get_ternarized_gradients_2(gradient_tensor_list)\r\n mean_scaler = []\r\n quantized_gradient = []\r\n for ms in mean_scaler_tensor[0]:\r\n mean_scaler.append(ms.numpy())\r\n for t in quantized_gradient_tensor:\r\n quantized_gradient.append(t.numpy())\r\n dump_time = time.time()\r\n mean_scaler = pickle.dumps(mean_scaler, pickle.HIGHEST_PROTOCOL)\r\n mean_scaler_size = len(mean_scaler)\r\n total_gradient_size += mean_scaler_size\r\n mean_scaler_size = pickle.dumps(mean_scaler_size, pickle.HIGHEST_PROTOCOL)\r\n quantized_gradient = pickle.dumps(quantized_gradient, pickle.HIGHEST_PROTOCOL)\r\n quantized_gradient_size = len(quantized_gradient)\r\n total_gradient_size += quantized_gradient_size\r\n quantized_gradient_size = pickle.dumps(quantized_gradient_size, pickle.HIGHEST_PROTOCOL)\r\n total_training_computation += time.time() - train_comp\r\n\r\n s.sendall(quantized_gradient_size)\r\n s.sendall(quantized_gradient)\r\n s.sendall(mean_scaler_size)\r\n s.sendall(mean_scaler)\r\n\r\n recv_size = safe_recv(17, s)\r\n recv_size = pickle.loads(recv_size)\r\n total_weight_size += recv_size\r\n recv_data = safe_recv(recv_size, s)\r\n\r\n train_comp = time.time()\r\n weight_list = pickle.loads(recv_data)\r\n i =0\r\n for w in model.trainable_weights:\r\n w.assign(weight_list[i])\r\n i += 1\r\n print(\"Epoch:\", epoch, \"Step:\", step, \"Loss value:\", loss_value.numpy())\r\n print(\"Total running accuracy so far: %.3f\" % accuracy.result())\r\n total_training_computation += time.time() - train_comp\r\n if step >= (train_batch_len - 1):\r\n break\r\n\r\n print(\"Train Epoch \", epoch, \" Loss \", total_train_loss / train_batch_len, \" Accuracy \",\r\n accuracy.result().numpy())\r\n log.write(\"Train Epoch \" + str(epoch) + \" Loss \" + str(total_train_loss / train_batch_len) + \" Accuracy \" + str(\r\n accuracy.result().numpy()))\r\n log.write('\\n')\r\n accuracy.reset_states()\r\n\r\n valid_start = time.time()\r\n total_val_loss = 0\r\n for step, (x, y) in enumerate(valid_batches):\r\n with tf.GradientTape() as tape:\r\n logits = model(x)\r\n loss_value = loss_fn(y, logits)\r\n\r\n accuracy_val.update_state(y, logits)\r\n total_val_loss += loss_value.numpy()\r\n if step >= (validation_batch_len - 1):\r\n break\r\n\r\n print(\"Val Epoch \", epoch, \" Loss \", total_val_loss / validation_batch_len, \" Accuracy \",\r\n accuracy_val.result().numpy())\r\n log.write(\"Val Epoch \" + str(epoch) + \" Loss \" + str(total_val_loss / validation_batch_len) + \" Accuracy \" + str(\r\n accuracy_val.result().numpy()))\r\n log.write('\\n')\r\n accuracy_val.reset_states()\r\n total_validation_computation += time.time() - valid_start\r\n\r\n total_time = time.time()-total_start_time\r\n total_training_communication = total_time - total_training_computation - total_validation_computation\r\n print('Total time in {:.0f}m {:.0f}s'.format(total_time // 60, total_time % 60))\r\n log.write('Total time in {:.0f}m 
{:.0f}s'.format(total_time // 60, total_time % 60))\r\n log.write('\\n')\r\n print('Total training computation time in {:.0f}m {:.0f}s'.format(total_training_computation // 60, total_training_computation % 60))\r\n log.write('Total training computation time in {:.0f}m {:.0f}s'.format(total_training_computation // 60, total_training_computation % 60))\r\n log.write('\\n')\r\n print('Total validation computation time in {:.0f}m {:.0f}s'.format(total_validation_computation // 60, total_validation_computation % 60))\r\n log.write('Total validation computation time in {:.0f}m {:.0f}s'.format(total_validation_computation // 60, total_validation_computation % 60))\r\n log.write('\\n')\r\n print('Total training communication time in {:.0f}m {:.0f}s'.format(total_training_communication // 60, total_training_communication % 60))\r\n log.write('Total training communication time in {:.0f}m {:.0f}s'.format(total_training_communication // 60, total_training_communication % 60))\r\n log.write('\\n')\r\n print(\"Total gradient size: \"+str(total_gradient_size))\r\n log.write(\"Total gradient size: \"+str(total_gradient_size))\r\n log.write('\\n')\r\n print(\"Total original gradient size: \" + str(total_org_gradient_size))\r\n log.write(\"Total original gradient size: \" + str(total_org_gradient_size))\r\n log.write('\\n')\r\n print(\"Total weight size: \"+str(total_weight_size))\r\n log.write(\"Total weight size: \"+str(total_weight_size))\r\n log.write('\\n')\r\n\r\n test_datagen = ImageDataGenerator(rescale=1. / 255)\r\n test_batches = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SIZE, batch_size=1,\r\n shuffle=False, seed=42, class_mode=\"categorical\")\r\n test_batch_len = len(test_batches)\r\n test_batches.reset()\r\n accuracy_test = tf.keras.metrics.CategoricalAccuracy()\r\n total_test_loss = 0\r\n original_label = []\r\n predicted_label = []\r\n for step, (x, y) in enumerate(test_batches):\r\n with tf.GradientTape() as tape:\r\n logits = model(x)\r\n loss_value = loss_fn(y, logits)\r\n original_label.append(math_ops.argmax(y, axis=-1).numpy()[0])\r\n predicted_label.append(math_ops.argmax(logits, axis=-1).numpy()[0])\r\n accuracy_test.update_state(y, logits)\r\n total_test_loss += loss_value.numpy()\r\n if step > test_batch_len:\r\n break\r\n\r\n print(\"Test \", \"Loss \", total_test_loss / test_batch_len, \" Accuracy \", accuracy_test.result().numpy())\r\n log.write(\r\n \"Test \" + \"Loss \" + str(total_test_loss / test_batch_len) + \" Accuracy \" + str(accuracy_test.result().numpy()))\r\n log.write('\\n')\r\n accuracy_test.reset_states()\r\n\r\n conf_matrix = confusion_matrix(original_label, predicted_label)\r\n class_report = classification_report(original_label, predicted_label, target_names=target_names, output_dict=True)\r\n print('Confusion Matrix')\r\n print(target_names)\r\n print(conf_matrix)\r\n print('Classification Report')\r\n print(class_report)\r\n log.write(\"Confusion Matrix\")\r\n log.write('\\n')\r\n for name in target_names:\r\n log.write(name + \" \")\r\n log.write('\\n')\r\n log.write(np.array2string(conf_matrix, separator=', '))\r\n log.write('\\n')\r\n pd.DataFrame(class_report).transpose().to_csv(base_log_name + \".csv\")\r\n log.close()\r\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.python.keras.layers.Flatten", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.random.seed", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Sequential", "tensorflow.keras.applications.Xception", "tensorflow.GradientTape", "tensorflow.keras.optimizers.Adam", "tensorflow.python.keras.layers.Dropout", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "tensorflow.keras.metrics.CategoricalAccuracy", "matplotlib.pyplot.show", "tensorflow.random.set_seed", "matplotlib.pyplot.figure" ], [ "tensorflow.bitcast", "tensorflow.sign", "tensorflow.concat", "tensorflow.zeros", "tensorflow.cast", "tensorflow.subtract", "tensorflow.add", "tensorflow.square", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.where", "tensorflow.less", "tensorflow.random.uniform", "tensorflow.compat.v1.mod", "tensorflow.function", "tensorflow.reduce_prod", "tensorflow.split", "tensorflow.size", "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.compat.v1.to_float", "tensorflow.abs" ], [ "tensorflow.convert_to_tensor", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.python.keras.layers.Flatten", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.random.seed", "tensorflow.python.keras.layers.Dense", "tensorflow.python.ops.math_ops.argmax", "sklearn.metrics.confusion_matrix", "tensorflow.python.keras.models.Sequential", "tensorflow.keras.applications.Xception", "pandas.DataFrame", "tensorflow.python.keras.layers.Dropout", "numpy.zeros", "tensorflow.keras.metrics.CategoricalAccuracy", "numpy.array2string", "sklearn.metrics.classification_report", "tensorflow.random.set_seed", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
mumumu99/latent-pose-reenactment
[ "bfe8175f9cf3d67d46c21194bb5b6f898ef3ea53" ]
[ "embedders/FAb-Net/Datasets/generate_large_voxceleb.py" ]
[ "\nimport os\n\nids = [d for d in os.listdir(VOX_CELEB_LOCATION) if d[0:2] == 'id']\n\n\ntrain = ids[0:int(0.7*len(ids))]\nval = ids[int(0.7*len(ids)):int(0.8*len(ids))]\ntest = ids[int(0.8*len(ids)):]\n\nimport numpy as np\nnp.save('./large_voxceleb/train.npy', np.array(train))\nnp.save('./large_voxceleb/test.npy', np.array(test))\nnp.save('./large_voxceleb/val.npy', np.array(val))\n\nprint(np.array(val).shape)\nprint(val[0])\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aaryapatel007/Hippocampal-Volume-Quantification-in-Alzheimer-Progression
[ "5c9eff98572c1d2647a742d285805d9e328ab14f" ]
[ "train model/src/inference/UNetInferenceAgent.py" ]
[ "\"\"\"\nContains class that runs inferencing\n\"\"\"\nimport torch\nimport numpy as np\n\nfrom networks.RecursiveUNet import UNet\n\nfrom utils.utils import med_reshape\n\nclass UNetInferenceAgent:\n \"\"\"\n Stores model and parameters and some methods to handle inferencing\n \"\"\"\n def __init__(self, parameter_file_path='', model=None, device=\"cpu\", patch_size=64):\n\n self.model = model\n self.patch_size = patch_size\n self.device = device\n\n if model is None:\n self.model = UNet(num_classes=3)\n\n if parameter_file_path:\n self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))\n\n self.model.to(device)\n\n def single_volume_inference_unpadded(self, volume):\n \"\"\"\n Runs inference on a single volume of arbitrary patch size,\n padding it to the conformant size first\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n \n raise NotImplementedError\n\n def single_volume_inference(self, volume):\n \"\"\"\n Runs inference on a single volume of conformant patch size\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n\n # TASK: Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n slc_tensor = torch.from_numpy(volume).type(torch.cuda.FloatTensor).unsqueeze(1).to(self.device)\n prediction = self.model(slc_tensor)\n masks = torch.argmax(prediction, dim = 1).cpu().detach().numpy().astype(int)\n \n return masks\n" ]
[ [ "torch.argmax", "torch.from_numpy", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cv-hci-project/PyTorch-VAE
[ "e9b9d122eb52f76e096942b300a8db97a123be13" ]
[ "models/base.py" ]
[ "from abc import abstractmethod\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport torch\nimport torchvision.utils as vutils\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import CelebA\n\nfrom datasets.concrete_cracks import ConcreteCracksDataset\nfrom datasets.sdnet2018 import SDNet2018\nfrom models.types_ import *\n\n\nclass BaseVAE(pl.LightningModule):\n\n def __init__(self, params: dict) -> None:\n super().__init__()\n\n self.params = params\n self.curr_device = None\n\n try:\n num_workers = params[\"dataloader_workers\"]\n except KeyError:\n num_workers = 1\n\n self.additional_dataloader_args = {'num_workers': num_workers, 'pin_memory': True}\n\n def encode(self, input: Tensor) -> List[Tensor]:\n raise NotImplementedError\n\n def decode(self, input: Tensor) -> Any:\n raise NotImplementedError\n\n def sample(self, batch_size: int, current_device: int, **kwargs) -> Tensor:\n raise RuntimeWarning()\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n raise NotImplementedError\n\n @abstractmethod\n def forward(self, *inputs: Tensor) -> Tensor:\n pass\n\n @abstractmethod\n def loss_function(self, *inputs: Any, **kwargs) -> Tensor:\n pass\n\n def training_step(self, batch, batch_idx, optimizer_idx=0):\n real_img, labels = batch\n self.curr_device = real_img.device\n\n results = self.forward(real_img, labels=labels)\n train_loss = self.loss_function(*results,\n M_N=self.params['batch_size'] / self.num_train_imgs,\n optimizer_idx=optimizer_idx,\n batch_idx=batch_idx)\n\n # TODO this is deprecated\n self.logger.experiment.log({key: val.item() for key, val in train_loss.items()})\n\n return train_loss\n\n def validation_step(self, batch, batch_idx, optimizer_idx=0):\n real_img, labels = batch\n self.curr_device = real_img.device\n\n results = self.forward(real_img, labels=labels)\n val_loss = self.loss_function(*results,\n M_N=self.params['batch_size'] / self.num_val_imgs,\n optimizer_idx=optimizer_idx,\n batch_idx=batch_idx)\n\n return val_loss\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['loss'] for x in outputs]).mean()\n tensorboard_logs = {'avg_val_loss': avg_loss}\n\n if self.current_epoch % 5 == 0 or self.current_epoch == (self.trainer.max_epochs - 1):\n self.sample_images()\n\n return {'val_loss': avg_loss, 'log': tensorboard_logs}\n\n def sample_images(self, save=True, display=False):\n # Get sample reconstruction image\n test_input, test_label = next(iter(self.sample_dataloader))\n test_input = test_input.to(self.curr_device)\n test_label = test_label.to(self.curr_device)\n recons = self.generate(test_input, labels=test_label)\n\n if save:\n vutils.save_image(recons.data,\n f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n f\"recons_{self.logger.name}_{self.current_epoch}.png\",\n normalize=True,\n nrow=12)\n\n if display:\n plt.imshow(vutils.make_grid(recons.data, normalize=True, nrow=12).permute(2, 1, 0).numpy())\n plt.title(\"Reconstructed images\")\n plt.show()\n\n # vutils.save_image(test_input.data,\n # f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n # f\"real_img_{self.logger.name}_{self.current_epoch}.png\",\n # normalize=True,\n # nrow=12)\n\n try:\n samples = self.sample(144,\n self.curr_device,\n labels=test_label)\n if save:\n vutils.save_image(samples.cpu().data,\n f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n 
f\"{self.logger.name}_{self.current_epoch}.png\",\n normalize=True,\n nrow=12)\n if display:\n plt.imshow(vutils.make_grid(samples.data, normalize=True, nrow=12).permute(2, 1, 0).numpy())\n plt.title(\"Sampled images\")\n plt.show()\n except:\n pass\n\n del test_input, recons # , samples\n\n def configure_optimizers(self):\n\n optims = []\n scheds = []\n\n optimizer = optim.Adam(self.parameters(),\n lr=self.params['LR'],\n weight_decay=self.params['weight_decay'])\n optims.append(optimizer)\n # Check if more than 1 optimizer is required (Used for adversarial training)\n try:\n if self.params['LR_2'] is not None:\n optimizer2 = optim.Adam(self.params['submodel'].parameters(),\n lr=self.params['LR_2'])\n optims.append(optimizer2)\n except:\n pass\n\n try:\n if self.params['scheduler_gamma'] is not None:\n scheduler = optim.lr_scheduler.ExponentialLR(optims[0],\n gamma=self.params['scheduler_gamma'])\n scheds.append(scheduler)\n\n # Check if another scheduler is required for the second optimizer\n try:\n if self.params['scheduler_gamma_2'] is not None:\n scheduler2 = optim.lr_scheduler.ExponentialLR(optims[1],\n gamma=self.params['scheduler_gamma_2'])\n scheds.append(scheduler2)\n except:\n pass\n return optims, scheds\n except:\n return optims\n\n def train_dataloader(self):\n transform = self.data_transforms()\n\n if self.params['dataset'] == 'celeba':\n dataset = CelebA(root=self.params['data_path'],\n split=\"train\",\n transform=transform,\n download=False)\n elif self.params['dataset'] == \"concrete-cracks\":\n dataset = ConcreteCracksDataset(root_dir=self.params['data_path'],\n split=\"train\",\n abnormal_data=False,\n transform=transform)\n elif self.params['dataset'] == \"SDNET2018\":\n dataset = SDNet2018(root_dir=self.params['data_path'],\n split=\"train\",\n abnormal_data=False,\n transform=transform)\n else:\n raise ValueError('Undefined dataset type')\n\n self.num_train_imgs = len(dataset)\n\n return DataLoader(dataset,\n batch_size=self.params['batch_size'],\n shuffle=True,\n drop_last=True,\n **self.additional_dataloader_args)\n\n def val_dataloader(self):\n transform = self.data_transforms()\n\n if self.params['dataset'] == 'celeba':\n dataset = CelebA(root=self.params['data_path'],\n split=\"test\",\n transform=transform,\n download=False)\n elif self.params['dataset'] == 'concrete-cracks':\n dataset = ConcreteCracksDataset(root_dir=self.params['data_path'],\n split=\"val\",\n abnormal_data=False,\n transform=transform)\n elif self.params['dataset'] == 'SDNET2018':\n dataset = SDNet2018(root_dir=self.params['data_path'],\n split=\"val\",\n abnormal_data=False,\n transform=transform)\n else:\n raise ValueError('Undefined dataset type')\n\n self.sample_dataloader = DataLoader(dataset,\n batch_size=self.params['batch_size'],\n shuffle=True,\n drop_last=True,\n **self.additional_dataloader_args)\n\n self.num_val_imgs = len(self.sample_dataloader)\n\n return self.sample_dataloader\n\n def data_transforms(self):\n\n SetRange = transforms.Lambda(lambda X: 2 * X - 1.)\n SetScale = transforms.Lambda(lambda X: X / X.sum(0).expand_as(X))\n\n if self.params['dataset'] == 'celeba':\n transform = transforms.Compose([transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(148),\n transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n elif self.params['dataset'] == 'concrete-cracks':\n transform = transforms.Compose([transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n elif 
self.params['dataset'] == 'SDNET2018':\n transform = transforms.Compose([transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n else:\n raise ValueError('Undefined dataset type')\n return transform\n" ]
[ [ "matplotlib.pyplot.title", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.ExponentialLR", "torch.stack", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hwsamuel/modin
[ "4d0a3155b31104ac8083b223bd71ff3e541ecd92", "4d0a3155b31104ac8083b223bd71ff3e541ecd92", "4d0a3155b31104ac8083b223bd71ff3e541ecd92" ]
[ "modin/pandas/datetimes.py", "modin/engines/base/io/text/json_reader.py", "modin/engines/base/frame/axis_partition.py" ]
[ "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pandas\n\nfrom .dataframe import DataFrame\nfrom .series import Series\n\n\ndef to_datetime(\n arg,\n errors=\"raise\",\n dayfirst=False,\n yearfirst=False,\n utc=None,\n format=None,\n exact=True,\n unit=None,\n infer_datetime_format=False,\n origin=\"unix\",\n cache=True,\n):\n \"\"\"\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like\n The object to convert to a datetime.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaT.\n - If 'ignore', then invalid parsing will return the input.\n dayfirst : bool, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : bool, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst is preceded (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil behavior).\n utc : bool, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n format : str, default None\n The strftime to parse time, eg \"%d/%m/%Y\", note that \"%f\" will parse\n all the way up to nanoseconds.\n See strftime documentation for more information on choices:\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.\n exact : bool, True by default\n Behaves as:\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : str, default 'ns'\n The unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit='ms' and origin='unix' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : bool, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings based on the first non-NaN element,\n and if it can be inferred, switch to a faster method of parsing them.\n In some cases this can increase the parsing speed by ~5-10x.\n origin : scalar, default 'unix'\n Define the reference date. 
The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If 'unix' (or POSIX) time; origin is set to 1970-01-01.\n - If 'julian', unit must be 'D', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n cache : bool, default True\n If True, use a cache of unique, converted dates to apply the datetime\n conversion. May produce significant speed-up when parsing duplicate\n date strings, especially ones with timezone offsets. The cache is only\n used when there are at least 50 values. The presence of out-of-bounds\n values will render the cache unusable and may slow down parsing.\n\n Returns\n -------\n datetime\n If parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n \"\"\"\n if not isinstance(arg, (DataFrame, Series)):\n return pandas.to_datetime(\n arg,\n errors=errors,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n utc=utc,\n format=format,\n exact=exact,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n cache=cache,\n )\n return arg._to_datetime(\n errors=errors,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n utc=utc,\n format=format,\n exact=exact,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n cache=cache,\n )\n", "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom modin.engines.base.io.text.text_file_reader import TextFileReader\nfrom modin.data_management.utils import compute_chunksize\nfrom io import BytesIO\nimport pandas\nimport numpy as np\n\n\nclass JSONReader(TextFileReader):\n @classmethod\n def _read(cls, path_or_buf, **kwargs):\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n columns = pandas.read_json(\n BytesIO(b\"\" + open(path_or_buf, \"rb\").readline()), lines=True\n ).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with cls.file_open(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n total_bytes = cls.file_size(f)\n from modin.pandas import DEFAULT_NPARTITIONS\n\n num_partitions = DEFAULT_NPARTITIONS\n num_splits = min(len(columns), num_partitions)\n chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)\n\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)\n if column_chunksize > len(columns):\n column_widths = [len(columns)]\n num_splits = 1\n else:\n column_widths = [\n column_chunksize\n if i != num_splits - 1\n else len(columns) - (column_chunksize * (num_splits - 1))\n for i in range(num_splits)\n ]\n\n while f.tell() < total_bytes:\n start = f.tell()\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, \"start\": start}\n args.update(kwargs)\n partition_id = cls.call_deploy(f, chunk_size, num_splits + 3, args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame._apply_index_objs(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pandas\nfrom modin.data_management.utils import split_result_of_axis_func_pandas\n\nNOT_IMPLMENTED_MESSAGE = \"Must be implemented in child class\"\n\n\nclass BaseFrameAxisPartition(object): # pragma: no cover\n \"\"\"This abstract class represents the Parent class for any\n `ColumnPartition` or `RowPartition` class. This class is intended to\n simplify the way that operations are performed\n\n Note 0: The procedures that use this class and its methods assume that\n they have some global knowledge about the entire axis. This may\n require the implementation to use concatenation or append on the\n list of block partitions in this object.\n\n Note 1: The `BaseFrameManager` object that controls these objects\n (through the API exposed here) has an invariant that requires that\n this object is never returned from a function. It assumes that\n there will always be `BaseFramePartition` object stored and structures\n itself accordingly.\n\n The abstract methods that need implemented are `apply` and `shuffle`.\n The children classes must also implement `instance_type` and `partition_type`\n (see below).\n \"\"\"\n\n def apply(\n self,\n func,\n num_splits=None,\n other_axis_partition=None,\n maintain_partitioning=True,\n **kwargs,\n ):\n \"\"\"Applies a function to a full axis.\n\n Note: The procedures that invoke this method assume full axis\n knowledge. Implement this method accordingly.\n\n Important: You must return a list of `BaseFramePartition` objects from\n this method. See Note 1 for this class above for more information.\n\n Args:\n func: The function to apply. This will be preprocessed according to\n the corresponding `RemotePartitions` object.\n num_splits: The number of objects to return, the number of splits\n for the resulting object. It is up to this method to choose the\n splitting at this time.\n other_axis_partition: Another `BaseFrameAxisPartition` object to be applied\n to func. This is for operations that are between datasets.\n maintain_partitioning: Whether or not to keep the partitioning in the same\n orientation as it was previously. This is important because we may be\n operating on an individual AxisPartition and not touching the rest.\n In this case, we have to return the partitioning to its previous\n orientation (the lengths will remain the same). This is ignored between\n two axis partitions.\n\n Returns:\n A list of `BaseFramePartition` objects.\n \"\"\"\n raise NotImplementedError(NOT_IMPLMENTED_MESSAGE)\n\n def shuffle(self, func, lengths, **kwargs):\n \"\"\"Shuffle the order of the data in this axis based on the `lengths`.\n\n Args:\n func: The function to apply before splitting.\n lengths: The list of partition lengths to split the result into.\n\n Returns:\n A list of RemotePartition objects split by `lengths`.\n \"\"\"\n raise NotImplementedError(NOT_IMPLMENTED_MESSAGE)\n\n # Child classes must have these in order to correctly subclass.\n instance_type = None\n partition_type = None\n\n def _wrap_partitions(self, partitions):\n if isinstance(partitions, self.instance_type):\n return [self.partition_type(partitions)]\n else:\n return [self.partition_type(obj) for obj in partitions]\n\n\nclass PandasFrameAxisPartition(BaseFrameAxisPartition):\n \"\"\"This abstract class is created to simplify and consolidate the code for\n AxisPartitions that run pandas. 
Because much of the code is similar, this allows\n us to reuse this code.\n\n Subclasses must implement `list_of_blocks` which unwraps the `RemotePartition`\n objects and creates something interpretable as a pandas DataFrame.\n\n See `modin.engines.ray.pandas_on_ray.axis_partition.PandasOnRayFrameAxisPartition`\n for an example on how to override/use this class when the implementation needs\n to be augmented.\n \"\"\"\n\n def apply(\n self,\n func,\n num_splits=None,\n other_axis_partition=None,\n maintain_partitioning=True,\n **kwargs,\n ):\n \"\"\"Applies func to the object in the plasma store.\n\n See notes in Parent class about this method.\n\n Args:\n func: The function to apply.\n num_splits: The number of times to split the result object.\n other_axis_partition: Another `PandasOnRayFrameAxisPartition` object to apply to\n func with this one.\n maintain_partitioning: Whether or not to keep the partitioning in the same\n orientation as it was previously. This is important because we may be\n operating on an individual AxisPartition and not touching the rest.\n In this case, we have to return the partitioning to its previous\n orientation (the lengths will remain the same). This is ignored between\n two axis partitions.\n\n Returns:\n A list of `RayRemotePartition` objects.\n \"\"\"\n if num_splits is None:\n num_splits = len(self.list_of_blocks)\n\n if other_axis_partition is not None:\n return self._wrap_partitions(\n self.deploy_func_between_two_axis_partitions(\n self.axis,\n func,\n num_splits,\n len(self.list_of_blocks),\n kwargs,\n *tuple(self.list_of_blocks + other_axis_partition.list_of_blocks),\n )\n )\n args = [self.axis, func, num_splits, kwargs, maintain_partitioning]\n args.extend(self.list_of_blocks)\n return self._wrap_partitions(self.deploy_axis_func(*args))\n\n def shuffle(self, func, lengths, **kwargs):\n \"\"\"Shuffle the order of the data in this axis based on the `lengths`.\n\n Extends `BaseFrameAxisPartition.shuffle`.\n\n Args:\n func: The function to apply before splitting.\n lengths: The list of partition lengths to split the result into.\n\n Returns:\n A list of RemotePartition objects split by `lengths`.\n \"\"\"\n num_splits = len(lengths)\n # We add these to kwargs and will pop them off before performing the operation.\n kwargs[\"manual_partition\"] = True\n kwargs[\"_lengths\"] = lengths\n args = [self.axis, func, num_splits, kwargs, False]\n args.extend(self.list_of_blocks)\n return self._wrap_partitions(self.deploy_axis_func(*args))\n\n @classmethod\n def deploy_axis_func(\n cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions\n ):\n \"\"\"Deploy a function along a full axis in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`)\n kwargs: A dictionary of keyword arguments.\n maintain_partitioning: If True, keep the old partitioning if possible.\n If False, create a new partition layout.\n partitions: All partitions that make up the full axis (row or column)\n\n Returns:\n A list of Pandas DataFrames.\n \"\"\"\n # Pop these off first because they aren't expected by the function.\n manual_partition = kwargs.pop(\"manual_partition\", False)\n lengths = kwargs.pop(\"_lengths\", None)\n\n dataframe = pandas.concat(list(partitions), axis=axis, copy=False)\n result = func(dataframe, **kwargs)\n if isinstance(result, pandas.Series):\n if num_splits == 1:\n return result\n return [result] + [pandas.Series([]) for _ 
in range(num_splits - 1)]\n\n if manual_partition:\n # The split function is expecting a list\n lengths = list(lengths)\n # We set lengths to None so we don't use the old lengths for the resulting partition\n # layout. This is done if the number of splits is changing or we are told not to\n # keep the old partitioning.\n elif num_splits != len(partitions) or not maintain_partitioning:\n lengths = None\n else:\n if axis == 0:\n lengths = [len(part) for part in partitions]\n if sum(lengths) != len(result):\n lengths = None\n else:\n lengths = [len(part.columns) for part in partitions]\n if sum(lengths) != len(result.columns):\n lengths = None\n return split_result_of_axis_func_pandas(axis, num_splits, result, lengths)\n\n @classmethod\n def deploy_func_between_two_axis_partitions(\n cls, axis, func, num_splits, len_of_left, kwargs, *partitions\n ):\n \"\"\"Deploy a function along a full axis between two data sets in Ray.\n\n Args:\n axis: The axis to perform the function along.\n func: The function to perform.\n num_splits: The number of splits to return\n (see `split_result_of_axis_func_pandas`).\n len_of_left: The number of values in `partitions` that belong to the\n left data set.\n kwargs: A dictionary of keyword arguments.\n partitions: All partitions that make up the full axis (row or column)\n for both data sets.\n\n Returns:\n A list of Pandas DataFrames.\n \"\"\"\n lt_frame = pandas.concat(partitions[:len_of_left], axis=axis, copy=False)\n rt_frame = pandas.concat(partitions[len_of_left:], axis=axis, copy=False)\n result = func(lt_frame, rt_frame, **kwargs)\n return split_result_of_axis_func_pandas(axis, num_splits, result)\n" ]
[ [ "pandas.to_datetime" ], [ "numpy.array", "pandas.Series", "pandas.DataFrame" ], [ "pandas.concat", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
souradip93/GCDT
[ "5991044307f59598ea224b64f1f3b915fa00ebcc" ]
[ "thumt/bin/trainer.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2018 The THUMT Authors\n\nimport argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport thumt.data.dataset as dataset\nimport thumt.data.record as record\nimport thumt.data.vocab as vocabulary\nimport thumt.models as models\nimport thumt.utils.hooks as hooks\nimport thumt.utils.utils as utils\nimport thumt.utils.parallel as parallel\nimport thumt.utils.search as search\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n description=\"Training neural machine translation models\",\n usage=\"trainer.py [<args>] [-h | --help]\"\n )\n\n # input files\n parser.add_argument(\"--input\", type=str, nargs=2,\n help=\"Path of source and target corpus\")\n parser.add_argument(\"--glove_emb_path\", type=str, default=None,\n help=\"Path of glove embeddings\")\n parser.add_argument(\"--bert_emb_path\", type=str, default=None,\n help=\"Path of bert embeddings\")\n parser.add_argument(\"--record\", type=str,\n help=\"Path to tf.Record data\")\n parser.add_argument(\"--output\", type=str, default=\"train\",\n help=\"Path to saved models\")\n parser.add_argument(\"--vocabulary\", type=str, nargs=3,\n help=\"Path of source and target vocabulary\")\n parser.add_argument(\"--validation\", type=str,\n help=\"Path of validation file\")\n parser.add_argument(\"--references\", type=str, nargs=\"+\",\n help=\"Path of reference files\")\n\n # model and configuration\n parser.add_argument(\"--model\", type=str, required=True,\n help=\"Name of the model\")\n parser.add_argument(\"--parameters\", type=str, default=\"\",\n help=\"Additional hyper parameters\")\n\n return parser.parse_args(args)\n\n\ndef default_parameters():\n params = tf.contrib.training.HParams(\n input=[\"\", \"\"],\n output=\"\",\n record=\"\",\n model=\"rnnsearch\",\n vocab=[\"\", \"\"],\n # Default training hyper parameters\n num_threads=6,\n batch_size=128,\n max_length=256,\n length_multiplier=1,\n mantissa_bits=2,\n warmup_steps=50,\n train_steps=100000,\n buffer_size=10000,\n constant_batch_size=False,\n device_list=[0],\n update_cycle=1,\n initializer=\"xavier\",\n initializer_gain=0.08,\n adam_beta1=0.9,\n adam_beta2=0.999,\n adam_epsilon=1e-6,\n r0=2.0,\n s=1000, \n e=4000,\n clip_grad_norm=5.0,\n learning_rate=1.0,\n learning_rate_decay=\"rnnplus_warmup_decay\",\n learning_rate_boundaries=[0],\n learning_rate_values=[0.0],\n keep_checkpoint_max=100,\n keep_top_checkpoint_max=5,\n gpu_memory_fraction=1,\n # Validation\n eval_steps=100000,\n eval_secs=0,\n eval_batch_size=64,\n top_beams=1,\n beam_size=4,\n decode_alpha=0.6,\n decode_length=0,\n decode_constant=5.0,\n decode_normalize=False,\n validation=\"\",\n references=[\"\"],\n save_checkpoint_secs=0,\n save_checkpoint_steps=1000,\n )\n\n return params\n\n\ndef import_params(model_dir, model_name, params):\n model_dir = os.path.abspath(model_dir)\n p_name = os.path.join(model_dir, \"params.json\")\n m_name = os.path.join(model_dir, model_name + \".json\")\n\n if not tf.gfile.Exists(p_name) or not tf.gfile.Exists(m_name):\n return params\n\n with tf.gfile.Open(p_name) as fd:\n tf.logging.info(\"Restoring hyper parameters from %s\" % p_name)\n json_str = fd.readline()\n params.parse_json(json_str)\n\n with tf.gfile.Open(m_name) as fd:\n tf.logging.info(\"Restoring model parameters from %s\" % m_name)\n json_str = fd.readline()\n params.parse_json(json_str)\n\n return params\n\n\ndef export_params(output_dir, name, params):\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MkDir(output_dir)\n\n # Save 
params as params.json\n filename = os.path.join(output_dir, name)\n with tf.gfile.Open(filename, \"w\") as fd:\n fd.write(params.to_json())\n\n\ndef collect_params(all_params, params):\n collected = tf.contrib.training.HParams()\n\n for k in params.values().keys():\n collected.add_hparam(k, getattr(all_params, k))\n\n return collected\n\n\ndef merge_parameters(params1, params2):\n\n params = tf.contrib.training.HParams()\n\n for (k, v) in params1.values().items():\n params.add_hparam(k, v)\n\n params_dict = list(params.values()) ## key value pair\n\n for (k, v) in params2.values().items():\n if k in params_dict:\n # Override\n setattr(params, k, v)\n else:\n params.add_hparam(k, v)\n\n return params\n\n\ndef override_parameters(params, args):\n params.model = args.model\n params.input = args.input or params.input\n params.glove_emb_path = args.glove_emb_path \n params.bert_emb_path = args.bert_emb_path\n params.output = args.output or params.output\n params.record = args.record or params.record\n params.vocab = args.vocabulary or params.vocab\n params.validation = args.validation or params.validation\n params.references = args.references or params.references\n params.parse(args.parameters)\n\n params.vocabulary = {\n \"source\": vocabulary.load_vocabulary(params.vocab[0]),\n \"target\": vocabulary.load_vocabulary(params.vocab[1]),\n \"char\" : vocabulary.load_vocabulary(params.vocab[2])\n }\n params.vocabulary[\"source\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"source\"], params\n )\n params.vocabulary[\"target\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"target\"], params\n )\n params.vocabulary[\"char\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"char\"], params\n )\n\n control_symbols = [params.pad, params.bos, params.eos, params.unk]\n\n params.mapping = {\n \"source\": vocabulary.get_control_mapping(\n params.vocabulary[\"source\"],\n control_symbols\n ),\n \"target\": vocabulary.get_control_mapping(\n params.vocabulary[\"target\"],\n control_symbols\n ),\n \"char\": vocabulary.get_control_mapping(\n params.vocabulary[\"char\"],\n control_symbols\n )\n }\n\n return params\n\n\ndef get_initializer(params):\n if params.initializer == \"xavier\":\n return tf.contrib.layers.xavier_initializer()\n elif params.initializer == \"uniform\":\n max_val = params.initializer_gain\n return tf.random_uniform_initializer(-max_val, max_val)\n elif params.initializer == \"normal\":\n return tf.random_normal_initializer(0.0, params.initializer_gain)\n elif params.initializer == \"normal_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"normal\")\n elif params.initializer == \"uniform_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\")\n else:\n raise ValueError(\"Unrecognized initializer: %s\" % params.initializer)\n\n\ndef get_learning_rate_decay(learning_rate, global_step, params):\n if params.learning_rate_decay == \"noam\":\n step = tf.to_float(global_step)\n warmup_steps = tf.to_float(params.warmup_steps)\n multiplier = params.hidden_size ** -0.5\n decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5),\n (step + 1) ** -0.5)\n\n return learning_rate * decay\n elif params.learning_rate_decay == \"new_warmup_rsqrt_decay\":\n step = tf.to_float(global_step)\n warmup_steps = tf.to_float(params.warmup_steps)\n multiplier = params.hidden_size ** -0.5\n decay = params.r0 * multiplier * tf.minimum((step + 1) * 
(warmup_steps ** -1.0) * (warmup_steps ** -0.5),\n (step + 1) ** -0.5)\n\n return learning_rate * decay\n elif params.learning_rate_decay == \"rnnplus_warmup_decay\":\n step = tf.to_float(global_step)\n n = float(len(params.device_list))\n warmup_steps = tf.to_float(params.warmup_steps)\n decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps), tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s)))))\n\n return tf.maximum(learning_rate * decay, 5e-6)\n elif params.learning_rate_decay == \"piecewise_constant\":\n return tf.train.piecewise_constant(tf.to_int32(global_step),\n params.learning_rate_boundaries,\n params.learning_rate_values)\n elif params.learning_rate_decay == \"none\":\n return learning_rate\n else:\n raise ValueError(\"Unknown learning_rate_decay\")\n\n\ndef session_config(params):\n optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,\n do_function_inlining=True)\n graph_options = tf.GraphOptions(optimizer_options=optimizer_options)\n config = tf.ConfigProto(allow_soft_placement=True,\n graph_options=graph_options)\n if params.device_list:\n device_str = \",\".join([str(i) for i in params.device_list])\n config.gpu_options.visible_device_list = device_str\n config.gpu_options.per_process_gpu_memory_fraction = params.gpu_memory_fraction\n config.gpu_options.allow_growth = True\n return config\n\n\ndef decode_target_ids(inputs, params):\n decoded = []\n vocab = params.vocabulary[\"target\"]\n\n for item in inputs:\n syms = []\n for idx in item:\n sym = vocab[idx]\n\n if sym == params.eos:\n break\n\n if sym == params.pad:\n break\n\n syms.append(sym)\n decoded.append(syms)\n\n return decoded\n\n\ndef main(args):\n tf.logging.set_verbosity(tf.logging.INFO)\n model_cls = models.get_model(args.model)\n params = default_parameters()\n # Import and override parameters\n # Priorities (low -> high):\n # default -> saved -> command\n params = merge_parameters(params, model_cls.get_parameters())\n params = import_params(args.output, args.model, params)\n override_parameters(params, args)\n\n # Export all parameters and model specific parameters\n export_params(params.output, \"params.json\", params)\n export_params(\n params.output,\n \"%s.json\" % args.model,\n collect_params(params, model_cls.get_parameters())\n )\n\n # Build Graph\n with tf.Graph().as_default():\n if not params.record:\n # Build input queue\n if params.use_bert and params.bert_emb_path:\n features = dataset.get_training_input_with_bert(params.input + [params.bert_emb_path], params)\n else:\n features = dataset.get_training_input(params.input, params)\n else:\n features = record.get_input_features( # ??? 
\n os.path.join(params.record, \"*train*\"), \"train\", params\n )\n\n # Build model\n initializer = get_initializer(params)\n model = model_cls(params)\n\n # Multi-GPU setting\n sharded_losses = parallel.parallel_model(\n model.get_training_func(initializer),\n features,\n params.device_list\n )\n loss = tf.add_n(sharded_losses) / len(sharded_losses)\n\n # Create global step\n global_step = tf.train.get_or_create_global_step()\n\n # Print parameters\n all_weights = {v.name: v for v in tf.trainable_variables()}\n total_size = 0\n\n for v_name in sorted(list(all_weights)):\n v = all_weights[v_name]\n tf.logging.info(\"%s\\tshape %s\", v.name[:-2].ljust(80),\n str(v.shape).ljust(20))\n v_size = np.prod(np.array(v.shape.as_list())).tolist() # mutiple all dimension size\n total_size += v_size\n tf.logging.info(\"Total trainable variables size: %d\", total_size)\n\n learning_rate = get_learning_rate_decay(params.learning_rate,\n global_step, params)\n learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n # Create optimizer\n opt = tf.train.AdamOptimizer(learning_rate,\n beta1=params.adam_beta1,\n beta2=params.adam_beta2,\n epsilon=params.adam_epsilon)\n\n if params.update_cycle == 1:\n train_op = tf.contrib.layers.optimize_loss(\n name=\"training\",\n loss=loss,\n global_step=global_step,\n learning_rate=learning_rate,\n clip_gradients=params.clip_grad_norm or None,\n optimizer=opt,\n colocate_gradients_with_ops=True\n )\n zero_op = tf.no_op(\"zero_op\")\n collect_op = tf.no_op(\"collect_op\")\n else:\n grads_and_vars = opt.compute_gradients(\n loss, colocate_gradients_with_ops=True)\n gradients = [item[0] for item in grads_and_vars]\n variables = [item[1] for item in grads_and_vars]\n variables = utils.replicate_variables(variables)\n zero_op = utils.zero_variables(variables)\n collect_op = utils.collect_gradients(gradients, variables)\n\n scale = 1.0 / params.update_cycle\n gradients, variables = utils.scale_gradients(grads_and_vars, scale)\n\n # Gradient clipping avoid greadient explosion!!\n if isinstance(params.clip_grad_norm or None, float):\n gradients, _ = tf.clip_by_global_norm(gradients,\n params.clip_grad_norm)\n\n # Update variables\n grads_and_vars = list(zip(gradients, variables))\n with tf.control_dependencies([collect_op]):\n train_op = opt.apply_gradients(grads_and_vars, global_step)\n\n # Validation\n '''\n if params.validation and params.references[0]:\n files = [params.validation] + list(params.references)\n eval_inputs = files\n eval_input_fn = dataset.get_evaluation_input\n else:\n print(\"Don't evaluate\")\n eval_input_fn = None\n '''\n # Add hooks\n train_hooks = [\n tf.train.StopAtStepHook(last_step=params.train_steps),\n tf.train.NanTensorHook(loss), # Monitors the loss tensor and stops training if loss is NaN\n tf.train.LoggingTensorHook(\n {\n \"step\": global_step,\n \"loss\": loss,\n \"chars\": tf.shape(features[\"chars\"]),\n \"source\": tf.shape(features[\"source\"]),\n #\"bert\": tf.shape(features[\"bert\"]),\n \"lr\": learning_rate\n },\n every_n_iter=1\n ),\n tf.train.CheckpointSaverHook(\n checkpoint_dir=params.output,\n save_secs=params.save_checkpoint_secs or None,\n save_steps=params.save_checkpoint_steps or None,\n saver=tf.train.Saver(\n max_to_keep=params.keep_checkpoint_max,\n sharded=False\n )\n )\n ]\n\n config = session_config(params)\n '''\n if not eval_input_fn is None:\n train_hooks.append(\n hooks.EvaluationHook(\n lambda f: search.create_inference_graph(\n 
model.get_evaluation_func(), f, params\n ),\n lambda: eval_input_fn(eval_inputs, params),\n lambda x: decode_target_ids(x, params),\n params.output,\n config,\n params.keep_top_checkpoint_max,\n eval_secs=params.eval_secs,\n eval_steps=params.eval_steps\n )\n )\n '''\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=params.output, hooks=train_hooks,\n save_checkpoint_secs=None, config=config) as sess:\n while not sess.should_stop():\n utils.session_run(sess, zero_op)\n for i in range(1, params.update_cycle):\n utils.session_run(sess, collect_op)\n sess.run(train_op)\n\n\nif __name__ == \"__main__\":\n main(parse_args())\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.control_dependencies", "tensorflow.gfile.Exists", "tensorflow.gfile.MkDir", "tensorflow.minimum", "tensorflow.train.AdamOptimizer", "tensorflow.to_int32", "tensorflow.train.MonitoredTrainingSession", "tensorflow.add_n", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.random_uniform_initializer", "tensorflow.OptimizerOptions", "tensorflow.train.get_or_create_global_step", "tensorflow.ConfigProto", "tensorflow.logging.set_verbosity", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.to_float", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.random_normal_initializer", "tensorflow.train.StopAtStepHook", "tensorflow.train.NanTensorHook", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.variance_scaling_initializer", "tensorflow.logging.info", "tensorflow.no_op", "tensorflow.contrib.training.HParams", "tensorflow.maximum", "tensorflow.clip_by_global_norm", "tensorflow.GraphOptions", "tensorflow.contrib.layers.optimize_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
craft-ai/craft-ai-client-python
[ "3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3" ]
[ "tests/test_pandas_get_generator_operations.py" ]
[ "import unittest\n\nfrom craft_ai.pandas import CRAFTAI_PANDAS_ENABLED\n\nif CRAFTAI_PANDAS_ENABLED:\n import copy\n import pandas as pd\n\n import craft_ai.pandas\n\n from .data import pandas_valid_data\n from .utils import generate_entity_id\n from . import settings\n\n AGENT_ID_1_BASE = \"test_1_df_pd\"\n AGENT_ID_2_BASE = \"test_2_df_pd\"\n GENERATOR_ID_BASE = \"test_pandas_gen_df_pd\"\n\n SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION\n SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA\n COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION\n COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2\n COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA\n COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2\n VALID_GENERATOR_CONFIGURATION = pandas_valid_data.VALID_GENERATOR_CONFIGURATION\n VALID_COMPLEX_GENERATOR_CONFIGURATION = (\n pandas_valid_data.VALID_COMPLEX_GENERATOR_CONFIGURATION\n )\n VALID_TIMESTAMP = pandas_valid_data.VALID_TIMESTAMP\n VALID_LAST_TIMESTAMP = pandas_valid_data.VALID_LAST_TIMESTAMP\n\n CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)\n\n\[email protected](CRAFTAI_PANDAS_ENABLED is False, \"pandas is not enabled\")\nclass TestPandasSimpleGeneratorWithOpperations(unittest.TestCase):\n def setUp(self):\n self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + \"GeneratorWithOp\")\n self.generator_id = generate_entity_id(GENERATOR_ID_BASE + \"GeneratorWithOp\")\n\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_generator(self.generator_id)\n CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_1_id)\n\n CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_DATA)\n\n generator_configuration = copy.deepcopy(VALID_GENERATOR_CONFIGURATION)\n generator_configuration[\"filter\"] = [self.agent_1_id]\n CLIENT.create_generator(generator_configuration, self.generator_id)\n\n def tearDown(self):\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_generator(self.generator_id)\n\n def test_simple_pd_get_generator_operations(self):\n df = CLIENT.get_generator_operations(self.generator_id, None, None)\n\n self.assertIsInstance(df, pd.DataFrame)\n self.assertEqual(len(df), 300)\n self.assertEqual(len(df.dtypes), 7)\n self.assertEqual(\n df.timestamp.min(),\n pd.Timestamp(\"2019-12-31 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n self.assertEqual(\n df.timestamp.max(),\n pd.Timestamp(\"2020-01-01 03:59:00+0000\", tz=\"UTC\").value / 1e9,\n )\n\n def test_get_generator_operations_with_pdtimestamp(self):\n\n ops_df = CLIENT.get_generator_operations(\n self.generator_id,\n pd.Timestamp(VALID_TIMESTAMP, unit=\"s\", tz=\"UTC\"),\n pd.Timestamp(VALID_LAST_TIMESTAMP, unit=\"s\", tz=\"UTC\"),\n )\n\n ground_truth_ops_df = CLIENT.get_generator_operations(\n self.generator_id, VALID_TIMESTAMP, VALID_LAST_TIMESTAMP,\n )\n\n self.assertIsInstance(ops_df, pd.DataFrame)\n self.assertFalse(ops_df.empty)\n self.assertNotEqual(ops_df.get(\"agent_id\").any(), None)\n self.assertNotEqual(ops_df.columns.any(), None)\n self.assertTrue(ops_df.equals(ground_truth_ops_df))\n\n\[email protected](CRAFTAI_PANDAS_ENABLED is False, \"pandas is not enabled\")\nclass TestPandasComplexGeneratorWithOpperations(unittest.TestCase):\n def setUp(self):\n self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + \"GeneratorWithOp\")\n self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + \"GeneratorWithOp\")\n self.generator_id = generate_entity_id(GENERATOR_ID_BASE + \"GeneratorWithOp\")\n\n 
CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_agent(self.agent_2_id)\n CLIENT.delete_generator(self.generator_id)\n CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_1_id)\n CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_2_id)\n CLIENT.add_agent_operations(self.agent_1_id, COMPLEX_AGENT_DATA)\n CLIENT.add_agent_operations(self.agent_2_id, COMPLEX_AGENT_DATA_2)\n generator_configuration = copy.deepcopy(VALID_COMPLEX_GENERATOR_CONFIGURATION)\n generator_configuration[\"filter\"] = [self.agent_1_id, self.agent_2_id]\n\n CLIENT.create_generator(generator_configuration, self.generator_id)\n\n def tearDown(self):\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_agent(self.agent_2_id)\n CLIENT.delete_generator(self.generator_id)\n\n def test_complex_pd_get_generator_operations(self):\n df = CLIENT.get_generator_operations(self.generator_id, None, None)\n\n self.assertIsInstance(df, pd.DataFrame)\n self.assertEqual(len(df), 20)\n self.assertEqual(len(df.dtypes), 5)\n self.assertEqual(\n df.timestamp.min(),\n pd.Timestamp(\"2019-12-31 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n self.assertEqual(\n df.timestamp.max(),\n pd.Timestamp(\"2020-01-09 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grybd/oneflow
[ "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69", "82237ad096a10527591660c09b61444c42917e69" ]
[ "python/oneflow/test/modules/test_view.py", "python/oneflow/test/modules/test_randn.py", "python/oneflow/test/graph/test_graph_optim_sgd.py", "python/oneflow/test/modules/test_convtranspose.py", "python/oneflow/test/modules/test_meshgrid.py", "python/oneflow/test/modules/test_replicationpad2d.py", "python/oneflow/test/graph/test_graph_free_eager_tensor.py" ]
[ "\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\n\ndef _test_view(test_case, device):\n x = np.array(\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n ).astype(np.float32)\n input = flow.tensor(\n x, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n of_out = input.view(2, 2, 2, -1)\n of_shape = of_out.numpy().shape\n np_shape = (2, 2, 2, 2)\n test_case.assertTrue(np.array_equal(of_shape, np_shape))\n of_out = of_out.sum()\n of_out.backward()\n np_grad = np.array(\n [\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n )\n test_case.assertTrue(np.allclose(np_grad, input.grad.numpy(), 0.0001, 0.0001))\n\n\ndef _test_view_flow_size(test_case, device):\n x = np.array(\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n ).astype(np.float32)\n input = flow.tensor(\n x, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n shape = flow.Size([2, 2, 2, -1])\n of_out = input.view(shape)\n np_shape = (2, 2, 2, 2)\n test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_shape))\n of_out = of_out.sum()\n of_out.backward()\n np_grad = np.array(\n [\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n )\n test_case.assertTrue(np.allclose(np_grad, input.grad.numpy(), 0.0001, 0.0001))\n\n\[email protected]_unless_1n1d()\nclass TestView(flow.unittest.TestCase):\n def test_view(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_view,\n _test_view_flow_size,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\n\nimport oneflow.unittest\nfrom test_util import GenArgList\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _test_randn(test_case, device, shape):\n y1 = flow.randn(*shape, device=flow.device(device))\n y2 = flow.randn(*shape, device=flow.device(device))\n test_case.assertTrue(not np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))\n test_case.assertTrue(shape == y1.shape)\n\n\ndef _test_0d_rand(test_case, device, shape):\n y1 = flow.randn(*shape, device=flow.device(device))\n y2 = flow.randn(*shape, device=flow.device(device))\n test_case.assertTrue(\n np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4)\n ) # 0d is [] and []\n test_case.assertTrue(shape == y1.shape)\n\n\ndef _test_different_dtype(test_case, device, shape):\n y1 = flow.randn(*shape, dtype=flow.float32, device=flow.device(device))\n y2 = flow.randn(*shape, dtype=flow.float64, device=flow.device(device))\n test_case.assertTrue(not np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))\n test_case.assertTrue(shape == y1.shape)\n\n with test_case.assertRaises(\n oneflow._oneflow_internal.exception.UnimplementedException\n ):\n flow.randn(*shape, dtype=flow.int32, device=flow.device(device))\n\n\ndef _test_backward(test_case, device, shape):\n x = flow.randn(*shape, device=flow.device(device), requires_grad=True)\n y = x.sum()\n y.backward()\n test_case.assertTrue(\n np.allclose(np.ones(shape), x.grad.numpy(), atol=1e-4, rtol=1e-4)\n )\n\n\ndef _test_with_generator(test_case, device, shape):\n gen = flow.Generator()\n gen.manual_seed(0)\n y1 = flow.randn(\n *shape, dtype=flow.float32, device=flow.device(device), generator=gen\n )\n gen.manual_seed(0)\n y2 = flow.randn(\n *shape, dtype=flow.float32, device=flow.device(device), generator=gen\n )\n test_case.assertTrue(np.allclose(y1.numpy(), y2.numpy(), atol=1e-4, rtol=1e-4))\n\n\[email protected]_unless_1n1d()\nclass TestRandnModule(flow.unittest.TestCase):\n def test_consistent_naive(test_case):\n placement = flow.placement(\"cpu\", {0: [0]})\n sbp = (flow.sbp.broadcast,)\n x = flow.randn(16, 16, placement=placement, sbp=sbp)\n test_case.assertEqual(x.sbp, sbp)\n test_case.assertEqual(x.placement, placement)\n\n def test_randn(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_randn,\n _test_different_dtype,\n _test_backward,\n _test_with_generator,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n arg_dict[\"shape\"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]\n\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n def test_0d_randn(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [_test_0d_rand]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n arg_dict[\"shape\"] = [(2, 0, 4), (2, 0, 2)]\n\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The 
OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom test_util import GenArgList\nfrom optimizer_test_util import clip_grad_norm_np\n\nimport oneflow as flow\n\n\ndef compare_with_numpy_sgd(\n test_case, device, x_shape, learning_rate, train_iters, momentum, weight_decay\n):\n random_grad_seq = []\n for _ in range(train_iters):\n random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))\n init_value = np.random.uniform(size=x_shape).astype(np.float32)\n\n class CustomModule(flow.nn.Module):\n def __init__(self):\n super().__init__()\n self.para0 = flow.nn.Parameter(\n flow.tensor(init_value, device=flow.device(device))\n )\n\n def forward(self, mask):\n return self.para0 * mask\n\n simp_module = CustomModule()\n simp_module.to(device)\n simp_module.train()\n\n sgd0 = flow.optim.SGD(\n [\n {\n \"params\": simp_module.parameters(),\n \"lr\": learning_rate,\n \"momentum\": momentum,\n \"weight_decay\": weight_decay,\n }\n ],\n )\n\n class CustomSGDGraph(flow.nn.Graph):\n def __init__(self):\n super().__init__()\n self.m = simp_module\n self.add_optimizer(sgd0)\n\n def build(self, mask_tensor):\n loss = flow.sum(self.m(mask_tensor))\n loss.backward()\n return loss\n\n of_res_list = []\n sgd_graph = CustomSGDGraph()\n for i in range(train_iters):\n mask_tensor = flow.tensor(\n random_grad_seq[i], requires_grad=False, device=flow.device(device)\n )\n sgd_x = sgd_graph(mask_tensor)\n of_res_list.append(simp_module.para0.numpy())\n\n np_res_list = []\n\n def train_by_numpy():\n x = init_value\n vt = np.zeros_like(x)\n\n def np_train_one_iter(grad):\n grad = grad + weight_decay * x\n v = momentum * vt - learning_rate * grad\n param = x + v\n return (param, v)\n\n for i in range(train_iters):\n (x, vt) = np_train_one_iter(random_grad_seq[i])\n np_res_list.append(x)\n\n train_by_numpy()\n test_case.assertTrue(np.allclose(np_res_list, of_res_list, rtol=1e-3, atol=1e-3))\n\n\ndef compare_with_numpy_sgd_clip_grad(\n test_case,\n device,\n x_shape,\n learning_rate,\n momentum,\n weight_decay,\n clip_grad_max_norm,\n clip_grad_norm_type,\n train_iters,\n):\n random_grad_seq = []\n for _ in range(train_iters):\n random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))\n init_value = np.random.uniform(size=x_shape).astype(np.float32)\n\n class CustomModule(flow.nn.Module):\n def __init__(self):\n super().__init__()\n self.para0 = flow.nn.Parameter(\n flow.tensor(init_value, device=flow.device(device))\n )\n\n def forward(self, mask):\n return self.para0 * mask\n\n simp_module = CustomModule()\n simp_module.to(device)\n simp_module.train()\n\n sgd0 = flow.optim.SGD(\n [\n {\n \"params\": simp_module.parameters(),\n \"lr\": learning_rate,\n \"momentum\": momentum,\n \"weight_decay\": weight_decay,\n \"clip_grad_max_norm\": clip_grad_max_norm,\n \"clip_grad_norm_type\": clip_grad_norm_type,\n }\n ]\n )\n\n class CustomSGDGraph(flow.nn.Graph):\n def __init__(self):\n 
super().__init__()\n self.m = simp_module\n self.add_optimizer(sgd0)\n\n def build(self, mask_tensor):\n loss = flow.sum(self.m(mask_tensor))\n loss.backward()\n return loss\n\n of_res_list = []\n sgd_graph = CustomSGDGraph()\n for i in range(train_iters):\n mask_tensor = flow.tensor(\n random_grad_seq[i], requires_grad=False, device=flow.device(device)\n )\n sgd_x = sgd_graph(mask_tensor)\n of_res_list.append(simp_module.para0.numpy())\n\n np_res_list = []\n\n def train_by_numpy():\n x = init_value\n vt = np.zeros_like(x)\n\n def np_train_one_iter(grad):\n norm, grad = clip_grad_norm_np(\n grad, clip_grad_max_norm, clip_grad_norm_type\n )\n grad = grad + weight_decay * x\n v = momentum * vt - learning_rate * grad\n param = x + v\n return (param, v)\n\n for i in range(train_iters):\n (x, vt) = np_train_one_iter(random_grad_seq[i])\n np_res_list.append(x)\n\n train_by_numpy()\n for np_res, of_res in zip(np_res_list, of_res_list):\n test_case.assertTrue(np.allclose(np_res, of_res, rtol=0.001, atol=0.001))\n\n\[email protected]_unless_1n1d()\nclass TestCpuSGD(flow.unittest.TestCase):\n def test_sgd(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n arg_dict[\"x_shape\"] = [(10,)]\n arg_dict[\"learning_rate\"] = [1, 1e-3]\n arg_dict[\"train_iters\"] = [10]\n arg_dict[\"momentum\"] = [0.9, 0.8]\n arg_dict[\"weight_decay\"] = [0.001, 0.0]\n for arg in GenArgList(arg_dict):\n compare_with_numpy_sgd(test_case, *arg)\n\n def test_sgd_with_clip_grad(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n arg_dict[\"x_shape\"] = [(10,)]\n arg_dict[\"learning_rate\"] = [1, 0.1]\n arg_dict[\"momentum\"] = [0.0, 0.9]\n arg_dict[\"weight_decay\"] = [0.0, 0.9]\n arg_dict[\"clip_grad_max_norm\"] = [1.0]\n arg_dict[\"clip_grad_norm_type\"] = [2.0]\n arg_dict[\"train_iters\"] = [10]\n for arg in GenArgList(arg_dict):\n compare_with_numpy_sgd_clip_grad(test_case, *arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\nfrom oneflow.test_utils.automated_test_util import *\n\nimport oneflow as flow\nimport oneflow.nn as nn\nimport oneflow.unittest\n\n\ndef _test_convtranspose1d_bias_false(test_case, device):\n np_arr = np.array([[[0.35356437, -0.95761778, 0.19567713]]])\n weight = np.ones((1, 2, 3))\n test_out_data = np.array(\n [\n [\n [0.35356438, -0.6040534, -0.40837622, -0.7619406, 0.19567713],\n [0.35356438, -0.6040534, -0.40837622, -0.7619406, 0.19567713],\n ]\n ]\n )\n test_out_grad = np.array([[[6.0, 6.0, 6.0]]])\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(1, 2, 3, stride=1, bias=False)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n\n out_flow = out_flow.sum()\n out_flow.backward()\n test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\ndef _test_convtranspose1d_bias_true(test_case, device):\n np_arr = np.array([[[0.54925832, -0.64144184, 0.15213189]]])\n weight = np.ones((1, 2, 3))\n bias = np.array([0.16849578, 0.1509564])\n test_out_data = np.array(\n [\n [\n [0.71775407, 0.07631224, 0.22844413, -0.32081416, 0.32062766],\n [0.7002147, 0.05877288, 0.21090476, -0.3383535, 0.3030883],\n ]\n ]\n )\n test_out_grad = np.array([[[6.0, 6.0, 6.0]]])\n\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(1, 2, 3, stride=1, bias=True)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f.bias = nn.Parameter(flow.Tensor(bias))\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n out_flow = out_flow.sum()\n out_flow.backward()\n test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\ndef _test_convtranspose1d_group_bias_false(test_case, device):\n np_arr = np.array(\n [[[0.38072484, -0.01421228, -0.6512485], [-0.05744093, 2.47079971, 0.17573214]]]\n )\n weight = np.ones((2, 1, 3))\n test_out_data = np.array(\n [\n [\n [0.38072485, 0.36651257, -0.28473592, -0.66546077, -0.6512485],\n [-0.05744093, 2.4133587, 2.5890908, 2.6465318, 0.17573214],\n ]\n ]\n )\n test_out_grad = np.array([[[3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]])\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(2, 2, 3, stride=1, groups=2, bias=False)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n out_flow = out_flow.sum()\n out_flow.backward()\n 
test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\ndef _test_convtranspose1d_group_bias_true(test_case, device):\n np_arr = np.array(\n [\n [\n [-0.77808793, 0.99824008, 0.57340066],\n [1.46278707, -0.65234252, -1.13087643],\n ],\n [\n [0.76053973, 0.62332447, -1.17157106],\n [0.60291466, -0.0472167, 0.89986403],\n ],\n ]\n )\n weight = np.ones((2, 1, 3))\n bias = np.array([0.32546719, 0.14995032])\n test_out_data = np.array(\n [\n [\n [-0.45262071, 0.54561937, 1.11902, 1.897108, 0.89886785],\n [1.6127374, 0.96039486, -0.1704815, -1.6332686, -0.9809261],\n ],\n [\n [1.0860069, 1.7093314, 0.5377604, -0.22277936, -0.8461038],\n [0.75286496, 0.70564824, 1.6055121, 1.0025976, 1.0498143],\n ],\n ]\n )\n test_out_grad = np.array(\n [[[3.0, 3.0, 3.0], [3.0, 3.0, 3.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]]\n )\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(2, 2, 3, stride=1, groups=2, bias=True)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f.bias = nn.Parameter(flow.Tensor(bias))\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n out_flow = out_flow.sum()\n out_flow.backward()\n test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\ndef _test_convtranspose1d_group_large_out_channel(test_case, device):\n np_arr = np.array(\n [\n [\n [2.00934643, 1.5782626, -1.59060988],\n [-1.70463546, 1.30170714, -1.04025804],\n ],\n [\n [0.60327536, 1.26085986, -0.58499662],\n [-0.48145872, -1.64391469, -0.09332249],\n ],\n ]\n )\n weight = np.ones((2, 3, 3))\n test_out_data = np.array(\n [\n [\n [2.0093465, 3.587609, 1.9969991, -0.01234734, -1.5906099],\n [2.0093465, 3.587609, 1.9969991, -0.01234734, -1.5906099],\n [2.0093465, 3.587609, 1.9969991, -0.01234734, -1.5906099],\n [-1.7046355, -0.40292835, -1.4431864, 0.2614491, -1.040258],\n [-1.7046355, -0.40292835, -1.4431864, 0.2614491, -1.040258],\n [-1.7046355, -0.40292835, -1.4431864, 0.2614491, -1.040258],\n ],\n [\n [0.60327536, 1.8641353, 1.2791386, 0.6758632, -0.58499664],\n [0.60327536, 1.8641353, 1.2791386, 0.6758632, -0.58499664],\n [0.60327536, 1.8641353, 1.2791386, 0.6758632, -0.58499664],\n [-0.48145872, -2.1253734, -2.2186959, -1.7372372, -0.09332249],\n [-0.48145872, -2.1253734, -2.2186959, -1.7372372, -0.09332249],\n [-0.48145872, -2.1253734, -2.2186959, -1.7372372, -0.09332249],\n ],\n ]\n )\n test_out_grad = np.array(\n [[[9.0, 9.0, 9.0], [9.0, 9.0, 9.0]], [[9.0, 9.0, 9.0], [9.0, 9.0, 9.0]]]\n )\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(2, 6, 3, stride=1, groups=2, bias=False)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n out_flow = out_flow.sum()\n out_flow.backward()\n test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\ndef _test_convtranspose1d_group_large_in_channel(test_case, device):\n np_arr = np.array(\n [\n [\n [-0.3939792, -0.34989742, 0.15775536],\n [0.927185, 0.25040535, -1.22738067],\n [-0.2187831, -0.24346108, -0.07109655],\n [-1.55353756, -0.37241986, 0.59579139],\n ],\n [\n [-0.01818884, -1.34408642, 1.31260516],\n [0.52124192, 0.52142919, 1.40499944],\n 
[0.7410308, 1.93069512, 0.25694943],\n [-0.30531658, 0.24990326, -0.9493729],\n ],\n ]\n )\n weight = np.ones((4, 1, 3))\n test_out_data = np.array(\n [\n [\n [0.5332058, 0.43371373, -0.6359115, -1.1691173, -1.0696253],\n [-1.7723207, -2.3882017, -1.8635068, -0.09118611, 0.52469486],\n ],\n [\n [0.50305307, -0.31960416, 2.3980005, 1.8949474, 2.7176046],\n [0.43571424, 2.6163127, 1.9238893, 1.488175, -0.69242346],\n ],\n ]\n )\n test_out_grad = np.array(\n [\n [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]],\n [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]],\n ]\n )\n input_flow = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m_f = nn.ConvTranspose1d(4, 2, 3, stride=1, groups=2, bias=False)\n m_f.weight.data = flow.tensor(weight, dtype=flow.float32)\n m_f = m_f.to(device)\n out_flow = m_f(input_flow)\n test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))\n out_flow = out_flow.sum()\n out_flow.backward()\n test_case.assertTrue(\n np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)\n )\n\n\[email protected]_unless_1n1d()\nclass TestConvTranspose(flow.unittest.TestCase):\n def test_ConvTranspose1d(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_convtranspose1d_bias_false,\n _test_convtranspose1d_bias_true,\n _test_convtranspose1d_group_bias_false,\n _test_convtranspose1d_group_bias_true,\n _test_convtranspose1d_group_large_out_channel,\n _test_convtranspose1d_group_large_in_channel,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n @autotest()\n def test_ConvTranspose1d_(test_case):\n channels = random(1, 6)\n m = torch.nn.ConvTranspose1d(\n in_channels=channels,\n out_channels=random(1, 20),\n kernel_size=random(1, 4),\n stride=random() | nothing(),\n padding=random(1, 3).to(int) | nothing(),\n dilation=random(1, 5) | nothing(),\n groups=random(1, 5) | nothing(),\n padding_mode=constant(\"zeros\") | nothing(),\n )\n m.train(random())\n device = random_device()\n m.to(device)\n x = random_pytorch_tensor(ndim=3, dim1=channels).to(device)\n y = m(x)\n return y\n\n @autotest()\n def test_ConvTranspose3d_(test_case):\n channels = random(1, 2)\n m = torch.nn.ConvTranspose3d(\n in_channels=channels,\n out_channels=random(1, 2),\n kernel_size=random(1, 2),\n stride=random() | nothing(),\n padding=random(1, 3).to(int) | nothing(),\n dilation=random(1, 5) | nothing(),\n groups=1,\n padding_mode=constant(\"zeros\") | nothing(),\n )\n m.train(random())\n device = random_device()\n m.to(device)\n x = random_pytorch_tensor(ndim=5, dim1=channels).to(device)\n y = m(x)\n return y\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _test_meshgrid_forawd(test_case, device):\n input1 = flow.tensor(\n np.array([1, 2, 3]), dtype=flow.float32, device=flow.device(device)\n )\n input2 = flow.tensor(\n np.array([4, 5, 6]), dtype=flow.float32, device=flow.device(device)\n )\n (np_x, np_y) = np.meshgrid(input1.numpy(), input2.numpy(), indexing=\"ij\")\n (of_x, of_y) = flow.meshgrid(input1, input2)\n test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))\n test_case.assertTrue(np.allclose(of_y.numpy(), np_y, 0.0001, 0.0001))\n\n\ndef _test_meshgrid_forawd_scalar(test_case, device):\n input1 = flow.tensor(np.array(1.0), dtype=flow.float32, device=flow.device(device))\n input2 = flow.tensor(np.array(2.0), dtype=flow.float32, device=flow.device(device))\n (np_x, np_y) = np.meshgrid(input1.numpy(), input2.numpy(), indexing=\"ij\")\n (of_x, of_y) = flow.meshgrid(input1, input2)\n test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))\n test_case.assertTrue(np.allclose(of_y.numpy(), np_y, 0.0001, 0.0001))\n\n\ndef _test_meshgrid_forawd_3tensor(test_case, device):\n input1 = flow.tensor(\n np.array([1, 2, 3]), dtype=flow.float32, device=flow.device(device)\n )\n input2 = flow.tensor(\n np.array([4, 5, 6]), dtype=flow.float32, device=flow.device(device)\n )\n input3 = flow.tensor(\n np.array([7, 8, 9]), dtype=flow.float32, device=flow.device(device)\n )\n (np_x, np_y, np_z) = np.meshgrid(\n input1.numpy(), input2.numpy(), input3.numpy(), indexing=\"ij\"\n )\n (of_x, of_y, of_z) = flow.meshgrid(input1, input2, input3)\n test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))\n test_case.assertTrue(np.allclose(of_y.numpy(), np_y, 0.0001, 0.0001))\n test_case.assertTrue(np.allclose(of_z.numpy(), np_z, 0.0001, 0.0001))\n\n\[email protected]_unless_1n1d()\nclass TestMeshGridModule(flow.unittest.TestCase):\n def test_meshgrid(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_meshgrid_forawd,\n _test_meshgrid_forawd_scalar,\n _test_meshgrid_forawd_3tensor,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n @autotest(auto_backward=False)\n def test_meshgrid_with_random_data(test_case):\n device = random_device()\n x = random_pytorch_tensor(ndim=1, dim0=3, requires_grad=False).to(device)\n y = random_pytorch_tensor(ndim=1, dim0=3, requires_grad=False).to(device)\n res = torch.meshgrid(x, y)\n return res[0], res[1]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import Array2Numpy, FlattenArray, GenArgList, Index2Coordinate\n\nimport oneflow as flow\nimport oneflow.unittest\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _np_replication_pad2d_grad(src, dest, padding):\n (c_idx, h_idx, w_idx) = (1, 2, 3)\n pad_left = padding[0]\n pad_right = padding[1]\n pad_top = padding[2]\n pad_bottom = padding[3]\n (dx_height, dx_width) = (dest.shape[h_idx], dest.shape[w_idx])\n (dy_height, dy_width) = (src.shape[h_idx], src.shape[w_idx])\n numpy_src = np.ones(src.shape, np.int32)\n numpy_dest = np.zeros(dest.shape, np.int32)\n array_src = FlattenArray(numpy_src)\n array_dest = FlattenArray(numpy_dest)\n src_num = src.shape[c_idx] * src.shape[h_idx] * src.shape[w_idx]\n dest_num = dest.shape[c_idx] * dest.shape[h_idx] * dest.shape[w_idx]\n elements_num = src.shape[0] * src_num\n for iter_n in range(elements_num):\n coords = Index2Coordinate(iter_n, src.shape)\n (n, c, i, j) = (coords[0], coords[c_idx], coords[h_idx], coords[w_idx])\n ip_x = ip_y = 0\n if j < pad_left:\n ip_x = pad_left\n elif j >= pad_left and j < dx_width + pad_left:\n ip_x = j\n else:\n ip_x = dx_width + pad_left - 1\n if i < pad_top:\n ip_y = pad_top\n elif i >= pad_top and i < dx_height + pad_top:\n ip_y = i\n else:\n ip_y = dx_height + pad_top - 1\n ip_x = ip_x - pad_left\n ip_y = ip_y - pad_top\n src_index = n * src_num + c * dy_width * dy_height + i * dy_width + j\n dest_index = n * dest_num + c * dx_width * dx_height + ip_y * dx_width + ip_x\n array_dest[dest_index] += array_src[src_index]\n numpy_dest = Array2Numpy(array_dest, dest.shape)\n return numpy_dest\n\n\ndef _test_ReplicationPad2d(test_case, shape, padding, device):\n np_input = np.random.random(shape).astype(np.float32)\n of_input = flow.tensor(\n np_input, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n if isinstance(padding, int):\n np_boundary = ((0, 0), (0, 0), (padding, padding), (padding, padding))\n boundry = [padding, padding, padding, padding]\n elif isinstance(padding, (tuple, int)) and len(padding) == 4:\n np_boundary = (\n (0, 0),\n (0, 0),\n (padding[2], padding[3]),\n (padding[0], padding[1]),\n )\n boundry = [padding[0], padding[1], padding[2], padding[3]]\n else:\n raise ValueError(\"padding must be in or list or tuple!\")\n layer = flow.nn.ReplicationPad2d(padding=padding)\n of_out = layer(of_input)\n np_out = np.pad(np_input, np_boundary, mode=\"edge\")\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n of_out = of_out.sum()\n of_out.backward()\n np_out_grad = _np_replication_pad2d_grad(np_out, np_input, boundry)\n test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_out_grad, 0.001, 0.001))\n\n\[email protected]_unless_1n1d()\nclass TestReplicationPad2dModule(flow.unittest.TestCase):\n def test_ReplicationPad2d(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(1, 2, 3, 4), 
(8, 3, 4, 4)]\n arg_dict[\"padding\"] = [2, (1, 1, 2, 2)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n _test_ReplicationPad2d(test_case, *arg)\n\n @autotest(n=5)\n def test_replication_pad2d_with_random_data(test_case):\n c = random(1, 6).to(int)\n h = random(1, 6).to(int)\n w = random(1, 6).to(int)\n m = torch.nn.ReplicationPad2d(padding=random(low=0, high=7))\n m.train(random())\n device = random_device()\n m.to(device)\n x = random_pytorch_tensor(ndim=4, dim1=c, dim2=h, dim3=w).to(device)\n y = m(x)\n return y\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nimport unittest\nimport numpy as np\n\nimport oneflow as flow\nimport oneflow.unittest\n\n\nclass MyModuleWithEagerTensorForward(flow.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = flow.nn.Linear(3, 8, False)\n\n def forward(self, x):\n y0 = self.linear(x)\n eager_t = flow.tensor([1.0], dtype=y0.dtype, device=y0.device)\n out = y0 + eager_t\n return out\n\n\[email protected](os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\[email protected]_unless_1n1d()\nclass TestGraphWithEagerTensorCaught(oneflow.unittest.TestCase):\n def test_eager_tensor_forward_graph(test_case):\n my_net_module = MyModuleWithEagerTensorForward()\n flow.nn.init.constant_(my_net_module.linear.weight, 2.3)\n x = np.random.randn(5, 3)\n x = flow.tensor(x, dtype=flow.float32)\n\n class GraphEagerTensorCaught(flow.nn.Graph):\n def __init__(self):\n super().__init__()\n self.my_net = my_net_module\n\n def build(self, x):\n return self.my_net(x)\n\n my_g = GraphEagerTensorCaught()\n graph_out = my_g(x)\n eager_out = my_net_module(x)\n test_case.assertTrue(\n np.allclose(graph_out.numpy(), eager_out.numpy(), atol=1e-4, rtol=1e-4)\n )\n\n def test_eager_tensor_to(test_case):\n class EagerTensorToModule(flow.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self):\n # test free eager tensor to\n t = flow.tensor([1.0], dtype=flow.float32).to(\"cuda\")\n return t\n\n e_m = EagerTensorToModule()\n\n class EagerTensorToGraph(flow.nn.Graph):\n def __init__(self):\n super().__init__()\n self.e_m = e_m\n\n def build(self):\n return self.e_m()\n\n e_g = EagerTensorToGraph()\n graph_out = e_g()\n eager_out = e_m()\n test_case.assertTrue(\n np.allclose(graph_out.numpy(), eager_out.numpy(), atol=1e-4, rtol=1e-4)\n )\n\n\[email protected](os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\[email protected]_unless_1n2d()\nclass ConsistentFreeEagerTensorGraphTestCase(oneflow.unittest.TestCase):\n def test_consistent_eager_tensor_to(test_case):\n rank = flow.env.get_rank()\n placement = flow.placement(\"cpu\", {0: [0, 1]})\n t_l = flow.tensor([1.0, 2.0], dtype=flow.float32)\n t = t_l.to_consistent(placement=placement, sbp=flow.sbp.broadcast)\n\n class ConsistentEagerTensorToModule(flow.nn.Module):\n def __init__(self):\n super().__init__()\n\n def 
forward(self):\n # test free eager tensor to\n nonlocal t\n t = t.to(\"cuda\")\n return t\n\n e_m = ConsistentEagerTensorToModule()\n\n class ConsistentEagerTensorToGraph(flow.nn.Graph):\n def __init__(self):\n super().__init__()\n self.e_m = e_m\n\n def build(self):\n return self.e_m()\n\n e_g = ConsistentEagerTensorToGraph()\n graph_out = e_g().to_local()\n print(\"g \", graph_out.numpy())\n test_case.assertTrue(\n np.allclose(graph_out.numpy(), t_l.numpy(), atol=1e-4, rtol=1e-4)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.array_equal" ], [ "numpy.ones" ], [ "numpy.random.uniform", "numpy.zeros_like", "numpy.allclose" ], [ "numpy.array", "numpy.ones" ], [ "numpy.array" ], [ "numpy.random.random", "numpy.zeros", "numpy.pad", "numpy.ones" ], [ "numpy.random.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
papanisaicharan/Scalable-energy-efficient-scheme-on-mobile-nodes
[ "43f8d9c82b603c33803ee5d07f056eaeabdaf530" ]
[ "SEES_CODE/hybrid_placement.py" ]
[ "\"\"\" \r\n\r\nThis is a hybrid placement algorithm(offline mode)\r\n\"\"\"\r\n\r\nfrom sympy.solvers import solve\r\nfrom sympy import Symbol\r\n#this is testing plot\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.ticker import MultipleLocator\r\nimport numpy as np\r\nimport math\r\nimport random\r\nimport Node as nd\r\nimport EH_relay as relay_nd\r\nimport LBS as L_B_S\r\nimport Zone as zone\r\nimport Network as nw\r\n\r\ndef getpoints(startx,starty,endx,endy,id):\r\n rangeX = (startx,endx)\r\n rangeY = (starty,endy)\r\n randPoints = []\r\n excluded = set()\r\n i = 0\r\n while i<D:\r\n x = random.randrange(*rangeX)\r\n y = random.randrange(*rangeY)\r\n if (x,y) in excluded: continue\r\n randPoints.append((x,y))\r\n i += 1\r\n excluded.update((x, y))\r\n \r\n secure_random = random.SystemRandom()\r\n nodeswithenergy = []#(x,y),energy,id\r\n for j in range(len(nodesinlevel)):\r\n for i in range(nodesinlevel[j]):\r\n list1 = []\r\n list1.append(secure_random.choice(randPoints))\r\n randPoints.remove(list1[0])\r\n list1.append(Et[j])\r\n list1.append(id)\r\n list1.append(j+1)\r\n id = id+1\r\n nodeswithenergy.append(list1)\r\n return(nodeswithenergy)\r\n\r\ndef get_rand_uniform_points_LBS(x1,x2,y1,y2):\r\n final_list = []\r\n t1 = []\r\n t2 = []\r\n t1.append(random.randint(x1,x2))\r\n t2.append(random.randint(y1,y2))\r\n for i in range(no_lbs_on_each_side-1):\r\n p1 = random.randint(x1,x2)\r\n p2 = random.randint(y1,y2)\r\n while check_validation(t1,t2,p1,p2):\r\n p1 = random.randint(x1,x2)\r\n p2 = random.randint(y1,y2)\r\n t1.append(p1)\r\n t2.append(p2)\r\n final_list.append(t1)\r\n final_list.append(t2)\r\n return final_list\r\n\r\ndef euclidean_distance(x1,y1,x2,y2):\r\n distance = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2) ) \r\n return distance\r\n\r\ndef check_validation(t1,t2,p1,p2):\r\n all_distances = []\r\n for i in range(len(t1)):\r\n all_distances.append(euclidean_distance(p1,p2,t1[i],t2[i]))\r\n for i in all_distances:\r\n if i < distance_btw_lbs:\r\n return True\r\n return False\r\n\r\n#taking inputs,parameters setting\r\nN = 2000#int(input(\"Enter N( the total number of HN nodes) : \"))\r\nFs = 20#int(input(\"Enter Fs() : \"))\r\nL = 200#int(input(\"Enter L(length of area) : \"))\r\nB = 12#int(input(\"Enter B(LBS) : \"))\r\nn = 2#int(input(\"Enter n(number of heterogeneity level) : \"))\r\n\r\n#taking constants\r\nalpha = 2#float(input(\"Enter alpha : \"))#singh has taken this as 0.5\r\n#beta = input(\"Enter beta\")\r\ngamma = 0.4#float(input(\"Enter gamma : \"))#intial gamma will be given\r\ntheta = 0.025#float(input(\"Enter theta : \"))#should validate a equation\r\n\r\nEinti = 0.5#float(input(\"Enter E1(initial energy) : \"))\r\n\r\n#we need to validate theta and gamma values and find beta constant\r\nif((gamma / (2*(n-1)) ) > theta):\r\n\tprint(\"validated theta and gamma values\")\r\n\t# finding gamma values\r\n\tgammavalues = [gamma]\r\n\t#getting gamma values,this is done according to singh model\r\n\tfor i in range(1,n+1):\r\n\t #gammai = gammai-1 - 2*theta\r\n\t gammavalues.append(round(gammavalues[i-1] - 2*theta,3))\r\n\t \r\n\tprint(\"gamma-values : \",gammavalues)\r\n\t#number of nodes in each level\r\n\tbeta = gammavalues[len(gammavalues)-1]\r\n\tlastgamma = beta\r\n\tgammavalues[:] = gammavalues[:len(gammavalues)-1]\r\n\r\n\tprint(\"gamma-values : \",gammavalues)\r\n\t#https://docs.sympy.org/latest/modules/solvers/solvers.html#systems-of-polynomial-equations\r\n\tbeta = Symbol('x',positive=True)\r\n\tf =1\r\n\tfor i in 
range(n-1,-1,-1):\r\n\t f = f*(beta-gammavalues[i])\r\n\t f= f+1\r\n\r\n\tf = f-2\r\n\tprint(\"equation used for solving beta: \",f)\r\n\tbeta = solve(f, beta)\r\n\tprint(\"positive beta value: \",beta)\r\n\tNt = [] #indicate cardinality of n categories\r\n\tEt = [] #indicate Energy of n categories\r\n\tEnergySinghTotal = []\r\n\r\n\tfor i in range(1,n+1):\r\n\t # we compute Nit and Eit and append them to Nt and Et,formulea are shown below\r\n\t #(Einti * (1 + ((i − 1) * alpha)))\r\n\t et = i-1\r\n\t et = et*alpha\r\n\t et = et+1\r\n\t et = et*Einti\r\n\t Et.append(et)\r\n\t #Nit = N × (beta − gamma1) × (beta − gamma2) × (beta − gamma3)×⋯× (beta − gammai)\r\n\t nt = N\r\n\t for j in range(0,i):\r\n\t nt = nt *(beta[0] - gammavalues[j])\r\n\t Nt.append(nt)\r\n\r\n\t# print(\"enregy of each HN type : \",Et)\r\n\t# print(\"total number of HN nodes in each level : \",Nt)\r\n\r\n\tNt1 = [round(Nt[i],1) for i in range(len(Nt))]\r\n\tfor i in range(len(Nt1)):\r\n\t if (Nt1[i]-int(Nt1[i])) >= 0.5:\r\n\t Nt1[i] = math.ceil(Nt1[i])\r\n\t else:\r\n\t Nt1[i] = math.floor(Nt1[i])\r\n\t \r\n\t# print(\"rounded total number of HN nodes in each level : \",Nt1)\r\n\ttotal_energy_of_all_nodes = 0.0\r\n\tNt[:] = Nt1[:]\r\n\tfor i in range(len(Nt1)):\r\n\t print(\"Nodes in level - \",i+1,\" = \",Nt[i] ,\" , Energy = \",Et[i])\r\n\t total_energy_of_all_nodes +=Nt[i]*Et[i]\r\n\t# print(\"rounded total number of HN nodes in each level : \",Nt)\r\n\t#checking\r\n\tsum1 = 0\r\n\tfor i in Nt:\r\n\t sum1=sum1+i\r\n\tif N == sum1:\r\n\t print(\"sum of nodes in all the level is equal to N \")\r\n\telse:\r\n\t print(\"sum of nodes in all the level is not equal to N,Something went wrong \")\r\n\t#for sake of solving\r\n\t# print(Nt)\r\n\tfor i in range(len(Nt)):\r\n\t Nt[i] = float(Nt[i]/100)\r\n\t #print(Nt[i])\r\n\t if (Nt[i] - int(Nt[i])) >= 0.5:\r\n\t Nt[i] = math.ceil(Nt[i])*100\r\n\t else:\r\n\t Nt[i] = math.floor(Nt[i])*100\r\n\t \r\n\tprint(\"rounded total number of HN nodes in each level : \",Nt)\r\n\tZ = math.pow(math.ceil(math.sqrt(N)/math.sqrt(Fs)),2)\r\n\tprint(\"number of zones : \",Z)#number of zones\r\n\tNz = []\r\n\tfor z in range(1,int(Z)+1):\r\n\t if z==1:\r\n\t Nz.append(math.ceil(N/Z))\r\n\t else:\r\n\t k = 0\r\n\t for i in Nz:\r\n\t k = k + i\r\n\t \r\n\t Nz.append(math.ceil((N - k)/(Z - z + 1)))\r\n\t \r\n\tprint(\"number of nodes in each zones : \",Nz)\r\n\r\n\trsmax = []#node sensing\r\n\trcmax = []#communication ranges\r\n\r\n\tfor i in range(0,int(Z)):\r\n\t rsmax.append( (L/math.sqrt(int(Z) )) * math.sqrt(2) )\r\n\t rcmax.append( (L/math.sqrt(int(Z)))*math.sqrt(2)*2)\r\n\r\n\t# print(\"sensing range\",rsmax,\" communication range \",rcmax)\r\n\tR = int(math.pow( math.sqrt(int(Z))+1 , 2 ))\r\n\tprint(\"number of relay nodes : \",R)\r\n\tD = int(L/math.sqrt(Z))\r\n\tprint(\"length of working area : \",D)\r\n\r\n\tnodesinlevel = []\r\n\tfor i in range(len(Nt)):\r\n\t nodesinlevel.append(int(Nt[i]/int(Z)))\r\n\r\n\tzones_objects = [] # all nodes objects\r\n\tobject_of_zones = [] # each element in this represent a zone object\r\n\tfor i in range(0,int(math.sqrt(int(Z)))):\r\n\t\tfor j in range(0,int(math.sqrt(int(Z)))):\r\n\t\t\tnp = []\r\n\t\t\tfor p in getpoints(j*20,i*20,(j+1)*20,(i+1)*20,j*20+1+200*i):\r\n\t\t\t\tnp.append(nd.Node(p[0][0],p[0][1],p[1],p[2],p[3],i*10+j+1))\r\n\t\t\tzones_objects.extend(np)\r\n\t\t\tobject_of_zones.append(zone.Zone(i*10+j+1,np))\r\n\t\r\n\r\n\t# for i in zones_objects:\r\n\t# \tprint(i.getlocation(),i.get_node_id(),i.get_e_initial())\r\n\t#for each corner place 
a EH node\r\n\tEH = []\r\n\tcount = 1\r\n\tfor i in range(0,L+1,20):\r\n\t\tfor j in range(0,L+1,20):\r\n\t\t if count <= R: \r\n\t\t \tEH.append(relay_nd.EH_relay(i,j,count))\r\n\t\t \tcount+=1\r\n\r\n\tEHx = []\r\n\tEHy = []\r\n\tfor j in EH:\r\n\t EHx.append(j.getlocation()[0])\r\n\t EHy.append(j.getlocation()[1])\r\n\t# print(EHx,EHy)\r\n\r\n\tx = [[] for i in range(len(Et))]\r\n\ty = [[] for i in range(len(Et))]\r\n\r\n\tfor i in zones_objects:\r\n\t for k in range(len(Et)):\r\n\t if Et[k] == i.get_e_initial():\r\n\t x[k].append(i.getlocation()[0])\r\n\t y[k].append(i.getlocation()[1])\r\n\r\n\t# we will definately start plotting form (0,0), so\r\n\t# we can place base stations from 20 to 50 distance from working area uniformly so as to cover all the nodes\r\n\tdist_lbs = int(input(\"Enter the distance of LBS from working area : \"))\r\n\tperimneter_lbs = (L+2*dist_lbs)*4\r\n\tdistance_btw_lbs = int(perimneter_lbs/B)\r\n\r\n\tleft = [(-dist_lbs, i) for i in range(-dist_lbs, L+2*dist_lbs+1,distance_btw_lbs )]\r\n\ttop = [(i,L+dist_lbs ) for i in range(-dist_lbs+distance_btw_lbs,L+2*dist_lbs+1, distance_btw_lbs)]\r\n\tright = [(L+dist_lbs, i) for i in range(L+dist_lbs-distance_btw_lbs, -dist_lbs-1, -distance_btw_lbs)]\r\n\tbottom = [(i, -dist_lbs) for i in range(L+dist_lbs-distance_btw_lbs, -dist_lbs, -distance_btw_lbs)]\r\n\tidx = left+top+right+bottom\r\n\t#local base station objects\r\n\tlocal_bs = []\r\n\tcount = 1\r\n\tfor i in idx:\r\n\t\tlocal_bs.append(L_B_S.LBS(i[0],i[1],count))\r\n\t\tcount+=1\r\n\r\n\t# network formation\r\n\tnetwork = nw.Network(object_of_zones,EH,local_bs)\r\n\r\n\t#print(x,y)\r\n\tfig = plt.figure()#defining size\r\n\tfig.set_size_inches(100,100)\r\n\tax1 = fig.add_subplot(1,1,1)#adding a plot to figure\r\n\r\n\tspacing = D # This can be your user specified spacing. 
\r\n\tminorLocator = MultipleLocator(spacing)\r\n\t# jet = plt.get_cmap('jet')\r\n\t# colors = iter(jet(np.linspace(0,1,10)))\r\n\tcolors = ['g','r','c','m','y','k','b']\r\n\tfor i in range(0,len(x),1):\r\n\t ax1.plot(x[i],y[i], 'o',color = colors[i])\r\n\r\n\tHN_ids = []\r\n\tfor i in zones_objects:\r\n\t\tHN_ids.append(i.get_node_id())\r\n\t# print(HN_ids)\r\n\r\n\tfor i, txt in enumerate(HN_ids):\r\n\t\tax1.annotate(txt, (zones_objects[i].getlocation()[0],zones_objects[i].getlocation()[1]))\r\n\r\n\tax1.plot(EHx,EHy, 'D',color = colors[len(colors)-1],markersize=12)\r\n\r\n\tEH_ids = []\r\n\tfor i in EH:\r\n\t\tEH_ids.append(i.get_node_id())\r\n\t# print(EH_ids)\r\n\r\n\tfor i, txt in enumerate(EH_ids):\r\n\t\tax1.annotate(txt, (EH[i].getlocation()[0],EH[i].getlocation()[1]))\r\n\r\n\tx = [-50, -50, -20, -20]\r\n\ty = [-50, L+50, L+50, -50]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ -20,L+50,L+50 ,-20]\r\n\ty = [ L+50,L+50,L+20 ,L+20]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ L+50,L+50,L+20 ,L+20]\r\n\ty = [ L+50,-50 ,-50,L+50]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ -20,-20,L+20 ,L+20]\r\n\ty = [ -50,-20 ,-20,-50]\r\n\tax1.fill(x,y,'y')\r\n\tfor i in local_bs:\r\n\t\t# print(i.get_node_id())\r\n\t\tax1.plot(i.getlocation()[0],i.getlocation()[1], 'D',color = colors[1],markersize=20)\r\n\r\n\tLB_ids = []\r\n\tfor i in local_bs:\r\n\t\tLB_ids.append(i.get_node_id())\r\n\t# print(LB_ids)\r\n\r\n\tfor i, txt in enumerate(LB_ids):\r\n\t\tax1.annotate(txt, (local_bs[i].getlocation()[0],local_bs[i].getlocation()[1]))\r\n\r\n\r\n\t# Set minor tick locations.\r\n\tax1.yaxis.set_minor_locator(minorLocator)\r\n\tax1.xaxis.set_minor_locator(minorLocator)\r\n\r\n\tplt.axis([-60, L+60, -60, L+60])#defining axix x and y\r\n\t# Set grid to use minor tick locations. \r\n\r\n\tax1.grid(which = 'minor')#only major works fine\r\n\r\n\tplt.show()\r\n\t\r\nelse:\r\n print(\"error in input values\")" ]
[ [ "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jthhk/Binance-volatility-trading-bot
[ "d815716d2161c5d07cea0506049d73450bd5ef5b" ]
[ "strategies/vyacheslav_signalbuy_VolScan.py" ]
[ "# VolScan is a Binance Volatility Bot(BVT Bot)\n# compatible module that generates crypto buying signals based upon negative price change & volatility.\n# It does this in two different ways,\n# the main one being by calculating the aggregate price change within a user defined period,\n# the second way being by use of the Coefficient Of Variation(CV),\n# which is a statistical measure of the dispersion of data points in a data series around the mean,\n# and is used in certain markets to ascertain the volatility of products:\n# https://www.investopedia.com/terms/c/coefficientofvariation.asp.\n#\n# VolScan provides the option to use either signals generating method individually,\n# or combined within user defined settings.\n# Volscan will provide all the buying signals required for your bot,\n# so other external signal generating modules should be disabled.\n#\n# The way that VolScan works is that it collects all the cryto coin/token data for all USDT coin\n# pairings that appear on Binance into user defined \"scanning periods\" which are varying numbers of minutes in length,\n# each period then being split into the number of individual scans that make up the period.\n# Example. you decide you want your scanning period to be 3 minutes in duration,\n# and within that period you want all coins scanned every 30 seconds,\n# so in total VolScan will carry out 2 scans per minute for 3 minutes in total = 6 price check scans,\n# it then checks the variables between the current price & the previous price all the way back through the total number\n# of scans, coming up with an aggregate change in price % for the whole scanning period.\n# It then removes all coins that have positive changes in price %,\n# and creates a list of all the coins that had a negative change in price, the list is in sequential order,\n# the highest negative price change at the top, the lowest negative price change at the bottom.\n#\n# The Coefficient of Variation method works along similar lines,\n# but concentrates purely on standard deviation in price ranges,\n# the mean or average price which then is calculated into the final CV score for the scanning period....\n# the higher the CV score, the higher the volatility of the coins/tokens.\n# The CV rated coins are then created into a tickers list in exactly\n# the same way as the previously described negative change in price coins.\n#\n# Whichever way you choose to have your tickers lists created,\n# they will then be dynamically updated at the end of every scanning period with a completely new lists\n# of the latest high volatilty coin results.\n#\n# The VolScan module is easy to format with most processes done automatically for you,\n# below are the user defined settings you will need to create to get started using the module:\n\n\nimport os\nimport numpy as np\nfrom time import sleep\nfrom datetime import datetime\n\nfrom binance.client import Client\n\nfrom helpers.parameters import parse_args, load_config\n# Load creds modules\nfrom helpers.handle_creds import (\n load_correct_creds\n)\n\nargs = parse_args()\nDEFAULT_CONFIG_FILE = 'config.yml'\nDEFAULT_CREDS_FILE = 'creds.yml'\n\nconfig_file = args.config if args.config else DEFAULT_CONFIG_FILE\ncreds_file = args.creds if args.creds else DEFAULT_CREDS_FILE\nparsed_creds = load_config(creds_file)\nparsed_config = load_config(config_file)\n\n# Load trading vars\nPAIR_WITH = parsed_config['trading_options']['PAIR_WITH']\nEX_PAIRS = parsed_config['trading_options']['FIATS']\n\n# Load creds for correct 
environment\naccess_key, secret_key = load_correct_creds(parsed_creds)\nclient = Client(access_key, secret_key)\n\n\n# SCANNING_PERIOD - by default, we check the price difference for each coin on Binance for the last 3 minutes,\n# you can change this value for different results.\n# This also determines how often each iteration of the code is executed.\nSCANNING_PERIOD = 3 # minutes\n\n# TIME_SLEEP - how many seconds do you want between each price scan.\n# By default, every 12 seconds the price change will be recorded during SCANNING_PERIOD (3min)\n# After which the calculation is performed. The price change is also calculated every 12 seconds.\nTIME_SLEEP = 30 # seconds\n\n# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.\n# If False, then the list you create in TICKERS_LIST = 'tickers.txt' will be used.\nCREATE_TICKER_LIST = False\n\n# NUMBER_COINS_IN_LIST - Limit the number of coins that can be added to the dynamic list of volatile coins. For example,\n# if NUMBER_COINS_IN_LIST = 20,\n# then each period only 20 sorted coins will be added to the list (Above the lowest values with a minus sign).\nNUMBER_COINS_IN_LIST = 20\n\n# CV_INDEX - Coefficient of Variation. Only those coins with a COV greater than the specified value will be displayed.\nCoV_INDEX = 0.0\n\n# CREATE_LIST_BY_COV_AND_PRICE_CHANGE is a filter for creating dynamic lists of the most volatile coins.\n# If COV_FILTER = True, lists of volatile coins will take into account the CoV parameter.\n# For example,\n# if CoV_INDEX = 0.5, then only coins with CoV above 0.5 and price change less than 0 will be added to list.\n# If False will be used only Price Change.\nCREATE_LIST_BY_COV_AND_PRICE_CHANGE = False\n\n# CREATE_LIST_BY_ONLY_COV - If True - A dynamic list of volatile coins will be created only based on the CoV parameter.\n# For example: If CoV_INDEX = 0.3 then the list will include coins with CoV_INDEX greater than 0.3 and the list will be\n# sorted\n# (At the top there will be coins with the highest CoV)\n# If False The list will be created only based on the Price Change.\nCREATE_LIST_BY_ONLY_COV = False\n\n# When creating a ticker list from the source site:\n# http://edgesforledges.com you can use the parameter (all or innovation-zone).\n# ticker_type = 'innovation-zone'\nticker_type = 'all'\nif CREATE_TICKER_LIST:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n TICKERS_LIST = 'tickers_all.txt'\n\n# BTC_FILTER - This feature is still in development.\n# Objective: Check the change in the price of bitcoin over the scanning period and,\n# based upon the results, either halt the bot from buying, or allow it to continue.\n# make further purchases of coins.\n# For example, if Bitcoin price change = 1.0 and coin price change is negative (-0.8), we give a buy signal....\n# BTC_FILTER = False\n\n\nSIGNAL_NAME = 'vyacheslav_signalbuy_VolScan'\nSIGNAL_FILE_BUY = 'signals/' + SIGNAL_NAME + '.buy'\n\nclass txcolors:\n BUY = '\\033[92m'\n WARNING = '\\033[93m'\n SELL_LOSS = '\\033[91m'\n SELL_PROFIT = '\\033[32m'\n DIM = '\\033[2m\\033[35m'\n DEFAULT = '\\033[39m'\n YELLOW = '\\033[33m'\n TURQUOISE = '\\033[36m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n ITALICS = '\\033[3m'\n\n\n# get_price() function, takes 1 parameter (Binance client).\n# And it returns a dictionary of coins,\n# with the given keys ('symbol'(str), 'price'(float), 'time', 'price_list'(list), 'change_price'(float), 'cov'(float)).\ndef get_price(client_api):\n initial_price = {}\n tickers = 
[line.strip() for line in open(TICKERS_LIST)]\n prices = client_api.get_all_tickers()\n\n for coin in prices:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['price'],\n 'time': datetime.now(),\n 'price_list': [],\n 'change_price': 0.0,\n 'cov': 0.0}\n return initial_price\n\n\n# Function с_о_v(), takes 1 parameter (List of coin prices for the period 'price_list': []).\n# And it returns the Coefficient of Variation (float) of the coin.\ndef c_o_v(price_list):\n if price_list:\n a = np.array(price_list, float)\n cov = round((a.std() / a.mean()) * 100, 2)\n return cov\n return 0.0\n\n\n# Percentage_price_change() function, takes 1 parameter (List of coin prices for the period 'price_list': []).\n# And it returns the percentage of price change.\ndef percentage_price_change(price_list):\n if price_list:\n return round(sum([100 * (b - a) / a for a, b in zip(price_list[::1], price_list[1::1])]), 4)\n\n\n# sort_list_coins() function, takes 2 parameters (List of coins and sorting type).\n# Based on the sorting type, sorts the coins in the list by their 'change_price' or 'cov'.\n# And it returns a sorted list.\ndef sort_list_coins(list_coins, sort_type='change_price'):\n if sort_type == 'cov':\n sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'], reverse=True)\n else:\n sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'])\n return sort_list\n\n\n# do_work () function, takes 1 parameter (Binance client). This is the main function of the module.\n# Which, in an endless cycle, searches for coins with a negative indicator of price change,\n# sorts them and gives buy signals.\ndef do_work():\n # Initializing coins for data storage.\n init_price = get_price(client)\n list_volatility = []\n count = 0\n\n while True:\n print(f'{txcolors.YELLOW}{SIGNAL_NAME} launched with a period of {SCANNING_PERIOD} minutes.')\n print(f\"{txcolors.YELLOW}Number of coins to scan - {len(init_price)}\")\n # We reset the data every period.\n if count == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n init_price = get_price(client)\n list_volatility = []\n count = 0\n\n # Start a cycle to collect prices for each coin within a period.\n while count < (SCANNING_PERIOD * 60) / TIME_SLEEP:\n count += 1\n print(f'{txcolors.YELLOW}{SIGNAL_NAME} Round {count} complete. 
Next scan in {TIME_SLEEP} seconds.')\n try:\n # Requesting the latest coin prices\n last_price = get_price(client)\n\n for coin in last_price:\n # if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n # del init_price[coin]['price_list'][0]\n init_price[coin]['price_list'].append(float(last_price[coin]['price']))\n\n if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n coin_price_list = init_price[coin]['price_list']\n percent_change_price = percentage_price_change(coin_price_list)\n cov = c_o_v(coin_price_list)\n\n if CREATE_LIST_BY_COV_AND_PRICE_CHANGE:\n condition = percent_change_price < 0 and cov >= CoV_INDEX\n\n elif CREATE_LIST_BY_ONLY_COV:\n condition = cov >= CoV_INDEX\n\n else:\n condition = percent_change_price < 0\n\n if condition:\n if init_price[coin] not in list_volatility:\n init_price[coin]['time'] = datetime.now()\n init_price[coin]['change_price'] = percent_change_price\n init_price[coin]['cov'] = cov\n\n list_volatility.append(init_price[coin])\n\n if not list_volatility:\n print(f'{txcolors.YELLOW}Stand by for next update ...')\n else:\n if os.path.exists(SIGNAL_FILE_BUY):\n os.remove(SIGNAL_FILE_BUY)\n\n if CREATE_LIST_BY_ONLY_COV:\n sort_t = 'cov'\n else:\n sort_t = 'change_price'\n sort_list_vol_coin = sort_list_coins(list_volatility, sort_type=sort_t)\n\n for item in sort_list_vol_coin[:NUMBER_COINS_IN_LIST]:\n print(f'{txcolors.YELLOW}{SIGNAL_NAME}: detected a signal on{txcolors.END} '\n f'{txcolors.YELLOW}{item[\"symbol\"]}{txcolors.END}'\n )\n with open(SIGNAL_FILE_BUY, 'a+') as f:\n f.write(item[\"symbol\"] + '\\n')\n\n sleep(TIME_SLEEP)\n except Exception as e:\n print(f'{SIGNAL_NAME}: Exception do_work() 1: {e}')\n continue\n except KeyboardInterrupt as ki:\n continue" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MSLars/allennlp
[ "2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475", "2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475", "2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475", "2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475", "2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475" ]
[ "tests/modules/seq2seq_encoders/gated_cnn_encoder_test.py", "tests/modules/seq2vec_encoders/pytorch_seq2vec_wrapper_test.py", "allennlp/modules/maxout.py", "tests/modules/transformer/transformer_embeddings_test.py", "tests/commands/evaluate_test.py" ]
[ "import torch\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder\n\n\nclass TestGatedCnnEncoder(AllenNlpTestCase):\n def test_gated_cnn_encoder(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32,\n layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert list(output.size()) == [5, 10, 64]\n\n def test_gated_cnn_encoder_dilations(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32, layers=[[[2, 32, 1]], [[2, 32, 2]], [[2, 32, 4]], [[2, 32, 8]]]\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert list(output.size()) == [5, 10, 64]\n\n def test_gated_cnn_encoder_layers(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32,\n layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],\n return_all_layers=True,\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert len(output) == 3\n concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)\n assert list(concat_layers.size()) == [5, 3, 10, 64]\n", "import pytest\nfrom numpy.testing import assert_almost_equal\nimport torch\nfrom torch.nn import LSTM\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper\nfrom allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask\nfrom allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm\n\n\nclass TestPytorchSeq2VecWrapper(AllenNlpTestCase):\n def test_get_dimensions_is_correct(self):\n lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)\n encoder = PytorchSeq2VecWrapper(lstm)\n assert encoder.get_output_dim() == 14\n assert encoder.get_input_dim() == 2\n lstm = LSTM(\n bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True\n )\n encoder = PytorchSeq2VecWrapper(lstm)\n assert encoder.get_output_dim() == 7\n assert encoder.get_input_dim() == 2\n\n def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):\n lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)\n encoder = PytorchSeq2VecWrapper(lstm)\n input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])\n lstm_output = lstm(input_tensor)\n encoder_output = encoder(input_tensor, None)\n assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy()[:, -1, :])\n\n def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):\n lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)\n encoder = PytorchSeq2VecWrapper(lstm)\n\n input_tensor = torch.rand([5, 7, 3])\n input_tensor[1, 6:, :] = 0\n input_tensor[2, 4:, :] = 0\n input_tensor[3, 2:, :] = 0\n input_tensor[4, 1:, :] = 0\n mask = torch.ones(5, 7).bool()\n mask[1, 6:] = False\n mask[2, 4:] = False\n mask[3, 2:] = False\n mask[4, 1:] = False\n\n sequence_lengths = get_lengths_from_binary_sequence_mask(mask)\n 
packed_sequence = pack_padded_sequence(\n input_tensor, sequence_lengths.tolist(), batch_first=True\n )\n _, state = lstm(packed_sequence)\n # Transpose output state, extract the last forward and backward states and\n # reshape to be of dimension (batch_size, 2 * hidden_size).\n reshaped_state = state[0].transpose(0, 1)[:, -2:, :].contiguous()\n explicitly_concatenated_state = torch.cat(\n [reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1\n )\n encoder_output = encoder(input_tensor, mask)\n assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())\n\n def test_forward_works_even_with_empty_sequences(self):\n lstm = LSTM(\n bidirectional=True, num_layers=3, input_size=3, hidden_size=11, batch_first=True\n )\n encoder = PytorchSeq2VecWrapper(lstm)\n\n tensor = torch.rand([5, 7, 3])\n tensor[1, 6:, :] = 0\n tensor[2, :, :] = 0\n tensor[3, 2:, :] = 0\n tensor[4, :, :] = 0\n mask = torch.ones(5, 7).bool()\n mask[1, 6:] = False\n mask[2, :] = False\n mask[3, 2:] = False\n mask[4, :] = False\n\n results = encoder(tensor, mask)\n\n for i in (0, 1, 3):\n assert not (results[i] == 0.0).data.all()\n for i in (2, 4):\n assert (results[i] == 0.0).data.all()\n\n def test_forward_pulls_out_correct_tensor_with_unsorted_batches(self):\n lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)\n encoder = PytorchSeq2VecWrapper(lstm)\n\n input_tensor = torch.rand([5, 7, 3])\n input_tensor[0, 3:, :] = 0\n input_tensor[1, 4:, :] = 0\n input_tensor[2, 2:, :] = 0\n input_tensor[3, 6:, :] = 0\n mask = torch.ones(5, 7).bool()\n mask[0, 3:] = False\n mask[1, 4:] = False\n mask[2, 2:] = False\n mask[3, 6:] = False\n\n sequence_lengths = get_lengths_from_binary_sequence_mask(mask)\n sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(\n input_tensor, sequence_lengths\n )\n packed_sequence = pack_padded_sequence(\n sorted_inputs, sorted_sequence_lengths.tolist(), batch_first=True\n )\n _, state = lstm(packed_sequence)\n # Transpose output state, extract the last forward and backward states and\n # reshape to be of dimension (batch_size, 2 * hidden_size).\n sorted_transposed_state = state[0].transpose(0, 1).index_select(0, restoration_indices)\n reshaped_state = sorted_transposed_state[:, -2:, :].contiguous()\n explicitly_concatenated_state = torch.cat(\n [reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1\n )\n encoder_output = encoder(input_tensor, mask)\n assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())\n\n def test_wrapper_raises_if_batch_first_is_false(self):\n with pytest.raises(ConfigurationError):\n lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)\n _ = PytorchSeq2VecWrapper(lstm)\n\n def test_wrapper_works_with_alternating_lstm(self):\n model = PytorchSeq2VecWrapper(\n StackedAlternatingLstm(input_size=4, hidden_size=5, num_layers=3)\n )\n\n input_tensor = torch.randn(2, 3, 4)\n mask = torch.ones(2, 3).bool()\n output = model(input_tensor, mask)\n assert tuple(output.size()) == (2, 5)\n", "\"\"\"\nA maxout neural network.\n\"\"\"\nfrom typing import Sequence, Union\n\nimport torch\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.registrable import FromParams\n\n\nclass Maxout(torch.nn.Module, FromParams):\n \"\"\"\n This `Module` is a maxout neural network.\n\n # Parameters\n\n input_dim : `int`, required\n The dimensionality of the input. 
We assume the input has shape `(batch_size, input_dim)`.\n num_layers : `int`, required\n The number of maxout layers to apply to the input.\n output_dims : `Union[int, Sequence[int]]`, required\n The output dimension of each of the maxout layers. If this is a single `int`, we use\n it for all maxout layers. If it is a `Sequence[int]`, `len(output_dims)` must be\n `num_layers`.\n pool_sizes : `Union[int, Sequence[int]]`, required\n The size of max-pools. If this is a single `int`, we use\n it for all maxout layers. If it is a `Sequence[int]`, `len(pool_sizes)` must be\n `num_layers`.\n dropout : `Union[float, Sequence[float]]`, optional (default = `0.0`)\n If given, we will apply this amount of dropout after each layer. Semantics of `float`\n versus `Sequence[float]` is the same as with other parameters.\n \"\"\"\n\n def __init__(\n self,\n input_dim: int,\n num_layers: int,\n output_dims: Union[int, Sequence[int]],\n pool_sizes: Union[int, Sequence[int]],\n dropout: Union[float, Sequence[float]] = 0.0,\n ) -> None:\n super().__init__()\n if not isinstance(output_dims, list):\n output_dims = [output_dims] * num_layers # type: ignore\n if not isinstance(pool_sizes, list):\n pool_sizes = [pool_sizes] * num_layers # type: ignore\n if not isinstance(dropout, list):\n dropout = [dropout] * num_layers # type: ignore\n if len(output_dims) != num_layers:\n raise ConfigurationError(\n \"len(output_dims) (%d) != num_layers (%d)\" % (len(output_dims), num_layers)\n )\n if len(pool_sizes) != num_layers:\n raise ConfigurationError(\n \"len(pool_sizes) (%d) != num_layers (%d)\" % (len(pool_sizes), num_layers)\n )\n if len(dropout) != num_layers:\n raise ConfigurationError(\n \"len(dropout) (%d) != num_layers (%d)\" % (len(dropout), num_layers)\n )\n\n self._pool_sizes = pool_sizes\n input_dims = [input_dim] + output_dims[:-1]\n linear_layers = []\n for layer_input_dim, layer_output_dim, pool_size in zip(\n input_dims, output_dims, pool_sizes\n ):\n linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim * pool_size))\n self._linear_layers = torch.nn.ModuleList(linear_layers)\n dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]\n self._dropout = torch.nn.ModuleList(dropout_layers)\n self._output_dims = output_dims\n self._output_dim = output_dims[-1]\n self._input_dim = input_dim\n\n def get_output_dim(self):\n return self._output_dim\n\n def get_input_dim(self):\n return self._input_dim\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n\n output = inputs\n for layer, layer_output_dim, dropout, pool_size in zip(\n self._linear_layers, self._output_dims, self._dropout, self._pool_sizes\n ):\n affine_output = layer(output)\n # Compute and apply the proper shape for the max.\n shape = list(inputs.size())\n shape[-1] = layer_output_dim\n shape.append(pool_size)\n\n maxed_output = torch.max(affine_output.view(*shape), dim=-1)[0]\n dropped_output = dropout(maxed_output)\n output = dropped_output\n return output\n", "import copy\n\nimport pytest\nimport torch\nfrom torch.testing import assert_allclose\nfrom transformers import AutoModel\nfrom transformers.models.bert.configuration_bert import BertConfig\nfrom transformers.models.bert.modeling_bert import BertEmbeddings\nfrom transformers.models.albert.configuration_albert import AlbertConfig\nfrom transformers.models.albert.modeling_albert import AlbertEmbeddings\n\nfrom allennlp.common import Params, FromParams\nfrom allennlp.modules.transformer import (\n TransformerEmbeddings,\n ImageFeatureEmbeddings,\n 
TransformerModule,\n)\n\n\nPARAMS_DICT = {\n \"vocab_size\": 20,\n \"embedding_size\": 5,\n \"pad_token_id\": 0,\n \"max_position_embeddings\": 3,\n \"type_vocab_size\": 2,\n \"dropout\": 0.5,\n}\n\n\[email protected]\ndef params_dict():\n return copy.deepcopy(PARAMS_DICT)\n\n\[email protected]\ndef params(params_dict):\n return Params(params_dict)\n\n\[email protected]\ndef transformer_embeddings(params):\n return TransformerEmbeddings.from_params(params.duplicate())\n\n\ndef test_can_construct_from_params(params_dict, transformer_embeddings):\n embeddings = transformer_embeddings.embeddings\n assert embeddings.word_embeddings.num_embeddings == params_dict[\"vocab_size\"]\n assert embeddings.word_embeddings.embedding_dim == params_dict[\"embedding_size\"]\n assert embeddings.word_embeddings.padding_idx == params_dict[\"pad_token_id\"]\n\n assert embeddings.position_embeddings.num_embeddings == params_dict[\"max_position_embeddings\"]\n assert embeddings.position_embeddings.embedding_dim == params_dict[\"embedding_size\"]\n\n assert embeddings.token_type_embeddings.num_embeddings == params_dict[\"type_vocab_size\"]\n assert embeddings.token_type_embeddings.embedding_dim == params_dict[\"embedding_size\"]\n\n assert transformer_embeddings.layer_norm.normalized_shape[0] == params_dict[\"embedding_size\"]\n\n assert transformer_embeddings.dropout.p == params_dict[\"dropout\"]\n\n\ndef test_sanity():\n class TextEmbeddings(TransformerModule, FromParams):\n def __init__(\n self,\n vocab_size: int,\n hidden_size: int,\n pad_token_id: int,\n max_position_embeddings: int,\n type_vocab_size: int,\n dropout: float,\n ):\n super().__init__()\n self.word_embeddings = torch.nn.Embedding(\n vocab_size, hidden_size, padding_idx=pad_token_id\n )\n self.position_embeddings = torch.nn.Embedding(max_position_embeddings, hidden_size)\n self.token_type_embeddings = torch.nn.Embedding(type_vocab_size, hidden_size)\n\n self.layer_norm = torch.nn.LayerNorm(hidden_size, eps=1e-12)\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.layer_norm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n torch.manual_seed(23)\n text = TextEmbeddings(10, 5, 2, 3, 7, 0.0)\n torch.manual_seed(23)\n transformer = TransformerEmbeddings(10, 5, 2, 3, None, 7, 0.0)\n\n input_ids = torch.tensor([[1, 2]])\n token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)\n position_ids = torch.tensor([[0, 1]])\n\n text_output = text(input_ids, token_type_ids, position_ids)\n transformer_output = transformer(input_ids, token_type_ids, position_ids)\n\n assert_allclose(text_output, transformer_output)\n\n\ndef 
test_forward_runs_with_inputs(transformer_embeddings):\n input_ids = torch.tensor([[1, 2]])\n token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)\n position_ids = torch.tensor([[0, 1]])\n transformer_embeddings(\n input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids\n )\n\n\ndef test_output_size(params):\n input_ids = torch.tensor([[1, 2]])\n token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)\n position_ids = torch.tensor([[0, 1]])\n params[\"output_size\"] = 7\n module = TransformerEmbeddings.from_params(params)\n output = module(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\n\n assert output.shape[-1] == 7\n\n\ndef test_no_token_type_layer(params):\n params[\"type_vocab_size\"] = 0\n module = TransformerEmbeddings.from_params(params)\n assert len(module.embeddings) == 2\n\n\[email protected](\n \"pretrained_name\",\n [\n \"bert-base-cased\",\n \"epwalsh/bert-xsmall-dummy\",\n ],\n)\ndef test_loading_from_pretrained_module(pretrained_name):\n TransformerEmbeddings.from_pretrained_module(pretrained_name)\n\n\ndef test_loading_albert():\n \"\"\"\n Albert is a special case because it includes a Linear layer in the encoder\n that maps the embeddings to the encoder hidden size, but we include this linear\n layer within our embedding layer.\n \"\"\"\n transformer_embedding = TransformerEmbeddings.from_pretrained_module(\n \"albert-base-v2\",\n )\n albert = AutoModel.from_pretrained(\"albert-base-v2\")\n assert_allclose(\n transformer_embedding.embeddings.word_embeddings.weight.data,\n albert.embeddings.word_embeddings.weight.data,\n )\n assert_allclose(\n transformer_embedding.linear_transform.weight.data,\n albert.encoder.embedding_hidden_mapping_in.weight.data,\n )\n\n\ndef get_modules():\n params = copy.deepcopy(PARAMS_DICT)\n\n params[\"hidden_dropout_prob\"] = params.pop(\"dropout\")\n params[\"hidden_size\"] = params.pop(\"embedding_size\")\n\n # bert, roberta, electra self attentions have the same code.\n\n torch.manual_seed(1234)\n yield \"bert\", BertEmbeddings(BertConfig(**params))\n\n albertparams = copy.deepcopy(PARAMS_DICT)\n albertparams[\"hidden_dropout_prob\"] = albertparams.pop(\"dropout\")\n\n torch.manual_seed(1234)\n yield \"albert\", AlbertEmbeddings(AlbertConfig(**albertparams))\n\n\[email protected](\"module_name, hf_module\", get_modules())\ndef test_forward_against_huggingface_output(transformer_embeddings, module_name, hf_module):\n input_ids = torch.tensor([[1, 2]])\n token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)\n position_ids = torch.tensor([[0, 1]])\n\n state_dict = transformer_embeddings._get_mapped_state_dict(hf_module.state_dict())\n if \"position_ids\" in state_dict:\n del state_dict[\"position_ids\"]\n transformer_embeddings.load_state_dict(state_dict)\n\n torch.manual_seed(1234)\n transformer_embeddings = (\n transformer_embeddings.eval()\n ) # setting to eval mode to avoid non-deterministic dropout.\n output = transformer_embeddings(\n input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids\n )\n\n torch.manual_seed(1234)\n hf_module = hf_module.eval() # setting to eval mode to avoid non-deterministic dropout.\n hf_output = hf_module(\n input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids\n )\n\n assert torch.allclose(output, hf_output)\n\n\[email protected]\ndef image_params_dict():\n return {\"feature_size\": 3, \"embedding_size\": 5, \"dropout\": 0.1}\n\n\[email protected]\ndef image_params(image_params_dict):\n return 
Params(image_params_dict)\n\n\[email protected]\ndef image_embeddings(image_params):\n return ImageFeatureEmbeddings.from_params(image_params.duplicate())\n\n\ndef test_can_construct_image_embeddings_from_params(image_embeddings, image_params_dict):\n assert (\n image_embeddings.embeddings.image_embeddings.in_features\n == image_params_dict[\"feature_size\"]\n )\n assert (\n image_embeddings.embeddings.image_embeddings.out_features\n == image_params_dict[\"embedding_size\"]\n )\n assert (\n image_embeddings.embeddings.location_embeddings.out_features\n == image_params_dict[\"embedding_size\"]\n )\n assert image_embeddings.dropout.p == image_params_dict[\"dropout\"]\n\n\ndef test_image_embedding_forward_runs_with_inputs(image_embeddings, image_params_dict):\n batch_size = 2\n feature_dim = image_params_dict[\"feature_size\"]\n image_feature = torch.randn(batch_size, feature_dim)\n image_location = torch.randn(batch_size, 4)\n image_embeddings(image_feature, image_location)\n\n\ndef test_image_embeddings_sanity(image_params_dict):\n class OldImageFeatureEmbeddings(TransformerModule, FromParams):\n \"\"\"Construct the embeddings from image, spatial location (omit now) and\n token_type embeddings.\n \"\"\"\n\n def __init__(self, feature_size: int, embedding_size: int, dropout: float = 0.0):\n super().__init__()\n\n self.image_embeddings = torch.nn.Linear(feature_size, embedding_size)\n self.image_location_embeddings = torch.nn.Linear(4, embedding_size, bias=False)\n self.layer_norm = torch.nn.LayerNorm(embedding_size, eps=1e-12)\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(self, image_feature: torch.Tensor, image_location: torch.Tensor):\n img_embeddings = self.image_embeddings(image_feature)\n loc_embeddings = self.image_location_embeddings(image_location)\n embeddings = self.layer_norm(img_embeddings + loc_embeddings)\n embeddings = self.dropout(embeddings)\n\n return embeddings\n\n torch.manual_seed(23)\n old = OldImageFeatureEmbeddings(**image_params_dict)\n torch.manual_seed(23)\n now = ImageFeatureEmbeddings(**image_params_dict)\n\n batch_size = 2\n\n image_feature = torch.randn(batch_size, image_params_dict[\"feature_size\"])\n image_location = torch.randn(batch_size, 4)\n\n torch.manual_seed(23)\n old_output = old(image_feature, image_location)\n torch.manual_seed(23)\n now_output = now(image_feature, image_location)\n\n assert_allclose(old_output, now_output)\n", "import argparse\nimport json\nfrom typing import Iterator, List, Dict\n\nimport torch\nfrom flaky import flaky\nimport pytest\n\nfrom allennlp.commands.evaluate import evaluate_from_args, Evaluate, evaluate\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data.data_loaders import TensorDict\nfrom allennlp.models import Model\n\n\nclass DummyDataLoader:\n def __init__(self, outputs: List[TensorDict]) -> None:\n super().__init__()\n self._outputs = outputs\n\n def __iter__(self) -> Iterator[TensorDict]:\n yield from self._outputs\n\n def __len__(self):\n return len(self._outputs)\n\n def set_target_device(self, _):\n pass\n\n\nclass DummyModel(Model):\n def __init__(self) -> None:\n super().__init__(None) # type: ignore\n\n def forward(self, **kwargs) -> Dict[str, torch.Tensor]: # type: ignore\n return kwargs\n\n\nclass TestEvaluate(AllenNlpTestCase):\n def setup_method(self):\n super().setup_method()\n\n self.parser = argparse.ArgumentParser(description=\"Testing\")\n subparsers = self.parser.add_subparsers(title=\"Commands\", metavar=\"\")\n Evaluate().add_subparser(subparsers)\n\n def 
test_evaluate_calculates_average_loss(self):\n losses = [7.0, 9.0, 8.0]\n outputs = [{\"loss\": torch.Tensor([loss])} for loss in losses]\n data_loader = DummyDataLoader(outputs)\n metrics = evaluate(DummyModel(), data_loader, -1, \"\")\n assert metrics[\"loss\"] == pytest.approx(8.0)\n\n def test_evaluate_calculates_average_loss_with_weights(self):\n losses = [7.0, 9.0, 8.0]\n weights = [10, 2, 1.5]\n inputs = zip(losses, weights)\n outputs = [\n {\"loss\": torch.Tensor([loss]), \"batch_weight\": torch.Tensor([weight])}\n for loss, weight in inputs\n ]\n data_loader = DummyDataLoader(outputs)\n metrics = evaluate(DummyModel(), data_loader, -1, \"batch_weight\")\n assert metrics[\"loss\"] == pytest.approx((70 + 18 + 12) / 13.5)\n\n @flaky\n def test_evaluate_from_args(self):\n kebab_args = [\n \"evaluate\",\n str(\n self.FIXTURES_ROOT / \"simple_tagger_with_span_f1\" / \"serialization\" / \"model.tar.gz\"\n ),\n str(self.FIXTURES_ROOT / \"data\" / \"conll2003.txt\"),\n \"--cuda-device\",\n \"-1\",\n ]\n\n args = self.parser.parse_args(kebab_args)\n metrics = evaluate_from_args(args)\n assert metrics.keys() == {\n \"accuracy\",\n \"accuracy3\",\n \"precision-overall\",\n \"recall-overall\",\n \"f1-measure-overall\",\n \"loss\",\n }\n\n def test_output_file_evaluate_from_args(self):\n output_file = str(self.TEST_DIR / \"metrics.json\")\n predictions_output_file = str(self.TEST_DIR / \"predictions.jsonl\")\n kebab_args = [\n \"evaluate\",\n str(\n self.FIXTURES_ROOT / \"simple_tagger_with_span_f1\" / \"serialization\" / \"model.tar.gz\"\n ),\n str(self.FIXTURES_ROOT / \"data\" / \"conll2003.txt\"),\n \"--cuda-device\",\n \"-1\",\n \"--output-file\",\n output_file,\n \"--predictions-output-file\",\n predictions_output_file,\n ]\n args = self.parser.parse_args(kebab_args)\n computed_metrics = evaluate_from_args(args)\n\n with open(output_file, \"r\") as file:\n saved_metrics = json.load(file)\n assert computed_metrics == saved_metrics\n\n with open(predictions_output_file, \"r\") as file:\n for line in file:\n prediction = json.loads(line.strip())\n assert \"tags\" in prediction\n\n def test_multiple_output_files_evaluate_from_args(self):\n output_file = str(self.TEST_DIR / \"metrics.json\")\n predictions_output_file = str(self.TEST_DIR / \"predictions.jsonl\")\n kebab_args = [\n \"evaluate\",\n str(\n self.FIXTURES_ROOT / \"simple_tagger_with_span_f1\" / \"serialization\" / \"model.tar.gz\"\n ),\n str(self.FIXTURES_ROOT / \"data\" / \"conll2003.txt\")\n + \",\"\n + str(self.FIXTURES_ROOT / \"data\" / \"conll2003.txt\"),\n \"--cuda-device\",\n \"-1\",\n \"--output-file\",\n output_file + \",\" + output_file,\n \"--predictions-output-file\",\n predictions_output_file + \",\" + predictions_output_file,\n ]\n args = self.parser.parse_args(kebab_args)\n computed_metrics = evaluate_from_args(args)\n\n with open(output_file, \"r\") as file:\n saved_metrics = json.load(file)\n assert computed_metrics == saved_metrics\n\n with open(predictions_output_file, \"r\") as file:\n for line in file:\n prediction = json.loads(line.strip())\n assert \"tags\" in prediction\n\n def test_evaluate_works_with_vocab_expansion(self):\n archive_path = str(\n self.FIXTURES_ROOT / \"basic_classifier\" / \"serialization\" / \"model.tar.gz\"\n )\n # snli2 has a extra token (\"seahorse\") in it.\n evaluate_data_path = str(\n self.FIXTURES_ROOT / \"data\" / \"text_classification_json\" / \"imdb_corpus2.jsonl\"\n )\n embeddings_filename = str(\n self.FIXTURES_ROOT / \"data\" / \"unawarded_embeddings.gz\"\n ) # has only 
unawarded vector\n embedding_sources_mapping = json.dumps(\n {\"_text_field_embedder.token_embedder_tokens\": embeddings_filename}\n )\n kebab_args = [\"evaluate\", archive_path, evaluate_data_path, \"--cuda-device\", \"-1\"]\n\n # TODO(mattg): the unawarded_embeddings.gz file above doesn't exist, but this test still\n # passes. This suggests that vocab extension in evaluate isn't currently doing anything,\n # and so it is broken.\n\n # Evaluate 1 with no vocab expansion,\n # Evaluate 2 with vocab expansion with no pretrained embedding file.\n # Evaluate 3 with vocab expansion with given pretrained embedding file.\n metrics_1 = evaluate_from_args(self.parser.parse_args(kebab_args))\n metrics_2 = evaluate_from_args(self.parser.parse_args(kebab_args + [\"--extend-vocab\"]))\n metrics_3 = evaluate_from_args(\n self.parser.parse_args(\n kebab_args + [\"--embedding-sources-mapping\", embedding_sources_mapping]\n )\n )\n assert metrics_1 != metrics_2\n assert metrics_2 != metrics_3\n" ]
[ [ "torch.ones", "torch.rand" ], [ "torch.ones", "torch.nn.LSTM", "torch.randn", "torch.FloatTensor", "torch.rand" ], [ "torch.nn.Dropout", "torch.nn.Linear", "torch.nn.ModuleList" ], [ "torch.nn.Dropout", "torch.testing.assert_allclose", "torch.zeros", "torch.manual_seed", "torch.randn", "torch.nn.Embedding", "torch.tensor", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.arange", "torch.allclose" ], [ "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Guangrui-best/ML_from_scratch
[ "afbdfb7046544bd5639a2b73fa4fe711b4c86593", "afbdfb7046544bd5639a2b73fa4fe711b4c86593" ]
[ "mlfromscratch/lda.py", "mlfromscratch/lda_tests.py" ]
[ "import numpy as np\n\nclass LDA:\n\n def __init__(self, n_components):\n self.n_components = n_components\n self.linear_discriminants = None\n\n def fit(self, X, y):\n n_features = X.shape[1]\n class_labels = np.unique(y)\n\n # Within class scatter matrix:\n # SW = sum((X_c - mean_X_c)^2 )\n\n # Between class scatter:\n # SB = sum( n_c * (mean_X_c - mean_overall)^2 )\n\n mean_overall = np.mean(X, axis=0)\n SW = np.zeros((n_features, n_features))\n SB = np.zeros((n_features, n_features))\n for c in class_labels:\n X_c = X[y == c]\n mean_c = np.mean(X_c, axis=0)\n # (4, n_c) * (n_c, 4) = (4,4) -> transpose\n SW += (X_c - mean_c).T.dot((X_c - mean_c))\n\n # (4, 1) * (1, 4) = (4,4) -> reshape\n n_c = X_c.shape[0]\n mean_diff = (mean_c - mean_overall).reshape(n_features, 1)\n SB += n_c * (mean_diff).dot(mean_diff.T)\n\n # Determine SW^-1 * SB\n A = np.linalg.inv(SW).dot(SB)\n # Get eigenvalues and eigenvectors of SW^-1 * SB\n eigenvalues, eigenvectors = np.linalg.eig(A)\n # -> eigenvector v = [:,i] column vector, transpose for easier calculations\n # sort eigenvalues high to low\n eigenvectors = eigenvectors.T\n idxs = np.argsort(abs(eigenvalues))[::-1]\n eigenvalues = eigenvalues[idxs]\n eigenvectors = eigenvectors[idxs]\n # store first n eigenvectors\n self.linear_discriminants = eigenvectors[0:self.n_components]\n\n def transform(self, X):\n # project data\n return np.dot(X, self.linear_discriminants.T)\n", "from sklearn import datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom lda import LDA\n\ndata = datasets.load_iris()\nX = data.data\ny = data.target\n\n# Project the data onto the 2 primary linear discriminants\nlda = LDA(2)\nlda.fit(X, y)\nX_projected = lda.transform(X)\n\nprint('Shape of X:', X.shape)\nprint('Shape of transformed X:', X_projected.shape)\n\nx1 = X_projected[:, 0]\nx2 = X_projected[:, 1]\n\nplt.scatter(x1, x2,\n c=y, edgecolor='none', alpha=0.8,\n cmap=plt.cm.get_cmap('viridis', 3))\n\nplt.xlabel('Linear Discriminant 1')\nplt.ylabel('Linear Discriminant 2')\nplt.colorbar()\nplt.show()\n" ]
[ [ "numpy.dot", "numpy.unique", "numpy.linalg.inv", "numpy.linalg.eig", "numpy.mean", "numpy.zeros" ], [ "matplotlib.pyplot.cm.get_cmap", "sklearn.datasets.load_iris", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elcronos/adversarial_genattack
[ "b86b07a938a6aef54a41891fc2de3feeaa8e03aa" ]
[ "main.py" ]
[ "\"\"\"\nAuthor: Moustafa Alzantot ([email protected])\n\n\"\"\"\nimport time\nimport os\nimport sys\nimport random\nimport numpy as np\n\nimport tensorflow as tf \nfrom setup_inception import ImageNet, InceptionModel\n\nimport utils\nfrom genattack_tf2 import GenAttack2\n\nflags = tf.app.flags\nflags.DEFINE_string('input_dir', '', 'Path for input images.')\nflags.DEFINE_string('output_dir', 'output', 'Path to save results.')\nflags.DEFINE_integer('test_size', 1, 'Number of test images.')\nflags.DEFINE_bool('verbose', True, 'Print logs.')\nflags.DEFINE_integer('test_example', default=None, help='Test only one image')\n\nflags.DEFINE_float('mutation_rate', default=0.005, help='Mutation rate')\nflags.DEFINE_float('eps', default=0.10, help='maximum L_inf distance threshold')\nflags.DEFINE_float('alpha', default=0.20, help='Step size')\nflags.DEFINE_integer('pop_size', default=6, help='Population size')\nflags.DEFINE_integer('max_steps', default=10000, help='Maximum number of iterations')\nflags.DEFINE_integer('resize_dim', None, 'Reduced dimension for dimensionality reduction')\nflags.DEFINE_bool('adaptive', True, 'Turns on the dynamic scaling of mutation prameters')\nflags.DEFINE_string('model', 'inception', 'model name')\nflags.DEFINE_integer('target', None, 'target class. if not provided will be random')\nFLAGS = flags.FLAGS\n\nif __name__ == '__main__':\n\n # random.seed(FLAGS.seed)\n # tf.set_random_seed(FLAGS.seed)\n # np.random.seed(FLAGS.seed)\n\n dataset = ImageNet(FLAGS.input_dir)\n inputs, targets, reals, paths = utils.generate_data(dataset, FLAGS.test_size)\n \n with tf.Session() as sess:\n model = InceptionModel(sess, use_log=True)\n test_in = tf.placeholder(tf.float32, (1,299,299,3), 'x')\n test_pred = tf.argmax(model.predict(test_in), axis=1)\n \n \n attack = GenAttack2(model=model,\n pop_size=FLAGS.pop_size,\n mutation_rate = FLAGS.mutation_rate,\n eps=FLAGS.eps,\n max_steps=FLAGS.max_steps,\n alpha=FLAGS.alpha,\n resize_dim=FLAGS.resize_dim,\n adaptive=FLAGS.adaptive)\n num_valid_images = len(inputs)\n total_count = 0 # Total number of images attempted\n success_count = 0\n logger = utils.ResultLogger(FLAGS.output_dir, FLAGS.flag_values_dict())\n for ii in range(num_valid_images):\n if (FLAGS.test_example and FLAGS.test_example != ii):\n continue\n input_img = inputs[ii]\n input_img_path = paths[ii]\n if FLAGS.target:\n target_label = FLAGS.target + 1\n else:\n target_label = np.argmax(targets[ii])\n real_label = reals[ii]\n orig_pred = sess.run(test_pred, feed_dict={test_in: [input_img]})[0]\n if FLAGS.verbose:\n print('Real = {}, Predicted = {}, Target = {}'.format(\n real_label, orig_pred, target_label))\n if orig_pred != real_label:\n if FLAGS.verbose:\n print('\\t Skipping incorrectly classified image.')\n continue\n total_count += 1\n start_time = time.time()\n result = attack.attack(sess, input_img, target_label)\n end_time = time.time()\n attack_time = (end_time-start_time)\n if result is not None:\n adv_img, query_count, margin_log = result\n final_pred = sess.run(test_pred, feed_dict={test_in: [adv_img]})[0]\n if (final_pred == target_label):\n success_count += 1\n print('--- SUCCEEEED ----')\n logger.add_result(ii, input_img, adv_img, real_label,\n target_label, query_count, attack_time, margin_log)\n else:\n print('Attack failed')\n logger.close(num_attempts=total_count)\n print('Number of success = {} / {}.'.format(success_count, total_count))\n" ]
[ [ "numpy.argmax", "tensorflow.placeholder", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
sanowar-raihan/nerf-meta
[ "dbb97431b613acb3dfdc7075344c6e1fd1b6cf51" ]
[ "shapenet_train.py" ]
[ "import argparse\nimport json\nimport copy\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom datasets.shapenet import build_shapenet\nfrom models.nerf import build_nerf\nfrom models.rendering import get_rays_shapenet, sample_points, volume_render\n\n\ndef inner_loop(model, optim, imgs, poses, hwf, bound, num_samples, raybatch_size, inner_steps):\n \"\"\"\n train the inner model for a specified number of iterations\n \"\"\"\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(inner_steps):\n indices = torch.randint(num_rays, size=[raybatch_size])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()\n\n\ndef train_meta(args, meta_model, meta_optim, data_loader, device):\n \"\"\"\n train the meta_model for one epoch using reptile meta learning\n https://arxiv.org/abs/1803.02999\n \"\"\"\n for imgs, poses, hwf, bound in data_loader:\n imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)\n imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()\n\n meta_optim.zero_grad()\n\n inner_model = copy.deepcopy(meta_model)\n inner_optim = torch.optim.SGD(inner_model.parameters(), args.inner_lr)\n\n inner_loop(inner_model, inner_optim, imgs, poses,\n hwf, bound, args.num_samples,\n args.train_batchsize, args.inner_steps)\n \n with torch.no_grad():\n for meta_param, inner_param in zip(meta_model.parameters(), inner_model.parameters()):\n meta_param.grad = meta_param - inner_param\n \n meta_optim.step()\n\n\ndef report_result(model, imgs, poses, hwf, bound, num_samples, raybatch_size):\n \"\"\"\n report view-synthesis result on heldout views\n \"\"\"\n ray_origins, ray_directions = get_rays_shapenet(hwf, poses)\n\n view_psnrs = []\n for img, rays_o, rays_d in zip(imgs, ray_origins, ray_directions):\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\n num_samples, perturb=False)\n \n synth = []\n num_rays = rays_d.shape[0]\n with torch.no_grad():\n for i in range(0, num_rays, raybatch_size):\n rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])\n color_batch = volume_render(rgbs_batch, sigmas_batch, \n t_vals[i:i+raybatch_size],\n white_bkgd=True)\n synth.append(color_batch)\n synth = torch.cat(synth, dim=0).reshape_as(img)\n error = F.mse_loss(img, synth)\n psnr = -10*torch.log10(error)\n view_psnrs.append(psnr)\n \n scene_psnr = torch.stack(view_psnrs).mean()\n return scene_psnr\n\n\ndef val_meta(args, model, val_loader, device):\n \"\"\"\n validate the meta trained model for few-shot view synthesis\n \"\"\"\n meta_trained_state = model.state_dict()\n val_model = copy.deepcopy(model)\n \n val_psnrs = []\n for imgs, poses, hwf, bound in val_loader:\n imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)\n imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()\n\n tto_imgs, test_imgs = torch.split(imgs, [args.tto_views, args.test_views], dim=0)\n tto_poses, test_poses 
= torch.split(poses, [args.tto_views, args.test_views], dim=0)\n\n val_model.load_state_dict(meta_trained_state)\n val_optim = torch.optim.SGD(val_model.parameters(), args.tto_lr)\n\n inner_loop(val_model, val_optim, tto_imgs, tto_poses, hwf,\n bound, args.num_samples, args.tto_batchsize, args.tto_steps)\n \n scene_psnr = report_result(val_model, test_imgs, test_poses, hwf, bound, \n args.num_samples, args.test_batchsize)\n val_psnrs.append(scene_psnr)\n\n val_psnr = torch.stack(val_psnrs).mean()\n return val_psnr\n\n\ndef main():\n parser = argparse.ArgumentParser(description='shapenet few-shot view synthesis')\n parser.add_argument('--config', type=str, required=True,\n help='config file for the shape class (cars, chairs or lamps)')\n args = parser.parse_args()\n\n with open(args.config) as config:\n info = json.load(config)\n for key, value in info.items():\n args.__dict__[key] = value\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n train_set = build_shapenet(image_set=\"train\", dataset_root=args.dataset_root,\n splits_path=args.splits_path, num_views=args.train_views)\n train_loader = DataLoader(train_set, batch_size=1, shuffle=True)\n\n val_set = build_shapenet(image_set=\"val\", dataset_root=args.dataset_root,\n splits_path=args.splits_path,\n num_views=args.tto_views+args.test_views)\n val_loader = DataLoader(val_set, batch_size=1, shuffle=False)\n\n meta_model = build_nerf(args)\n meta_model.to(device)\n\n meta_optim = torch.optim.Adam(meta_model.parameters(), lr=args.meta_lr)\n\n for epoch in range(1, args.meta_epochs+1):\n train_meta(args, meta_model, meta_optim, train_loader, device)\n val_psnr = val_meta(args, meta_model, val_loader, device)\n print(f\"Epoch: {epoch}, val psnr: {val_psnr:0.3f}\")\n\n torch.save({\n 'epoch': epoch,\n 'meta_model_state_dict': meta_model.state_dict(),\n 'meta_optim_state_dict': meta_optim.state_dict(),\n }, f'meta_epoch{epoch}.pth')\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.randint", "torch.cat", "torch.utils.data.DataLoader", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.cuda.is_available", "torch.split", "torch.stack", "torch.log10" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cxxixi/Online-opinions-on-weibo
[ "ae4586b8b42d166c9a2386319891a04d390585fb" ]
[ "plot/event1/comment_concat.py" ]
[ "import pandas as pd\n\ncsv1 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\500-593.csv',header=None)\ncsv2 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment594.csv',header=None)\ncsv3 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment855.csv',header=None)\ncsv4 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment993.csv',header=None)\ncsv5 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1123.csv',header=None)\ncsv6 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1137.csv',header=None)\ncsv7 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1.csv',header=None)\ncsv8 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment350.csv',header=None)\ncsv9 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\addition.csv',header=None)\ncsv10 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\addition_1.csv',header=None)\n\n\n\ncomment_data = pd.concat([csv1,csv2,csv3,csv4,csv5,csv6,csv7,csv8,csv9,csv10],axis=0)\ncomment_data.head()\ncomment_data.shape\ncomment_data.columns = ['tweetid', 'comment_id', 'created_at', 'text', 'like_counts', 'reply_id', 'reply_text', 'user_id',\\\n 'profile_url', 'screen_name', 'verified', 'verified_type']\n\ncomment_data = comment_data.drop_duplicates()\ncomment_data.groupby(by=['tweetid']).size().sort_values(ascending=False)\n\n\ncomment_data.to_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment_data.csv')\n\n\n\n\n\n\n\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
donalrinho/zfit
[ "9769ef7d56a6be9a5d438e47b80ea5a8f772bc24", "9769ef7d56a6be9a5d438e47b80ea5a8f772bc24" ]
[ "examples/custom_pdf_advanced.py", "tests/test_fitresult.py" ]
[ "# Copyright (c) 2021 zfit\n\nimport tensorflow as tf\n\nimport zfit\nfrom zfit import z\n\n\nclass CustomPDF2D(zfit.pdf.BasePDF):\n \"\"\"My custom, 2 dimensional pdf. The axes are: Energy, Momentum.\n\n \"\"\"\n\n def __init__(self, param1, param2, param3, obs, name=\"CustomPDF\", ):\n # we can now do complicated stuff here if needed\n # only thing: we have to specify explicitly here what is which parameter\n params = {'super_param': param1, # we can change/compose etc parameters\n 'param2': param2, 'param3': param3}\n super().__init__(obs, params, name=name)\n\n def _unnormalized_pdf(self, x):\n energy, momentum = x.unstack_x()\n param1 = self.params['super_param']\n param2 = self.params['param2']\n param3 = self.params['param3']\n\n # just a fantasy function\n probs = param1 * tf.cos(energy ** 2) + tf.math.log(param2 * momentum ** 2) + param3\n return probs\n\n\n# add an analytic integral\n\n# define the integral function\ndef integral_full(limits, norm_range, params, model):\n lower, upper = limits.rect_limits # for a more detailed guide, see the space.py example\n param1 = params['super_param']\n param2 = params['param2']\n param3 = params['param3']\n\n lower = z.convert_to_tensor(lower)\n upper = z.convert_to_tensor(upper)\n\n # calculate the integral here, dummy integral, wrong!\n integral = param1 * param2 * param3 + z.reduce_sum([lower, upper])\n return integral\n\n\n# define the space over which it is defined. Here, we use the axes\nlower_full = (-10, zfit.Space.ANY_LOWER)\nupper_full = (10, zfit.Space.ANY_UPPER)\nintegral_full_limits = zfit.Space(axes=(0, 1),\n limits=(lower_full, upper_full))\n\nCustomPDF2D.register_analytic_integral(func=integral_full,\n limits=integral_full_limits)\n\n\n# define the partial integral function\ndef integral_axis1(x, limits, norm_range, params, model):\n data_0 = x.unstack_x() # data from axis 0\n\n param1 = params['super_param']\n param2 = params['param2']\n param3 = params['param3']\n\n lower, upper = limits.limit1d # for a more detailed guide, see the space.py example\n lower = z.convert_to_tensor(lower) # the limits are now 1-D, for axis 1\n upper = z.convert_to_tensor(upper)\n\n # calculate the integral here, dummy integral\n integral = data_0 ** 2 * param1 * param2 * param3 + z.reduce_sum([lower, upper])\n # notice that the returned shape will be in the same as data_0, e.g. the number of events given in x\n return integral\n\n\n# define the space over which it is defined. Here, we use the axes\nlower_axis1 = ((zfit.Space.ANY_LOWER,),)\nupper_axis1 = ((zfit.Space.ANY_UPPER,),)\nintegral_axis1_limits = zfit.Space(axes=(1,), # axes one corresponds to the second obs, here obs2\n limits=(lower_axis1, upper_axis1))\n\nCustomPDF2D.register_analytic_integral(func=integral_axis1,\n limits=integral_axis1_limits)\n\nif __name__ == '__main__':\n import numpy as np\n\n obs = zfit.Space('obs1', (-10, 10)) * zfit.Space('obs2', (-3, 5))\n pdf = CustomPDF2D(1, 2, 3, obs=obs)\n sample = pdf.sample(n=1000)\n pdf.pdf([[2., 2.5], [5.4, 3.2]])\n x_part = zfit.Data.from_numpy(array=np.array([2.1, 2.2, 3.2]), obs='obs1')\n\n # integrate over obs2 with limits 1, 2 for the `x_part`. This will use the analytic integral above\n pdf.partial_integrate(x=x_part, limits=zfit.Space('obs2', (1, 2)))\n # we can explicitly call the analytic integral. Without registering it (e.g. 
comment the line with the `register`\n # and run again), it will raise an error\n pdf.partial_analytic_integrate(x=x_part, limits=zfit.Space('obs2', (1, 2)))\n", "# Copyright (c) 2021 zfit\nimport numpy as np\nimport pytest\n\nimport zfit\nfrom zfit import z\nfrom zfit.minimizers.fitresult import FitResult\nfrom zfit.minimizers.errors import compute_errors\n# noinspection PyUnresolvedReferences\nfrom zfit.core.testing import setup_function, teardown_function, tester\n\ntrue_a = 1.\ntrue_b = 4.\ntrue_c = -0.3\n\n\ndef create_loss(n=15000, weights=None):\n avalue = 1.5\n a_param = zfit.Parameter(\"variable_a15151\", avalue, -1., 20.,\n step_size=z.constant(0.1))\n a_param.init_val = avalue\n bvalue = 3.5\n b_param = zfit.Parameter(\"variable_b15151\", bvalue, 0, 20)\n b_param.init_val = bvalue\n cvalue = -0.04\n c_param = zfit.Parameter(\"variable_c15151\", cvalue, -1, 0.)\n c_param.init_val = cvalue\n obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))\n\n # load params for sampling\n a_param.set_value(true_a)\n b_param.set_value(true_b)\n c_param.set_value(true_c)\n\n gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)\n exp1 = zfit.pdf.Exponential(lam=c_param, obs=obs1)\n\n sum_pdf1 = zfit.pdf.SumPDF((gauss1, exp1), 0.7)\n\n sampled_data = sum_pdf1.create_sampler(n=n)\n sampled_data.resample()\n\n if weights is not None:\n sampled_data.set_weights(weights)\n\n loss = zfit.loss.UnbinnedNLL(model=sum_pdf1, data=sampled_data)\n\n return loss, (a_param, b_param, c_param)\n\n\ndef create_fitresult(minimizer_class_and_kwargs, n=15000, weights=None):\n loss, (a_param, b_param, c_param) = create_loss(n=n, weights=weights)\n\n true_minimum = loss.value().numpy()\n\n for param in [a_param, b_param, c_param]:\n param.assign(param.init_val) # reset the value\n\n minimizer_class, minimizer_kwargs, test_error = minimizer_class_and_kwargs\n minimizer = minimizer_class(**minimizer_kwargs)\n\n result = minimizer.minimize(loss=loss)\n cur_val = loss.value().numpy()\n aval, bval, cval = [v.numpy() for v in (a_param, b_param, c_param)]\n\n ret = {'result': result, 'true_min': true_minimum, 'cur_val': cur_val, 'a': aval, 'b': bval, 'c': cval,\n 'a_param': a_param, 'b_param': b_param, 'c_param': c_param}\n\n return ret\n\n\ndef test_set_values():\n fitresult = create_fitresult((zfit.minimize.Minuit, {}, True))\n result = fitresult['result']\n param_a = fitresult['a_param']\n param_b = fitresult['b_param']\n param_c = fitresult['c_param']\n\n val_a = fitresult['a']\n val_b = fitresult['b']\n val_c = fitresult['c']\n param_b.set_value(999)\n param_c.set_value(9999)\n zfit.param.set_values([param_c, param_b], values=result)\n\n assert param_a.value() == val_a\n assert param_b.value() == val_b\n assert param_c.value() == val_c\n\n param_d = zfit.Parameter(\"param_d\", 12)\n with pytest.raises(ValueError):\n zfit.param.set_values([param_d], result)\n\n\nminimizers = [\n # (zfit.minimize.WrapOptimizer, dict(optimizer=tf.train.AdamOptimizer(learning_rate=0.5)), False),\n # (zfit.minimize.Adam, dict(learning_rate=0.5), False),\n (zfit.minimize.Minuit, {}, True),\n # (zfit.minimize.Scipy, {}, False),\n]\n\n\[email protected](\"minimizer_class_and_kwargs\", minimizers)\ndef test_fmin(minimizer_class_and_kwargs):\n results = create_fitresult(minimizer_class_and_kwargs=minimizer_class_and_kwargs)\n result = results['result']\n assert pytest.approx(results['cur_val']) == result.fmin\n\n\[email protected](\"minimizer_class_and_kwargs\", minimizers)\ndef test_params_at_limit(minimizer_class_and_kwargs):\n loss, 
(param_a, param_b, param_c) = create_loss(n=5000)\n old_lower = param_a.lower\n param_a.lower = param_a.upper\n param_a.upper += 5\n minimizer = zfit.minimize.Minuit(use_minuit_grad=True, tolerance=0.1)\n result = minimizer.minimize(loss)\n assert param_a.at_limit\n assert result.params_at_limit\n param_a.lower = old_lower\n assert not param_a.at_limit\n assert result.params_at_limit\n assert not result.valid\n\n\[email protected](reruns=3)\[email protected](\"minimizer_class_and_kwargs\", minimizers)\[email protected](\"use_weights\", [False, True])\ndef test_covariance(minimizer_class_and_kwargs, use_weights):\n n = 15000\n if use_weights:\n weights = np.random.normal(1, 0.001, n)\n else:\n weights = None\n\n results = create_fitresult(minimizer_class_and_kwargs=minimizer_class_and_kwargs,\n n=n,\n weights=weights)\n result = results['result']\n hesse = result.hesse()\n a = results['a_param']\n b = results['b_param']\n c = results['c_param']\n\n with pytest.raises(KeyError):\n result.covariance(params=[a, b, c], method=\"hesse\")\n\n cov_mat_3 = result.covariance(params=[a, b, c])\n cov_mat_2 = result.covariance(params=[c, b])\n cov_dict = result.covariance(params=[a, b, c], as_dict=True)\n\n assert pytest.approx(hesse[a]['error'], rel=0.01) == np.sqrt(cov_dict[(a, a)])\n assert pytest.approx(hesse[a]['error'], rel=0.01) == np.sqrt(cov_mat_3[0, 0])\n\n assert pytest.approx(hesse[b]['error'], rel=0.01) == np.sqrt(cov_dict[(b, b)])\n assert pytest.approx(hesse[b]['error'], rel=0.01) == np.sqrt(cov_mat_3[1, 1])\n assert pytest.approx(hesse[b]['error'], rel=0.01) == np.sqrt(cov_mat_2[1, 1])\n\n assert pytest.approx(hesse[c]['error'], rel=0.01) == np.sqrt(cov_dict[(c, c)])\n assert pytest.approx(hesse[c]['error'], rel=0.01) == np.sqrt(cov_mat_3[2, 2])\n assert pytest.approx(hesse[c]['error'], rel=0.01) == np.sqrt(cov_mat_2[0, 0])\n\n if use_weights:\n rtol, atol = 0.1, 0.01\n else:\n rtol, atol = 0.05, 0.001\n\n cov_mat_3_np = result.covariance(params=[a, b, c], method=\"hesse_np\")\n np.testing.assert_allclose(cov_mat_3, cov_mat_3_np, rtol=rtol, atol=atol)\n\n\[email protected](reruns=3)\[email protected](\"minimizer_class_and_kwargs\", minimizers)\ndef test_correlation(minimizer_class_and_kwargs):\n results = create_fitresult(minimizer_class_and_kwargs=minimizer_class_and_kwargs)\n result = results['result']\n hesse = result.hesse()\n a = results['a_param']\n b = results['b_param']\n c = results['c_param']\n\n cor_mat = result.correlation(params=[a, b, c])\n cov_mat = result.covariance(params=[a, b, c])\n cor_dict = result.correlation(params=[a, b], as_dict=True)\n\n np.testing.assert_allclose(np.diag(cor_mat), 1.0)\n\n a_error = hesse[a]['error']\n b_error = hesse[b]['error']\n assert pytest.approx(cor_mat[0, 1], rel=0.01) == cov_mat[0, 1]/(a_error * b_error)\n assert pytest.approx(cor_dict[(a, b)], rel=0.01) == cov_mat[0, 1]/(a_error * b_error)\n\[email protected] # currently stuck in an endless loop?\[email protected](\"minimizer_class_and_kwargs\", minimizers)\[email protected](\"cl\", [None, 0.95]) # TODO: currently only None supported, 1 sigma\n# @pytest.mark.parametrize(\"sigma\", [1])\ndef test_errors(minimizer_class_and_kwargs, cl):\n n_max_trials = 5 # how often to try to find a new minimum\n results = create_fitresult(minimizer_class_and_kwargs=minimizer_class_and_kwargs)\n result = results['result']\n a = results['a_param']\n b = results['b_param']\n c = results['c_param']\n\n for n_trial in range(n_max_trials):\n z_errors, new_result = result.errors(method=\"zfit_error\", 
cl=cl)\n minos_errors, _ = result.errors(method=\"minuit_minos\", cl=cl)\n if new_result is None:\n break\n else:\n result = new_result\n else: # no break occured\n assert False, \"Always a new minimum was found, cannot perform test.\"\n print(result)\n\n # @marinang this test seems to fail when a new minimum is found\n for param in [a, b, c]:\n z_error_param = z_errors[param]\n minos_errors_param = minos_errors[param]\n for dir in [\"lower\", \"upper\"]:\n assert pytest.approx(z_error_param[dir], rel=0.03) == getattr(minos_errors_param, dir)\n\n with pytest.raises(KeyError):\n result.errors(method=\"error\")\n\n\n# @pytest.mark.skip # currently, fmin is not correct, loops, see: https://github.com/scikit-hep/iminuit/issues/395\[email protected](reruns=3)\[email protected](\"minimizer_class_and_kwargs\", minimizers)\ndef test_new_minimum(minimizer_class_and_kwargs):\n loss, params = create_loss(10000)\n\n minimizer_class, minimizer_kwargs, test_error = minimizer_class_and_kwargs\n minimizer = minimizer_class(**minimizer_kwargs)\n\n a_param, b_param, c_param = params\n\n if test_error:\n\n b_param.floating = False\n b_param.set_value(3.7)\n c_param.floating = False\n result = minimizer.minimize(loss=loss)\n b_param.floating = True\n c_param.floating = True\n\n params_dict = {p: p.numpy() for p in params}\n hacked_result = FitResult(params=params_dict, edm=result.edm, fmin=result.fmin, info=result.info,\n loss=loss, status=result.status, converged=result.converged,\n minimizer=minimizer.copy())\n\n method = lambda **kwgs: compute_errors(covariance_method=\"hesse_np\", **kwgs)\n\n errors, new_result = hacked_result.errors(params=params, method=method, error_name=\"interval\")\n\n assert new_result is not None\n\n assert hacked_result.valid is False\n for p in params:\n assert errors[p] == \"Invalid, a new minimum was found.\"\n\n assert new_result.valid is True\n errors, _ = new_result.errors()\n for param in params:\n assert errors[param]['lower'] < 0\n assert errors[param]['upper'] > 0\n" ]
[ [ "numpy.array", "tensorflow.cos", "tensorflow.math.log" ], [ "numpy.diag", "numpy.random.normal", "numpy.sqrt", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jiaojiao1234/RISE
[ "fd85aa6e475534a74faab5c4644c63dc0c01d236", "fd85aa6e475534a74faab5c4644c63dc0c01d236", "fd85aa6e475534a74faab5c4644c63dc0c01d236" ]
[ "Jupyter/UDO_Free/RP/test_start.py", "Jupyter/WiAG_O_test/L5/test_start.py", "Jupyter/WiG_test/S2/test_start.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 8 22:09:47 2021\n\n@author: Apple\n\"\"\"\ndef start():\n import numpy as np\n import scipy.io as sio\n import sklearn.ensemble\n from sklearn import svm\n from sklearn.model_selection import StratifiedKFold\n from sklearn.metrics import confusion_matrix\n from sklearn import preprocessing\n import joblib\n from sklearn import neighbors\n from sklearn.model_selection import StratifiedShuffleSplit\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n from sklearn.tree import DecisionTreeClassifier\n import random\n from sklearn.linear_model import LogisticRegression \n from sklearn.ensemble import GradientBoostingClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.naive_bayes import GaussianNB\n from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n from sklearn.ensemble import VotingClassifier\n from nonconformist.nc import MarginErrFunc\n import warnings\n warnings.filterwarnings(\"ignore\", message=\"Numerical issues were encountered \")\n import sys\n sys.path.insert(0,'/root/RISE-Version2/')\n from Statistical_vector.statistical_vector import train_statistical_vector, test_statistical_vector_param, non_condition_p\n \n \n min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))\n myclassifier = [svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0),\n sklearn.ensemble.RandomForestClassifier(n_estimators=100,random_state=0),\n DecisionTreeClassifier(random_state=0),neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform',\n algorithm='auto', leaf_size=30,\n p=2, metric='minkowski',\n metric_params=None, n_jobs=1),\n LogisticRegression(random_state=0),GradientBoostingClassifier(n_estimators=100,random_state=0),\n LinearDiscriminantAnalysis(), AdaBoostClassifier(),\n GaussianNB(),QuadraticDiscriminantAnalysis()] \n \n \n times = ['1'] ##test set\n train_name = ['2','3','4'] ##train set\n filepath = r'/root/RISE-Version2/Jupyter/UDO_Free/RP/data/' \n filename = ['zjq_']\n class_index = 3\n class_num = 5 \n \n \n ##load test data\n #print('\\n---------------test data is ' + times[0] + ' scenario-------------\\n')\n data = sio.loadmat(filepath + filename[0] + times[0] + '.mat')\n xx2 = data['alldata']\n yy2 = data['alllabel']\n yy2 = yy2.flatten()\n test_x = xx2\n test_y = yy2\n \n ##load train data\n #print('\\n-------training data is ' + str(train_name) + ' scenario----------\\n')\n xx1 = np.empty(shape=[0, xx2.shape[1]])\n yy1 = np.empty(shape=[1, 0],dtype=int) \n yy1 = yy1.flatten() \n for ii in train_name:\n data = sio.loadmat(filepath + filename[0] + ii+ '.mat')\n x1 = data['alldata']\n y1 = data['alllabel']\n y1 = y1.flatten()\n x1 = min_max_scaler.fit_transform(x1)\n xx1 = np.append(xx1, x1, axis=0)\n yy1 = np.append(yy1, y1, axis=0)\n yy1 = yy1.flatten()\n \n index = [t for t in range(xx1.shape[0])] \n random.shuffle(index)\n x_train11 = xx1[index]\n x_train1 = x_train11\n y_train1 = yy1[index]\n #y_train1 = y_train1 - 1 \n \n \n ############################ Without RISE ###############################\n print('\\n-------- The performance of the underlying model without RISE --------\\n')\n x_test1 = min_max_scaler.fit_transform(test_x)\n y_test1 = test_y\n #y_test1 = y_test1 - 1\n clf_dif = myclassifier[class_index]\n clf_dif.fit(x_train1,y_train1)\n acc_dif = clf_dif.score(x_test1,y_test1)\n print('The accuracy without RISE: ',acc_dif)\n y_true_dif, y_pred_dif = y_test1,clf_dif.predict(x_test1)\n 
test_confusion_matrix = confusion_matrix(y_true_dif, y_pred_dif)\n print('Confusion matrix without RISE: \\n',test_confusion_matrix)\n \n return x_train1, y_train1, x_test1, y_test1, myclassifier, y_true_dif, y_pred_dif,class_num,class_index", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 8 22:09:47 2021\n\n@author: Apple\n\"\"\"\ndef start():\n import numpy as np\n import scipy.io as sio\n import sklearn.ensemble\n from sklearn import svm\n from sklearn.model_selection import StratifiedKFold\n from sklearn.metrics import confusion_matrix\n from sklearn import preprocessing\n import joblib\n from sklearn import neighbors\n from sklearn.model_selection import StratifiedShuffleSplit\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n from sklearn.tree import DecisionTreeClassifier\n import random\n from sklearn.linear_model import LogisticRegression \n from sklearn.ensemble import GradientBoostingClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.naive_bayes import GaussianNB\n from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n from sklearn.ensemble import VotingClassifier\n from nonconformist.nc import MarginErrFunc\n import warnings\n warnings.filterwarnings(\"ignore\", message=\"Numerical issues were encountered \")\n import sys\n sys.path.insert(0,'/root/RISE-Version2/')\n from Statistical_vector.statistical_vector import train_statistical_vector, test_statistical_vector_param, non_condition_p\n \n \n min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))\n myclassifier = [svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0),\n sklearn.ensemble.RandomForestClassifier(n_estimators=100,random_state=0),\n DecisionTreeClassifier(random_state=0),neighbors.KNeighborsClassifier(n_neighbors=10),\n LogisticRegression(random_state=0),GradientBoostingClassifier(n_estimators=100,random_state=0),\n LinearDiscriminantAnalysis(), AdaBoostClassifier(),\n GaussianNB(),QuadraticDiscriminantAnalysis()] \n \n \n times = ['p5'] ##test set\n train_name = ['p1','p2','p3','p4'] ##train set\n filepath = r'/root/RISE-Version2/Jupyter/WiAG_O_test/L5/data/' \n filename = ['mobisys9_dwt9_label']\n class_index = 3\n class_num = 6\n \n \n ##load test data\n #print('\\n---------------test data is ' + times[0] + ' scenario-------------\\n')\n data = sio.loadmat(filepath + filename[0] + times[0] + '.mat')\n xx2 = data['fe_dwt_mobisys9']\n yy2 = data['label']\n yy2 = yy2.flatten()\n test_x = xx2\n test_y = yy2\n \n ##load train data\n #print('\\n-------training data is ' + str(train_name) + ' scenario----------\\n')\n xx1 = np.empty(shape=[0, xx2.shape[1]])\n yy1 = np.empty(shape=[1, 0],dtype=int) \n for ii in train_name:\n data = sio.loadmat(filepath + filename[0] + ii+ '.mat')\n x1 = data['fe_dwt_mobisys9']\n y1 = data['label']\n x1 = min_max_scaler.fit_transform(x1)\n xx1 = np.append(xx1, x1, axis=0)\n yy1 = np.append(yy1, y1, axis=1)\n yy1 = yy1.flatten()\n \n index = [t for t in range(xx1.shape[0])] \n random.shuffle(index)\n x_train11 = xx1[index]\n x_train1 = x_train11\n y_train1 = yy1[index]\n y_train1 = y_train1 - 1 \n \n \n ############################ Without RISE ###############################\n print('\\n-------- The performance of the underlying model without RISE --------\\n')\n x_test1 = min_max_scaler.fit_transform(test_x)\n y_test1 = test_y\n y_test1 = y_test1 - 1\n clf_dif = myclassifier[class_index]\n clf_dif.fit(x_train1,y_train1)\n acc_dif = 
clf_dif.score(x_test1,y_test1)\n print('The accuracy without RISE: ',acc_dif)\n y_true_dif, y_pred_dif = y_test1,clf_dif.predict(x_test1)\n test_confusion_matrix = confusion_matrix(y_true_dif, y_pred_dif)\n print('Confusion matrix without RISE: \\n',test_confusion_matrix)\n \n return x_train1, y_train1, x_test1, y_test1, myclassifier, y_true_dif, y_pred_dif,class_num,class_index", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 8 22:09:47 2021\n\n@author: Apple\n\"\"\"\ndef start():\n import numpy as np\n import scipy.io as sio\n import sklearn.ensemble\n from sklearn import svm\n from sklearn.model_selection import StratifiedKFold\n from sklearn.metrics import confusion_matrix\n from sklearn import preprocessing\n import joblib\n from sklearn import neighbors\n from sklearn.model_selection import StratifiedShuffleSplit\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n from sklearn.tree import DecisionTreeClassifier\n import random\n from sklearn.linear_model import LogisticRegression \n from sklearn.ensemble import GradientBoostingClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.naive_bayes import GaussianNB\n from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n from sklearn.ensemble import VotingClassifier\n from nonconformist.nc import MarginErrFunc\n import warnings\n warnings.filterwarnings(\"ignore\", message=\"Numerical issues were encountered \")\n import sys\n sys.path.insert(0,'/root/RISE-Version2/')\n from Statistical_vector.statistical_vector import train_statistical_vector, test_statistical_vector_param, non_condition_p\n \n \n min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))\n myclassifier = [svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0),\n sklearn.ensemble.RandomForestClassifier(n_estimators=100,random_state=0),\n DecisionTreeClassifier(random_state=0),neighbors.KNeighborsClassifier(n_neighbors=10),\n LogisticRegression(random_state=0),GradientBoostingClassifier(n_estimators=100,random_state=0),\n LinearDiscriminantAnalysis(), AdaBoostClassifier(),\n GaussianNB(),QuadraticDiscriminantAnalysis()] \n \n \n times = ['p2_'] ##test set\n train_name = ['p1_','p3_','m1_','m2_'] ##train set\n filepath = r'/root/RISE-Version2/Jupyter/WiG_test/S2/data/' \n #filepath = r'./data/' \n filename = ['wig9_label_startend30']\n class_index = 0\n class_num = 6 \n \n \n ##load test data\n #print('\\n---------------test data is ' + times[0] + ' scenario-------------\\n')\n data = sio.loadmat(filepath + filename[0] + times[0] + '.mat')\n xx2 = data['fe_wig']\n yy2 = data['label']\n yy2 = yy2.flatten()\n test_x = xx2\n test_y = yy2\n \n ##load train data\n #print('\\n-------training data is ' + str(train_name) + ' scenario----------\\n')\n xx1 = np.empty(shape=[0, xx2.shape[1]])\n yy1 = np.empty(shape=[1, 0],dtype=int) \n for ii in train_name:\n data = sio.loadmat(filepath + filename[0] + ii+ '.mat')\n x1 = data['fe_wig']\n y1 = data['label']\n x1 = min_max_scaler.fit_transform(x1)\n xx1 = np.append(xx1, x1, axis=0)\n yy1 = np.append(yy1, y1, axis=1)\n yy1 = yy1.flatten()\n \n index = [t for t in range(xx1.shape[0])] \n random.shuffle(index)\n x_train11 = xx1[index]\n x_train1 = x_train11\n y_train1 = yy1[index]\n y_train1 = y_train1 - 1 \n \n \n ############################ Without RISE ###############################\n print('\\n-------- The performance of the underlying model without RISE --------\\n')\n x_test1 = 
min_max_scaler.fit_transform(test_x)\n y_test1 = test_y\n y_test1 = y_test1 - 1\n clf_dif = myclassifier[class_index]\n clf_dif.fit(x_train1,y_train1)\n acc_dif = clf_dif.score(x_test1,y_test1)\n print('The accuracy without RISE: ',acc_dif)\n y_true_dif, y_pred_dif = y_test1,clf_dif.predict(x_test1)\n test_confusion_matrix = confusion_matrix(y_true_dif, y_pred_dif)\n print('Confusion matrix without RISE: \\n',test_confusion_matrix)\n \n return x_train1, y_train1, x_test1, y_test1, myclassifier, y_true_dif, y_pred_dif,class_num,class_index" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.linear_model.LogisticRegression", "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis", "scipy.io.loadmat", "sklearn.metrics.confusion_matrix", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "sklearn.neighbors.KNeighborsClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "numpy.append", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "numpy.empty" ], [ "sklearn.naive_bayes.GaussianNB", "sklearn.linear_model.LogisticRegression", "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis", "scipy.io.loadmat", "sklearn.metrics.confusion_matrix", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "sklearn.neighbors.KNeighborsClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "numpy.append", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "numpy.empty" ], [ "sklearn.naive_bayes.GaussianNB", "sklearn.linear_model.LogisticRegression", "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis", "scipy.io.loadmat", "sklearn.metrics.confusion_matrix", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "sklearn.neighbors.KNeighborsClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "numpy.append", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
DREAMS-lab/mask_rcnn_pytorch
[ "1fcc8489758aa4673c29a32a2413f7f86742414f" ]
[ "rock_c3.py" ]
[ "\"\"\"\nrock.py\nZhiang Chen, Feb 2020\ndata class for mask rcnn\n\"\"\"\n\nimport os\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport pickle\nimport matplotlib.pyplot as plt\n\n\"\"\"\n./datasets/\n Rock/\n data/\n 0_8.npy\n 0_9.npy\n 1_4.npy\n ...\n\"\"\"\n\nclass Dataset(object):\n def __init__(self, data_path, transforms=None, input_channel=6, include_name=True):\n self.data_path = data_path\n self.transforms = transforms\n self.data_files = [f for f in os.listdir(data_path) if f.endswith(\".npy\")]\n self.input_channel = input_channel\n self.include_name = include_name\n\n def __getitem__(self, idx):\n data_path = os.path.join(self.data_path, self.data_files[idx])\n\n data = np.load(data_path)\n\n if self.input_channel == 6:\n image = data[:, :, :self.input_channel]\n elif self.input_channel == 3:\n image = data[:, :, :3]\n elif self.input_channel == 4:\n rgb = data[:, :, :3]\n dem = data[:, :, 3:]\n d = dem[:,:,0]*0.33 + dem[:,:,1]*0.33 + dem[:,:,2]*0.33\n image = np.append(rgb, np.expand_dims(d, axis=2), axis=2)\n\n if data.shape[2] == 6:\n masks = np.ones_like(image[:, :, :3]) * 255\n else:\n masks = data[:, :, 6:]\n num_objs = masks.shape[2]\n \"\"\"\n for i in reversed(range(num_objs)):\n mask = masks[:, :, i]\n if mask.max() < 250:\n masks = np.delete(masks, i, axis=2)\n num_objs = masks.shape[2]\n \"\"\"\n # 0 encoding non-damaged is supposed to be 1 for training.\n # In training, 0 is of background\n obj_ids = np.ones(num_objs)\n\n masks = masks >= 250 # convert to binary masks\n\n boxes = []\n\n for i in range(num_objs):\n pos = np.where(masks[:, :, i])\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n boxes.append([xmin, ymin, xmax, ymax])\n\n # convert everything into a torch.Tensor\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n # labels = torch.ones((num_objs,), dtype=torch.int64)\n labels = torch.as_tensor(obj_ids, dtype=torch.int64)\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n masks = masks.permute((2, 0, 1))\n\n image_id = torch.tensor([idx])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n if self.include_name:\n target[\"image_name\"] = data_path\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self):\n return len(self.data_files)\n\n def show(self, idx):\n image, target = self.__getitem__(idx)\n rgb = image[:, :, :3].astype(np.uint8)\n rgb = Image.fromarray(rgb)\n rgb.show()\n masks = target[\"masks\"]\n masks = masks.permute((1, 2, 0))\n masks = masks.numpy()\n masks = masks.max(axis=2) * 255\n masks = Image.fromarray(masks)\n masks.show()\n\n def imageStat(self):\n images = np.empty((0, 6), float)\n for data_file in self.data_files:\n if len(data_file.split('_'))==2:\n data_path = os.path.join(self.data_path, data_file)\n data = np.load(data_path)\n print(data.shape)\n image = data[:, :, :6].astype(float).reshape(-1, 6)/255.0\n images = np.append(images, image, axis=0)\n return np.mean(images, axis=0).tolist(), np.std(images, axis=0).tolist(), \\\n np.max(images, axis=0).tolist(), np.min(images, axis=0).tolist()\n\n\n def imageStat2(self):\n images = np.empty((0, 3), float)\n import random\n 
random.shuffle(self.data_files)\n for data_file in self.data_files[:40]:\n if True:\n data_path = os.path.join(self.data_path, data_file)\n data = np.load(data_path)\n image = data[:, :, :3].astype(float).reshape(-1, 3)/255.0\n images = np.append(images, image, axis=0)\n return np.mean(images, axis=0).tolist(), np.std(images, axis=0).tolist(), \\\n np.max(images, axis=0).tolist(), np.min(images, axis=0).tolist()\n\n\nif __name__ == \"__main__\":\n #ds = Dataset(\"./datasets/Rock/data/\")\n ds = Dataset(\"./datasets/hypolith_sample_set_throop/npy\",input_channel=3)\n # image_mean, image_std, image_max, image_min = ds.imageStat()\n\n\n id = 29\n image, target = ds[id]\n print(target['image_name'])\n ds.show(id)\n\n id = 28\n image, target = ds[id]\n print(target['image_name'])\n ds.show(id)\n print(ds.imageStat2())\n" ]
[ [ "numpy.expand_dims", "numpy.ones_like", "torch.zeros", "numpy.min", "torch.tensor", "numpy.ones", "numpy.max", "numpy.append", "numpy.std", "numpy.mean", "numpy.load", "numpy.where", "numpy.empty", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shantanusharma/keras
[ "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938", "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938", "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938", "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938", "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938", "662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938" ]
[ "keras/engine/sequential.py", "keras/saving/saved_model/utils.py", "keras/distribute/saved_model_mixed_api_test.py", "keras/layers/multi_head_attention.py", "keras/utils/tf_utils_test.py", "keras/layers/preprocessing/normalization.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Home of the `Sequential` model.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport copy\nimport warnings\nfrom keras import layers as layer_module\nfrom keras.engine import base_layer\nfrom keras.engine import functional\nfrom keras.engine import input_layer\nfrom keras.engine import training_utils\nfrom keras.saving.saved_model import model_serialization\nfrom keras.utils import generic_utils\nfrom keras.utils import layer_utils\nfrom keras.utils import tf_inspect\nfrom keras.utils import tf_utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nSINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '\n 'a single output tensor. For multi-output '\n 'layers, use the functional API.')\n\n\n@keras_export('keras.Sequential', 'keras.models.Sequential')\nclass Sequential(functional.Functional):\n \"\"\"`Sequential` groups a linear stack of layers into a `tf.keras.Model`.\n\n `Sequential` provides training and inference features on this model.\n\n Examples:\n\n >>> # Optionally, the first layer can receive an `input_shape` argument:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> # Afterwards, we do automatic shape inference:\n >>> model.add(tf.keras.layers.Dense(4))\n\n >>> # This is identical to the following:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.Input(shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(8))\n\n >>> # Note that you can also omit the `input_shape` argument.\n >>> # In that case the model doesn't have any weights until the first call\n >>> # to a training/evaluation method (since it isn't yet built):\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> # model.weights not created yet\n\n >>> # Whereas if you specify the input shape, the model gets built\n >>> # continuously as you are adding layers:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> len(model.weights)\n 4\n\n >>> # When using the delayed-build pattern (no input shape specified), you can\n >>> # choose to manually build your model by calling\n >>> # `build(batch_input_shape)`:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> model.build((None, 16))\n >>> len(model.weights)\n 4\n\n ```python\n # Note that when using the delayed-build pattern (no input shape specified),\n # the model 
gets built the first time you call `fit`, `eval`, or `predict`,\n # or the first time you call the model on some input data.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(8))\n model.add(tf.keras.layers.Dense(1))\n model.compile(optimizer='sgd', loss='mse')\n # This builds the model for the first time:\n model.fit(x, y, batch_size=32, epochs=10)\n ```\n \"\"\"\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self, layers=None, name=None):\n \"\"\"Creates a `Sequential` model instance.\n\n Args:\n layers: Optional list of layers to add to the model.\n name: Optional name for the model.\n \"\"\"\n # Skip the init in FunctionalModel since model doesn't have input/output yet\n super(functional.Functional, self).__init__( # pylint: disable=bad-super-call\n name=name, autocast=False)\n base_layer.keras_api_gauge.get_cell('Sequential').set(True)\n self.supports_masking = True\n self._compute_output_and_mask_jointly = True\n self._auto_track_sub_layers = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._input_dtype = None\n self._layer_call_argspecs = {}\n self._created_nodes = set()\n # Flag that indicate whether the sequential network topology has been\n # created. It is false when there isn't any layer, or the layers doesn't\n # have input shape.\n self._graph_initialized = False\n\n # Unfortunately some Sequential models using custom layers or FeatureColumn\n # layers have multiple inputs. This is fundamentally incompatible with\n # most of the Sequential API, and we have to disable a number of features\n # for such models.\n self._use_legacy_deferred_behavior = False\n\n # Add to the model any layers passed to the constructor.\n if layers:\n if not isinstance(layers, (list, tuple)):\n layers = [layers]\n for layer in layers:\n self.add(layer)\n\n @property\n def layers(self):\n # Historically, `sequential.layers` only returns layers that were added\n # via `add`, and omits the auto-generated `InputLayer` that comes at the\n # bottom of the stack.\n # `Trackable` manages the `_layers` attributes and does filtering\n # over it.\n layers = super(Sequential, self).layers\n if layers and isinstance(layers[0], input_layer.InputLayer):\n return layers[1:]\n return layers[:]\n\n @trackable.no_automatic_dependency_tracking\n def add(self, layer):\n \"\"\"Adds a layer instance on top of the layer stack.\n\n Args:\n layer: layer instance.\n\n Raises:\n TypeError: If `layer` is not a layer instance.\n ValueError: In case the `layer` argument does not\n know its input shape.\n ValueError: In case the `layer` argument has\n multiple output tensors, or is already connected\n somewhere else (forbidden in `Sequential` models).\n \"\"\"\n # If we are passed a Keras tensor created by keras.Input(), we can extract\n # the input layer from its keras history and use that without any loss of\n # generality.\n if hasattr(layer, '_keras_history'):\n origin_layer = layer._keras_history[0]\n if isinstance(origin_layer, input_layer.InputLayer):\n layer = origin_layer\n logging.warning(\n 'Please add `keras.layers.InputLayer` instead of `keras.Input` to '\n 'Sequential model. `keras.Input` is intended to be used by '\n 'Functional model.')\n\n if isinstance(layer, tf.Module):\n if not isinstance(layer, base_layer.Layer):\n layer = functional.ModuleWrapper(layer)\n else:\n raise TypeError('The added layer must be '\n 'an instance of class Layer. 
'\n 'Found: ' + str(layer))\n\n tf_utils.assert_no_legacy_layers([layer])\n if not self._is_layer_name_unique(layer):\n raise ValueError('All layers added to a Sequential model '\n 'should have unique names. Name \"%s\" is already the name'\n ' of a layer in this model. Update the `name` argument '\n 'to pass a unique name.' % (layer.name,))\n\n self.built = False\n set_inputs = False\n self._maybe_create_attribute('_self_tracked_trackables', [])\n if not self._self_tracked_trackables:\n if isinstance(layer, input_layer.InputLayer):\n # Case where the user passes an Input or InputLayer layer via `add`.\n set_inputs = True\n else:\n batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)\n if batch_shape:\n # Instantiate an input layer.\n x = input_layer.Input(\n batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')\n # This will build the current layer\n # and create the node connecting the current layer\n # to the input layer we just created.\n layer(x)\n set_inputs = True\n\n if set_inputs:\n outputs = tf.nest.flatten(layer._inbound_nodes[-1].outputs)\n if len(outputs) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = outputs\n self.inputs = layer_utils.get_source_inputs(self.outputs[0])\n self.built = True\n self._has_explicit_input_shape = True\n\n elif self.outputs:\n # If the model is being built continuously on top of an input layer:\n # refresh its output.\n output_tensor = layer(self.outputs[0])\n if len(tf.nest.flatten(output_tensor)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = [output_tensor]\n self.built = True\n\n if set_inputs or self._graph_initialized:\n self._init_graph_network(self.inputs, self.outputs)\n self._graph_initialized = True\n else:\n self._self_tracked_trackables.append(layer)\n self._handle_deferred_layer_dependencies([layer])\n\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n\n @trackable.no_automatic_dependency_tracking\n def pop(self):\n \"\"\"Removes the last layer in the model.\n\n Raises:\n TypeError: if there are no layers in the model.\n \"\"\"\n if not self.layers:\n raise TypeError('There are no layers in the model.')\n\n layer = self._self_tracked_trackables.pop()\n self._layer_call_argspecs.pop(layer)\n if not self.layers:\n self.outputs = None\n self.inputs = None\n self.built = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._graph_initialized = False\n elif self._graph_initialized:\n self.layers[-1]._outbound_nodes = []\n self.outputs = [self.layers[-1].output]\n self._init_graph_network(self.inputs, self.outputs)\n self.built = True\n\n @trackable.no_automatic_dependency_tracking\n def _build_graph_network_for_inferred_shape(self,\n input_shape,\n input_dtype=None):\n if input_shape is None or not self.layers:\n return\n if not tf.__internal__.tf2.enabled() or not tf.compat.v1.executing_eagerly_outside_functions():\n # This behavior is disabled in V1 or when eager execution is disabled.\n return\n if (not self._has_explicit_input_shape and\n not self._use_legacy_deferred_behavior):\n # Determine whether the input shape is novel, i.e. 
whether the model\n # should be rebuilt.\n input_shape = tuple(input_shape)\n if self._inferred_input_shape is None:\n new_shape = input_shape\n else:\n new_shape = relax_input_shape(self._inferred_input_shape, input_shape)\n if (new_shape is not None and new_shape != self._inferred_input_shape):\n # A novel shape has been received: we need to rebuild the model.\n # In case we are inside a graph function, we step out of it.\n with tf.init_scope():\n inputs = input_layer.Input(\n batch_shape=new_shape,\n dtype=input_dtype,\n name=self.layers[0].name + '_input')\n layer_input = inputs\n created_nodes = set()\n for layer in self.layers:\n # Clear nodes previously created via this method. This prevents\n # node accumulation and ensures that e.g. `layer.output` is\n # always connected to `model.inputs`\n # (this is important e.g. for the feature extraction use case).\n # We don't just do `layer._inbound_nodes = []` in order\n # not to break shared layers added to Sequential models (which is\n # technically illegal as per the `add()` docstring,\n # but wasn't previously disabled).\n clear_previously_created_nodes(layer, self._created_nodes)\n try:\n # Create Functional API connection by calling the current layer\n layer_output = layer(layer_input)\n except: # pylint:disable=bare-except\n # Functional API calls may fail for a number of reasons:\n # 1) The layer may be buggy. In this case it will be easier for\n # the user to debug if we fail on the first call on concrete data,\n # instead of our own call on a symbolic input.\n # 2) The layer is dynamic (graph-incompatible) and hasn't\n # overridden `compute_output_shape`. In this case, it is\n # impossible to build a graph network.\n # 3) The layer is otherwise incompatible with the Functional API\n # (e.g. this is the case for some probabilistic layers that rely\n # on hacks and that do not return tensors).\n # In all these cases, we should avoid creating a graph network\n # (or we simply can't).\n self._use_legacy_deferred_behavior = True\n return\n if len(tf.nest.flatten(layer_output)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n # Keep track of nodes just created above\n track_nodes_created_by_last_call(layer, created_nodes)\n layer_input = layer_output\n outputs = layer_output\n self._created_nodes = created_nodes\n try:\n # Initialize a graph Network. This call will never fail for\n # a stack of valid Keras layers.\n # However some users have layers that are fundamentally incompatible\n # with the Functional API, which do not return tensors. 
In this\n # case, we fall back to the legacy deferred behavior.\n # TODO(fchollet): consider raising here, as we should not be\n # supporting such layers.\n self._init_graph_network(inputs, outputs)\n self._graph_initialized = True\n except: # pylint:disable=bare-except\n self._use_legacy_deferred_behavior = True\n self._inferred_input_shape = new_shape\n\n @generic_utils.default\n def build(self, input_shape=None):\n if self._graph_initialized:\n self._init_graph_network(self.inputs, self.outputs)\n else:\n if input_shape is None:\n raise ValueError('You must provide an `input_shape` argument.')\n self._build_graph_network_for_inferred_shape(input_shape)\n if not self.built:\n input_shape = tuple(input_shape)\n self._build_input_shape = input_shape\n super(Sequential, self).build(input_shape)\n self.built = True\n\n def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name\n # If applicable, update the static input shape of the model.\n if not self._has_explicit_input_shape:\n if not tf.is_tensor(inputs) and not isinstance(\n inputs, tf.Tensor):\n # This is a Sequential with mutiple inputs. This is technically an\n # invalid use case of Sequential, but we tolerate it for backwards\n # compatibility.\n self._use_legacy_deferred_behavior = True\n self._build_input_shape = tf.nest.map_structure(_get_shape_tuple, inputs)\n if tf.__internal__.tf2.enabled():\n logging.warning('Layers in a Sequential model should only have a '\n 'single input tensor, but we receive a %s input: %s'\n '\\nConsider rewriting this model with the Functional '\n 'API.' % (type(inputs), inputs))\n else:\n self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)\n\n if self._graph_initialized:\n if not self.built:\n self._init_graph_network(self.inputs, self.outputs)\n return super(Sequential, self).call(inputs, training=training, mask=mask)\n\n outputs = inputs # handle the corner case where self.layers is empty\n for layer in self.layers:\n # During each iteration, `inputs` are the inputs to `layer`, and `outputs`\n # are the outputs of `layer` applied to `inputs`. 
At the end of each\n # iteration `inputs` is set to `outputs` to prepare for the next layer.\n kwargs = {}\n argspec = self._layer_call_argspecs[layer].args\n if 'mask' in argspec:\n kwargs['mask'] = mask\n if 'training' in argspec:\n kwargs['training'] = training\n\n outputs = layer(inputs, **kwargs)\n\n if len(tf.nest.flatten(outputs)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n # `outputs` will be the inputs to the next layer.\n inputs = outputs\n mask = getattr(outputs, '_keras_mask', None)\n return outputs\n\n def compute_output_shape(self, input_shape):\n shape = input_shape\n for layer in self.layers:\n shape = layer.compute_output_shape(shape)\n return shape\n\n def compute_mask(self, inputs, mask):\n # TODO(omalleyt): b/123540974 This function is not really safe to call\n # by itself because it will duplicate any updates and losses in graph\n # mode by `call`ing the Layers again.\n outputs = self.call(inputs, mask=mask)\n return getattr(outputs, '_keras_mask', None)\n\n def predict_proba(self, x, batch_size=32, verbose=0):\n \"\"\"Generates class probability predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A Numpy array of probability predictions.\n \"\"\"\n warnings.warn('`model.predict_proba()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use `model.predict()` instead.')\n preds = self.predict(x, batch_size, verbose)\n if preds.min() < 0. or preds.max() > 1.:\n logging.warning('Network returning invalid probability values. '\n 'The last layer might not normalize predictions '\n 'into probabilities '\n '(like softmax or sigmoid would).')\n return preds\n\n def predict_classes(self, x, batch_size=32, verbose=0):\n \"\"\"Generate class predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A numpy array of class predictions.\n \"\"\"\n warnings.warn('`model.predict_classes()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use instead:'\n '* `np.argmax(model.predict(x), axis=-1)`, '\n ' if your model does multi-class classification '\n ' (e.g. if it uses a `softmax` last-layer activation).'\n '* `(model.predict(x) > 0.5).astype(\"int32\")`, '\n ' if your model does binary classification '\n ' (e.g. if it uses a `sigmoid` last-layer activation).')\n proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n if proba.shape[-1] > 1:\n return proba.argmax(axis=-1)\n else:\n return (proba > 0.5).astype('int32')\n\n def get_config(self):\n layer_configs = []\n for layer in super(Sequential, self).layers:\n # `super().layers` include the InputLayer if available (it is filtered out\n # of `self.layers`). 
Note that `self._self_tracked_trackables` is managed\n # by the tracking infrastructure and should not be used.\n layer_configs.append(generic_utils.serialize_keras_object(layer))\n config = {\n 'name': self.name,\n 'layers': copy.deepcopy(layer_configs)\n }\n if not self._is_graph_network and self._build_input_shape is not None:\n config['build_input_shape'] = self._build_input_shape\n return config\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n if 'name' in config:\n name = config['name']\n build_input_shape = config.get('build_input_shape')\n layer_configs = config['layers']\n else:\n name = None\n build_input_shape = None\n layer_configs = config\n model = cls(name=name)\n for layer_config in layer_configs:\n layer = layer_module.deserialize(layer_config,\n custom_objects=custom_objects)\n model.add(layer)\n if (not model.inputs and build_input_shape and\n isinstance(build_input_shape, (tuple, list))):\n model.build(build_input_shape)\n return model\n\n @property\n def input_spec(self):\n if hasattr(self, '_manual_input_spec'):\n return self._manual_input_spec\n if self.layers and hasattr(self.layers[0], 'input_spec'):\n return self.layers[0].input_spec\n return None\n\n @input_spec.setter\n def input_spec(self, value):\n self._manual_input_spec = value\n\n @property\n def _trackable_saved_model_saver(self):\n return model_serialization.SequentialSavedModelSaver(self)\n\n def _is_layer_name_unique(self, layer):\n for ref_layer in self.layers:\n if layer.name == ref_layer.name and ref_layer is not layer:\n return False\n return True\n\n def _assert_weights_created(self):\n if self._graph_initialized:\n return\n # When the graph has not been initialized, use the Model's implementation to\n # to check if the weights has been created.\n super(functional.Functional, self)._assert_weights_created() # pylint: disable=bad-super-call\n\n\ndef _get_shape_tuple(t):\n if hasattr(t, 'shape'):\n shape = t.shape\n if isinstance(shape, tuple):\n return shape\n if shape.rank is not None:\n return tuple(shape.as_list())\n return None\n return None\n\n\ndef relax_input_shape(shape_1, shape_2):\n if shape_1 is None or shape_2 is None:\n return None\n if len(shape_1) != len(shape_2):\n return None\n return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))\n\n\ndef clear_previously_created_nodes(layer, created_nodes):\n \"\"\"Remove nodes from `created_nodes` from the layer's inbound_nodes.\"\"\"\n for node in layer._inbound_nodes:\n prev_layers = node.inbound_layers\n for prev_layer in tf.nest.flatten(prev_layers):\n prev_layer._outbound_nodes = [\n n for n in prev_layer._outbound_nodes\n if n not in created_nodes]\n layer._inbound_nodes = [\n n for n in layer._inbound_nodes if n not in created_nodes]\n\n\ndef track_nodes_created_by_last_call(layer, created_nodes):\n \"\"\"Adds to `created_nodes` the nodes created by the last call to `layer`.\"\"\"\n if not layer._inbound_nodes:\n return\n created_nodes.add(layer._inbound_nodes[-1])\n prev_layers = layer._inbound_nodes[-1].inbound_layers\n for prev_layer in tf.nest.flatten(prev_layers):\n if prev_layer._outbound_nodes:\n created_nodes.add(prev_layer._outbound_nodes[-1])\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions shared between SavedModel saving/loading implementations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport itertools\nimport threading\nimport types\nfrom keras import backend as K\nfrom keras.engine import base_layer_utils\nfrom keras.utils import control_flow_util\nfrom keras.utils import tf_contextlib\nfrom keras.utils import tf_inspect\nfrom keras.utils.generic_utils import LazyLoader\n\n\n# pylint:disable=g-inconsistent-quotes\ntraining_lib = LazyLoader(\n \"training_lib\", globals(),\n \"keras.engine.training\")\n# pylint:enable=g-inconsistent-quotes\n\n\ndef use_wrapped_call(layer, call_fn, default_training_value=None,\n return_method=False):\n \"\"\"Creates fn that adds the losses returned by call_fn & returns the outputs.\n\n Args:\n layer: A Keras layer object\n call_fn: tf.function that takes layer inputs (and possibly a training arg),\n and returns a tuple of (outputs, list of losses).\n default_training_value: Default value of the training kwarg. If `None`, the\n default is `K.learning_phase()`.\n return_method: Whether to return a method bound to the layer.\n\n Returns:\n function that calls call_fn and returns the outputs. Losses returned by\n call_fn are added to the layer losses.\n \"\"\"\n expects_training_arg = layer_uses_training_bool(layer)\n if hasattr(call_fn, 'original_call'): # call_fn is a LayerCall object\n original_call = call_fn.original_call\n # In Python 3, callable objects are not compatible with inspect.getargspec\n call_fn = call_fn.__call__\n else:\n original_call = call_fn\n fn, arg_spec = maybe_add_training_arg(\n original_call, call_fn, expects_training_arg, default_training_value)\n\n def return_outputs_and_add_losses(*args, **kwargs):\n \"\"\"Returns the outputs from the layer call function, and adds the losses.\"\"\"\n if return_method:\n args = args[1:]\n\n outputs, losses = fn(*args, **kwargs)\n layer.add_loss(losses, inputs=True)\n\n # TODO(kathywu): This is a temporary hack. When a network of layers is\n # revived from SavedModel, only the top-level layer will have losses. This\n # causes issues in eager mode because the child layers may have graph losses\n # (thus model.losses returns a mix of Eager and graph tensors). To fix this,\n # whenever eager losses are added to one layer, add eager losses to all\n # child layers. 
This causes `.losses` to only return eager losses.\n # pylint: disable=protected-access\n if tf.executing_eagerly():\n for i in layer._flatten_layers():\n if i is not layer:\n i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n # pylint: enable=protected-access\n return outputs\n\n decorated = tf.__internal__.decorator.make_decorator(\n target=call_fn,\n decorator_func=return_outputs_and_add_losses,\n decorator_argspec=arg_spec)\n\n if return_method:\n return types.MethodType(decorated, layer)\n else:\n return decorated\n\n\ndef layer_uses_training_bool(layer):\n \"\"\"Returns whether this layer or any of its children uses the training arg.\"\"\"\n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited = {layer}\n to_visit = list_all_layers(layer)\n while to_visit:\n layer = to_visit.pop()\n if layer in visited:\n continue\n if getattr(layer, '_expects_training_arg', True):\n return True\n visited.add(layer)\n to_visit.extend(list_all_layers(layer))\n return False\n\n\ndef list_all_layers(obj):\n if isinstance(obj, training_lib.Model):\n # Handle special case of Sequential, which doesn't return\n # the `Input` layer.\n return obj.layers\n else:\n return list(obj._flatten_layers(include_self=False, recursive=False)) # pylint: disable=protected-access\n\n\ndef list_all_layers_and_sublayers(obj):\n s = set([obj])\n s.update(itertools.chain.from_iterable(\n list_all_layers_and_sublayers(layer) for layer in list_all_layers(obj)))\n return s\n\n\ndef maybe_add_training_arg(\n original_call, wrapped_call, expects_training_arg, default_training_value):\n \"\"\"Decorate call and optionally adds training argument.\n\n If a layer expects a training argument, this function ensures that 'training'\n is present in the layer args or kwonly args, with the default training value.\n\n Args:\n original_call: Original call function.\n wrapped_call: Wrapped call function.\n expects_training_arg: Whether to include 'training' argument.\n default_training_value: Default value of the training kwarg to include in\n the arg spec. If `None`, the default is `K.learning_phase()`.\n\n Returns:\n Tuple of (\n function that calls `wrapped_call` and sets the training arg,\n Argspec of returned function or `None` if the argspec is unchanged)\n \"\"\"\n if not expects_training_arg:\n return wrapped_call, None\n\n def wrap_with_training_arg(*args, **kwargs):\n \"\"\"Wrap the `wrapped_call` function, and set training argument.\"\"\"\n training_arg_index = get_training_arg_index(original_call)\n training = get_training_arg(training_arg_index, args, kwargs)\n if training is None:\n training = default_training_value or K.learning_phase()\n\n args = list(args)\n kwargs = kwargs.copy()\n\n def replace_training_and_call(training):\n set_training_arg(training, training_arg_index, args, kwargs)\n return wrapped_call(*args, **kwargs)\n\n return control_flow_util.smart_cond(\n training, lambda: replace_training_and_call(True),\n lambda: replace_training_and_call(False))\n\n # Create arg spec for decorated function. 
If 'training' is not defined in the\n # args of the original arg spec, then add it to kwonlyargs.\n arg_spec = tf_inspect.getfullargspec(original_call)\n defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []\n\n kwonlyargs = arg_spec.kwonlyargs\n kwonlydefaults = arg_spec.kwonlydefaults or {}\n # Add training arg if it does not exist, or set the default training value.\n if 'training' not in arg_spec.args:\n kwonlyargs.append('training')\n kwonlydefaults['training'] = default_training_value\n else:\n index = arg_spec.args.index('training')\n training_default_index = len(arg_spec.args) - index\n if (arg_spec.defaults and\n len(arg_spec.defaults) >= training_default_index and\n defaults[-training_default_index] is None):\n defaults[-training_default_index] = default_training_value\n\n decorator_argspec = tf_inspect.FullArgSpec(\n args=arg_spec.args,\n varargs=arg_spec.varargs,\n varkw=arg_spec.varkw,\n defaults=defaults,\n kwonlyargs=kwonlyargs,\n kwonlydefaults=kwonlydefaults,\n annotations=arg_spec.annotations)\n return wrap_with_training_arg, decorator_argspec\n\n\ndef get_training_arg_index(call_fn):\n \"\"\"Returns the index of 'training' in the layer call function arguments.\n\n Args:\n call_fn: Call function.\n\n Returns:\n - n: index of 'training' in the call function arguments.\n - -1: if 'training' is not found in the arguments, but layer.call accepts\n variable keyword arguments\n - None: if layer doesn't expect a training argument.\n \"\"\"\n arg_list = tf_inspect.getfullargspec(call_fn).args\n if tf_inspect.ismethod(call_fn):\n arg_list = arg_list[1:]\n if 'training' in arg_list:\n return arg_list.index('training')\n else:\n return -1\n\n\ndef set_training_arg(training, index, args, kwargs):\n if index is None:\n pass\n elif index >= 0 and len(args) > index:\n args[index] = training\n else:\n kwargs['training'] = training\n return args, kwargs\n\n\ndef get_training_arg(index, args, kwargs):\n if index is None:\n return None\n elif index >= 0 and len(args) > index:\n return args[index]\n else:\n return kwargs.get('training', None)\n\n\ndef remove_training_arg(index, args, kwargs):\n if index is None:\n pass\n elif index >= 0 and len(args) > index:\n args.pop(index)\n else:\n kwargs.pop('training', None)\n\n\nclass SaveOptionsContext(threading.local):\n\n def __init__(self):\n super(SaveOptionsContext, self).__init__()\n self.save_traces = True\n\n\n_save_options_context = SaveOptionsContext()\n\n\n@tf_contextlib.contextmanager\ndef keras_option_scope(save_traces):\n previous_value = _save_options_context.save_traces\n try:\n _save_options_context.save_traces = save_traces\n yield\n finally:\n _save_options_context.save_traces = previous_value\n\n\ndef should_save_traces():\n return _save_options_context.save_traces\n\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for saving and loading with mixed APIs with distribution strategies.\n\nFor saving, Keras's export_saved_model() API is used; and for loading,\nsaved_model's load() API is used. Keras's export_save_model() when used with\n`serving_only` parameter equals to True should be the same as using\ntf.saved_model.save().\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom keras import testing_utils\nfrom keras.distribute import saved_model_test_base as test_base\nfrom keras.saving import save\n\n_DEFAULT_FUNCTION_KEY = 'serving_default'\n\n\n@testing_utils.run_all_without_tensor_float_32(\n 'Uses Dense layers, which call matmul')\nclass SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):\n\n def setUp(self):\n self._root_dir = 'saved_model_save_load'\n super(SavedModelSaveAndLoadTest, self).setUp()\n\n def _save_model(self, model, saved_dir):\n save.save_model(model, saved_dir, save_format='tf')\n\n def _load_and_run_model(self,\n distribution,\n saved_dir,\n predict_dataset,\n output_name='output_1'):\n return test_base.load_and_run_with_saved_model_api(distribution, saved_dir,\n predict_dataset,\n output_name)\n\n @tf.__internal__.distribute.combinations.generate(test_base.simple_models_with_strategies())\n def test_save_no_strategy_restore_strategy(self, model_and_input,\n distribution):\n self.run_test_save_no_strategy_restore_strategy(\n model_and_input, distribution)\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.times(test_base.simple_models_with_strategies(),\n tf.__internal__.test.combinations.combine(save_in_scope=[True, False])))\n def test_save_strategy_restore_no_strategy(self, model_and_input,\n distribution, save_in_scope):\n self.run_test_save_strategy_restore_no_strategy(\n model_and_input, distribution, save_in_scope)\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.times(test_base.simple_models_with_strategy_pairs(),\n tf.__internal__.test.combinations.combine(save_in_scope=[True, False])))\n def test_save_strategy_restore_strategy(self, model_and_input,\n distribution_for_saving,\n distribution_for_restoring,\n save_in_scope):\n self.run_test_save_strategy_restore_strategy(model_and_input,\n distribution_for_saving,\n distribution_for_restoring,\n save_in_scope)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n", "# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras-based attention layer.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport collections\nimport math\nimport string\n\nimport numpy as np\nfrom keras import constraints\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras.engine.base_layer import Layer\nfrom keras.layers import advanced_activations\nfrom keras.layers import core\nfrom keras.layers import einsum_dense\nfrom keras.utils import tf_utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n_CHR_IDX = string.ascii_lowercase\n\n\ndef _build_attention_equation(rank, attn_axes):\n \"\"\"Builds einsum equations for the attention computation.\n\n Query, key, value inputs after projection are expected to have the shape as:\n (bs, <non-attention dims>, <attention dims>, num_heads, channels).\n bs and <non-attention dims> are treated as <batch dims>.\n The attention operations can be generalized:\n (1) Query-key dot product:\n (<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,\n <key attention dims>, num_heads, channels) -> (<batch dims>,\n num_heads, <query attention dims>, <key attention dims>)\n (2) Combination:\n (<batch dims>, num_heads, <query attention dims>, <key attention dims>),\n (<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,\n <query attention dims>, num_heads, channels)\n\n Args:\n rank: the rank of query, key, value tensors.\n attn_axes: a list/tuple of axes, [-1, rank), that will do attention.\n\n Returns:\n Einsum equations.\n \"\"\"\n target_notation = _CHR_IDX[:rank]\n # `batch_dims` includes the head dim.\n batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))\n letter_offset = rank\n source_notation = \"\"\n for i in range(rank):\n if i in batch_dims or i == rank - 1:\n source_notation += target_notation[i]\n else:\n source_notation += _CHR_IDX[letter_offset]\n letter_offset += 1\n\n product_notation = \"\".join([target_notation[i] for i in batch_dims] +\n [target_notation[i] for i in attn_axes] +\n [source_notation[i] for i in attn_axes])\n dot_product_equation = \"%s,%s->%s\" % (source_notation, target_notation,\n product_notation)\n attn_scores_rank = len(product_notation)\n combine_equation = \"%s,%s->%s\" % (product_notation, source_notation,\n target_notation)\n return dot_product_equation, combine_equation, attn_scores_rank\n\n\ndef _build_proj_equation(free_dims, bound_dims, output_dims):\n \"\"\"Builds an einsum equation for projections inside multi-head attention.\"\"\"\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n bias_axes = \"\"\n letter_offset = 0\n for i in range(free_dims):\n char = _CHR_IDX[i + 
letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n bias_axes += char\n equation = \"%s,%s->%s\" % (input_str, kernel_str, output_str)\n\n return equation, bias_axes, len(output_str)\n\n\ndef _get_output_shape(output_rank, known_last_dims):\n return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)\n\n\n@keras_export(\"keras.layers.MultiHeadAttention\")\nclass MultiHeadAttention(Layer):\n \"\"\"MultiHeadAttention layer.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `query`, `key,` `value` are the same, then\n this is self-attention. Each timestep in `query` attends to the\n corresponding sequence in `key`, and returns a fixed-width vector.\n\n This layer first projects `query`, `key` and `value`. These are\n (effectively) a list of tensors of length `num_attention_heads`, where the\n corresponding shapes are [batch_size, <query dimensions>, key_dim],\n [batch_size, <key/value dimensions>, key_dim],\n [batch_size, <key/value dimensions>, value_dim].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor.\n\n Finally, the result tensor with the last dimension as value_dim can take an\n linear projection and return.\n\n Examples:\n\n Performs 1D cross-attention over two sequence inputs with an attention mask.\n Returns the additional attention weights over heads.\n\n >>> layer = MultiHeadAttention(num_heads=2, key_dim=2)\n >>> target = tf.keras.Input(shape=[8, 16])\n >>> source = tf.keras.Input(shape=[4, 16])\n >>> output_tensor, weights = layer(target, source,\n ... return_attention_scores=True)\n >>> print(output_tensor.shape)\n (None, 8, 16)\n >>> print(weights.shape)\n (None, 2, 8, 4)\n\n Performs 2D self-attention over a 5D input tensor on axes 2 and 3.\n\n >>> layer = MultiHeadAttention(num_heads=2, key_dim=2, attention_axes=(2, 3))\n >>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])\n >>> output_tensor = layer(input_tensor, input_tensor)\n >>> print(output_tensor.shape)\n (None, 5, 3, 4, 16)\n\n Args:\n num_heads: Number of attention heads.\n key_dim: Size of each attention head for query and key.\n value_dim: Size of each attention head for value.\n dropout: Dropout probability.\n use_bias: Boolean, whether the dense layers use bias vectors/matrices.\n output_shape: The expected shape of an output tensor, besides the batch and\n sequence dims. If not specified, projects back to the key feature dim.\n attention_axes: axes over which the attention is applied. 
`None` means\n attention over all axes, but batch, heads, and features.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer kernels.\n\n Call arguments:\n query: Query `Tensor` of shape `[B, T, dim]`.\n value: Value `Tensor` of shape `[B, S, dim]`.\n key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use\n `value` for both `key` and `value`, which is the most common case.\n attention_mask: a boolean mask of shape `[B, T, S]`, that prevents\n attention to certain positions. The boolean mask specifies which query\n elements can attend to which key elements, 1 indicates attention and 0\n indicates no attention. Broadcasting can happen for the missing batch\n dimensions and the head dimension.\n return_attention_scores: A boolean to indicate whether the output should\n be attention output if True, or (attention_output, attention_scores) if\n False. Defaults to False.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n Defaults to either using the training mode of the parent layer/model,\n or False (inference) if there is no parent layer.\n\n Returns:\n attention_output: The result of the computation, of shape [B, T, E],\n where `T` is for target sequence shapes and `E` is the query input last\n dimension if `output_shape` is `None`. Otherwise, the multi-head outputs\n are project to the shape specified by `output_shape`.\n attention_scores: [Optional] multi-head attention coeffients over\n attention axes.\n \"\"\"\n\n def __init__(self,\n num_heads,\n key_dim,\n value_dim=None,\n dropout=0.0,\n use_bias=True,\n output_shape=None,\n attention_axes=None,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(MultiHeadAttention, self).__init__(**kwargs)\n self._num_heads = num_heads\n self._key_dim = key_dim\n self._value_dim = value_dim if value_dim else key_dim\n self._dropout = dropout\n self._use_bias = use_bias\n self._output_shape = output_shape\n self._kernel_initializer = initializers.get(kernel_initializer)\n self._bias_initializer = initializers.get(bias_initializer)\n self._kernel_regularizer = regularizers.get(kernel_regularizer)\n self._bias_regularizer = regularizers.get(bias_regularizer)\n self._kernel_constraint = constraints.get(kernel_constraint)\n self._bias_constraint = constraints.get(bias_constraint)\n if attention_axes is not None and not isinstance(attention_axes,\n collections.abc.Sized):\n self._attention_axes = (attention_axes,)\n else:\n self._attention_axes = attention_axes\n self._built_from_signature = False\n self._query_shape, self._key_shape, self._value_shape = None, None, None\n\n def get_config(self):\n config = {\n \"num_heads\":\n self._num_heads,\n \"key_dim\":\n self._key_dim,\n \"value_dim\":\n self._value_dim,\n \"dropout\":\n self._dropout,\n \"use_bias\":\n self._use_bias,\n \"output_shape\":\n self._output_shape,\n \"attention_axes\":\n self._attention_axes,\n \"kernel_initializer\":\n initializers.serialize(self._kernel_initializer),\n 
\"bias_initializer\":\n initializers.serialize(self._bias_initializer),\n \"kernel_regularizer\":\n regularizers.serialize(self._kernel_regularizer),\n \"bias_regularizer\":\n regularizers.serialize(self._bias_regularizer),\n \"activity_regularizer\":\n regularizers.serialize(self._activity_regularizer),\n \"kernel_constraint\":\n constraints.serialize(self._kernel_constraint),\n \"bias_constraint\":\n constraints.serialize(self._bias_constraint),\n \"query_shape\": self._query_shape,\n \"key_shape\": self._key_shape,\n \"value_shape\": self._value_shape,\n }\n base_config = super(MultiHeadAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n # If the layer has a different build() function from the Keras default,\n # we need to trigger the customized build to create weights.\n query_shape = config.pop(\"query_shape\")\n key_shape = config.pop(\"key_shape\")\n value_shape = config.pop(\"value_shape\")\n layer = cls(**config)\n if None in [query_shape, key_shape, value_shape]:\n logging.warning(\n \"One of the input shape is missing. They should be \"\n \"memorized when the layer was serialized. \"\n \"%s is created without weights.\",\n str(cls))\n else:\n layer._build_from_signature(query_shape, value_shape, key_shape) # pylint: disable=protected-access\n return layer\n\n def _build_from_signature(self, query, value, key=None):\n \"\"\"Builds layers and variables.\n\n Once the method is called, self._built_from_signature will be set to True.\n\n Args:\n query: query tensor or TensorShape.\n value: value tensor or TensorShape.\n key: key tensor or TensorShape.\n \"\"\"\n self._built_from_signature = True\n if hasattr(query, \"shape\"):\n self._query_shape = tf.TensorShape(query.shape)\n else:\n self._query_shape = tf.TensorShape(query)\n if hasattr(value, \"shape\"):\n self._value_shape = tf.TensorShape(value.shape)\n else:\n self._value_shape = tf.TensorShape(value)\n if key is None:\n self._key_shape = self._value_shape\n elif hasattr(key, \"shape\"):\n self._key_shape = tf.TensorShape(key.shape)\n else:\n self._key_shape = tf.TensorShape(key)\n\n common_kwargs = dict(\n kernel_initializer=self._kernel_initializer,\n bias_initializer=self._bias_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activity_regularizer=self._activity_regularizer,\n kernel_constraint=self._kernel_constraint,\n bias_constraint=self._bias_constraint)\n # Any setup work performed only once should happen in an `init_scope`\n # to avoid creating symbolic Tensors that will later pollute any eager\n # operations.\n with tf_utils.maybe_init_scope(self):\n free_dims = self._query_shape.rank - 1\n einsum_equation, bias_axes, output_rank = _build_proj_equation(\n free_dims, bound_dims=1, output_dims=2)\n self._query_dense = einsum_dense.EinsumDense(\n einsum_equation,\n output_shape=_get_output_shape(output_rank - 1,\n [self._num_heads, self._key_dim]),\n bias_axes=bias_axes if self._use_bias else None,\n name=\"query\",\n **common_kwargs)\n einsum_equation, bias_axes, output_rank = _build_proj_equation(\n self._key_shape.rank - 1, bound_dims=1, output_dims=2)\n self._key_dense = einsum_dense.EinsumDense(\n einsum_equation,\n output_shape=_get_output_shape(output_rank - 1,\n [self._num_heads, self._key_dim]),\n bias_axes=bias_axes if self._use_bias else None,\n name=\"key\",\n **common_kwargs)\n einsum_equation, bias_axes, output_rank = _build_proj_equation(\n 
self._value_shape.rank - 1, bound_dims=1, output_dims=2)\n self._value_dense = einsum_dense.EinsumDense(\n einsum_equation,\n output_shape=_get_output_shape(output_rank - 1,\n [self._num_heads, self._value_dim]),\n bias_axes=bias_axes if self._use_bias else None,\n name=\"value\",\n **common_kwargs)\n\n # Builds the attention computations for multi-head dot product attention.\n # These computations could be wrapped into the keras attention layer once\n # it support mult-head einsum computations.\n self._build_attention(output_rank)\n self._output_dense = self._make_output_dense(\n free_dims, common_kwargs, \"attention_output\")\n\n def _make_output_dense(self, free_dims, common_kwargs, name=None):\n \"\"\"Builds the output projection matrix.\n\n Args:\n free_dims: Number of free dimensions for einsum equation building.\n common_kwargs: Common keyword arguments for einsum layer.\n name: the name for the projection layer.\n\n Returns:\n Projection layer.\n \"\"\"\n if self._output_shape:\n if not isinstance(self._output_shape, collections.abc.Sized):\n output_shape = [self._output_shape]\n else:\n output_shape = self._output_shape\n else:\n output_shape = [self._query_shape[-1]]\n einsum_equation, bias_axes, output_rank = _build_proj_equation(\n free_dims, bound_dims=2, output_dims=len(output_shape))\n return einsum_dense.EinsumDense(\n einsum_equation,\n output_shape=_get_output_shape(output_rank - 1, output_shape),\n bias_axes=bias_axes if self._use_bias else None,\n name=name,\n **common_kwargs)\n\n def _build_attention(self, rank):\n \"\"\"Builds multi-head dot-product attention computations.\n\n This function builds attributes necessary for `_compute_attention` to\n costomize attention computation to replace the default dot-product\n attention.\n\n Args:\n rank: the rank of query, key, value tensors.\n \"\"\"\n if self._attention_axes is None:\n self._attention_axes = tuple(range(1, rank - 2))\n else:\n self._attention_axes = tuple(self._attention_axes)\n self._dot_product_equation, self._combine_equation, attn_scores_rank = (\n _build_attention_equation(rank, attn_axes=self._attention_axes))\n norm_axes = tuple(\n range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))\n self._softmax = advanced_activations.Softmax(axis=norm_axes)\n self._dropout_layer = core.Dropout(rate=self._dropout)\n\n def _masked_softmax(self, attention_scores, attention_mask=None):\n # Normalize the attention scores to probabilities.\n # `attention_scores` = [B, N, T, S]\n if attention_mask is not None:\n # The expand dim happens starting from the `num_heads` dimension,\n # (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)\n mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]\n for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):\n attention_mask = tf.compat.v1.expand_dims(\n attention_mask, axis=mask_expansion_axes)\n return self._softmax(attention_scores, attention_mask)\n\n def _compute_attention(self,\n query,\n key,\n value,\n attention_mask=None,\n training=None):\n \"\"\"Applies Dot-product attention with query, key, value tensors.\n\n This function defines the computation inside `call` with projected\n multi-head Q, K, V inputs. 
Users can override this function for customized\n attention implementation.\n\n Args:\n query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.\n key: Projected key `Tensor` of shape `[B, T, N, key_dim]`.\n value: Projected value `Tensor` of shape `[B, T, N, value_dim]`.\n attention_mask: a boolean mask of shape `[B, T, S]`, that prevents\n attention to certain positions.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Returns:\n attention_output: Multi-headed outputs of attention computation.\n attention_scores: Multi-headed attention weights.\n \"\"\"\n # Note: Applying scalar multiply at the smaller end of einsum improves\n # XLA performance, but may introduce slight numeric differences in\n # the Transformer attention head.\n query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(self._dot_product_equation, key,\n query)\n\n attention_scores = self._masked_softmax(attention_scores, attention_mask)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_scores_dropout = self._dropout_layer(\n attention_scores, training=training)\n\n # `context_layer` = [B, T, N, H]\n attention_output = tf.einsum(self._combine_equation,\n attention_scores_dropout, value)\n return attention_output, attention_scores\n\n def call(self,\n query,\n value,\n key=None,\n attention_mask=None,\n return_attention_scores=False,\n training=None):\n if not self._built_from_signature:\n self._build_from_signature(query=query, value=value, key=key)\n if key is None:\n key = value\n\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query` = [B, T, N ,H]\n query = self._query_dense(query)\n\n # `key` = [B, S, N, H]\n key = self._key_dense(key)\n\n # `value` = [B, S, N, H]\n value = self._value_dense(value)\n\n attention_output, attention_scores = self._compute_attention(\n query, key, value, attention_mask, training)\n attention_output = self._output_dense(attention_output)\n\n if return_attention_scores:\n return attention_output, attention_scores\n return attention_output\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras TF utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom absl.testing import parameterized\n\nimport keras\nfrom keras import combinations\nfrom keras.utils import tf_utils\n\ntry:\n import attr # pylint:disable=g-import-not-at-top\nexcept ImportError:\n attr = None\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TestIsSymbolicTensor(tf.test.TestCase, parameterized.TestCase):\n\n def test_default_behavior(self):\n if tf.executing_eagerly():\n self.assertFalse(tf_utils.is_symbolic_tensor(\n tf.Variable(name='blah', initial_value=0.)))\n self.assertFalse(\n tf_utils.is_symbolic_tensor(\n tf.convert_to_tensor(0.)))\n self.assertFalse(tf_utils.is_symbolic_tensor(\n tf.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))\n else:\n self.assertTrue(tf_utils.is_symbolic_tensor(\n tf.Variable(name='blah', initial_value=0.)))\n self.assertTrue(\n tf_utils.is_symbolic_tensor(\n tf.convert_to_tensor(0.)))\n self.assertTrue(tf_utils.is_symbolic_tensor(\n tf.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))\n\n def test_works_with_registered(self):\n\n class CustomClass(object):\n\n def value(self):\n return tf.convert_to_tensor(42.)\n\n tf.register_tensor_conversion_function(\n CustomClass, lambda value, **_: value.value())\n\n tf_utils.register_symbolic_tensor_type(CustomClass)\n\n if tf.executing_eagerly():\n self.assertFalse(tf_utils.is_symbolic_tensor(\n tf.Variable(name='blah', initial_value=0.)))\n self.assertFalse(\n tf_utils.is_symbolic_tensor(\n tf.convert_to_tensor(0.)))\n self.assertFalse(tf_utils.is_symbolic_tensor(\n tf.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))\n self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))\n else:\n self.assertTrue(tf_utils.is_symbolic_tensor(\n tf.Variable(name='blah', initial_value=0.)))\n self.assertTrue(\n tf_utils.is_symbolic_tensor(\n tf.convert_to_tensor(0.)))\n self.assertTrue(tf_utils.is_symbolic_tensor(\n tf.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))\n self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))\n\n def test_enables_nontensor_plumbing(self):\n if tf.executing_eagerly():\n self.skipTest('`compile` functionality changed.')\n # Setup.\n\n class Foo(object):\n\n def __init__(self, input_):\n self._input = input_\n self.value = tf.convert_to_tensor([[42.]])\n\n @property\n def dtype(self):\n return self.value.dtype\n\n tf.register_tensor_conversion_function(\n Foo, lambda x, *args, **kwargs: x.value)\n tf_utils.register_symbolic_tensor_type(Foo)\n\n class PlumbingLayer(keras.layers.Lambda):\n\n def __init__(self, fn, **kwargs):\n def _fn(*fargs, **fkwargs):\n d = fn(*fargs, **fkwargs)\n x = 
tf.convert_to_tensor(d)\n d.shape = x.shape\n d.get_shape = x.get_shape\n return d, x\n super(PlumbingLayer, self).__init__(_fn, **kwargs)\n self._enter_dunder_call = False\n\n def __call__(self, inputs, *args, **kwargs):\n self._enter_dunder_call = True\n d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)\n self._enter_dunder_call = False\n return d\n\n def call(self, inputs, *args, **kwargs):\n d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)\n if self._enter_dunder_call:\n return d, v\n return d\n\n # User-land.\n model = keras.Sequential([\n keras.layers.InputLayer((1,)),\n PlumbingLayer(Foo), # Makes a `Foo` object.\n ])\n # Let's ensure Keras graph history is preserved by composing the models.\n model = keras.Model(model.inputs, model(model.outputs))\n # Now we instantiate the model and verify we have a `Foo` object, not a\n # `Tensor`.\n y = model(tf.convert_to_tensor([[7.]]))\n self.assertIsInstance(y, Foo)\n # Confirm that (custom) loss sees `Foo` instance, not Tensor.\n obtained_prediction_box = [None]\n def custom_loss(y_obs, y_pred):\n del y_obs\n obtained_prediction_box[0] = y_pred\n return y_pred\n # Apparently `compile` calls the loss function enough to trigger the\n # side-effect.\n model.compile('SGD', loss=custom_loss)\n self.assertIsInstance(obtained_prediction_box[0], Foo)\n\n\nclass ConvertInnerNodeDataTest(tf.test.TestCase):\n\n def test_convert_inner_node_data(self):\n data = tf_utils.convert_inner_node_data((tf_utils.ListWrapper(['l', 2, 3]),\n tf_utils.ListWrapper(['l', 5, 6])))\n self.assertEqual(data, (['l', 2, 3], ['l', 5, 6]))\n\n data = tf_utils.convert_inner_node_data(((['l', 2, 3], ['l', 5, 6])),\n wrap=True)\n self.assertTrue(all(isinstance(ele, tf_utils.ListWrapper) for ele in data))\n\n\nclass AttrsTest(tf.test.TestCase):\n\n def test_map_structure_with_atomic_accept_attr(self):\n if attr is None:\n self.skipTest('attr module is unavailable.')\n\n @attr.s(frozen=True)\n class Foo(object):\n\n bar = attr.ib()\n\n self.assertEqual(\n Foo(2),\n tf_utils.map_structure_with_atomic(\n is_atomic_fn=lambda x: isinstance(x, int),\n map_fn=lambda x: x + 1,\n nested=Foo(1)))\n\n\nclass TestIsRagged(tf.test.TestCase):\n\n def test_is_ragged_return_true_for_ragged_tensor(self):\n tensor = tf.RaggedTensor.from_row_splits(\n values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])\n self.assertTrue(tf_utils.is_ragged(tensor))\n\n def test_is_ragged_return_false_for_list(self):\n tensor = [1., 2., 3.]\n self.assertFalse(tf_utils.is_ragged(tensor))\n\n\nclass TestIsExtensionType(tf.test.TestCase):\n\n def test_is_extension_type_return_true_for_ragged_tensor(self):\n self.assertTrue(tf_utils.is_extension_type(\n tf.ragged.constant([[1, 2], [3]])))\n\n def test_is_extension_type_return_true_for_sparse_tensor(self):\n self.assertTrue(tf_utils.is_extension_type(\n tf.sparse.from_dense([[1, 2], [3, 4]])))\n\n def test_is_extension_type_return_false_for_dense_tensor(self):\n self.assertFalse(tf_utils.is_extension_type(\n tf.constant([[1, 2], [3, 4]])))\n\n def test_is_extension_type_return_false_for_list(self):\n tensor = [1., 2., 3.]\n self.assertFalse(tf_utils.is_extension_type(tensor))\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Normalization preprocessing layer.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.engine import base_preprocessing_layer\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.experimental.preprocessing.Normalization', v1=[])\nclass Normalization(base_preprocessing_layer.PreprocessingLayer):\n \"\"\"Feature-wise normalization of the data.\n\n This layer will coerce its inputs into a distribution centered around\n 0 with standard deviation 1. It accomplishes this by precomputing the mean and\n variance of the data, and calling (input-mean)/sqrt(var) at runtime.\n\n What happens in `adapt`: Compute mean and variance of the data and store them\n as the layer's weights. `adapt` should be called before `fit`, `evaluate`,\n or `predict`.\n\n Args:\n axis: Integer or tuple of integers, the axis or axes that should be\n \"kept\". These axes are not be summed over when calculating the\n normalization statistics. By default the last axis, the `features` axis\n is kept and any `space` or `time` axes are summed. Each element in the\n the axes that are kept is normalized independently. If `axis` is set to\n 'None', the layer will perform scalar normalization (dividing the input\n by a single scalar value). The `batch` axis, 0, is always summed over\n (`axis=0` is not allowed).\n mean: The mean value(s) to use during normalization. The passed value(s)\n will be broadcast to the shape of the kept axes above; if the value(s)\n cannot be broadcast, an error will be raised when this layer's build()\n method is called.\n variance: The variance value(s) to use during normalization. The passed\n value(s) will be broadcast to the shape of the kept axes above; if the\n value(s)cannot be broadcast, an error will be raised when this layer's\n build() method is called.\n\n Examples:\n\n Calculate the mean and variance by analyzing the dataset in `adapt`.\n\n >>> adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32)\n >>> input_data = np.array([[1.], [2.], [3.]], np.float32)\n >>> layer = Normalization()\n >>> layer.adapt(adapt_data)\n >>> layer(input_data)\n <tf.Tensor: shape=(3, 1), dtype=float32, numpy=\n array([[-1.4142135 ],\n [-0.70710677],\n [ 0. ]], dtype=float32)>\n\n Pass the mean and variance directly.\n\n >>> input_data = np.array([[1.], [2.], [3.]], np.float32)\n >>> layer = Normalization(mean=3., variance=2.)\n >>> layer(input_data)\n <tf.Tensor: shape=(3, 1), dtype=float32, numpy=\n array([[-1.4142135 ],\n [-0.70710677],\n [ 0. 
]], dtype=float32)>\n \"\"\"\n\n def __init__(self, axis=-1, mean=None, variance=None, **kwargs):\n super(Normalization, self).__init__(stateful=True, streaming=True, **kwargs)\n base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True)\n\n # Standardize `axis` to a tuple.\n if axis is None:\n axis = ()\n elif isinstance(axis, int):\n axis = (axis,)\n else:\n axis = tuple(axis)\n if 0 in axis:\n raise ValueError('The argument \\'axis\\' may not be 0.')\n self.axis = axis\n\n # Set `mean` and `variance` if passed.\n if isinstance(mean, tf.Variable):\n raise ValueError('Normalization does not support passing a Variable '\n 'for the `mean` init arg.')\n if isinstance(variance, tf.Variable):\n raise ValueError('Normalization does not support passing a Variable '\n 'for the `variance` init arg.')\n if mean is not None and variance is not None:\n mean = convert_to_ndarray(mean)\n variance = convert_to_ndarray(variance)\n elif mean is not None or variance is not None:\n raise ValueError(\n 'When setting values directly, both `mean` and `variance` '\n 'must be set. Got mean: {} and variance: {}'.format(mean, variance))\n self.mean_val = mean\n self.variance_val = variance\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if len(input_shape) == 1:\n input_shape = input_shape + [1]\n ndim = len(input_shape)\n\n if any(a < 1 - ndim or a >= ndim for a in self.axis):\n raise ValueError('All `axis` values must be in the range '\n '[1 - ndim, ndim - 1]. Found '\n 'ndim: `{}`, axis: {}'.format(ndim, self.axis))\n\n # Axes to be kept, replacing negative values with positive equivalents.\n # Sorted to avoid transposing axes.\n self._keep_axis = sorted([d if d >= 0 else d + ndim for d in self.axis])\n # Axes to be reduced.\n self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis]\n # 1 if an axis should be reduced, 0 otherwise.\n self._reduce_axis_mask = [\n 0 if d in self._keep_axis else 1 for d in range(ndim)\n ]\n # Broadcast any reduced axes.\n self._broadcast_shape = [\n input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)\n ]\n # Create variables without keeping reduced axes.\n mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)\n\n self.mean = self.add_weight(\n name='mean',\n shape=mean_and_var_shape,\n dtype=self.dtype,\n initializer=tf.compat.v1.zeros_initializer,\n trainable=False)\n self.variance = self.add_weight(\n name='variance',\n shape=mean_and_var_shape,\n dtype=self.dtype,\n initializer=tf.compat.v1.ones_initializer,\n trainable=False)\n self.count = self.add_weight(\n name='count',\n shape=(),\n dtype=tf.int64,\n initializer=tf.compat.v1.zeros_initializer,\n trainable=False)\n\n super(Normalization, self).build(input_shape)\n\n if (self.mean_val is not None and self.variance_val is not None):\n mean_val = self.mean_val * np.ones(mean_and_var_shape)\n variance_val = self.variance_val * np.ones(mean_and_var_shape)\n self.mean.assign(mean_val)\n self.variance.assign(variance_val)\n\n self.built = True\n\n def update_state(self, data):\n if not self.built:\n raise RuntimeError('`build` must be called before `update_state`.')\n\n data = self._standardize_inputs(data)\n batch_mean, batch_variance = tf.nn.moments(\n data, axes=self._reduce_axis)\n batch_shape = tf.compat.v1.shape(data, out_type=self.count.dtype)\n batch_reduce_shape = tf.compat.v1.gather(batch_shape, self._reduce_axis)\n batch_count = tf.reduce_prod(batch_reduce_shape)\n\n total_count = batch_count + self.count\n 
batch_weight = (\n tf.cast(batch_count, dtype=self.dtype) /\n tf.cast(total_count, dtype=self.dtype))\n existing_weight = 1. - batch_weight\n\n total_mean = self.mean * existing_weight + batch_mean * batch_weight\n # The variance is computed using the lack-of-fit sum of squares\n # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).\n total_variance = ((self.variance +\n (self.mean - total_mean)**2) * existing_weight +\n (batch_variance +\n (batch_mean - total_mean)**2) * batch_weight)\n self.mean.assign(total_mean)\n self.variance.assign(total_variance)\n self.count.assign(total_count)\n\n def merge_state(self, layers):\n layers = layers + [self]\n if any(not l.built for l in layers):\n raise ValueError(\n 'All layers to be merged must have been adapted to some inputs '\n 'first (otherwise they have no state).')\n\n layer_counts = [l.count for l in layers]\n layer_means = [l.mean for l in layers]\n layer_variances = [l.variance for l in layers]\n\n total_count = tf.reduce_sum(layer_counts)\n layer_weightings = (\n tf.cast(layer_counts, self.dtype) /\n tf.cast(total_count, self.dtype))\n layer_weightings = tf.reshape(\n layer_weightings, shape=[len(layers)] + [1] * self.mean.shape.rank)\n\n total_mean = tf.reduce_sum(layer_means * layer_weightings, axis=0)\n inter_layer_variances = (layer_means - total_mean)**2\n total_variance = tf.reduce_sum(\n ((layer_variances + inter_layer_variances) * layer_weightings), axis=0)\n\n self.mean.assign(total_mean)\n self.variance.assign(total_variance)\n self.count.assign(total_count)\n\n def reset_state(self): # pylint: disable=method-hidden\n if self.built:\n self.mean.assign(tf.compat.v1.zeros_like(self.mean))\n self.variance.assign(tf.compat.v1.ones_like(self.variance))\n self.count.assign(tf.compat.v1.zeros_like(self.count))\n\n def call(self, inputs):\n inputs = self._standardize_inputs(inputs)\n # We need to reshape the mean and variance data to ensure that Tensorflow\n # broadcasts the data correctly.\n mean = tf.reshape(self.mean, self._broadcast_shape)\n variance = tf.reshape(self.variance, self._broadcast_shape)\n return ((inputs - mean) /\n tf.maximum(tf.sqrt(variance), K.epsilon()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def compute_output_signature(self, input_spec):\n return input_spec\n\n def get_config(self):\n config = super(Normalization, self).get_config()\n config.update({'axis': self.axis})\n return config\n\n def set_weights(self, weights):\n \"\"\"Override for set_weights to ensure we can set just mean/var weights.\"\"\"\n if len(weights) == 2:\n weights.append(np.array(0))\n super(Normalization, self).set_weights(weights)\n\n def _standardize_inputs(self, inputs):\n inputs = tf.convert_to_tensor(inputs)\n if inputs.shape.rank == 0:\n inputs = tf.reshape(inputs, [1, 1])\n elif inputs.shape.rank == 1:\n inputs = tf.compat.v1.expand_dims(inputs, 1)\n\n if inputs.dtype != self.dtype:\n inputs = tf.cast(inputs, self.dtype)\n return inputs\n\n\ndef convert_to_ndarray(values):\n if isinstance(values, np.ndarray):\n return values\n elif isinstance(values, tf.Tensor):\n return K.get_value(values)\n else:\n return np.array(values)\n" ]
[ [ "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.compat.v1.executing_eagerly_outside_functions", "tensorflow.python.platform.tf_logging.warning", "tensorflow.compat.v2.init_scope", "tensorflow.compat.v2.is_tensor", "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.__internal__.tf2.enabled", "tensorflow.compat.v2.nest.flatten" ], [ "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.__internal__.decorator.make_decorator" ], [ "tensorflow.compat.v2.__internal__.test.combinations.combine", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.compat.v1.enable_eager_execution" ], [ "tensorflow.compat.v2.TensorShape", "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.compat.v1.expand_dims", "tensorflow.compat.v2.einsum" ], [ "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.sparse.from_dense", "tensorflow.compat.v2.register_tensor_conversion_function", "tensorflow.compat.v2.ragged.constant", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.SparseTensor", "tensorflow.compat.v2.RaggedTensor.from_row_splits", "tensorflow.compat.v2.constant" ], [ "tensorflow.compat.v2.reduce_prod", "tensorflow.compat.v2.compat.v1.shape", "tensorflow.compat.v2.compat.v1.ones_like", "tensorflow.compat.v2.compat.v1.gather", "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.sqrt", "tensorflow.compat.v2.compat.v1.zeros_like", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.compat.v1.expand_dims", "numpy.ones", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.TensorShape", "numpy.array", "tensorflow.compat.v2.nn.moments" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mehdijj/ppd
[ "9e7626a47886d9d2016873d722f97a930e96540d" ]
[ "utils.py" ]
[ "import numpy as np\r\nimport Augmentor\r\nimport os\r\n\r\n\r\ndef _permute_index(l, seed):\r\n \"\"\"\r\n Creates a permutation of np.array([0, ..., l-1]) and its inverse\r\n :param l: length of the array to permute\r\n :param seed: permutation seed\r\n :return: (s, s_inverse) where s is permutation of np.array([0, ..., l-1]) and s_inverse is its inverse\r\n \"\"\"\r\n st0 = np.random.get_state()\r\n s = np.arange(l)\r\n np.random.seed(seed)\r\n np.random.shuffle(s)\r\n s_inverse = np.argsort(s)\r\n np.random.set_state(st0)\r\n return s, s_inverse\r\n\r\n\r\ndef permute(data, seed):\r\n \"\"\"\r\n Permutes images in the data with given seed for each channel.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param seed: permutation seed. If seed=None returns data without permutation\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) of permuted images\r\n \"\"\"\r\n \"\"\"\r\n Permutes images in the data with given seed. If seed=None, returns data without permutation.\r\n Assumes data has shape (nb_images, img_rows, img_cols, nb_channels)\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = data.shape\r\n if seed is None:\r\n return data\r\n l = img_rows * img_cols # length of the permutation array\r\n s, _ = _permute_index(l, seed)\r\n output = np.zeros(data.shape)\r\n for ch in range(nb_channels):\r\n output[:, :, :, ch] = data[:, :, :, ch].reshape(-1, l)[:, s].reshape(-1, img_rows, img_cols)\r\n return output\r\n\r\n\r\ndef ipermute(data, seed):\r\n \"\"\"\r\n inverse of permute\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param seed: permutation seed. If seed=None returns data without permutation\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) of inverse permuted images\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = data.shape\r\n if seed is None:\r\n return data\r\n l = img_rows * img_cols # length of the permutation array\r\n _, s_inverse = _permute_index(l, seed)\r\n output = np.zeros(data.shape)\r\n for ch in range(nb_channels):\r\n output[:, :, :, ch] = data[:, :, :, ch].reshape(-1, l)[:, s_inverse].reshape(-1, img_rows, img_cols)\r\n return output\r\n\r\n\r\ndef fourier(data):\r\n \"\"\"\r\n converts each channel of images in the data to its 2-dimensional discrete Fourier transform.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, 2*nb_channels)\r\n The first half of output channels are magnitude information, the second half are phase info in range (-pi, pi)\r\n \"\"\"\r\n channels = data.shape[-1]\r\n output_shape = list(data.shape)\r\n output_shape[-1] = channels*2\r\n data_f = np.zeros(output_shape)\r\n for i in range(data.shape[0]):\r\n for ch in range(channels):\r\n f = np.fft.fft2(data[i, :, :, ch])\r\n fshift = np.fft.fftshift(f)\r\n magnitude = np.abs(fshift)\r\n phase = np.angle(fshift)\r\n data_f[i, :, :, ch] = magnitude\r\n data_f[i, :, :, ch + channels] = phase\r\n return data_f\r\n\r\n\r\ndef pol2cart(r, theta):\r\n \"\"\"\r\n Convert polar representation of a complex number to cartesian representation\r\n :param r: scalar or numpy array denoting magnitude component of the complex number\r\n :param theta: scalar or numpy array denoting phase of the complex number in radians.\r\n \"\"\"\r\n return r * np.exp(1j * theta)\r\n\r\n\r\ndef ifourier(data_f):\r\n \"\"\"\r\n inverse of fourier function\r\n 
:param data_f: numpy array with shape (nb_images, img_rows, img_cols, 2*nb_channels)\r\n The first half of output channels are magnitude information, the second half are phase info in range (-pi, pi)\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) denoting data in pixel domain.\r\n \"\"\"\r\n channels = int(data_f.shape[-1]/2)\r\n output_shape = list(data_f.shape)\r\n output_shape[-1] = channels\r\n data = np.zeros(output_shape, dtype='complex') # The dtype is now changed to 'complex' not to lose any information.\r\n for i in range(data_f.shape[0]):\r\n for ch in range(channels):\r\n fshift = pol2cart(data_f[i, :, :, ch], data_f[i, :, :, ch + channels])\r\n f = np.fft.ifftshift(fshift)\r\n data[i, :, :, ch] = np.fft.ifft2(f)\r\n return data\r\n\r\n\r\ndef phase2pixel(phase):\r\n \"\"\"\r\n reconstruct pixel domain from phase by adding unity magnitude.\r\n :param phase: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) containing phase component\r\n of two dimensional discrete Fourier transform.\r\n :return: numpy array with same shape as phase denoting pixel reconstruction from phase only\r\n while setting magnitude=1\r\n \"\"\"\r\n magnitude = np.ones(phase.shape)\r\n data_f = np.concatenate((magnitude, phase), axis=3)\r\n return ifourier(data_f)\r\n\r\n\r\ndef pixel2phase(data):\r\n \"\"\"\r\n converts each channel of images in the data to phase component of its 2-dimensional discrete Fourier transform.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array with same shape as data\r\n \"\"\"\r\n channels = data.shape[-1]\r\n return fourier(data)[:, :, :, channels:]\r\n\r\n\r\ndef augment(path_to_training_data, nb_samples):\r\n if os.path.exists(os.path.join(path_to_training_data, 'output')):\r\n print('Augmented data is already saved to {0}'.format(os.path.join(path_to_training_data, 'output')))\r\n return\r\n p = Augmentor.Pipeline(path_to_training_data)\r\n\r\n # augmentation pipeline\r\n p.rotate(probability=0.5, max_left_rotation=10, max_right_rotation=10)\r\n p.zoom(probability=.5, min_factor=0.8, max_factor=1.2)\r\n p.random_distortion(probability=.5, grid_width=6, grid_height=6, magnitude=1)\r\n\r\n print(p.status())\r\n print('{0} samples generated and saved to {1}'.format(nb_samples, os.path.join(path_to_training_data, 'output')))\r\n p.sample(nb_samples)\r\n\r\n\r\ndef load_images_from_folder(folder):\r\n \"\"\"\r\n loads png images and labels from folder. The folder must contain subfolders of images for different labels.\r\n For example, it should contain subfolders 0, 1, 2, ... where each subfolder contains images of the\r\n corresponding label. 
Note that the first time this function is called, it saves images and labels as npy\r\n files in the path of folder for later reference.\r\n :param folder: string of path to the folder.\r\n :return: a tuple (images, labels) of numpy arrays\r\n \"\"\"\r\n images = []\r\n labels = []\r\n if 'images.npy' in os.listdir(folder) and 'labels.npy' in os.listdir(folder):\r\n images = np.load(os.path.join(folder, 'images.npy'))\r\n labels = np.load(os.path.join(folder, 'labels.npy'))\r\n else:\r\n from PIL import Image\r\n for subfolder in os.listdir(folder):\r\n if subfolder.isdigit():\r\n for filename in os.listdir(os.path.join(folder, subfolder)):\r\n img = Image.open(os.path.join(folder, subfolder, filename))\r\n img_arr = np.array(img, dtype='uint8')\r\n images.append(img_arr)\r\n labels.append(int(subfolder))\r\n perm = np.random.permutation(len(labels))\r\n images = np.array(images)[perm]\r\n labels = np.array(labels)[perm]\r\n np.save(os.path.join(folder, 'images'), images)\r\n np.save(os.path.join(folder, 'labels'), labels)\r\n return images, labels\r\n\r\n\r\ndef log_attack(attack_name, adv_x, perturbation_strength, attack_params):\r\n \"\"\"\r\n saves adv_x with name perturbation_strength in folder with attack_name\r\n :param attack_name: string name of attack\r\n :param adv_x: numpy array of adversarial images\r\n :param perturbation_strength: scalar showing perturbation strength of the adversarial images.\r\n used for filename of adv_x\r\n :param attack_params: dictionary of parameters of the attack\r\n \"\"\"\r\n directory = os.path.join('Attack Logs', attack_name)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n import json\r\n with open(os.path.join(directory, 'params' + str(perturbation_strength) + '.txt'), 'w') as file:\r\n file.write(json.dumps(attack_params)) # use `json.loads` to do the reverse\r\n np.save(os.path.join(directory, str(perturbation_strength)), adv_x)\r\n\r\n\r\ndef _read_attack(attack_name, perturbation_strength):\r\n \"\"\"\r\n loads adv_x with perturbation_strength from folder with attack_name\r\n :param attack_name: string of attack name used for folder to save\r\n :param perturbation_strength: a float or string of attack file\r\n \"\"\"\r\n filename = os.path.join('Attack Logs', attack_name, str(perturbation_strength) + '.npy')\r\n return np.load(filename)\r\n\r\n\r\ndef measure_perturbation(x, adv_x, order):\r\n \"\"\"\r\n average perturbation between x and adv_x. 
Note that each image is converted to\r\n a vector of size (img_rows*img_cols*nb_channels) and then norm is calculated.\r\n :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param adv_x: numpy array with same shape as x\r\n :param order: order of the norm (mimics numpy) possible values are np.inf, 1 or 2\r\n :return: a scalar denoting perturbation between x and adv_x averaged over images.\r\n \"\"\"\r\n nb_images, _, _, _ = x.shape\r\n dev = (x-adv_x).reshape(nb_images, -1)\r\n dev_norms = np.linalg.norm(dev, order, axis=1)\r\n return np.mean(dev_norms)\r\n\r\n\r\ndef random_perturb(x, perturbation_strength, order):\r\n \"\"\"\r\n randomly perturbes pixels of x with perturbation_strength such that\r\n measure_perturbation(x, random_perturb(x, perturbation_strength, order), order) = perturbation_strength.\r\n For order=np.inf each pixel is perturbed with either -perturbation_strenth or perturbation_strength.\r\n For order = 1 and order = 2, images of the pixel are perturbed with a uniform random noise with mean zero.\r\n :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param perturbation_strength: a scalar that is strength of noise.\r\n :param order: order of the norm (mimics numpy) possible values are np.inf, 1 or 2\r\n :return: numpy array with same shape as x denoting random perturbation of pixels of x with perturbation_strength\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = x.shape\r\n if order == np.inf:\r\n dev = (np.random.randint(0, 2, size=nb_images*img_rows*img_cols*nb_channels) * 2 * perturbation_strength - perturbation_strength)\r\n elif order == 1:\r\n tmp = np.random.rand(nb_images, img_rows*img_cols*nb_channels) - 0.5\r\n coef = perturbation_strength / np.sum(np.abs(tmp), axis=1)\r\n dev = tmp * np.expand_dims(coef, axis=1)\r\n elif order == 2:\r\n tmp = np.random.rand(nb_images, img_rows*img_cols*nb_channels) - 0.5\r\n coef = perturbation_strength / np.linalg.norm(tmp, 2, axis=1)\r\n dev = tmp * np.expand_dims(coef, axis=1)\r\n else:\r\n raise(ValueError('order should be np.inf, 1 or 2'))\r\n return x + dev.reshape(x.shape)\r\n\r\n\r\ndef read_attack(attack_name):\r\n \"\"\"\r\n reads a dictionary whose keys are perturbation strength and values are numpy array of adversarial test images\r\n :param attack_name: string of attack name (the folder containing adversarial images)\r\n :return: a dictionary with (key, value) as (scalar of perturbation strength, numpy array of adversarial images)\r\n \"\"\"\r\n directory = os.path.join('Attack Logs', attack_name)\r\n out = dict()\r\n for filename in os.listdir(directory):\r\n if filename.endswith('.npy'):\r\n path_to_file = os.path.join(directory, filename)\r\n out[np.float(os.path.splitext(filename)[0])] = np.load(path_to_file)\r\n return out\r\n\r\n\r\ndef log_plot_data(attack_name, header, arr):\r\n \"\"\"\r\n concatenates numpy arrays in arr and saves them as 'plot_data.csv'.\r\n :param attack_name: string of attack name (the folder in which data is to be logged)\r\n :param header: list of strings denoting header name for element of arr\r\n :param arr: list of numpy arrays to be logged. 
For example: [strength, adv_acc, ...]\r\n \"\"\"\r\n import pandas as pd\r\n directory = os.path.join('Attack Logs', attack_name)\r\n tmp = np.concatenate(tuple([np.array(a).reshape(-1, 1) for a in arr]), axis=1)\r\n df = pd.DataFrame(tmp, columns=header)\r\n df.to_csv(os.path.join(directory, 'plot_data'), index=False)\r\n\r\n\r\ndef load_plot_data(attack_name):\r\n \"\"\"\r\n reads data saved with log_plot_data\r\n :param attack_name: string of attack name (the folder to read from)\r\n :return: a pandas dataFrame containing plot data.\r\n \"\"\"\r\n import pandas as pd\r\n path = os.path.join('Attack Logs', attack_name, 'plot_data')\r\n df = pd.read_csv(path)\r\n return df\r\n\r\n\r\ndef mnist_denoise(data):\r\n \"\"\"\r\n denoise MNIST data by making background black.\r\n :param data: numpy array of shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array of denoised data with the same shape as input\r\n \"\"\"\r\n threshold = .45\r\n data[data < threshold] = 0\r\n return data\r\n" ]
[ [ "numpy.expand_dims", "pandas.DataFrame", "numpy.fft.fftshift", "numpy.concatenate", "numpy.mean", "numpy.exp", "numpy.random.randint", "pandas.read_csv", "numpy.arange", "numpy.random.set_state", "numpy.fft.ifftshift", "numpy.load", "numpy.zeros", "numpy.fft.fft2", "numpy.random.rand", "numpy.argsort", "numpy.array", "numpy.random.get_state", "numpy.fft.ifft2", "numpy.abs", "numpy.random.seed", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.ones", "numpy.angle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
HERA-Team/vis_cpu
[ "32de2318efd6690e4a8c6c64aad5eae6f429b10a" ]
[ "tests/test_plot.py" ]
[ "\"\"\"Compare vis_cpu with pyuvsim visibilities.\"\"\"\nimport numpy as np\nfrom pyuvsim.analyticbeam import AnalyticBeam\n\nfrom vis_cpu import conversions, plot\n\nnsource = 10\n\n\ndef test_source_az_za_beam():\n \"\"\"Test function that calculates the Az and ZA positions of sources.\"\"\"\n # Observation latitude and LST\n hera_lat = -30.7215\n lst = 0.78\n\n # Add random sources\n ra = np.random.uniform(low=0.0, high=360.0, size=nsource - 1)\n dec = -30.72 + np.random.random(nsource - 1) * 10.0\n ra = np.deg2rad(ra)\n dec = np.deg2rad(dec)\n\n # Point source coordinate transform, from equatorial to Cartesian\n crd_eq = conversions.point_source_crd_eq(ra, dec)\n\n # Beam model\n beam = AnalyticBeam(type=\"gaussian\", diameter=14.0)\n\n # Calculate source locations and positions\n az, za, beamval = plot._source_az_za_beam(\n lst, crd_eq, beam, ref_freq=100.0e6, latitude=np.deg2rad(hera_lat)\n )\n assert np.all(np.isfinite(az))\n assert np.all(np.isfinite(za))\n # (Values of beamval should be NaN below the horizon)\n\n\ndef test_animate_source_map():\n \"\"\"Test function that animates source positions vs LST.\"\"\"\n # Observation latitude and LSTs\n hera_lat = -30.7215\n lsts = np.linspace(0.0, 2.0 * np.pi, 5)\n\n # Add random sources\n ra = np.random.uniform(low=0.0, high=360.0, size=nsource - 1)\n dec = -30.72 + np.random.random(nsource - 1) * 10.0\n ra = np.deg2rad(ra)\n dec = np.deg2rad(dec)\n\n # Beam model\n beam = AnalyticBeam(type=\"gaussian\", diameter=14.0)\n\n # Generate animation\n anim = plot.animate_source_map(\n ra,\n dec,\n lsts,\n beam,\n interval=200,\n ref_freq=100.0e6,\n latitude=np.deg2rad(hera_lat),\n )\n assert anim is not None\n" ]
[ [ "numpy.random.random", "numpy.isfinite", "numpy.linspace", "numpy.deg2rad", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LBJ-Wade/bilby
[ "b1e02f1dfae03d4939cae9c95eff300c22919689", "b1e02f1dfae03d4939cae9c95eff300c22919689", "b1e02f1dfae03d4939cae9c95eff300c22919689", "b1e02f1dfae03d4939cae9c95eff300c22919689" ]
[ "bilby/gw/source.py", "examples/gw_examples/injection_examples/create_your_own_source_model.py", "bilby/core/prior/analytical.py", "test/bilby_mcmc/test_proposals.py" ]
[ "import numpy as np\n\nfrom ..core import utils\nfrom ..core.utils import logger\nfrom .conversion import bilby_to_lalsimulation_spins\nfrom .utils import (lalsim_GetApproximantFromString,\n lalsim_SimInspiralFD,\n lalsim_SimInspiralChooseFDWaveform,\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1,\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2,\n lalsim_SimInspiralChooseFDWaveformSequence)\n\n\ndef lal_binary_black_hole(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **kwargs):\n \"\"\" A Binary Black Hole waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the two component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total binary angular momentum and the\n orbital angular momentum\n theta_jn: float\n Angle between the total binary angular momentum and the line of sight\n phase: float\n The phase at coalescence\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2', reference_frequency=50.0,\n minimum_frequency=20.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_12=phi_12,\n phi_jl=phi_jl, **waveform_kwargs)\n\n\ndef lal_binary_neutron_star(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, lambda_1, lambda_2,\n **kwargs):\n \"\"\" A Binary Neutron Star waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the two component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total binary angular momentum and the\n orbital angular momentum\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n lambda_1: float\n Dimensionless tidal deformability of mass_1\n lambda_2: float\n Dimensionless tidal deformability of mass_2\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2_NRTidal', reference_frequency=50.0,\n minimum_frequency=20.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_12=phi_12,\n phi_jl=phi_jl, lambda_1=lambda_1, lambda_2=lambda_2, **waveform_kwargs)\n\n\ndef lal_eccentric_binary_black_hole_no_spins(\n frequency_array, mass_1, mass_2, eccentricity, luminosity_distance,\n theta_jn, phase, **kwargs):\n \"\"\" Eccentric binary black hole waveform model using lalsimulation (EccentricFD)\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n eccentricity: float\n The orbital eccentricity of the system\n luminosity_distance: float\n The luminosity distance in megaparsec\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='EccentricFD', reference_frequency=10.0,\n minimum_frequency=10.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n eccentricity=eccentricity, **waveform_kwargs)\n\n\ndef _base_lal_cbc_fd_waveform(\n frequency_array, mass_1, mass_2, luminosity_distance, theta_jn, phase,\n a_1=0.0, a_2=0.0, tilt_1=0.0, tilt_2=0.0, phi_12=0.0, phi_jl=0.0,\n lambda_1=0.0, lambda_2=0.0, eccentricity=0.0, **waveform_kwargs):\n \"\"\" Generate a cbc waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total and orbital angular momenta\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n eccentricity: float\n Binary eccentricity\n lambda_1: float\n Tidal deformability of the more massive object\n lambda_2: float\n Tidal deformability of the less massive object\n kwargs: dict\n Optional keyword arguments\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n import lal\n import lalsimulation as lalsim\n\n waveform_approximant = waveform_kwargs['waveform_approximant']\n reference_frequency = waveform_kwargs['reference_frequency']\n minimum_frequency = waveform_kwargs['minimum_frequency']\n maximum_frequency = waveform_kwargs['maximum_frequency']\n catch_waveform_errors = waveform_kwargs['catch_waveform_errors']\n pn_spin_order = waveform_kwargs['pn_spin_order']\n pn_tidal_order = waveform_kwargs['pn_tidal_order']\n pn_phase_order = waveform_kwargs['pn_phase_order']\n pn_amplitude_order = waveform_kwargs['pn_amplitude_order']\n waveform_dictionary = waveform_kwargs.get(\n 'lal_waveform_dictionary', lal.CreateDict()\n )\n\n approximant = lalsim_GetApproximantFromString(waveform_approximant)\n\n if pn_amplitude_order != 0:\n start_frequency = lalsim.SimInspiralfLow2fStart(\n minimum_frequency, int(pn_amplitude_order), approximant)\n else:\n start_frequency = minimum_frequency\n\n delta_frequency = frequency_array[1] - frequency_array[0]\n\n frequency_bounds = ((frequency_array >= minimum_frequency) *\n (frequency_array <= maximum_frequency))\n\n luminosity_distance = luminosity_distance * 1e6 * utils.parsec\n mass_1 = mass_1 * utils.solar_mass\n mass_2 = mass_2 * utils.solar_mass\n\n iota, spin_1x, spin_1y, spin_1z, spin_2x, 
spin_2y, spin_2z = bilby_to_lalsimulation_spins(\n theta_jn=theta_jn, phi_jl=phi_jl, tilt_1=tilt_1, tilt_2=tilt_2,\n phi_12=phi_12, a_1=a_1, a_2=a_2, mass_1=mass_1, mass_2=mass_2,\n reference_frequency=reference_frequency, phase=phase)\n\n longitude_ascending_nodes = 0.0\n mean_per_ano = 0.0\n\n lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(\n waveform_dictionary, int(pn_spin_order))\n lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(\n waveform_dictionary, int(pn_tidal_order))\n lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(\n waveform_dictionary, int(pn_phase_order))\n lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(\n waveform_dictionary, int(pn_amplitude_order))\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n for key, value in waveform_kwargs.items():\n func = getattr(lalsim, \"SimInspiralWaveformParamsInsert\" + key, None)\n if func is not None:\n func(waveform_dictionary, value)\n\n if waveform_kwargs.get('numerical_relativity_file', None) is not None:\n lalsim.SimInspiralWaveformParamsInsertNumRelData(\n waveform_dictionary, waveform_kwargs['numerical_relativity_file'])\n\n if ('mode_array' in waveform_kwargs) and waveform_kwargs['mode_array'] is not None:\n mode_array = waveform_kwargs['mode_array']\n mode_array_lal = lalsim.SimInspiralCreateModeArray()\n for mode in mode_array:\n lalsim.SimInspiralModeArrayActivateMode(mode_array_lal, mode[0], mode[1])\n lalsim.SimInspiralWaveformParamsInsertModeArray(waveform_dictionary, mode_array_lal)\n\n if lalsim.SimInspiralImplementedFDApproximants(approximant):\n wf_func = lalsim_SimInspiralChooseFDWaveform\n else:\n wf_func = lalsim_SimInspiralFD\n try:\n hplus, hcross = wf_func(\n mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, luminosity_distance, iota, phase,\n longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,\n start_frequency, maximum_frequency, reference_frequency,\n waveform_dictionary, approximant)\n except Exception as e:\n if not catch_waveform_errors:\n raise\n else:\n EDOM = (e.args[0] == 'Internal function call failed: Input domain error')\n if EDOM:\n failed_parameters = dict(mass_1=mass_1, mass_2=mass_2,\n spin_1=(spin_1x, spin_2y, spin_1z),\n spin_2=(spin_2x, spin_2y, spin_2z),\n luminosity_distance=luminosity_distance,\n iota=iota, phase=phase,\n eccentricity=eccentricity,\n start_frequency=start_frequency)\n logger.warning(\"Evaluating the waveform failed with error: {}\\n\".format(e) +\n \"The parameters were {}\\n\".format(failed_parameters) +\n \"Likelihood will be set to -inf.\")\n return None\n else:\n raise\n\n h_plus = np.zeros_like(frequency_array, dtype=complex)\n h_cross = np.zeros_like(frequency_array, dtype=complex)\n\n if len(hplus.data.data) > len(frequency_array):\n logger.debug(\"LALsim waveform longer than bilby's `frequency_array`\" +\n \"({} vs {}), \".format(len(hplus.data.data), len(frequency_array)) +\n \"probably because padded with zeros up to the next power of two length.\" +\n \" Truncating lalsim array.\")\n h_plus = hplus.data.data[:len(h_plus)]\n h_cross = hcross.data.data[:len(h_cross)]\n else:\n h_plus[:len(hplus.data.data)] = hplus.data.data\n h_cross[:len(hcross.data.data)] = hcross.data.data\n\n h_plus *= frequency_bounds\n h_cross *= frequency_bounds\n\n if wf_func == lalsim_SimInspiralFD:\n dt = 1 / hplus.deltaF + (hplus.epoch.gpsSeconds + hplus.epoch.gpsNanoSeconds * 1e-9)\n time_shift = 
np.exp(-1j * 2 * np.pi * dt * frequency_array[frequency_bounds])\n h_plus[frequency_bounds] *= time_shift\n h_cross[frequency_bounds] *= time_shift\n\n return dict(plus=h_plus, cross=h_cross)\n\n\ndef binary_black_hole_roq(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **waveform_arguments):\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2', reference_frequency=20.0)\n waveform_kwargs.update(waveform_arguments)\n return _base_roq_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_jl=phi_jl,\n phi_12=phi_12, lambda_1=0.0, lambda_2=0.0, **waveform_kwargs)\n\n\ndef binary_neutron_star_roq(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, lambda_1, lambda_2, theta_jn, phase,\n **waveform_arguments):\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomD_NRTidal', reference_frequency=20.0)\n waveform_kwargs.update(waveform_arguments)\n return _base_roq_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_jl=phi_jl,\n phi_12=phi_12, lambda_1=lambda_1, lambda_2=lambda_2, **waveform_kwargs)\n\n\ndef _base_roq_waveform(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, lambda_1, lambda_2, phi_jl, theta_jn, phase,\n **waveform_arguments):\n \"\"\"\n See https://git.ligo.org/lscsoft/lalsuite/blob/master/lalsimulation/src/LALSimInspiral.c#L1460\n\n Parameters\n ==========\n frequency_array: np.array\n This input is ignored for the roq source model\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n\n Waveform arguments\n ===================\n Non-sampled extra data used in the source model calculation\n frequency_nodes_linear: np.array\n frequency_nodes_quadratic: np.array\n reference_frequency: float\n approximant: str\n\n Note: for the frequency_nodes_linear and frequency_nodes_quadratic arguments,\n if using data from https://git.ligo.org/lscsoft/ROQ_data, this should be\n loaded as `np.load(filename).T`.\n\n Returns\n =======\n waveform_polarizations: dict\n Dict containing plus and cross modes evaluated at the linear and\n quadratic frequency nodes.\n \"\"\"\n from lal import CreateDict\n frequency_nodes_linear = waveform_arguments['frequency_nodes_linear']\n frequency_nodes_quadratic = waveform_arguments['frequency_nodes_quadratic']\n reference_frequency = waveform_arguments['reference_frequency']\n approximant = lalsim_GetApproximantFromString(\n waveform_arguments['waveform_approximant'])\n\n luminosity_distance = luminosity_distance * 1e6 * utils.parsec\n mass_1 = mass_1 * utils.solar_mass\n mass_2 = mass_2 * utils.solar_mass\n\n waveform_dictionary = CreateDict()\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n 
lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = bilby_to_lalsimulation_spins(\n theta_jn=theta_jn, phi_jl=phi_jl, tilt_1=tilt_1, tilt_2=tilt_2,\n phi_12=phi_12, a_1=a_1, a_2=a_2, mass_1=mass_1, mass_2=mass_2,\n reference_frequency=reference_frequency, phase=phase)\n\n h_linear_plus, h_linear_cross = lalsim_SimInspiralChooseFDWaveformSequence(\n phase, mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, reference_frequency, luminosity_distance, iota,\n waveform_dictionary, approximant, frequency_nodes_linear)\n\n waveform_dictionary = CreateDict()\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n h_quadratic_plus, h_quadratic_cross = lalsim_SimInspiralChooseFDWaveformSequence(\n phase, mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, reference_frequency, luminosity_distance, iota,\n waveform_dictionary, approximant, frequency_nodes_quadratic)\n\n waveform_polarizations = dict()\n waveform_polarizations['linear'] = dict(\n plus=h_linear_plus.data.data, cross=h_linear_cross.data.data)\n waveform_polarizations['quadratic'] = dict(\n plus=h_quadratic_plus.data.data, cross=h_quadratic_cross.data.data)\n\n return waveform_polarizations\n\n\ndef sinegaussian(frequency_array, hrss, Q, frequency, **kwargs):\n tau = Q / (np.sqrt(2.0) * np.pi * frequency)\n temp = Q / (4.0 * np.sqrt(np.pi) * frequency)\n fm = frequency_array - frequency\n fp = frequency_array + frequency\n\n h_plus = ((hrss / np.sqrt(temp * (1 + np.exp(-Q**2)))) *\n ((np.sqrt(np.pi) * tau) / 2.0) *\n (np.exp(-fm**2 * np.pi**2 * tau**2) +\n np.exp(-fp**2 * np.pi**2 * tau**2)))\n\n h_cross = (-1j * (hrss / np.sqrt(temp * (1 - np.exp(-Q**2)))) *\n ((np.sqrt(np.pi) * tau) / 2.0) *\n (np.exp(-fm**2 * np.pi**2 * tau**2) -\n np.exp(-fp**2 * np.pi**2 * tau**2)))\n\n return{'plus': h_plus, 'cross': h_cross}\n\n\ndef supernova(\n frequency_array, realPCs, imagPCs, file_path, luminosity_distance, **kwargs):\n \"\"\" A supernova NR simulation for injections \"\"\"\n\n realhplus, imaghplus, realhcross, imaghcross = np.loadtxt(\n file_path, usecols=(0, 1, 2, 3), unpack=True)\n\n # waveform in file at 10kpc\n scaling = 1e-3 * (10.0 / luminosity_distance)\n\n h_plus = scaling * (realhplus + 1.0j * imaghplus)\n h_cross = scaling * (realhcross + 1.0j * imaghcross)\n return {'plus': h_plus, 'cross': h_cross}\n\n\ndef supernova_pca_model(\n frequency_array, pc_coeff1, pc_coeff2, pc_coeff3, pc_coeff4, pc_coeff5,\n luminosity_distance, **kwargs):\n \"\"\" Supernova signal model \"\"\"\n\n realPCs = kwargs['realPCs']\n imagPCs = kwargs['imagPCs']\n\n pc1 = realPCs[:, 0] + 1.0j * imagPCs[:, 0]\n pc2 = realPCs[:, 1] + 1.0j * imagPCs[:, 1]\n pc3 = realPCs[:, 2] + 1.0j * imagPCs[:, 2]\n pc4 = realPCs[:, 3] + 1.0j * imagPCs[:, 3]\n pc5 = realPCs[:, 4] + 1.0j * imagPCs[:, 5]\n\n # file at 10kpc\n scaling = 1e-23 * (10.0 / luminosity_distance)\n\n h_plus = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +\n pc_coeff4 * pc4 + pc_coeff5 * pc5)\n h_cross = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +\n pc_coeff4 * pc4 + pc_coeff5 * pc5)\n\n return {'plus': h_plus, 'cross': h_cross}\n\n\nprecession_only = {\n \"tilt_1\", \"tilt_2\", \"phi_12\", \"phi_jl\", \"chi_1_in_plane\", \"chi_2_in_plane\",\n}\n\nspin = {\n \"a_1\", \"a_2\", \"tilt_1\", \"tilt_2\", \"phi_12\", 
\"phi_jl\", \"chi_1\", \"chi_2\",\n \"chi_1_in_plane\", \"chi_2_in_plane\",\n}\nmass = {\n \"chirp_mass\", \"mass_ratio\", \"total_mass\", \"mass_1\", \"mass_2\",\n \"symmetric_mass_ratio\",\n}\nprimary_spin_and_q = {\n \"a_1\", \"chi_1\", \"mass_ratio\"\n}\ntidal = {\n \"lambda_1\", \"lambda_2\", \"lambda_tilde\", \"delta_lambda_tilde\"\n}\nphase = {\n \"phase\", \"delta_phase\",\n}\nextrinsic = {\n \"azimuth\", \"zenith\", \"luminosity_distance\", \"psi\", \"theta_jn\",\n \"cos_theta_jn\", \"geocent_time\", \"time_jitter\", \"ra\", \"dec\",\n \"H1_time\", \"L1_time\", \"V1_time\",\n}\n\nPARAMETER_SETS = dict(\n spin=spin, mass=mass, phase=phase, extrinsic=extrinsic,\n tidal=tidal, primary_spin_and_q=primary_spin_and_q,\n intrinsic=spin.union(mass).union(phase).union(tidal),\n precession_only=precession_only,\n)\n", "#!/usr/bin/env python\n\"\"\"\nA script to demonstrate how to use your own source model\n\"\"\"\nimport bilby\nimport numpy as np\n\n# First set up logging and some output directories and labels\noutdir = 'outdir'\nlabel = 'create_your_own_source_model'\nsampling_frequency = 4096\nduration = 1\n\n\n# Here we define out source model - this is the sine-Gaussian model in the\n# frequency domain.\ndef sine_gaussian(f, A, f0, tau, phi0, geocent_time, ra, dec, psi):\n arg = -(np.pi * tau * (f - f0))**2 + 1j * phi0\n plus = np.sqrt(np.pi) * A * tau * np.exp(arg) / 2.\n cross = plus * np.exp(1j * np.pi / 2)\n return {'plus': plus, 'cross': cross}\n\n\n# We now define some parameters that we will inject\ninjection_parameters = dict(A=1e-23, f0=100, tau=1, phi0=0, geocent_time=0,\n ra=0, dec=0, psi=0)\n\n# Now we pass our source function to the WaveformGenerator\nwaveform_generator = bilby.gw.waveform_generator.WaveformGenerator(\n duration=duration, sampling_frequency=sampling_frequency,\n frequency_domain_source_model=sine_gaussian)\n\n# Set up interferometers.\nifos = bilby.gw.detector.InterferometerList(['H1', 'L1'])\nifos.set_strain_data_from_power_spectral_densities(\n sampling_frequency=sampling_frequency, duration=duration,\n start_time=injection_parameters['geocent_time'] - 3)\nifos.inject_signal(waveform_generator=waveform_generator,\n parameters=injection_parameters)\n\n# Here we define the priors for the search. 
We use the injection parameters\n# except for the amplitude, f0, and geocent_time\nprior = injection_parameters.copy()\nprior['A'] = bilby.core.prior.LogUniform(minimum=1e-25, maximum=1e-21, name='A')\nprior['f0'] = bilby.core.prior.Uniform(90, 110, 'f')\n\nlikelihood = bilby.gw.likelihood.GravitationalWaveTransient(\n interferometers=ifos, waveform_generator=waveform_generator)\n\nresult = bilby.core.sampler.run_sampler(\n likelihood, prior, sampler='dynesty', outdir=outdir, label=label,\n resume=False, sample='unif', injection_parameters=injection_parameters)\nresult.plot_corner()\n", "import numpy as np\nfrom scipy.special import erfinv\nfrom scipy.special._ufuncs import xlogy, erf, log1p, stdtrit, gammaln, stdtr, \\\n btdtri, betaln, btdtr, gammaincinv, gammainc\n\nfrom .base import Prior\nfrom ..utils import logger\n\n\nclass DeltaFunction(Prior):\n\n def __init__(self, peak, name=None, latex_label=None, unit=None):\n \"\"\"Dirac delta function prior, this always returns peak.\n\n Parameters\n ==========\n peak: float\n Peak value of the delta function\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n \"\"\"\n super(DeltaFunction, self).__init__(name=name, latex_label=latex_label, unit=unit,\n minimum=peak, maximum=peak, check_range_nonzero=False)\n self.peak = peak\n self._is_fixed = True\n\n def rescale(self, val):\n \"\"\"Rescale everything to the peak with the correct shape.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Rescaled probability, equivalent to peak\n \"\"\"\n return self.peak * val ** 0\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: np.inf if val = peak, 0 otherwise\n\n \"\"\"\n at_peak = (val == self.peak)\n return np.nan_to_num(np.multiply(at_peak, np.inf))\n\n def cdf(self, val):\n return np.ones_like(val) * (val > self.peak)\n\n\nclass PowerLaw(Prior):\n\n def __init__(self, alpha, minimum, maximum, name=None, latex_label=None,\n unit=None, boundary=None):\n \"\"\"Power law with bounds and alpha, spectral index\n\n Parameters\n ==========\n alpha: float\n Power law exponent parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(PowerLaw, self).__init__(name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit,\n boundary=boundary)\n self.alpha = alpha\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the power-law prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n Uniform probability\n\n Returns\n =======\n Union[float, array_like]: Rescaled probability\n \"\"\"\n if self.alpha == -1:\n return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))\n else:\n return (self.minimum ** (1 + self.alpha) + val *\n (self.maximum ** (1 + self.alpha) - self.minimum ** (1 + self.alpha))) ** (1. 
/ (1 + self.alpha))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n if self.alpha == -1:\n return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * self.is_in_prior_range(val)\n else:\n return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /\n (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))) * self.is_in_prior_range(val)\n\n def ln_prob(self, val):\n \"\"\"Return the logarithmic prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float:\n\n \"\"\"\n if self.alpha == -1:\n normalising = 1. / np.log(self.maximum / self.minimum)\n else:\n normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -\n self.minimum ** (1 + self.alpha))\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ln_in_range = np.log(1. * self.is_in_prior_range(val))\n ln_p = self.alpha * np.nan_to_num(np.log(val)) + np.log(normalising)\n\n return ln_p + ln_in_range\n\n def cdf(self, val):\n if self.alpha == -1:\n _cdf = (np.log(val / self.minimum) /\n np.log(self.maximum / self.minimum))\n else:\n _cdf = np.atleast_1d(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1)) / \\\n (self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))\n _cdf = np.minimum(_cdf, 1)\n _cdf = np.maximum(_cdf, 0)\n return _cdf\n\n\nclass Uniform(Prior):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None, boundary=None):\n \"\"\"Uniform prior with bounds\n\n Parameters\n ==========\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Uniform, self).__init__(name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit,\n boundary=boundary)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the power-law prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n Uniform probability\n\n Returns\n =======\n Union[float, array_like]: Rescaled probability\n \"\"\"\n return self.minimum + val * (self.maximum - self.minimum)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n return ((val >= self.minimum) & (val <= self.maximum)) / (self.maximum - self.minimum)\n\n def ln_prob(self, val):\n \"\"\"Return the log prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: log probability of val\n \"\"\"\n return xlogy(1, (val >= self.minimum) & (val <= self.maximum)) - xlogy(1, self.maximum - self.minimum)\n\n def cdf(self, val):\n _cdf = (val - self.minimum) / (self.maximum - self.minimum)\n _cdf = np.minimum(_cdf, 1)\n _cdf = np.maximum(_cdf, 0)\n return _cdf\n\n\nclass LogUniform(PowerLaw):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None, boundary=None):\n \"\"\"Log-Uniform prior with bounds\n\n Parameters\n ==========\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(LogUniform, self).__init__(name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum, alpha=-1, boundary=boundary)\n if self.minimum <= 0:\n logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))\n\n\nclass SymmetricLogUniform(Prior):\n\n def __init__(self, minimum, maximum, name=None, latex_label=None,\n unit=None, boundary=None):\n \"\"\"Symmetric Log-Uniform distributions with bounds\n\n This is identical to a Log-Uniform distribution, but mirrored about\n the zero-axis and subsequently normalized. As such, the distribution\n has support on the two regions [-maximum, -minimum] and [minimum,\n maximum].\n\n Parameters\n ==========\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(SymmetricLogUniform, self).__init__(name=name, latex_label=latex_label,\n minimum=minimum, maximum=maximum, unit=unit,\n boundary=boundary)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the power-law prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n Uniform probability\n\n Returns\n =======\n Union[float, array_like]: Rescaled probability\n \"\"\"\n if isinstance(val, (float, int)):\n if val < 0.5:\n return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))\n else:\n return self.minimum * np.exp(np.log(self.maximum / self.minimum) * (2 * val - 1))\n else:\n vals_less_than_5 = val < 0.5\n rescaled = np.empty_like(val)\n rescaled[vals_less_than_5] = -self.maximum * np.exp(-2 * val[vals_less_than_5] *\n np.log(self.maximum / self.minimum))\n rescaled[~vals_less_than_5] = self.minimum * np.exp(np.log(self.maximum / self.minimum) *\n (2 * val[~vals_less_than_5] - 1))\n return rescaled\n\n def prob(self, val):\n \"\"\"Return the prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n val = np.abs(val)\n return (np.nan_to_num(0.5 / val / np.log(self.maximum / self.minimum)) *\n self.is_in_prior_range(val))\n\n def ln_prob(self, val):\n \"\"\"Return the logarithmic prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float:\n\n \"\"\"\n return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))\n\n def cdf(self, val):\n val = np.atleast_1d(val)\n norm = 0.5 / np.log(self.maximum / self.minimum)\n cdf = np.zeros((len(val)))\n lower_indices = np.where(np.logical_and(-self.maximum <= val, val <= -self.minimum))[0]\n upper_indices = np.where(np.logical_and(self.minimum <= val, val <= self.maximum))[0]\n cdf[lower_indices] = -norm * np.log(-val[lower_indices] / self.maximum)\n cdf[np.where(np.logical_and(-self.minimum < val, val < self.minimum))] = 0.5\n cdf[upper_indices] = 0.5 + norm * np.log(val[upper_indices] / self.minimum)\n cdf[np.where(self.maximum < val)] = 1\n return cdf\n\n\nclass Cosine(Prior):\n\n def __init__(self, minimum=-np.pi / 2, maximum=np.pi / 2, name=None,\n latex_label=None, unit=None, boundary=None):\n \"\"\"Cosine prior with bounds\n\n Parameters\n ==========\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Cosine, self).__init__(minimum=minimum, maximum=maximum, name=name,\n latex_label=latex_label, unit=unit, boundary=boundary)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in cosine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))\n return np.arcsin(val / norm + np.sin(self.minimum))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. 
Defined over [-pi/2, pi/2].\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n return np.cos(val) / 2 * self.is_in_prior_range(val)\n\n def cdf(self, val):\n _cdf = np.atleast_1d((np.sin(val) - np.sin(self.minimum)) /\n (np.sin(self.maximum) - np.sin(self.minimum)))\n _cdf[val > self.maximum] = 1\n _cdf[val < self.minimum] = 0\n return _cdf\n\n\nclass Sine(Prior):\n\n def __init__(self, minimum=0, maximum=np.pi, name=None,\n latex_label=None, unit=None, boundary=None):\n \"\"\"Sine prior with bounds\n\n Parameters\n ==========\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Sine, self).__init__(minimum=minimum, maximum=maximum, name=name,\n latex_label=latex_label, unit=unit, boundary=boundary)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to a uniform in sine prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))\n return np.arccos(np.cos(self.minimum) - val / norm)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val. Defined over [0, pi].\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.sin(val) / 2 * self.is_in_prior_range(val)\n\n def cdf(self, val):\n _cdf = np.atleast_1d((np.cos(val) - np.cos(self.minimum)) /\n (np.cos(self.maximum) - np.cos(self.minimum)))\n _cdf[val > self.maximum] = 1\n _cdf[val < self.minimum] = 0\n return _cdf\n\n\nclass Gaussian(Prior):\n\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Gaussian prior with mean mu and width sigma\n\n Parameters\n ==========\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Gaussian, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gaussian prior.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma\n\n def ln_prob(self, val):\n \"\"\"Return the Log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n\n return -0.5 * ((self.mu - val) ** 2 / self.sigma ** 2 + np.log(2 * np.pi * self.sigma ** 2))\n\n def cdf(self, val):\n return (1 - erf((self.mu - val) / 2 ** 0.5 / self.sigma)) / 2\n\n\nclass Normal(Gaussian):\n \"\"\"A synonym for the Gaussian distribution. 
\"\"\"\n\n\nclass TruncatedGaussian(Prior):\n\n def __init__(self, mu, sigma, minimum, maximum, name=None,\n latex_label=None, unit=None, boundary=None):\n \"\"\"Truncated Gaussian prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Truncated_normal_distribution\n\n Parameters\n ==========\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(TruncatedGaussian, self).__init__(name=name, latex_label=latex_label, unit=unit,\n minimum=minimum, maximum=maximum, boundary=boundary)\n self.mu = mu\n self.sigma = sigma\n\n @property\n def normalisation(self):\n \"\"\" Calculates the proper normalisation of the truncated Gaussian\n\n Returns\n =======\n float: Proper normalisation of the truncated Gaussian\n \"\"\"\n return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate truncated Gaussian prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n return erfinv(2 * val * self.normalisation + erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 \\\n / self.sigma / self.normalisation * self.is_in_prior_range(val)\n\n def cdf(self, val):\n val = np.atleast_1d(val)\n _cdf = (erf((val - self.mu) / 2 ** 0.5 / self.sigma) - erf(\n (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2 / self.normalisation\n _cdf[val > self.maximum] = 1\n _cdf[val < self.minimum] = 0\n return _cdf\n\n\nclass TruncatedNormal(TruncatedGaussian):\n \"\"\"A synonym for the TruncatedGaussian distribution.\"\"\"\n\n\nclass HalfGaussian(TruncatedGaussian):\n def __init__(self, sigma, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"A Gaussian with its mode at zero, and truncated to only be positive.\n\n Parameters\n ==========\n sigma: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(HalfGaussian, self).__init__(mu=0., sigma=sigma, minimum=0., maximum=np.inf,\n name=name, latex_label=latex_label,\n unit=unit, boundary=boundary)\n\n\nclass HalfNormal(HalfGaussian):\n \"\"\"A synonym for the HalfGaussian distribution.\"\"\"\n\n\nclass LogNormal(Prior):\n def __init__(self, mu, sigma, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Log-normal prior with mean mu and width sigma\n\n https://en.wikipedia.org/wiki/Log-normal_distribution\n\n Parameters\n ==========\n mu: float\n Mean of the Gaussian prior\n sigma:\n Width/Standard deviation of the Gaussian prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(LogNormal, self).__init__(name=name, minimum=0., latex_label=latex_label,\n unit=unit, boundary=boundary)\n\n if sigma <= 0.:\n raise ValueError(\"For the LogGaussian prior the standard 
deviation must be positive\")\n\n self.mu = mu\n self.sigma = sigma\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate LogNormal prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))\n\n def prob(self, val):\n \"\"\"Returns the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val <= self.minimum:\n _prob = 0.\n else:\n _prob = np.exp(-(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2)\\\n / np.sqrt(2 * np.pi) / val / self.sigma\n else:\n _prob = np.zeros(val.size)\n idx = (val > self.minimum)\n _prob[idx] = np.exp(-(np.log(val[idx]) - self.mu) ** 2 / self.sigma ** 2 / 2)\\\n / np.sqrt(2 * np.pi) / val[idx] / self.sigma\n return _prob\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val <= self.minimum:\n _ln_prob = -np.inf\n else:\n _ln_prob = -(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2\\\n - np.log(np.sqrt(2 * np.pi) * val * self.sigma)\n else:\n _ln_prob = -np.inf * np.ones(val.size)\n idx = (val > self.minimum)\n _ln_prob[idx] = -(np.log(val[idx]) - self.mu) ** 2\\\n / self.sigma ** 2 / 2 - np.log(np.sqrt(2 * np.pi) * val[idx] * self.sigma)\n return _ln_prob\n\n def cdf(self, val):\n if isinstance(val, (float, int)):\n if val <= self.minimum:\n _cdf = 0.\n else:\n _cdf = 0.5 + erf((np.log(val) - self.mu) / self.sigma / np.sqrt(2)) / 2\n else:\n _cdf = np.zeros(val.size)\n _cdf[val > self.minimum] = 0.5 + erf((\n np.log(val[val > self.minimum]) - self.mu) / self.sigma / np.sqrt(2)) / 2\n return _cdf\n\n\nclass LogGaussian(LogNormal):\n \"\"\"Synonym of LogNormal prior.\"\"\"\n\n\nclass Exponential(Prior):\n def __init__(self, mu, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Exponential prior with mean mu\n\n Parameters\n ==========\n mu: float\n Mean of the Exponential prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Exponential, self).__init__(name=name, minimum=0., latex_label=latex_label,\n unit=unit, boundary=boundary)\n self.mu = mu\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Exponential prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n return -self.mu * log1p(-val)\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val < self.minimum:\n _prob = 0.\n else:\n _prob = np.exp(-val / self.mu) / self.mu\n else:\n _prob = np.zeros(val.size)\n _prob[val >= self.minimum] = np.exp(-val[val >= self.minimum] / self.mu) / self.mu\n return _prob\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val < self.minimum:\n _ln_prob = -np.inf\n else:\n _ln_prob = -val / self.mu - np.log(self.mu)\n else:\n _ln_prob = -np.inf * np.ones(val.size)\n _ln_prob[val >= self.minimum] = -val[val >= self.minimum] / self.mu - np.log(self.mu)\n return _ln_prob\n\n def cdf(self, val):\n if isinstance(val, (float, int)):\n if val < self.minimum:\n _cdf = 0.\n else:\n _cdf = 1. - np.exp(-val / self.mu)\n else:\n _cdf = np.zeros(val.size)\n _cdf[val >= self.minimum] = 1. - np.exp(-val[val >= self.minimum] / self.mu)\n return _cdf\n\n\nclass StudentT(Prior):\n def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,\n unit=None, boundary=None):\n \"\"\"Student's t-distribution prior with number of degrees of freedom df,\n mean mu and scale\n\n https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution\n\n Parameters\n ==========\n df: float\n Number of degrees of freedom for distribution\n mu: float\n Mean of the Student's t-prior\n scale:\n Width of the Student's t-prior\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(StudentT, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)\n\n if df <= 0. or scale <= 0.:\n raise ValueError(\"For the StudentT prior the number of degrees of freedom and scale must be positive\")\n\n self.df = df\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Student's t-prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n if isinstance(val, (float, int)):\n if val == 0:\n rescaled = -np.inf\n elif val == 1:\n rescaled = np.inf\n else:\n rescaled = stdtrit(self.df, val) * self.scale + self.mu\n else:\n rescaled = stdtrit(self.df, val) * self.scale + self.mu\n rescaled[val == 0] = -np.inf\n rescaled[val == 1] = np.inf\n return rescaled\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.exp(self.ln_prob(val))\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return gammaln(0.5 * (self.df + 1)) - gammaln(0.5 * self.df)\\\n - np.log(np.sqrt(np.pi * self.df) * self.scale) - (self.df + 1) / 2 *\\\n np.log(1 + ((val - self.mu) / self.scale) ** 2 / self.df)\n\n def cdf(self, val):\n return stdtr(self.df, (val - self.mu) / self.scale)\n\n\nclass Beta(Prior):\n def __init__(self, alpha, beta, minimum=0, maximum=1, name=None,\n latex_label=None, unit=None, boundary=None):\n \"\"\"Beta distribution\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n This wraps around\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html\n\n Parameters\n ==========\n alpha: float\n first shape parameter\n beta: float\n second shape parameter\n minimum: float\n See superclass\n maximum: float\n See superclass\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Beta, self).__init__(minimum=minimum, maximum=maximum, name=name,\n latex_label=latex_label, unit=unit, boundary=boundary)\n\n if alpha <= 0. or beta <= 0.:\n raise ValueError(\"alpha and beta must both be positive values\")\n\n self.alpha = alpha\n self.beta = beta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Beta prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n return btdtri(self.alpha, self.beta, val) * (self.maximum - self.minimum) + self.minimum\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.exp(self.ln_prob(val))\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n _ln_prob = xlogy(self.alpha - 1, val - self.minimum) + xlogy(self.beta - 1, self.maximum - val)\\\n - betaln(self.alpha, self.beta) - xlogy(self.alpha + self.beta - 1, self.maximum - self.minimum)\n\n # deal with the fact that if alpha or beta are < 1 you get infinities at 0 and 1\n if isinstance(val, (float, int)):\n if np.isfinite(_ln_prob) and self.minimum <= val <= self.maximum:\n return _ln_prob\n return -np.inf\n else:\n _ln_prob_sub = -np.inf * np.ones(val.size)\n idx = np.isfinite(_ln_prob) & (val >= self.minimum) & (val <= self.maximum)\n _ln_prob_sub[idx] = _ln_prob[idx]\n return _ln_prob_sub\n\n def cdf(self, val):\n if isinstance(val, (float, int)):\n if val > self.maximum:\n return 1.\n elif val < self.minimum:\n return 0.\n else:\n return btdtr(self.alpha, self.beta,\n (val - self.minimum) / (self.maximum - self.minimum))\n else:\n _cdf = np.nan_to_num(btdtr(self.alpha, self.beta,\n (val - self.minimum) / (self.maximum - self.minimum)))\n _cdf[val < self.minimum] = 0.\n _cdf[val > self.maximum] = 1.\n return _cdf\n\n\nclass Logistic(Prior):\n def __init__(self, mu, scale, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Logistic distribution\n\n https://en.wikipedia.org/wiki/Logistic_distribution\n\n Parameters\n ==========\n mu: float\n Mean of the distribution\n scale: float\n Width of the distribution\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Logistic, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)\n\n if scale <= 0.:\n raise ValueError(\"For the Logistic prior the scale must be positive\")\n\n self.mu = mu\n self.scale = scale\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Logistic prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n if isinstance(val, (float, int)):\n if val == 0:\n rescaled = -np.inf\n elif val == 1:\n rescaled = np.inf\n else:\n rescaled = self.mu + self.scale * np.log(val / (1. - val))\n else:\n rescaled = np.inf * np.ones(val.size)\n rescaled[val == 0] = -np.inf\n rescaled[(val > 0) & (val < 1)] = self.mu + self.scale\\\n * np.log(val[(val > 0) & (val < 1)] / (1. - val[(val > 0) & (val < 1)]))\n return rescaled\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.exp(self.ln_prob(val))\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return -(val - self.mu) / self.scale -\\\n 2. * np.log(1. 
+ np.exp(-(val - self.mu) / self.scale)) - np.log(self.scale)\n\n def cdf(self, val):\n return 1. / (1. + np.exp(-(val - self.mu) / self.scale))\n\n\nclass Cauchy(Prior):\n def __init__(self, alpha, beta, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Cauchy distribution\n\n https://en.wikipedia.org/wiki/Cauchy_distribution\n\n Parameters\n ==========\n alpha: float\n Location parameter\n beta: float\n Scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Cauchy, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)\n\n if beta <= 0.:\n raise ValueError(\"For the Cauchy prior the scale must be positive\")\n\n self.alpha = alpha\n self.beta = beta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Cauchy prior.\n\n This maps to the inverse CDF. This has been analytically solved for this case.\n \"\"\"\n rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))\n if isinstance(val, (float, int)):\n if val == 1:\n rescaled = np.inf\n elif val == 0:\n rescaled = -np.inf\n else:\n rescaled[val == 1] = np.inf\n rescaled[val == 0] = -np.inf\n return rescaled\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return 1. / self.beta / np.pi / (1. + ((val - self.alpha) / self.beta) ** 2)\n\n def ln_prob(self, val):\n \"\"\"Return the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Log prior probability of val\n \"\"\"\n return - np.log(self.beta * np.pi) - np.log(1. + ((val - self.alpha) / self.beta) ** 2)\n\n def cdf(self, val):\n return 0.5 + np.arctan((val - self.alpha) / self.beta) / np.pi\n\n\nclass Lorentzian(Cauchy):\n \"\"\"Synonym for the Cauchy distribution\"\"\"\n\n\nclass Gamma(Prior):\n def __init__(self, k, theta=1., name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Gamma distribution\n\n https://en.wikipedia.org/wiki/Gamma_distribution\n\n Parameters\n ==========\n k: float\n The shape parameter\n theta: float\n The scale parameter\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n super(Gamma, self).__init__(name=name, minimum=0., latex_label=latex_label,\n unit=unit, boundary=boundary)\n\n if k <= 0 or theta <= 0:\n raise ValueError(\"For the Gamma prior the shape and scale must be positive\")\n\n self.k = k\n self.theta = theta\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Gamma prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n \"\"\"\n return gammaincinv(self.k, val) * self.theta\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n return np.exp(self.ln_prob(val))\n\n def ln_prob(self, val):\n \"\"\"Returns the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val < self.minimum:\n _ln_prob = -np.inf\n else:\n _ln_prob = xlogy(self.k - 1, val) - val / self.theta - xlogy(self.k, self.theta) - gammaln(self.k)\n else:\n _ln_prob = -np.inf * np.ones(val.size)\n idx = (val >= self.minimum)\n _ln_prob[idx] = xlogy(self.k - 1, val[idx]) - val[idx] / self.theta\\\n - xlogy(self.k, self.theta) - gammaln(self.k)\n return _ln_prob\n\n def cdf(self, val):\n if isinstance(val, (float, int)):\n if val < self.minimum:\n _cdf = 0.\n else:\n _cdf = gammainc(self.k, val / self.theta)\n else:\n _cdf = np.zeros(val.size)\n _cdf[val >= self.minimum] = gammainc(self.k, val[val >= self.minimum] / self.theta)\n return _cdf\n\n\nclass ChiSquared(Gamma):\n def __init__(self, nu, name=None, latex_label=None, unit=None, boundary=None):\n \"\"\"Chi-squared distribution\n\n https://en.wikipedia.org/wiki/Chi-squared_distribution\n\n Parameters\n ==========\n nu: int\n Number of degrees of freedom\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n boundary: str\n See superclass\n \"\"\"\n\n if nu <= 0 or not isinstance(nu, int):\n raise ValueError(\"For the ChiSquared prior the number of degrees of freedom must be a positive integer\")\n\n super(ChiSquared, self).__init__(name=name, k=nu / 2., theta=2.,\n latex_label=latex_label, unit=unit, boundary=boundary)\n\n @property\n def nu(self):\n return int(self.k * 2)\n\n @nu.setter\n def nu(self, nu):\n self.k = nu / 2.\n\n\nclass FermiDirac(Prior):\n def __init__(self, sigma, mu=None, r=None, name=None, latex_label=None,\n unit=None):\n \"\"\"A Fermi-Dirac type prior, with a fixed lower boundary at zero\n (see, e.g. Section 2.3.5 of [1]_). The probability distribution\n is defined by Equation 22 of [1]_.\n\n Parameters\n ==========\n sigma: float (required)\n The range over which the attenuation of the distribution happens\n mu: float\n The point at which the distribution falls to 50% of its maximum\n value\n r: float\n A value giving mu/sigma. This can be used instead of specifying\n mu.\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n\n References\n ==========\n\n .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1\n <https:arxiv.org/abs/1705.08978v1>`_, 2017.\n \"\"\"\n super(FermiDirac, self).__init__(name=name, latex_label=latex_label, unit=unit, minimum=0.)\n\n self.sigma = sigma\n\n if mu is None and r is None:\n raise ValueError(\"For the Fermi-Dirac prior either a 'mu' value or 'r' \"\n \"value must be given.\")\n\n if r is None and mu is not None:\n self.mu = mu\n self.r = self.mu / self.sigma\n else:\n self.r = r\n self.mu = self.sigma * self.r\n\n if self.r <= 0. 
or self.sigma <= 0.:\n raise ValueError(\"For the Fermi-Dirac prior the values of sigma and r \"\n \"must be positive.\")\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n This maps to the inverse CDF. This has been analytically solved for this case,\n see Equation 24 of [1]_.\n\n References\n ==========\n\n .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1\n <https:arxiv.org/abs/1705.08978v1>`_, 2017.\n \"\"\"\n inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +\n np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)\n\n # if val is 1 this will cause inv to be negative (due to numerical\n # issues), so return np.inf\n if isinstance(val, (float, int)):\n if inv < 0:\n return np.inf\n else:\n return -self.sigma * np.log(inv)\n else:\n idx = inv >= 0.\n tmpinv = np.inf * np.ones(len(np.atleast_1d(val)))\n tmpinv[idx] = -self.sigma * np.log(inv[idx])\n return tmpinv\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n return np.exp(self.ln_prob(val))\n\n def ln_prob(self, val):\n \"\"\"Return the log prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n Union[float, array_like]: Log prior probability of val\n \"\"\"\n\n norm = -np.log(self.sigma * np.log(1. + np.exp(self.r)))\n if isinstance(val, (float, int)):\n if val < self.minimum:\n return -np.inf\n else:\n return norm - np.logaddexp((val / self.sigma) - self.r, 0.)\n else:\n val = np.atleast_1d(val)\n lnp = -np.inf * np.ones(len(val))\n idx = val >= self.minimum\n lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)\n return lnp\n\n\nclass Categorical(Prior):\n def __init__(self, ncategories, name=None, latex_label=None,\n unit=None, boundary=\"periodic\"):\n \"\"\" An equal-weighted Categorical prior\n\n Parameters:\n -----------\n ncategories: int\n The number of available categories. The prior mass support is then\n integers [0, ncategories - 1].\n name: str\n See superclass\n latex_label: str\n See superclass\n unit: str\n See superclass\n \"\"\"\n\n minimum = 0\n # Small delta added to help with MCMC walking\n maximum = ncategories - 1 + 1e-15\n super(Categorical, self).__init__(\n name=name, latex_label=latex_label, minimum=minimum,\n maximum=maximum, unit=unit, boundary=boundary)\n self.ncategories = ncategories\n self.categories = np.arange(self.minimum, self.maximum)\n self.p = 1 / self.ncategories\n self.lnp = -np.log(self.ncategories)\n\n def rescale(self, val):\n \"\"\"\n 'Rescale' a sample from the unit line element to the categorical prior.\n\n This maps to the inverse CDF. 
This has been analytically solved for this case.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n Uniform probability\n\n Returns\n =======\n Union[float, array_like]: Rescaled probability\n \"\"\"\n return np.floor(val * (1 + self.maximum))\n\n def prob(self, val):\n \"\"\"Return the prior probability of val.\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float: Prior probability of val\n \"\"\"\n if isinstance(val, (float, int)):\n if val in self.categories:\n return self.p\n else:\n return 0\n else:\n val = np.atleast_1d(val)\n probs = np.zeros_like(val, dtype=np.float64)\n idxs = np.isin(val, self.categories)\n probs[idxs] = self.p\n return probs\n\n def ln_prob(self, val):\n \"\"\"Return the logarithmic prior probability of val\n\n Parameters\n ==========\n val: Union[float, int, array_like]\n\n Returns\n =======\n float:\n\n \"\"\"\n if isinstance(val, (float, int)):\n if val in self.categories:\n return self.lnp\n else:\n return -np.inf\n else:\n val = np.atleast_1d(val)\n probs = -np.inf * np.ones_like(val, dtype=np.float64)\n idxs = np.isin(val, self.categories)\n probs[idxs] = self.lnp\n return probs\n", "import os\nimport copy\nimport shutil\nimport unittest\nimport inspect\nimport importlib\nimport sys\nimport time\nimport bilby\nfrom bilby.bilby_mcmc.chain import Chain, Sample\nfrom bilby.bilby_mcmc import proposals\nfrom bilby.bilby_mcmc.utils import LOGLKEY, LOGPKEY\nimport numpy as np\n\n\nclass GivenProposal(proposals.BaseProposal):\n \"\"\" A simple proposal class used for testing \"\"\"\n def __init__(self, priors, weight=1, subset=None, sigma=0.01):\n super(GivenProposal, self).__init__(priors, weight, subset)\n\n def propose(self, chain):\n log_factor = 0\n return self.given_sample, log_factor\n\n\nclass TestBaseProposals(unittest.TestCase):\n def create_priors(self, ndim=2, boundary=None):\n priors = bilby.core.prior.PriorDict({\n f'x{i}': bilby.core.prior.Uniform(-10, 10, name=f'x{i}', boundary=boundary)\n for i in range(ndim)\n })\n priors[\"fixedA\"] = bilby.core.prior.DeltaFunction(1)\n return priors\n\n def create_random_sample(self, ndim=2):\n p = {f\"x{i}\": np.random.normal(0, 1) for i in range(ndim)}\n p[LOGLKEY] = np.random.normal(0, 1)\n p[LOGPKEY] = -1\n p[\"fixedA\"] = 1\n return Sample(p)\n\n def create_chain(self, n=1000, ndim=2):\n initial_sample = self.create_random_sample(ndim)\n chain = Chain(initial_sample=initial_sample)\n for i in range(n):\n chain.append(self.create_random_sample(ndim))\n return chain\n\n def test_GivenProposal(self):\n priors = self.create_priors()\n chain = self.create_chain()\n proposal = GivenProposal(priors)\n proposal.given_sample = self.create_random_sample()\n prop, _ = proposal(chain)\n self.assertEqual(prop, proposal.given_sample)\n\n def test_noboundary(self):\n priors = self.create_priors()\n chain = self.create_chain()\n proposal = GivenProposal(priors)\n\n sample = self.create_random_sample()\n sample[\"x0\"] = priors[\"x0\"].maximum + 0.5\n proposal.given_sample = sample\n\n prop, _ = proposal(chain)\n self.assertEqual(prop, proposal.given_sample)\n self.assertEqual(prop[\"x0\"], priors[\"x0\"].maximum + 0.5)\n\n def test_periodic_boundary_above(self):\n priors = self.create_priors(boundary=\"periodic\")\n chain = self.create_chain()\n proposal = GivenProposal(priors)\n\n sample = self.create_random_sample()\n sample[\"x0\"] = priors[\"x0\"].maximum + 0.5\n proposal.given_sample = copy.deepcopy(sample)\n\n prop, _ = proposal(chain)\n 
self.assertFalse(prop[\"x0\"] == priors[\"x0\"].maximum + 0.5)\n self.assertEqual(prop[\"x0\"], priors[\"x0\"].minimum + 0.5)\n\n def test_periodic_boundary_below(self):\n priors = self.create_priors(boundary=\"periodic\")\n chain = self.create_chain()\n proposal = GivenProposal(priors)\n\n sample = self.create_random_sample()\n sample[\"x0\"] = priors[\"x0\"].minimum - 0.5\n proposal.given_sample = copy.deepcopy(sample)\n\n prop, _ = proposal(chain)\n self.assertFalse(prop[\"x0\"] == priors[\"x0\"].minimum - 0.5)\n self.assertEqual(prop[\"x0\"], priors[\"x0\"].maximum - 0.5)\n\n\nclass TestProposals(TestBaseProposals):\n def setUp(self):\n self.outdir = \"chain_test\"\n if os.path.isdir(self.outdir) is False:\n os.mkdir(self.outdir)\n\n def tearDown(self):\n if os.path.isdir(self.outdir):\n shutil.rmtree(self.outdir)\n\n def get_simple_proposals(self):\n clsmembers = inspect.getmembers(\n sys.modules[proposals.__name__], inspect.isclass\n )\n clsmembers_clean = []\n for name, cls in clsmembers:\n a = \"Proposal\" in name\n b = \"Base\" not in name\n c = \"Ensemble\" not in name\n d = \"Phase\" not in name\n e = \"Polarisation\" not in name\n f = \"Cycle\" not in name\n g = \"KDE\" not in name\n h = \"NormalizingFlow\" not in name\n if a * b * c * d * e * f * g * h:\n clsmembers_clean.append((name, cls))\n\n return clsmembers_clean\n\n def proposal_check(self, prop, ndim=2, N=100):\n chain = self.create_chain(ndim=ndim)\n\n print(f\"Testing {prop.__class__.__name__}\")\n # Timing and return type\n start = time.time()\n for _ in range(N):\n p, w = prop(chain)\n chain.append(p)\n dt = 1e3 * (time.time() - start) / N\n print(f\"Testing {prop.__class__.__name__}: dt~{dt:0.2g} [ms]\")\n\n self.assertTrue(isinstance(p, Sample))\n self.assertTrue(isinstance(w, (int, float)))\n\n def test_proposal_return_type(self):\n priors = self.create_priors()\n for name, cls in self.get_simple_proposals():\n prop = cls(priors)\n self.proposal_check(prop)\n\n def test_KDE_proposal(self):\n priors = self.create_priors()\n prop = proposals.KDEProposal(priors)\n self.proposal_check(prop, N=20000)\n\n def test_GMM_proposal(self):\n if importlib.util.find_spec(\"sklearn\") is not None:\n priors = self.create_priors()\n prop = proposals.GMMProposal(priors)\n self.proposal_check(prop, N=20000)\n self.assertTrue(prop.trained)\n else:\n print(\"Unable to test GMM as sklearn is not installed\")\n\n def test_NF_proposal(self):\n priors = self.create_priors()\n chain = self.create_chain(10000)\n prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)\n prop.steps_since_refit = 9999\n start = time.time()\n p, w = prop(chain)\n dt = time.time() - start\n print(f\"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]\")\n self.assertTrue(prop.trained)\n\n self.proposal_check(prop)\n\n def test_NF_proposal_15D(self):\n ndim = 15\n priors = self.create_priors(ndim)\n chain = self.create_chain(10000, ndim=ndim)\n prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)\n prop.steps_since_refit = 9999\n start = time.time()\n p, w = prop(chain)\n dt = time.time() - start\n print(f\"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]\")\n self.assertTrue(prop.trained)\n\n self.proposal_check(prop, ndim=ndim)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.exp", "numpy.zeros_like", "numpy.sqrt", "numpy.loadtxt" ], [ "numpy.exp", "numpy.sqrt" ], [ "scipy.special._ufuncs.log1p", "numpy.minimum", "numpy.sqrt", "numpy.arctan", "scipy.special.erfinv", "numpy.nan_to_num", "scipy.special._ufuncs.xlogy", "numpy.zeros_like", "numpy.exp", "numpy.where", "numpy.ones_like", "numpy.arange", "numpy.empty_like", "scipy.special._ufuncs.btdtri", "scipy.special._ufuncs.gammaincinv", "numpy.sin", "numpy.atleast_1d", "scipy.special._ufuncs.stdtr", "scipy.special._ufuncs.erf", "numpy.zeros", "numpy.isin", "numpy.log", "numpy.multiply", "scipy.special._ufuncs.btdtr", "scipy.special._ufuncs.stdtrit", "numpy.tan", "numpy.floor", "numpy.errstate", "numpy.logical_and", "numpy.logaddexp", "numpy.maximum", "numpy.abs", "scipy.special._ufuncs.gammainc", "numpy.isfinite", "scipy.special._ufuncs.betaln", "numpy.cos", "numpy.ones", "scipy.special._ufuncs.gammaln" ], [ "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajclaros/rl_legged_walker
[ "26d0e124ef38045943449c2772b966571117683b", "26d0e124ef38045943449c2772b966571117683b", "26d0e124ef38045943449c2772b966571117683b", "26d0e124ef38045943449c2772b966571117683b" ]
[ "vis/plot_2d_neural_outputs.py", "andrew/generategenomes.py", "jason/xp_figure2_msc.py", "andrew/evoandlearning.py" ]
[ "\nfrom jason.ctrnn import CTRNN\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport random\nimport sys\nimport json\nimport os\nimport math\nfrom util.fitness_functions import fitness_maximize_output_change, fitness_frequency_match\n\n\ndef main():\n trial_seed=1\n sol_seed=6\n size=2\n directory=f\"data/perturbed_networks/nnsize-{size}_sol-seed-{sol_seed}/seed{trial_seed}/\"\n directory=f\"jason/data/ctrnn_snapshots_recovery/\"\n \n\n #plot_2d_neural_outputs\n\n \n #filename = f\"data/evolved_solutions/mga_pop-20_gen-1000/ALL/discovery_mga_best_nn{size}_seed-{seed}.json\" \n plot_2d_neural_outputs( directory, size=2)\n\n\n\ndef plot_2d_neural_outputs(directory, size=2, stepsize=0.01):\n \n filenames = os.listdir(directory)\n\n rows=int( math.ceil( math.sqrt(len(filenames)) ))\n print(rows)\n fig, axs = plt.subplots(rows, rows)\n\n count=0\n for filename in filenames:\n count+=1\n #r=count/2\n #c=count%2+1\n\n filepath=f\"{directory}{filename}\"\n\n ctrnn = CTRNN( size)\n ctrnn.load_json( filepath )\n mid_point=50\n\n fitness, output_history = simulate_ctrnn(ctrnn, stepsize=0.01, init_duration=0, test_duration=100)\n output_history = output_history.transpose(1,0)\n ax1 = plt.subplot(rows,rows,count)\n \n start_of_test=int(mid_point/stepsize)\n\n ax1.plot(output_history[0][0:start_of_test],output_history[1][0:start_of_test], color='r')\n ax1.plot(output_history[0][start_of_test:],output_history[1][start_of_test:], color='b')\n ax1.set_xlim(0,1)\n ax1.set_ylim(0,1)\n\n ax1.set_title(f\"{filename}\\n{fitness:0.2f}\")\n plt.show()\n\n\ndef simulate_ctrnn(ctrnn, stepsize=0.01, init_duration=0, test_duration=10):\n \"\"\"This function simply provides an average change in output per neuron per time. Optionally can include initial duration to prevent transient changes at start of simulation.\"\"\"\n\n\n init_time = np.arange(0.0, init_duration, stepsize)\n test_time = np.arange(0.0, test_duration, stepsize)\n\n output_history=np.zeros((len(test_time),ctrnn.size))\n\n #allow transients to clear\n ctrnn.initializeState( np.zeros( ctrnn.size ))\n\n #ctrnn.initializeState( np.asarray( [-5.0, 10.0] ))\n\n for i in range(len(init_time)):\n ctrnn.step(stepsize)\n \n #evaluate after transient period\n change_in_output=0\n for i in range(len(test_time)):\n output_history[i] = ctrnn.outputs\n pastOutputs = ctrnn.outputs\n ctrnn.step(stepsize)\n currentOutputs = ctrnn.outputs\n change_in_output += np.sum(abs(currentOutputs - pastOutputs) ) \n \n #average over time and per neuron\n return change_in_output / ctrnn.size / test_duration, output_history\n\n\n\nmain()\n", "import numpy as np\nimport os\nfrom fitnessFunction import fitnessFunction\nimport pandas as pd\n\n#filename = \"two_neuron-step1.npy\"\n\ngenome_arr = np.load(filename)\nbest = genome_arr[3]\n#index = 0\nselect_genome = 7\n#lower_fitness = np.load(\"./genomes/scalinggenome-{select_genome}.npy\")\n# uncomment to generate new scaling genome\n\nfor j in range(1):\n #lower_fitness = np.zeros((best.shape))\n #for i in range(lower_fitness.size):\n # lower_fitness[i] = - np.random.uniform(0, 0.4)\n #np.save(f\"./genomes/scalinggenome-{select_genome}.npy\",lower_fitness)\n #genome9 start 6130\n #delete 7\n print(j)\n points = [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]\n for i in range(10000):\n save_fitness = best+lower_fitness*(i*10**-4)\n fit = fitnessFunction(save_fitness)\n print(fit)\n #print(fit, end=\" \", flush=False)\n point_delete = []\n if not (i % 100):\n print(i, end=' ', flush=False)\n for point in 
points:\n if fit<point+0.02 and fit> point-0.02:\n print(f\"index{i}\")\n print(\"saving fit of {} to {}\".format(fit, point))\n # np.save(f\"./perturbations/{point}/p-{select_genome}-{i}.npy\", save_fitness)\n point_delete.append(points.index(point))\n break\n for ind in point_delete:\n points.pop(ind)\n print(f\"points left:{points}\")\n\n#find genome with fitness in range 0.1 genome 5,\n#lowest fitness before before given range\n", "import numpy as np\nimport numpy as np\nimport random\nfrom jason.ctrnn import CTRNN\nfrom jason.rl_ctrnn import RL_CTRNN\nfrom jason.xp_perturb_solutions import run_perturb_experiment\nfrom jason.mga import MicrobialGA\nimport matplotlib.pyplot as plt\nimport sys\nimport json\nimport os\nfrom jason.simple_oscillator_task import SimpleOscillatorTask\nfrom util.fitness_functions import fitness_maximize_output_change\n\nfrom tqdm.contrib.concurrent import process_map\n\nfrom multiprocessing import Pool\n\ndef main():\n run_sweep()\n #main_perturb_AND_recover()\n\n\ndef get_config():\n \n init_flux=3\n learning_duration=5000\n prefix=\"v1_plusminus8_by2\"\n save_filename=f\"jason/figure2/{prefix}_fig2_data_{learning_duration/1000}k_initflux-{init_flux}.csv\"\n save_dat_filename=f\"jason/figure2/{prefix}_fig2_{learning_duration/1000}k_initflux-{init_flux}.dat\"\n performance_bias=0.01 #0.03\n performance_update_rate=0.001 #0.05 0.03\n flux_convergence= 1.5 #1.5\n performance_bias=0.05 #0.03\n performance_update_rate=0.001 #0.05 0.03\n running_window_mode=True\n running_window_size=2000 # 2000 = 20 seconds ~= 0.001\n ignore_transients=100 #20\n ###########################\n\n\n return init_flux, learning_duration, save_filename, save_dat_filename, performance_bias, performance_update_rate,\\\n flux_convergence, performance_bias, performance_update_rate, running_window_mode, running_window_size, ignore_transients\n\n\n\ndef run_sweep():\n\n #plusminus 2\n w00s=[ 4, 5, 6, 7, 8 ] #+/- 2 6\n w01s=[16, 15, 14] #+/- 2 \n w10s=[-16,-15, -14 ] #+/- 2\n w11s=[ 2, 3, 4, 5, 6 ] #+/- 2 4\n\n #close range within\n adj=8\n adj_inc=2\n w00s= range(6-adj,6+adj+1,adj_inc)\n w01s= range(16,16-adj-1,-adj_inc)\n w10s= range(-16,-16+adj+1, adj_inc)\n w11s= range(4-adj,4+adj+1,adj_inc)\n\n #general sweep\n # w00s= range(-12, 13, 6)\n # w01s= range(-12, 13, 6)\n # w10s= range(-12, 13, 6)\n # w11s= range(-12, 13, 6)\n\n\n THREAD_COUNT=10\n p = Pool(THREAD_COUNT)\n\n sweep = get_sweep( w00s, w01s, w10s, w11s)\n init_flux, learning_duration, save_filename, save_dat_filename, performance_bias, performance_update_rate, \\\n flux_convergence, performance_bias, performance_update_rate, running_window_mode, running_window_size, ignore_transients = get_config()\n \n \n \n line=\"init_fit,final_fit,init_est_dist,final_est_dist,\"\n line+=\"init_w00,init_w01,init_w10,init_w11,\"\n line+=\"final_w00,final_w01,final_w10,final_w11,\"\n # for i in range(learning_duration+1):\n # line+=f\"ARP{i},\"\n \n if not os.path.exists(save_filename):\n print(\"File does not exist, writing to new file\")\n write_to_file( save_filename, line,'w' )\n write_to_file( save_dat_filename, \"\",'w' ) #make sure file is created\n print( len(sweep) )\n sweep = get_sweep(w00s, w01s, w10s, w11s)\n r = process_map(main_recover, sweep, max_workers=THREAD_COUNT, chunksize=1)\n #data = p.map(main_recover, sweep )\n\n\ndef get_sweep( w00s, w01s, w10s, w11s):\n params = []\n for w00 in w00s:\n for w01 in w01s:\n for w10 in w10s:\n for w11 in w11s:\n params.append((w00, w01, w10, w11))\n \n return params\n\n\n\ndef main_recover( 
params ):\n\n init_flux, learning_duration, save_filename, save_dat_filename, performance_bias, performance_update_rate, \\\n flux_convergence, performance_bias, performance_update_rate, running_window_mode, running_window_size, ignore_transients = get_config()\n \n\n w00s=[params[0]]\n w01s=[params[1]]\n w10s=[params[2]]\n w11s=[params[3]]\n\n weight_range=16\n bias_range=16\n tc_min=1\n tc_max=1\n\n\n show_plots=False\n show_subplots=False\n seed=1\n size=2\n nnsize=size\n sol_seed=6\n \n\n seeds=[0] #range(10)\n sol_seeds=[1] #4 is best in 10nnsize\n nnsizes=[2]\n test_duration=10 #?\n\n #Load a previously evolved example ctrnn\n best_nn = CTRNN( nnsize, weight_range=weight_range, bias_range=bias_range, tc_min=tc_min, tc_max=tc_max )\n best_evolved_filename=f\"data/evolved_solutions/mga_pop-20_gen-1000/ALL/discovery_mga_best_nn{nnsize}_seed-{sol_seed}.json\"\n best_nn.load_json( best_evolved_filename )\n\n new_ctrnn = CTRNN( nnsize, weight_range=weight_range, bias_range=bias_range, tc_min=tc_min, tc_max=tc_max )\n orig_params=new_ctrnn.get_normalized_parameters()\n\n #faster to simulate and easier to evolve\n orig_fit=fitness_maximize_output_change(best_nn, test_duration=test_duration)\n\n \n #try systematically perturbing the network \n\n for w00 in w00s:\n for w01 in w01s:\n for w10 in w10s:\n for w11 in w11s:\n new_ctrnn.load_json( best_evolved_filename )\n new_ctrnn.inner_weights[0][0] = w00\n new_ctrnn.inner_weights[0][1] = w01\n new_ctrnn.inner_weights[1][0] = w10\n new_ctrnn.inner_weights[1][1] = w11\n\n #vector difference\n diff_vec = (best_nn.inner_weights - new_ctrnn.inner_weights ) \n #Cartesian distance\n init_est_dist = np.sqrt( np.sum(diff_vec**2) ) \n init_fit=fitness_maximize_output_change(new_ctrnn, test_duration=test_duration)\n\n #used to run recovery\n norm_params = new_ctrnn.get_normalized_parameters()\n\n\n final_fitness, final_ctrnn, arp_timeseries = run_recovery( norm_params, performance_bias=performance_bias, \\\n init_flux=init_flux,\\\n running_window_mode=running_window_mode, running_window_size=running_window_size,\\\n performance_update_rate=performance_update_rate, nnsize=nnsize,learning_duration=learning_duration,\\\n ignore_transients=ignore_transients )\n \n diff_vec = (best_nn.inner_weights - final_ctrnn.inner_weights ) \n final_est_dist = np.sqrt( np.sum(diff_vec**2) ) \n\n \n\n final_w00 = final_ctrnn.inner_weights[0][0]\n final_w01 = final_ctrnn.inner_weights[0][1]\n final_w10 = final_ctrnn.inner_weights[1][0]\n final_w11 = final_ctrnn.inner_weights[1][1]\n \n line2=f\"{init_fit},{final_fitness},{init_est_dist},{final_est_dist},\"\n line2+=f\"{w00},{w01},{w10},{w11},\"\n line2+=f\"{final_w00},{final_w01},{final_w10},{final_w11}\" #\",{arp_timeseries}\"\n\n write_to_file( save_filename, line2,'a' )\n write_to_file( save_dat_filename, line2,'a' )\n #print(line2)\n #quit()\n #print( new_ctrnn.inner_weights )\n #print( f\"fit: {init_fit:.4f}->{final_fitness:.4f} dist: {init_est_dist:.4f}->{final_est_dist:.4f}\" ) \n\n\ndef run_recovery( norm_params, init_flux=1, nnsize=2, weight_range=16, bias_range=16,learning_duration=2000, performance_bias=0.005, \\\n performance_update_rate=0.002, flux_convergence=1.0, show_plots=False, show_subplots=False, save_recover_data_filename=False,\\\n running_window_mode=True, running_window_size=2000, \\\n ignore_transients=0 ):\n \n # Parameters RL-CTRNN specific\n init_flux_amp=init_flux\n max_flux_amp=8\n flux_period_min=4\n flux_period_max=8\n flux_conv_rate=flux_convergence\n learn_rate=1.0\n\n # could be tuned\n 
bias_init_flux_amp=0\n bias_max_flux_amp=0\n bias_flux_period_min=0\n bias_flux_period_max=0\n bias_flux_conv_rate=0\n\n \n convergence_epsilon=0.05\n stop_at_convergence=False\n gaussian_mode=True\n # All Tasks\n stepsize=0.01\n tc_min=1\n tc_max=1\n\n rl_nn = RL_CTRNN( nnsize, weight_range=weight_range, bias_range=bias_range, tc_min=tc_min, tc_max=tc_max,\\\n init_flux_amp=init_flux_amp, max_flux_amp=max_flux_amp, flux_period_min=flux_period_min, flux_period_max=flux_period_max, flux_conv_rate=flux_conv_rate, learn_rate=learn_rate,\\\n gaussian_mode=gaussian_mode, \\\n bias_init_flux_amp=bias_init_flux_amp, bias_max_flux_amp=bias_max_flux_amp, \\\n bias_flux_period_min=bias_flux_period_min,bias_flux_period_max=bias_flux_period_max,\\\n bias_flux_conv_rate=bias_flux_conv_rate)\n\n rl_nn.set_normalized_parameters(norm_params)\n\n task = SimpleOscillatorTask( learning_duration, stepsize, stop_at_convergence, \\\n running_window_mode=running_window_mode, running_window_size=running_window_size, \\\n convergence_epsilon=convergence_epsilon, performance_update_rate=performance_update_rate, performance_bias=performance_bias)\n\n\n\n nn, plot_info, converged = task.simulate(rl_nn, ignore_transients=ignore_transients, \\\n show_plots=show_plots, show_subplots=show_subplots, record_data=True )\n \n final_ctrnn = CTRNN(nnsize, weight_range=weight_range, bias_range=bias_range, tc_min=tc_min, tc_max=tc_max )\n final_ctrnn.set_normalized_parameters( nn.get_normalized_parameters() )\n final_fitness = fitness_maximize_output_change( final_ctrnn) \n\n arp_timeseries=plot_info[\"running_average_performances\"][::100] #only 1 in eveyr 100\n arp_timeseries_string=\"\"\n for val in arp_timeseries:\n arp_timeseries_string+=f\"{val},\"\n\n\n return final_fitness, final_ctrnn, arp_timeseries_string\n \n\n\ndef write_to_file(save_filename, line, flag='a'):\n with open( save_filename, flag) as filehandle:\n filehandle.write( line+\"\\n\" )\n filehandle.close()\n\n\n\nif __name__ == \"__main__\":\n main()", "import ea\nimport leggedwalker\nimport numpy as np\nimport math\nfrom jason.rl_ctrnn import RL_CTRNN\nfrom jason.ctrnn import CTRNN\nfrom walking_task2 import WalkingTask\nimport warnings\nfrom scipy.ndimage.interpolation import shift\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport os\nimport time\nfrom matplotlib.colors import ListedColormap\nimport concurrent.futures #multiprocessing\n\n\nnp.seterr(all='warn')\nwarnings.simplefilter(\"always\")\n# Nervous System Parameters\nN = 2 # Number of neurons in the nervous system\nWR = 16 # Weight range - maps from [-1, 1] to: [-16,16]\nBR = 16 # Bias range - maps from [-1, 1] to: [-16,16]\nTR = 5.0 # Time range - maps from [-1, 1] to: [-5, 5]\nTA = 6.0 # Time add - maps from [-5, 5] to: [1,11]\n\n# Task Parameters\nstepsize = 0.1\n#time = np.arange(0.0, duration, stepsize)\n\nx = cm.get_cmap('tab10')\ncolors = x.colors\nduration = 2000\ndef fitnessFunction(genotype):\n # Create the agent's body\n legged = leggedwalker.LeggedAgent()\n # Create the nervous system\n ns = CTRNN(N)\n # Set the parameters of the nervous system according to the genotype-phenotype map\n weights = genotype[0:N*N]\n ns.setWeights(weights.reshape((N, N)))\n ns.setBiases(genotype[N*N:N*N+N])\n ns.setTimeConstants(genotype[N*N+N:])\n # Initialize the state of the nervous system to some value\n ns.initializeState(np.zeros(N))\n #learner = RL_CTRNN(ns)\n # Loop through simulated time, use Euler Method to step the nervous system and body\n time = np.arange(0.0, duration, 
stepsize)\n for i, t in enumerate(time):\n ns.setInputs(np.array([legged.anglefeedback()]*N)) # Set neuron input to angle feedback based on current body state\n ns.step(stepsize) # Update the nervous system based on inputs\n legged.step1(stepsize, ns.outputs) # Update the body based on nervous system activity\n# fitness_arr[i] = body.cx # track position of body\n #update neurons based on speed of movement (cx(t)-cx(t-1))/dt\n # Calculate the fitness based on distance covered over the duration of time\n# fit = legged.cx/duration\n return legged.cx/duration\n\n\npopsize = 25\ngenesize = N*N + 2*N\nrecombProb = 0.5\nmutatProb = 0.01\ndemesize = 2\ngenerations = 30\n\n\n\n\ninit_flux = 0.1\ndef learningFunction(genotype):\n weights = genotype[0:N*N]\n learner = WalkingTask(size=2,\n duration=duration,\n stepsize=0.1,\n reward_func=None,\n performance_func=None,\n running_window_mode=True,\n running_window_size=4000,\n performance_update_rate=0.05,\n init_flux_amp= init_flux,\n max_flux_amp=40,\n flux_period_min=300,\n flux_period_max=400,\n flux_conv_rate=0.004, learn_rate=0.008,\n bias_init_flux_amp=init_flux,\n bias_max_flux_amp=40,\n bias_flux_period_min=300,\n bias_flux_period_max=400,\n bias_flux_conv_rate=0.004,\n )\n learner.setWeights(weights.reshape((N, N)))\n learner.setBiases(genotype[N*N:N*N+N])\n learner.setTimeConstants(genotype[N*N+N:])\n learner.initializeState(np.zeros(N))\n body = leggedwalker.LeggedAgent()\n learner.simulate(body, learning_start=4000, trackpercent=1.00)\n return body.cx/duration\n\n\n#create dictionary of 10 parallel processes\n#each process is alternating between evo and evo+learn\n#num_process = 4\n#function = {'evo': fitnessFunction, 'learn':learningFunction}\n#function_keys = list(function.keys())\n#genetic = {function_keys[i%2]+f'{i//2}':ea.Microbial(function[function_keys[i%2]], popsize, genesize, recombProb, mutatProb, demesize, generations) for i in range(num_process)}\n#with concurrent.futures.ProcessPoolExecutor() as executor:\n# s = [executor.submit(genetic[function_keys[i%2]+f\"{i//2}\"].run) for i in range(num_process)]\n# for p in s:\n# print('done')\n#style = ['-', ':']\n#\n#results = dict()\n#for i in range(2):\n# results[function_keys[i%2]+'best'] = []\n# results[function_keys[i%2]+'avg'] = []\n#\n#\n#for i in range(num_process):\n# plt.plot(genetic[function_keys[i%2]+f\"{i//2}\"].bestHistory, label=function_keys[i%2], color='r', ls = style[i%2])\n# results[function_keys[i%2]+'best'].append(genetic[function_keys[i%2]+f\"{i//2}\"].bestHistory)\n# plt.plot(genetic[function_keys[i%2]+f\"{i//2}\"].avgHistory, label=function_keys[i%2], color='k', ls = style[i%2])\n# results[function_keys[i%2]+'avg'].append(genetic[function_keys[i%2]+f\"{i//2}\"].avgHistory)\n#plt.plot(np.mean(results['evobest']), label='avgEvoBest', color='c', ls=':')\n#plt.plot(np.mean(results['evoavg']), label='avgEvoAvg', color='y', ls=':')\n#plt.plot(np.mean(results['learnbest']), label='avgLearnBest', color='c', ls='-')\n#plt.plot(np.mean(results['learnavg']), label='avgLearnAvg', color='y', ls='-')\n#plt.xlabel(\"Generations\")\n#plt.ylabel(\"Fitness\")\n#plt.title(f\"Microbial: Best and average fitness\\nBest evo+learn \\ninit flux:{init_flux}\\nT:{duration}s\")\n#plt.legend()\n#plt.show()\n#\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.zeros" ], [ "numpy.load" ], [ "numpy.sum" ], [ "numpy.seterr", "numpy.arange", "numpy.zeros", "matplotlib.cm.get_cmap" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RaphaelMeudec/tf-explain
[ "1a75841762985e55abe19107d09279f68f5731c8" ]
[ "examples/callbacks/mnist.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tf_explain\n\nINPUT_SHAPE = (28, 28, 1)\nNUM_CLASSES = 10\n\nAVAILABLE_DATASETS = {\n 'mnist': tf.keras.datasets.mnist,\n 'fashion_mnist': tf.keras.datasets.fashion_mnist,\n}\nDATASET_NAME = 'fashion_mnist' # Choose between \"mnist\" and \"fashion_mnist\"\n\n# Load dataset\ndataset = AVAILABLE_DATASETS[DATASET_NAME]\n(train_images, train_labels), (test_images, test_labels) = dataset.load_data()\n\n# Convert from (28, 28) images to (28, 28, 1)\ntrain_images = train_images[..., tf.newaxis]\ntest_images = test_images[..., tf.newaxis]\n\n# One hot encore labels 0, 1, .., 9 to [0, 0, .., 1, 0, 0]\ntrain_labels = tf.keras.utils.to_categorical(train_labels, num_classes=NUM_CLASSES)\ntest_labels = tf.keras.utils.to_categorical(test_labels, num_classes=NUM_CLASSES)\n\n# Create model\nimg_input = tf.keras.Input(INPUT_SHAPE)\n\nx = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(img_input)\nx = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='target_layer')(x)\nx = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)\n\nx = tf.keras.layers.Dropout(0.25)(x)\nx = tf.keras.layers.Flatten()(x)\n\nx = tf.keras.layers.Dense(128, activation='relu')(x)\nx = tf.keras.layers.Dropout(0.5)(x)\n\nx = tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')(x)\n\nmodel = tf.keras.Model(img_input, x)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Select a subset of the validation data to examine\n# Here, we choose 5 elements with label \"0\" == [1, 0, 0, .., 0]\nvalidation_class_zero = (np.array([\n el for el, label in zip(test_images, test_labels)\n if np.all(label == np.array([1] + [0] * 9))\n][0:5]), None)\n# Select a subset of the validation data to examine\n# Here, we choose 5 elements with label \"4\" == [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\nvalidation_class_fours = (np.array([\n el for el, label in zip(test_images, test_labels)\n if np.all(label == np.array([0] * 4 + [1] + [0] * 5))\n][0:5]), None)\n\n# Instantiate callbacks\n# class_index value should match the validation_data selected above\ncallbacks = [\n tf_explain.callbacks.GradCAMCallback(validation_class_zero, 'target_layer', class_index=0),\n tf_explain.callbacks.GradCAMCallback(validation_class_fours, 'target_layer', class_index=4),\n tf_explain.callbacks.ActivationsVisualizationCallback(validation_class_zero, layers_name=['target_layer']),\n tf_explain.callbacks.SmoothGradCallback(validation_class_zero, class_index=0, num_samples=15, noise=1.),\n tf_explain.callbacks.IntegratedGradientsCallback(validation_class_zero, class_index=0, n_steps=10),\n]\n\n# Start training\nmodel.fit(train_images, train_labels, epochs=5, callbacks=callbacks)\n" ]
[ [ "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Dropout", "numpy.array", "tensorflow.keras.layers.Flatten", "tensorflow.keras.utils.to_categorical" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
SanjayMarreddi/GameInShape
[ "64d64c2cb7bc472c3319b949f6f0b6b67ea15910" ]
[ "Web_App/GesturePredictor.py" ]
[ "# Importing relevant libraries\r\nimport cv2\r\nimport imutils\r\nimport tflearn\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tflearn.layers.estimator import regression\r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected\r\n\r\n# Import all the functionalities from Switch_Control\r\nfrom Web_App.Switch_Control import *\r\n\r\n# global variables\r\nbg = None\r\n\r\n\r\ndef resizeImage(imageName):\r\n basewidth = 100\r\n img = Image.open(imageName)\r\n wpercent = (basewidth/float(img.size[0]))\r\n hsize = int((float(img.size[1])*float(wpercent)))\r\n img = img.resize((basewidth, hsize), Image.ANTIALIAS)\r\n img.save(imageName)\r\n\r\n\r\ndef run_avg(image, aWeight):\r\n global bg\r\n # initialize the background\r\n if bg is None:\r\n bg = image.copy().astype(\"float\")\r\n return\r\n\r\n # compute weighted average, accumulate it and update the background\r\n cv2.accumulateWeighted(image, bg, aWeight)\r\n\r\n\r\ndef segment(image, threshold=25):\r\n global bg\r\n # find the absolute difference between background and current frame\r\n diff = cv2.absdiff(bg.astype(\"uint8\"), image)\r\n\r\n # threshold the diff image so that we get the foreground\r\n thresholded = cv2.threshold(diff,\r\n threshold,\r\n 255,\r\n cv2.THRESH_BINARY)[1]\r\n\r\n # get the contours in the thresholded image\r\n (cnts, _) = cv2.findContours(thresholded.copy(),\r\n cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n # return None, if no contours detected\r\n if len(cnts) == 0:\r\n return\r\n else:\r\n # based on contour area, get the maximum contour which is the hand\r\n segmented = max(cnts, key=cv2.contourArea)\r\n return (thresholded, segmented)\r\n\r\n\r\ndef getPredictedClass():\r\n\r\n image = cv2.imread('Temp.png')\r\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray_image = cv2.resize(gray_image, (89, 100))\r\n prediction = model.predict([gray_image.reshape(89, 100, 1)])\r\n return np.argmax(prediction), (np.amax(prediction) / (prediction[0][0] + prediction[0][1] + prediction[0][2]))\r\n\r\n\r\n# Model defined\r\nops.reset_default_graph()\r\nconvnet = input_data(shape=[None, 89, 100, 1], name='input')\r\nconvnet = conv_2d(convnet, 32, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = fully_connected(convnet, 1000, activation='relu')\r\nconvnet = dropout(convnet, 0.75)\r\n\r\nconvnet = fully_connected(convnet, 3, activation='softmax')\r\n\r\nconvnet = regression(convnet, optimizer='adam', learning_rate=0.001,\r\n loss='categorical_crossentropy', name='regression')\r\n\r\nmodel = tflearn.DNN(convnet, tensorboard_verbose=0)\r\n\r\n# Load Saved Model\r\nmodel.load(\"Web_App/TrainedModel/GestureRecogModel.tfl\")\r\n\r\n\r\n\"\"\" \r\nNote: This Trained Model for Hand Gesture Recognition is taken from\r\n 
https://github.com/SparshaSaha/Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network \r\n\"\"\"\r\n" ]
[ [ "numpy.amax", "numpy.argmax", "tensorflow.python.framework.ops.reset_default_graph" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
int-brain-lab/ibllib
[ "93be6b98848758e05cdc9398caaf19e6a68f7386", "93be6b98848758e05cdc9398caaf19e6a68f7386" ]
[ "brainbox/plot.py", "ibllib/io/extractors/base.py" ]
[ "\"\"\"\nPlots metrics that assess quality of single units. Some functions here generate plots for the\noutput of functions in the brainbox `single_units.py` module.\n\nRun the following to set-up the workspace to run the docstring examples:\n>>> from brainbox import processing\n>>> import alf.io as aio\n>>> import numpy as np\n>>> import matplotlib.pyplot as plt\n>>> import ibllib.ephys.spikes as e_spks\n# (*Note, if there is no 'alf' directory, make 'alf' directory from 'ks2' output directory):\n>>> e_spks.ks2_to_alf(path_to_ks_out, path_to_alf_out)\n# Load the alf spikes bunch and clusters bunch, and get a units bunch.\n>>> spks_b = aio.load_object(path_to_alf_out, 'spikes')\n>>> clstrs_b = aio.load_object(path_to_alf_out, 'clusters')\n>>> units_b = processing.get_units_bunch(spks_b) # may take a few mins to compute\n\"\"\"\n\nimport time\nfrom warnings import warn\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n# from matplotlib.ticker import StrMethodFormatter\nfrom brainbox import singlecell\nfrom brainbox.metrics import single_units\nfrom brainbox.processing import bincount2D\nfrom brainbox.io.spikeglx import extract_waveforms\nfrom ibllib.io import spikeglx\n\n\ndef feat_vars(units_b, units=None, feat_name='amps', dist='norm', test='ks', cmap_name='coolwarm',\n ax=None):\n '''\n Plots the coefficients of variation of a particular spike feature for all units as a bar plot,\n where each bar is color-coded corresponding to the depth of the max amplitude channel of the\n respective unit.\n\n Parameters\n ----------\n units_b : bunch\n A units bunch containing fields with spike information (e.g. cluster IDs, times, features,\n etc.) for all units.\n units : array-like (optional)\n A subset of all units for which to create the bar plot. (If `None`, all units are used)\n feat_name : string (optional)\n The spike feature to plot.\n dist : string (optional)\n The type of hypothetical null distribution from which the empirical spike feature\n distributions are presumed to belong to.\n test : string (optional)\n The statistical test used to calculate the probability that the empirical spike feature\n distributions come from `dist`.\n cmap_name : string (optional)\n The name of the colormap associated with the plot.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n cv_vals : ndarray\n The coefficients of variation of `feat_name` for each unit.\n p_vals : ndarray\n The probabilites that the distribution for `feat_name` for each unit comes from a\n `dist` distribution based on the `test` statistical test.\n\n See Also\n --------\n metrics.unit_stability\n\n Examples\n --------\n 1) Create a bar plot of the coefficients of variation of the spike amplitudes for all units.\n >>> fig, var_vals, p_vals = bb.plot.feat_vars(units_b)\n '''\n\n # Get units.\n if not (units is None): # we're using a subset of all units\n unit_list = list(units_b['depths'].keys())\n # For each unit in `unit_list`, remove unit from `units_b` if not in `units`.\n [units_b['depths'].pop(unit) for unit in unit_list if not (int(unit) in units)]\n unit_list = list(units_b['depths'].keys()) # get new `unit_list` after removing unit\n\n # Calculate coefficients of variation for all units\n p_vals_b, cv_b = single_units.unit_stability(\n units_b, units=units, feat_names=[feat_name], dist=dist, test=test)\n cv_vals = np.array(tuple(cv_b[feat_name].values()))\n cv_vals = cv_vals * 1e6 if feat_name == 'amps' else cv_vals # convert to uV if amps\n p_vals = np.array(tuple(p_vals_b[feat_name].values()))\n\n # Remove any empty units. This must be done AFTER the above calculations for ALL units so that\n # we can keep direct indexing.\n empty_unit_idxs = np.where([len(units_b['times'][unit]) == 0 for unit in unit_list])[0]\n good_units = [unit for unit in unit_list if unit not in empty_unit_idxs.astype(str)]\n\n # Get mean depths of spikes for good units\n depths = np.asarray([np.mean(units_b['depths'][str(unit)]) for unit in good_units])\n\n # Create unit normalized colormap based on `depths`, sorted by depth.\n cmap = plt.cm.get_cmap(cmap_name)\n depths_norm = depths / np.max(depths)\n rgba = np.asarray([cmap(depth) for depth in np.sort(np.flip(depths_norm))])\n\n # Plot depth-color-coded h bar plot of CVs for `feature` for each unit, where units are\n # sorted descendingly by depth along y-axis.\n if ax is None:\n fig, ax = plt.subplots()\n ax.barh(y=[int(unit) for unit in good_units], width=cv_vals[np.argsort(depths)], color=rgba)\n fig = ax.figure\n cbar = fig.colorbar(plt.cm.ScalarMappable(cmap=cmap), ax=ax)\n max_d = np.max(depths)\n tick_labels = [int(max_d * tick) for tick in (0, 0.2, 0.4, 0.6, 0.8, 1.0)]\n cbar.set_ticks(cbar.get_ticks()) # must call `set_ticks` to call `set_ticklabels`\n cbar.set_ticklabels(tick_labels)\n ax.set_title('CV of {feat}'.format(feat=feat_name))\n ax.set_ylabel('Unit Number (sorted by depth)')\n ax.set_xlabel('CV')\n cbar.set_label('Depth', rotation=-90)\n\n return cv_vals, p_vals\n\n\ndef missed_spikes_est(feat, feat_name, spks_per_bin=20, sigma=5, min_num_bins=50, ax=None):\n '''\n Plots the pdf of an estimated symmetric spike feature distribution, with a vertical cutoff line\n that indicates the approximate fraction of spikes missing from the distribution, assuming the\n true distribution is symmetric.\n\n Parameters\n ----------\n feat : ndarray\n The spikes' feature values.\n feat_name : string\n The spike feature to plot.\n spks_per_bin : int (optional)\n The number of spikes per bin from which to compute the spike feature histogram.\n sigma : int (optional)\n The standard deviation for the gaussian kernel used to compute the pdf from the spike\n feature histogram.\n min_num_bins : int (optional)\n The minimum number of bins used to compute the spike feature histogram.\n ax : 
axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n fraction_missing : float\n The fraction of missing spikes (0-0.5). *Note: If more than 50% of spikes are missing, an\n accurate estimate isn't possible.\n\n See Also\n --------\n single_units.feature_cutoff\n\n Examples\n --------\n 1) Plot cutoff line indicating the fraction of spikes missing from a unit based on the recorded\n unit's spike amplitudes, assuming the distribution of the unit's spike amplitudes is symmetric.\n >>> feat = units_b['amps']['1']\n >>> fraction_missing = bb.plot.missed_spikes_est(feat, feat_name='amps', unit=1)\n '''\n\n # Calculate the feature distribution histogram and fraction of spikes missing.\n fraction_missing, pdf, cutoff_idx = \\\n single_units.missed_spikes_est(feat, spks_per_bin, sigma, min_num_bins)\n\n # Plot.\n if ax is None: # create two axes\n fig, ax = plt.subplots(nrows=1, ncols=2)\n if ax is None or len(ax) == 2: # plot histogram and pdf on two separate axes\n num_bins = int(feat.size / spks_per_bin)\n ax[0].hist(feat, bins=num_bins)\n ax[0].set_xlabel('{0}'.format(feat_name))\n ax[0].set_ylabel('Count')\n ax[0].set_title('Histogram of {0}'.format(feat_name))\n ax[1].plot(pdf)\n ax[1].vlines(cutoff_idx, 0, np.max(pdf), colors='r')\n ax[1].set_xlabel('Bin Number')\n ax[1].set_ylabel('Density')\n ax[1].set_title('PDF Symmetry Cutoff\\n'\n '(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))\n else: # just plot pdf\n ax = ax[0]\n ax.plot(pdf)\n ax.vlines(cutoff_idx, 0, np.max(pdf), colors='r')\n ax.set_xlabel('Bin Number')\n ax.set_ylabel('Density')\n ax.set_title('PDF Symmetry Cutoff\\n'\n '(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))\n\n return fraction_missing\n\n\ndef wf_comp(ephys_file, ts1, ts2, ch, sr=30000, n_ch_probe=385, dtype='int16', car=True,\n col=['b', 'r'], ax=None):\n '''\n Plots two different sets of waveforms across specified channels after (optionally)\n common-average-referencing. In this way, waveforms can be compared to see if there is,\n e.g. drift during the recording, or if two units should be merged, or one unit should be split.\n\n Parameters\n ----------\n ephys_file : string\n The file path to the binary ephys data.\n ts1 : array_like\n A set of timestamps for which to compare waveforms with `ts2`.\n ts2: array_like\n A set of timestamps for which to compare waveforms with `ts1`.\n ch : array-like\n The channels to use for extracting and plotting the waveforms.\n sr : int (optional)\n The sampling rate (in hz) that the ephys data was acquired at.\n n_ch_probe : int (optional)\n The number of channels of the recording.\n dtype: str (optional)\n The datatype represented by the bytes in `ephys_file`.\n car: bool (optional)\n A flag for whether or not to perform common-average-referencing before extracting waveforms\n col: list of strings or float arrays (optional)\n Two elements in the list, where each specifies the color the `ts1` and `ts2` waveforms\n will be plotted in, respectively.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n wf1 : ndarray\n The waveforms for the spikes in `ts1`: an array of shape (#spikes, #samples, #channels).\n wf2 : ndarray\n The waveforms for the spikes in `ts2`: an array of shape (#spikes, #samples, #channels).\n s : float\n The similarity score between the two sets of waveforms, calculated by\n `single_units.wf_similarity`\n\n See Also\n --------\n io.extract_waveforms\n single_units.wf_similarity\n\n Examples\n --------\n 1) Compare first and last 100 spike waveforms for unit1, across 20 channels around the channel\n of max amplitude, and compare the waveforms in the first minute to the waveforms in the fourth\n minutes for unit2, across 10 channels around the mean.\n # Get first and last 100 spikes, and 20 channels around channel of max amp for unit 1:\n >>> ts1 = units_b['times']['1'][:100]\n >>> ts2 = units_b['times']['1'][-100:]\n >>> max_ch = clstrs_b['channels'][1]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 20)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 20, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch + 10)\n >>> wf1, wf2, s = bb.plot.wf_comp(path_to_ephys_file, ts1, ts2, ch)\n # Plot waveforms for unit2 from the first and fourth minutes across 10 channels.\n >>> ts = units_b['times']['2']\n >>> ts1_2 = ts[np.where(ts<60)[0]]\n >>> ts2_2 = ts[np.where(ts>180)[0][:len(ts1)]]\n >>> max_ch = clstrs_b['channels'][2]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 10)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 5, max_ch + 5)\n >>> wf1_2, wf2_2, s_2 = bb.plot.wf_comp(path_to_ephys_file, ts1_2, ts2_2, ch)\n '''\n\n # Ensure `ch` is ndarray\n ch = np.asarray(ch)\n ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch\n\n # Extract the waveforms for these timestamps and compute similarity score.\n wf1 = extract_waveforms(ephys_file, ts1, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,\n car=car)\n wf2 = extract_waveforms(ephys_file, ts2, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,\n car=car)\n s = single_units.wf_similarity(wf1, wf2)\n\n # Plot these waveforms against each other.\n n_ch = ch.size\n if ax is None:\n fig, ax = plt.subplots(nrows=n_ch, ncols=2) # left col is all waveforms, right col is mean\n for cur_ax, cur_ch in enumerate(ch):\n ax[cur_ax][0].plot(wf1[:, :, cur_ax].T, c=col[0])\n ax[cur_ax][0].plot(wf2[:, :, cur_ax].T, c=col[1])\n ax[cur_ax][1].plot(np.mean(wf1[:, :, cur_ax], axis=0), c=col[0])\n ax[cur_ax][1].plot(np.mean(wf2[:, :, cur_ax], axis=0), c=col[1])\n ax[cur_ax][0].set_ylabel('Ch {0}'.format(cur_ch))\n ax[0][0].set_title('All Waveforms. 
S = {:.2f}'.format(s))\n ax[0][1].set_title('Mean Waveforms')\n plt.legend(['1st spike set', '2nd spike set'])\n\n return wf1, wf2, s\n\n\ndef amp_heatmap(ephys_file, ts, ch, sr=30000, n_ch_probe=385, dtype='int16', cmap_name='RdBu',\n car=True, ax=None):\n '''\n Plots a heatmap of the normalized voltage values over time and space for given timestamps and\n channels, after (optionally) common-average-referencing.\n\n Parameters\n ----------\n ephys_file : string\n The file path to the binary ephys data.\n ts: array_like\n A set of timestamps for which to get the voltage values.\n ch : array-like\n The channels to use for extracting the voltage values.\n sr : int (optional)\n The sampling rate (in hz) that the ephys data was acquired at.\n n_ch_probe : int (optional)\n The number of channels of the recording.\n dtype: str (optional)\n The datatype represented by the bytes in `ephys_file`.\n cmap_name : string (optional)\n The name of the colormap associated with the plot.\n car: bool (optional)\n A flag for whether or not to perform common-average-referencing before extracting waveforms\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n v_vals : ndarray\n The voltage values.\n\n Examples\n --------\n 1) Plot a heatmap of the spike amplitudes across 20 channels around the channel of max\n amplitude for all spikes in unit 1.\n >>> ts = units_b['times']['1']\n >>> max_ch = clstrs_b['channels'][1]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 20)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 20, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch + 10)\n >>> bb.plot.amp_heatmap(path_to_ephys_file, ts, ch)\n '''\n # Ensure `ch` is ndarray\n ch = np.asarray(ch)\n ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch\n\n # Get memmapped array of `ephys_file`\n s_reader = spikeglx.Reader(ephys_file, open=True)\n file_m = s_reader.data\n\n # Get voltage values for each peak amplitude sample for `ch`.\n max_amp_samples = (ts * sr).astype(int)\n # Currently this is an annoying way to calculate `v_vals` b/c indexing with multiple values\n # is currently unsupported.\n v_vals = np.zeros((max_amp_samples.size, ch.size))\n for sample in range(max_amp_samples.size):\n v_vals[sample] = file_m[max_amp_samples[sample]:max_amp_samples[sample] + 1, ch]\n if car: # compute spatial noise in chunks, and subtract from `v_vals`.\n # Get subset of time (from first to last max amp sample)\n n_chunk_samples = 5e6 # number of samples per chunk\n n_chunks = np.ceil((max_amp_samples[-1] - max_amp_samples[0]) /\n n_chunk_samples).astype('int')\n # Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the\n # samples that make up the first chunk.\n chunk_sample = np.arange(max_amp_samples[0], max_amp_samples[-1], n_chunk_samples,\n dtype=int)\n chunk_sample = np.append(chunk_sample, max_amp_samples[-1])\n noise_s_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16) # spatial noise array\n # Give time estimate for computing `noise_s_chunks`.\n t0 = time.perf_counter()\n np.median(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)\n dt = time.perf_counter() - t0\n print('Performing spatial CAR before waveform extraction. 
Estimated time is {:.2f} mins.'\n ' ({})'.format(dt * n_chunks / 60, time.ctime()))\n # Compute noise for each chunk, then take the median noise of all chunks.\n for chunk in range(n_chunks):\n noise_s_chunks[chunk, :] = np.median(\n file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0)\n noise_s = np.median(noise_s_chunks, axis=0)\n v_vals -= noise_s[None, :]\n print('Done. ({})'.format(time.ctime()))\n s_reader.close()\n\n # Plot heatmap.\n if ax is None:\n fig, ax = plt.subplots()\n v_vals_norm = (v_vals / np.max(abs(v_vals))).T\n cbar_map = ax.imshow(v_vals_norm, cmap=cmap_name, aspect='auto',\n extent=[ts[0], ts[-1], ch[0], ch[-1]], origin='lower')\n ax.set_yticks(np.arange(ch[0], ch[-1], 5))\n ax.set_ylabel('Channel Numbers')\n ax.set_xlabel('Time (s)')\n ax.set_title('Voltage Heatmap')\n fig = ax.figure\n cbar = fig.colorbar(cbar_map, ax=ax)\n cbar.set_label('V', rotation=-90)\n\n return v_vals\n\n\ndef firing_rate(ts, hist_win=0.01, fr_win=0.5, n_bins=10, show_fr_cv=True, ax=None):\n '''\n Plots the instantaneous firing rate of for given spike timestamps over time, and optionally\n overlays the value of the coefficient of variation of the firing rate for a specified number\n of bins.\n\n Parameters\n ----------\n ts : ndarray\n The spike timestamps from which to compute the firing rate.\n hist_win : float (optional)\n The time window (in s) to use for computing spike counts.\n fr_win : float (optional)\n The time window (in s) to use as a moving slider to compute the instantaneous firing rate.\n n_bins : int (optional)\n The number of bins in which to compute coefficients of variation of the firing rate.\n show_fr_cv : bool (optional)\n A flag for whether or not to compute and show the coefficients of variation of the firing\n rate for `n_bins`.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n fr: ndarray\n The instantaneous firing rate over time (in hz).\n cv: float\n The mean coefficient of variation of the firing rate of the `n_bins` number of coefficients\n computed. Can only be returned if `show_fr_cv` is True.\n cvs: ndarray\n The coefficients of variation of the firing for each bin of `n_bins`. 
Can only be returned\n if `show_fr_cv` is True.\n\n See Also\n --------\n single_units.firing_rate_cv\n singecell.firing_rate\n\n Examples\n --------\n 1) Plot the firing rate for unit 1 from the time of its first to last spike, showing the cv\n of the firing rate for 10 evenly spaced bins.\n >>> ts = units_b['times']['1']\n >>> fr, cv, cvs = bb.plot.firing_rate(ts)\n '''\n\n if ax is None:\n fig, ax = plt.subplots()\n if not (show_fr_cv): # compute just the firing rate\n fr = singlecell.firing_rate(ts, hist_win=hist_win, fr_win=fr_win)\n else: # compute firing rate and coefficients of variation\n cv, cvs, fr = single_units.firing_rate_coeff_var(ts, hist_win=hist_win, fr_win=fr_win,\n n_bins=n_bins)\n x = np.arange(fr.size) * hist_win\n ax.plot(x, fr)\n ax.set_title('Firing Rate')\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Rate (s$^-1$)')\n\n if not (show_fr_cv):\n return fr\n else: # show coefficients of variation\n y_max = np.max(fr) * 1.05\n x_l = x[int(x.size / n_bins)]\n # Plot vertical lines separating plots into `n_bins`.\n [ax.vlines((x_l * i), 0, y_max, linestyles='dashed', linewidth=2)\n for i in range(1, n_bins)]\n # Plot text with cv of firing rate for each bin.\n [ax.text(x_l * (i + 1), y_max, 'cv={0:.2f}'.format(cvs[i]), fontsize=9, ha='right')\n for i in range(n_bins)]\n return fr, cv, cvs\n\n\ndef peri_event_time_histogram(\n spike_times, spike_clusters, events, cluster_id, # Everything you need for a basic plot\n t_before=0.2, t_after=0.5, bin_size=0.025, smoothing=0.025, as_rate=True,\n include_raster=False, n_rasters=None, error_bars='std', ax=None,\n pethline_kwargs={'color': 'blue', 'lw': 2},\n errbar_kwargs={'color': 'blue', 'alpha': 0.5},\n eventline_kwargs={'color': 'black', 'alpha': 0.5},\n raster_kwargs={'color': 'black', 'lw': 0.5}, **kwargs):\n \"\"\"\n Plot peri-event time histograms, with the meaning firing rate of units centered on a given\n series of events. Can optionally add a raster underneath the PETH plot of individual spike\n trains about the events.\n\n Parameters\n ----------\n spike_times : array_like\n Spike times (in seconds)\n spike_clusters : array-like\n Cluster identities for each element of spikes\n events : array-like\n Times to align the histogram(s) to\n cluster_id : int\n Identity of the cluster for which to plot a PETH\n\n t_before : float, optional\n Time before event to plot (default: 0.2s)\n t_after : float, optional\n Time after event to plot (default: 0.5s)\n bin_size :float, optional\n Width of bin for histograms (default: 0.025s)\n smoothing : float, optional\n Sigma of gaussian smoothing to use in histograms. (default: 0.025s)\n as_rate : bool, optional\n Whether to use spike counts or rates in the plot (default: `True`, uses rates)\n include_raster : bool, optional\n Whether to put a raster below the PETH of individual spike trains (default: `False`)\n n_rasters : int, optional\n If include_raster is True, the number of rasters to include. If `None`\n will default to plotting rasters around all provided events. (default: `None`)\n error_bars : {'std', 'sem', 'none'}, optional\n Defines which type of error bars to plot. Options are:\n -- `'std'` for 1 standard deviation\n -- `'sem'` for standard error of the mean\n -- `'none'` for only plotting the mean value\n (default: `'std'`)\n ax : matplotlib axes, optional\n If passed, the function will plot on the passed axes. 
Note: current\n behavior causes whatever was on the axes to be cleared before plotting!\n (default: `None`)\n pethline_kwargs : dict, optional\n Dict containing line properties to define PETH plot line. Default\n is a blue line with weight of 2. Needs to have color. See matplotlib plot documentation\n for more options.\n (default: `{'color': 'blue', 'lw': 2}`)\n errbar_kwargs : dict, optional\n Dict containing fill-between properties to define PETH error bars.\n Default is a blue fill with 50 percent opacity.. Needs to have color. See matplotlib\n fill_between documentation for more options.\n (default: `{'color': 'blue', 'alpha': 0.5}`)\n eventline_kwargs : dict, optional\n Dict containing fill-between properties to define line at event.\n Default is a black line with 50 percent opacity.. Needs to have color. See matplotlib\n vlines documentation for more options.\n (default: `{'color': 'black', 'alpha': 0.5}`)\n raster_kwargs : dict, optional\n Dict containing properties defining lines in the raster plot.\n Default is black lines with line width of 0.5. See matplotlib vlines for more options.\n (default: `{'color': 'black', 'lw': 0.5}`)\n\n Returns\n -------\n ax : matplotlib axes\n Axes with all of the plots requested.\n \"\"\"\n\n # Check to make sure if we fail, we fail in an informative way\n if not len(spike_times) == len(spike_clusters):\n raise ValueError('Spike times and clusters are not of the same shape')\n if len(events) == 1:\n raise ValueError('Cannot make a PETH with only one event.')\n if error_bars not in ('std', 'sem', 'none'):\n raise ValueError('Invalid error bar type was passed.')\n if not all(np.isfinite(events)):\n raise ValueError('There are NaN or inf values in the list of events passed. '\n ' Please remove non-finite data points and try again.')\n\n # Compute peths\n peths, binned_spikes = singlecell.calculate_peths(spike_times, spike_clusters, [cluster_id],\n events, t_before, t_after, bin_size,\n smoothing, as_rate)\n # Construct an axis object if none passed\n if ax is None:\n plt.figure()\n ax = plt.gca()\n # Plot the curve and add error bars\n mean = peths.means[0, :]\n ax.plot(peths.tscale, mean, **pethline_kwargs)\n if error_bars == 'std':\n bars = peths.stds[0, :]\n elif error_bars == 'sem':\n bars = peths.stds[0, :] / np.sqrt(len(events))\n else:\n bars = np.zeros_like(mean)\n if error_bars != 'none':\n ax.fill_between(peths.tscale, mean - bars, mean + bars, **errbar_kwargs)\n\n # Plot the event marker line. Extends to 5% higher than max value of means plus any error bar.\n plot_edge = (mean.max() + bars[mean.argmax()]) * 1.05\n ax.vlines(0., 0., plot_edge, **eventline_kwargs)\n # Set the limits on the axes to t_before and t_after. Either set the ylim to the 0 and max\n # values of the PETH, or if we want to plot a spike raster below, create an equal amount of\n # blank space below the zero where the raster will go.\n ax.set_xlim([-t_before, t_after])\n ax.set_ylim([-plot_edge if include_raster else 0., plot_edge])\n # Put y ticks only at min, max, and zero\n if mean.min() != 0:\n ax.set_yticks([0, mean.min(), mean.max()])\n else:\n ax.set_yticks([0., mean.max()])\n # Move the x axis line from the bottom of the plotting space to zero if including a raster,\n # Then plot the raster\n if include_raster:\n if n_rasters is None:\n n_rasters = len(events)\n if n_rasters > 60:\n warn(\"Number of raster traces is greater than 60. 
This might look bad on the plot.\")\n ax.axhline(0., color='black')\n tickheight = plot_edge / len(events[:n_rasters]) # How much space per trace\n tickedges = np.arange(0., -plot_edge - 1e-5, -tickheight)\n clu_spks = spike_times[spike_clusters == cluster_id]\n for i, t in enumerate(events[:n_rasters]):\n idx = np.bitwise_and(clu_spks >= t - t_before, clu_spks <= t + t_after)\n event_spks = clu_spks[idx]\n ax.vlines(event_spks - t, tickedges[i + 1], tickedges[i], **raster_kwargs)\n ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes', y=0.75)\n else:\n ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xlabel('Time (s) after event')\n return ax\n\n\ndef driftmap(ts, feat, ax=None, plot_style='bincount',\n t_bin=0.01, d_bin=20, weights=None, vmax=None, **kwargs):\n \"\"\"\n Plots the values of a spike feature array (y-axis) over time (x-axis).\n Two arguments can be given for the plot_style of the drift map:\n - 'scatter' : whereby each value is plotted as a marker (up to 100'000 data point)\n - 'bincount' : whereby the values are binned (optimised to represent spike raster)\n\n Parameters\n ----------\n feat : ndarray\n The spikes' feature values.\n ts : ndarray\n The spike timestamps from which to compute the firing rate.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n t_bin: time bin used when plot_style='bincount'\n d_bin: depth bin used when plot_style='bincount'\n plot_style: 'scatter', 'bincount'\n **kwargs: matplotlib.imshow arguments\n\n Returns\n -------\n cd: float\n The cumulative drift of `feat`.\n md: float\n The maximum drift of `feat`.\n\n See Also\n --------\n metrics.cum_drift\n metrics.max_drift\n\n Examples\n --------\n 1) Plot the amplitude driftmap for unit 1.\n >>> ts = units_b['times']['1']\n >>> amps = units_b['amps']['1']\n >>> ax = bb.plot.driftmap(ts, amps)\n 2) Plot the depth driftmap for unit 1.\n >>> ts = units_b['times']['1']\n >>> depths = units_b['depths']['1']\n >>> ax = bb.plot.driftmap(ts, depths)\n \"\"\"\n iok = ~np.isnan(feat)\n if ax is None:\n fig, ax = plt.subplots()\n\n if plot_style == 'scatter' and len(ts) < 100000:\n print('here todo')\n if 'color' not in kwargs.keys():\n kwargs['color'] = 'k'\n ax.plot(ts, feat, **kwargs)\n else:\n # compute raster map as a function of site depth\n R, times, depths = bincount2D(\n ts[iok], feat[iok], t_bin, d_bin, weights=weights)\n # plot raster map\n ax.imshow(R, aspect='auto', cmap='binary', vmin=0, vmax=vmax or np.std(R) * 4,\n extent=np.r_[times[[0, -1]], depths[[0, -1]]], origin='lower', **kwargs)\n ax.set_xlabel('time (secs)')\n ax.set_ylabel('depth (um)')\n return ax\n\n\ndef pres_ratio(ts, hist_win=10, ax=None):\n '''\n Plots the presence ratio of spike counts: the number of bins where there is at least one\n spike, over the total number of bins, given a specified bin width.\n\n Parameters\n ----------\n ts : ndarray\n The spike timestamps from which to compute the presence ratio.\n hist_win : float\n The time window (in s) to use for computing the presence ratio.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n pr : float\n The presence ratio.\n spks_bins : ndarray\n The number of spks in each bin.\n\n See Also\n --------\n metrics.pres_ratio\n\n Examples\n --------\n 1) Plot the presence ratio for unit 1, given a window of 10 s.\n >>> ts = units_b['times']['1']\n >>> pr, pr_bins = bb.plot.pres_ratio(ts)\n '''\n\n pr, spks_bins = single_units.pres_ratio(ts, hist_win)\n pr_bins = np.where(spks_bins > 0, 1, 0)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.plot(pr_bins)\n ax.set_xlabel('Bin Number (width={:.1f}s)'.format(hist_win))\n ax.set_ylabel('Presence')\n ax.set_title('Presence Ratio')\n\n return pr, spks_bins\n\n\ndef driftmap_color(\n clusters_depths, spikes_times,\n spikes_amps, spikes_depths, spikes_clusters,\n ax=None, axesoff=False, return_lims=False):\n\n '''\n Plots the driftmap of a session or a trial\n\n The plot shows the spike times vs spike depths.\n Each dot is a spike, whose color indicates the cluster\n and opacity indicates the spike amplitude.\n\n Parameters\n -------------\n clusters_depths: ndarray\n depths of all clusters\n spikes_times: ndarray\n spike times of all clusters\n spikes_amps: ndarray\n amplitude of each spike\n spikes_depths: ndarray\n depth of each spike\n spikes_clusters: ndarray\n cluster idx of each spike\n ax: matplotlib.axes.Axes object (optional)\n The axis object to plot the driftmap on\n (if `None`, a new figure and axis is created)\n\n Return\n ---\n ax: matplotlib.axes.Axes object\n The axis object with driftmap plotted\n x_lim: list of two elements\n range of x axis\n y_lim: list of two elements\n range of y axis\n '''\n\n color_bins = sns.color_palette(\"hls\", 500)\n new_color_bins = np.vstack(\n np.transpose(np.reshape(color_bins, [5, 100, 3]), [1, 0, 2]))\n\n # get the sorted idx of each depth, and create colors based on the idx\n\n sorted_idx = np.argsort(np.argsort(clusters_depths))\n\n colors = np.vstack(\n [np.repeat(\n new_color_bins[np.mod(idx, 500), :][np.newaxis, ...],\n n_spikes, axis=0)\n for (idx, n_spikes) in\n zip(sorted_idx, np.unique(spikes_clusters,\n return_counts=True)[1])])\n\n max_amp = np.percentile(spikes_amps, 90)\n min_amp = np.percentile(spikes_amps, 10)\n opacity = np.divide(spikes_amps - min_amp, max_amp - min_amp)\n opacity[opacity > 1] = 1\n opacity[opacity < 0] = 0\n\n colorvec = np.zeros([len(opacity), 4], dtype='float16')\n colorvec[:, 3] = opacity.astype('float16')\n colorvec[:, 0:3] = colors.astype('float16')\n\n x = spikes_times.astype('float32')\n y = spikes_depths.astype('float32')\n\n args = dict(color=colorvec, edgecolors='none')\n\n if ax is None:\n fig = plt.Figure(dpi=200, frameon=False, figsize=[10, 10])\n ax = plt.Axes(fig, [0.1, 0.1, 0.9, 0.9])\n ax.set_xlabel('Time (sec)')\n ax.set_ylabel('Distance from the probe tip (um)')\n savefig = True\n args.update(s=0.1)\n\n ax.scatter(x, y, **args)\n x_edge = (max(x) - min(x)) * 0.05\n x_lim = [min(x) - x_edge, max(x) + x_edge]\n y_lim = [min(y) - 50, max(y) + 100]\n ax.set_xlim(x_lim[0], x_lim[1])\n ax.set_ylim(y_lim[0], y_lim[1])\n\n if axesoff:\n ax.axis('off')\n\n if savefig:\n fig.add_axes(ax)\n fig.savefig('driftmap.png')\n\n if return_lims:\n return ax, x_lim, y_lim\n else:\n return ax\n", "\"\"\"Base Extractor classes\nA module for the base Extractor classes. 
The Extractor, given a session path, will extract the\nprocessed data from raw hardware files and optionally save them.\n\"\"\"\n\nimport abc\nfrom collections import OrderedDict\nimport json\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom one.alf.files import get_session_path\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.raw_data_loaders import load_settings, _logger\n\n\nclass BaseExtractor(abc.ABC):\n \"\"\"\n Base extractor class\n Writing an extractor checklist:\n - on the child class, overload the _extract method\n - this method should output one or several numpy.arrays or dataframe with a consistent shape\n - save_names is a list or a string of filenames, there should be one per dataset\n - set save_names to None for a dataset that doesn't need saving (could be set dynamically\n in the _extract method)\n :param session_path: Absolute path of session folder\n :type session_path: str/Path\n \"\"\"\n\n session_path = None\n save_names = None\n default_path = Path(\"alf\") # relative to session\n\n def __init__(self, session_path=None):\n # If session_path is None Path(session_path) will fail\n self.session_path = Path(session_path)\n\n def extract(self, save=False, path_out=None, **kwargs):\n \"\"\"\n :return: numpy.ndarray or list of ndarrays, list of filenames\n :rtype: dtype('float64')\n \"\"\"\n out = self._extract(**kwargs)\n files = self._save(out, path_out=path_out) if save else None\n return out, files\n\n def _save(self, data, path_out=None):\n # Chack if self.save_namesis of the same length of out\n if not path_out:\n path_out = self.session_path.joinpath(self.default_path)\n path_out.mkdir(exist_ok=True, parents=True)\n\n def _write_to_disk(file_path, data):\n \"\"\"Implements different save calls depending on file extension\"\"\"\n csv_separators = {\n \".csv\": \",\",\n \".ssv\": \" \",\n \".tsv\": \"\\t\",\n }\n file_path = Path(file_path)\n if file_path.suffix == \".npy\":\n np.save(file_path, data)\n elif file_path.suffix in [\".parquet\", \".pqt\"]:\n if not isinstance(data, pd.DataFrame):\n _logger.error(\"Data is not a panda's DataFrame object\")\n raise TypeError(\"Data is not a panda's DataFrame object\")\n data.to_parquet(file_path)\n elif file_path.suffix in [\".csv\", \".ssv\", \".tsv\"]:\n sep = csv_separators[file_path.suffix]\n data.to_csv(file_path, sep=sep)\n # np.savetxt(file_path, data, delimiter=sep)\n else:\n _logger.error(f\"Don't know how to save {file_path.suffix} files yet\")\n\n if self.save_names is None:\n file_paths = []\n elif isinstance(self.save_names, str):\n file_paths = path_out.joinpath(self.save_names)\n _write_to_disk(file_paths, data)\n else: # Should be list or tuple...\n assert len(data) == len(self.save_names)\n file_paths = []\n for data, fn in zip(data, self.save_names):\n if fn:\n fpath = path_out.joinpath(fn)\n _write_to_disk(fpath, data)\n file_paths.append(fpath)\n return file_paths\n\n @abc.abstractmethod\n def _extract(self):\n pass\n\n\nclass BaseBpodTrialsExtractor(BaseExtractor):\n \"\"\"\n Base (abstract) extractor class for bpod jsonable data set\n Wrps the _extract private method\n\n :param session_path: Absolute path of session folder\n :type session_path: str\n :param bpod_trials\n :param settings\n \"\"\"\n\n bpod_trials = None\n settings = None\n\n def extract(self, bpod_trials=None, settings=None, **kwargs):\n \"\"\"\n :param: bpod_trials (optional) bpod trials from jsonable in a dictionary\n :param: settings (optional) bpod iblrig settings json file in a dictionary\n 
:param: save (bool) write output ALF files, defaults to False\n :param: path_out (pathlib.Path) output path (defaults to `{session_path}/alf`)\n :return: numpy.ndarray or list of ndarrays, list of filenames\n :rtype: dtype('float64')\n \"\"\"\n self.bpod_trials = bpod_trials\n self.settings = settings\n if self.bpod_trials is None:\n self.bpod_trials = raw.load_data(self.session_path)\n if not self.settings:\n self.settings = raw.load_settings(self.session_path)\n if self.settings is None:\n self.settings = {\"IBLRIG_VERSION_TAG\": \"100.0.0\"}\n elif self.settings[\"IBLRIG_VERSION_TAG\"] == \"\":\n self.settings[\"IBLRIG_VERSION_TAG\"] = \"100.0.0\"\n return super(BaseBpodTrialsExtractor, self).extract(**kwargs)\n\n\ndef run_extractor_classes(classes, session_path=None, **kwargs):\n \"\"\"\n Run a set of extractors with the same inputs\n :param classes: list of Extractor class\n :param save: True/False\n :param path_out: (defaults to alf path)\n :param kwargs: extractor arguments (session_path...)\n :return: dictionary of arrays, list of files\n \"\"\"\n files = []\n outputs = OrderedDict({})\n assert session_path\n # if a single class is passed, convert as a list\n try:\n iter(classes)\n except TypeError:\n classes = [classes]\n for classe in classes:\n cls = classe(session_path=session_path)\n out, fil = cls.extract(**kwargs)\n if isinstance(fil, list):\n files.extend(fil)\n elif fil is not None:\n files.append(fil)\n if isinstance(cls.var_names, str):\n outputs[cls.var_names] = out\n else:\n for i, k in enumerate(cls.var_names):\n outputs[k] = out[i]\n return outputs, files\n\n\ndef _get_task_types_json_config():\n with open(Path(__file__).parent.joinpath('extractor_types.json')) as fp:\n task_types = json.load(fp)\n return task_types\n\n\ndef get_task_protocol(session_path):\n try:\n settings = load_settings(get_session_path(session_path))\n except json.decoder.JSONDecodeError:\n _logger.error(f\"Can't read settings for {session_path}\")\n return\n if settings:\n return settings.get('PYBPOD_PROTOCOL', None)\n else:\n return\n\n\ndef get_task_extractor_type(task_name):\n \"\"\"\n Returns the task type string from the full pybpod task name:\n _iblrig_tasks_biasedChoiceWorld3.7.0 returns \"biased\"\n _iblrig_tasks_trainingChoiceWorld3.6.0 returns \"training'\n :param task_name:\n :return: one of ['biased', 'habituation', 'training', 'ephys', 'mock_ephys', 'sync_ephys']\n \"\"\"\n if isinstance(task_name, Path):\n task_name = get_task_protocol(task_name)\n if task_name is None:\n return\n task_types = _get_task_types_json_config()\n task_type = next((task_types[tt] for tt in task_types if tt in task_name), None)\n if task_type is None:\n _logger.warning(f\"No extractor type found for {task_name}\")\n return task_type\n\n\ndef get_session_extractor_type(session_path):\n \"\"\"\n From a session path, loads the settings file, finds the task and checks if extractors exist\n task names examples:\n :param session_path:\n :return: bool\n \"\"\"\n settings = load_settings(session_path)\n if settings is None:\n _logger.error(f'ABORT: No data found in \"raw_behavior_data\" folder {session_path}')\n return False\n extractor_type = get_task_extractor_type(settings['PYBPOD_PROTOCOL'])\n if extractor_type:\n return extractor_type\n else:\n return False\n\n\ndef get_pipeline(session_path):\n \"\"\"\n Get the pre-processinf pipeline name from a session path\n :param session_path:\n :return:\n \"\"\"\n stype = get_session_extractor_type(session_path)\n return _get_pipeline_from_task_type(stype)\n\n\ndef 
_get_pipeline_from_task_type(stype):\n \"\"\"\n Returns the pipeline from the task type. Some tasks types directly define the pipeline\n :param stype: session_type or task extractor type\n :return:\n \"\"\"\n if stype in ['ephys_biased_opto', 'ephys', 'ephys_training', 'mock_ephys', 'sync_ephys']:\n return 'ephys'\n elif stype in ['habituation', 'training', 'biased', 'biased_opto']:\n return 'training'\n else:\n return stype\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.Figure", "numpy.asarray", "numpy.max", "numpy.mean", "numpy.zeros_like", "numpy.where", "numpy.divide", "matplotlib.pyplot.gca", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.ceil", "numpy.std", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.cm.get_cmap", "numpy.isnan", "numpy.median", "numpy.append", "numpy.argsort", "numpy.flip", "matplotlib.pyplot.cm.ScalarMappable", "numpy.isfinite", "matplotlib.pyplot.Axes", "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.bitwise_and", "numpy.mod" ], [ "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jdvelasq/techMiner
[ "c611d96d2f812b0890513514d9d19787a1edfe2d", "c611d96d2f812b0890513514d9d19787a1edfe2d", "c611d96d2f812b0890513514d9d19787a1edfe2d" ]
[ "techminer/core/normalize_network.py", "techminer/gui/correlation_analysis.py", "techminer/plots/bubble_plot.py" ]
[ "import numpy as np\n\n\ndef normalize_network(X, normalization=None):\n \"\"\"\n \"\"\"\n X = X.copy()\n\n if isinstance(normalization, str) and normalization == \"None\":\n normalization = None\n\n if normalization is None:\n X = X.applymap(lambda w: int(w))\n else:\n X = X.applymap(lambda w: float(w))\n\n M = X.copy()\n\n if normalization == \"Jaccard\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (\n M.loc[row, row] + M.at[col, col] - M.at[row, col]\n )\n\n if normalization == \"Dice\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (\n M.loc[row, row] + M.at[col, col] + 2 * M.at[row, col]\n )\n\n if normalization == \"Salton/Cosine\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / np.sqrt(\n (M.loc[row, row] * M.at[col, col])\n )\n\n if normalization == \"Equivalence\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] ** 2 / (\n M.loc[row, row] * M.at[col, col]\n )\n\n ## inclusion\n if normalization == \"Inclusion\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / min(M.loc[row, row], M.at[col, col])\n\n if normalization == \"Mutual Information\":\n N = len(M.columns)\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = np.log(\n M.at[row, col] / (N * M.loc[row, row] * M.at[col, col])\n )\n\n if normalization == \"Association\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (M.loc[row, row] * M.at[col, col])\n\n return X\n", "import matplotlib.pyplot as pyplot\nimport numpy as np\nimport pandas as pd\nimport ipywidgets as widgets\n\nimport techminer.core.dashboard as dash\nfrom techminer.core import (\n Dashboard,\n Network,\n TF_matrix,\n add_counters_to_axis,\n corpus_filter,\n exclude_terms,\n sort_by_axis,\n)\nfrom techminer.core.dashboard import min_occurrence\nfrom techminer.plots import (\n ChordDiagram,\n bubble_plot,\n counters_to_node_colors,\n counters_to_node_sizes,\n heatmap,\n)\nfrom techminer.core.filter_records import filter_records\n\n###############################################################################\n##\n## MODEL\n##\n###############################################################################\n\n\nclass Model:\n def __init__(\n self,\n data,\n limit_to,\n exclude,\n years_range,\n clusters=None,\n cluster=None,\n ):\n ##\n if years_range is not None:\n initial_year, final_year = years_range\n data = data[(data.Year >= initial_year) & (data.Year <= final_year)]\n\n #\n # Filter for cluster members\n #\n if clusters is not None and cluster is not None:\n data = corpus_filter(data=data, clusters=clusters, cluster=cluster)\n\n self.data = data\n self.limit_to = limit_to\n self.exclude = exclude\n\n def apply(self):\n\n x = self.data.copy()\n\n if self.column == self.by:\n\n ##\n ## Drop NA from column\n ##\n w = x[[self.column, \"ID\"]].dropna()\n\n ##\n ## Computes TF_matrix with occurrence >= min_occurrence\n ##\n A = TF_matrix(\n data=w,\n column=self.column,\n scheme=None,\n min_occurrence=self.min_occ,\n )\n\n ##\n ## Exclude Terms\n ##\n A = exclude_terms(data=A, axis=1)\n\n ##\n ## Select max_items\n ##\n A = add_counters_to_axis(X=A, axis=1, data=self.data, column=self.column)\n A = sort_by_axis(data=A, sort_by=self.top_by, ascending=False, axis=1)\n A = A[A.columns[: self.max_items]]\n if len(A.columns) > self.max_items:\n top_items = A.sum(axis=0)\n top_items = top_items.sort_values(ascending=False)\n top_items = 
top_items.head(self.max_items)\n A = A.loc[:, top_items.index]\n rows = A.sum(axis=1)\n rows = rows[rows > 0]\n A = A.loc[rows.index, :]\n\n ##\n ## Computes correlation\n ##\n matrix = A.corr(method=self.method)\n\n else:\n\n ##\n ## Drop NA from column\n ##\n w = x[[self.column, self.by, \"ID\"]].dropna()\n\n ##\n ## Computes TF_matrix with occurrence >= min_occurrence\n ##\n A = TF_matrix(data=w, column=self.column, scheme=None)\n\n ##\n ## Exclude Terms\n ##\n A = exclude_terms(data=A, axis=1)\n\n ##\n ## Minimal occurrence\n ##\n terms = A.sum(axis=0)\n terms = terms.sort_values(ascending=False)\n terms = terms[terms >= self.min_occ]\n A = A.loc[:, terms.index]\n\n ##\n ## Select max_items\n ##\n A = add_counters_to_axis(X=A, axis=1, data=self.data, column=self.column)\n A = sort_by_axis(data=A, sort_by=self.top_by, ascending=False, axis=1)\n if len(A.columns) > self.max_items:\n A = A[A.columns[: self.max_items]]\n\n ##\n ## Computes correlation\n ##\n B = TF_matrix(w, column=self.by, scheme=None)\n matrix = np.matmul(B.transpose().values, A.values)\n matrix = pd.DataFrame(matrix, columns=A.columns, index=B.columns)\n matrix = matrix.corr(method=self.method)\n\n matrix = sort_by_axis(\n data=matrix,\n sort_by=self.sort_r_axis_by,\n ascending=self.r_axis_ascending,\n axis=0,\n )\n\n matrix = sort_by_axis(\n data=matrix,\n sort_by=self.sort_c_axis_by,\n ascending=self.c_axis_ascending,\n axis=1,\n )\n self.X_ = matrix\n\n def matrix(self):\n self.apply()\n return self.X_.style.format(\"{:+4.3f}\").background_gradient(\n cmap=self.colormap, axis=None\n )\n\n def heatmap(self):\n self.apply()\n return heatmap(self.X_, cmap=self.colormap, figsize=(self.width, self.height))\n\n def bubble_plot(self):\n self.apply()\n return bubble_plot(\n self.X_,\n axis=0,\n cmap=self.colormap,\n figsize=(self.width, self.height),\n )\n\n def chord_diagram(self):\n self.apply()\n x = self.X_.copy()\n terms = self.X_.columns.tolist()\n node_sizes = counters_to_node_sizes(x=terms)\n node_colors = counters_to_node_colors(x, cmap=pyplot.cm.get_cmap(self.colormap))\n\n cd = ChordDiagram()\n\n ## add nodes\n for idx, term in enumerate(x.columns):\n cd.add_node(term, color=node_colors[idx], s=node_sizes[idx])\n\n ## add links\n m = x.stack().to_frame().reset_index()\n m = m[m.level_0 < m.level_1]\n m.columns = [\"from_\", \"to_\", \"link_\"]\n m = m.reset_index(drop=True)\n\n d = {\n 0: {\"linestyle\": \"-\", \"linewidth\": 4, \"color\": \"black\"},\n 1: {\"linestyle\": \"-\", \"linewidth\": 2, \"color\": \"black\"},\n 2: {\"linestyle\": \"--\", \"linewidth\": 1, \"color\": \"gray\"},\n 3: {\"linestyle\": \":\", \"linewidth\": 1, \"color\": \"lightgray\"},\n }\n\n for idx in range(len(m)):\n\n key = (\n 0\n if m.link_[idx] > 0.75\n else (1 if m.link_[idx] > 0.50 else (2 if m.link_[idx] > 0.25 else 3))\n )\n\n cd.add_edge(m.from_[idx], m.to_[idx], **(d[key]))\n\n return cd.plot(figsize=(self.width, self.height))\n\n def correlation_map_nx(self):\n self.apply()\n return Network(\n X=self.X_,\n top_by=self.top_by,\n n_labels=self.n_labels,\n clustering=self.clustering,\n ).networkx_plot(\n layout=self.layout,\n iterations=self.nx_iterations,\n figsize=(self.width, self.height),\n )\n\n def communities(self):\n self.fit()\n return Network(\n X=self.X_,\n top_by=self.top_by,\n n_labels=self.n_labels,\n clustering=self.clustering,\n ).cluster_members_\n\n def correlation_map_interactive(self):\n self.apply()\n return Network(\n X=self.X_,\n top_by=self.top_by,\n n_labels=self.n_labels,\n clustering=self.clustering,\n 
).pyvis_plot()\n\n\n###############################################################################\n##\n## DASHBOARD\n##\n###############################################################################\n\n\nclass App(Dashboard, Model):\n def __init__(\n self,\n limit_to=None,\n exclude=None,\n years_range=None,\n ):\n data = filter_records(pd.read_csv(\"corpus.csv\"))\n\n Model.__init__(\n self,\n data=data,\n limit_to=limit_to,\n exclude=exclude,\n years_range=years_range,\n )\n\n COLUMNS = sorted(\n [\n column\n for column in data.columns\n if column\n not in [\n \"Abb_Source_Title\",\n \"Abstract\",\n \"Affiliations\",\n \"Authors_ID\",\n \"Bradford_Law_Zone\",\n \"Document_Type\",\n \"Frac_Num_Documents\",\n \"Global_Citations\",\n \"Global_References\",\n \"Historiograph_ID\",\n \"ID\",\n \"Local_Citations\",\n \"Local_References\",\n \"Num_Authors\",\n \"Source_Title\",\n \"Title\",\n \"Year\",\n ]\n ]\n )\n\n self.command_panel = [\n dash.HTML(\"Display:\", hr=False, margin=\"0px, 0px, 0px, 5px\"),\n dash.Dropdown(\n options=[\n \"Matrix\",\n \"Heatmap\",\n \"Bubble plot\",\n \"Correlation map (nx)\",\n \"Chord diagram\",\n ],\n ),\n dash.HTML(\"Parameters:\"),\n dash.Dropdown(description=\"Column:\", options=sorted(data.columns)),\n dash.Dropdown(description=\"By:\", options=sorted(data.columns)),\n dash.Dropdown(\n description=\"Method:\", options=[\"pearson\", \"kendall\", \"spearman\"]\n ),\n dash.min_occurrence(),\n dash.max_items(),\n dash.network_clustering(),\n dash.HTML(\"Visualization:\"),\n dash.Dropdown(\n description=\"Top by:\",\n options=[\n \"Num Documents\",\n \"Global Citations\",\n ],\n ),\n dash.Dropdown(\n description=\"Sort C-axis by:\",\n options=[\n \"Alphabetic\",\n \"Num Documents\",\n \"Global Citations\",\n ],\n ),\n dash.c_axis_ascending(),\n dash.Dropdown(\n description=\"Sort R-axis by:\",\n options=[\n \"Alphabetic\",\n \"Num Documents\",\n \"Global Citations\",\n ],\n ),\n dash.r_axis_ascending(),\n dash.cmap(),\n dash.nx_layout(),\n dash.n_labels(),\n dash.nx_iterations(),\n dash.fig_width(),\n dash.fig_height(),\n ]\n\n #\n # interactive output function\n #\n widgets.interactive_output(\n f=self.interactive_output,\n controls={\n # Display:\n \"menu\": self.command_panel[1],\n # Parameters:\n \"column\": self.command_panel[3],\n \"by\": self.command_panel[4],\n \"method\": self.command_panel[5],\n \"min_occ\": self.command_panel[6],\n \"max_items\": self.command_panel[7],\n \"clustering\": self.command_panel[8],\n # Visualization\n \"top_by\": self.command_panel[10],\n \"sort_c_axis_by\": self.command_panel[11],\n \"c_axis_ascending\": self.command_panel[12],\n \"sort_r_axis_by\": self.command_panel[13],\n \"r_axis_ascending\": self.command_panel[14],\n \"colormap\": self.command_panel[15],\n \"layout\": self.command_panel[16],\n \"n_labels\": self.command_panel[17],\n \"nx_iterations\": self.command_panel[18],\n \"width\": self.command_panel[19],\n \"height\": self.command_panel[20],\n },\n )\n\n Dashboard.__init__(self)\n\n def interactive_output(self, **kwargs):\n\n Dashboard.interactive_output(self, **kwargs)\n\n if self.menu in [\n \"Matrix\",\n \"Heatmap\",\n \"Bubble plot\",\n ]:\n self.set_enabled(\"Sort C-axis by:\")\n self.set_enabled(\"C-axis ascending:\")\n self.set_enabled(\"Sort R-axis by:\")\n self.set_enabled(\"R-axis ascending:\")\n else:\n self.set_disabled(\"Sort C-axis by:\")\n self.set_disabled(\"C-axis ascending:\")\n self.set_disabled(\"Sort R-axis by:\")\n self.set_disabled(\"R-axis ascending:\")\n\n if self.menu == 
\"Correlation map (nx)\":\n self.set_enabled(\"Layout:\")\n self.set_enabled(\"N labels:\")\n else:\n self.set_disabled(\"Layout:\")\n self.set_disabled(\"N labels:\")\n\n if self.menu == \"Correlation map\" and self.layout == \"Spring\":\n self.set_enabled(\"nx iterations:\")\n else:\n self.set_disabled(\"nx iterations:\")\n\n if self.menu in [\"Matrix\", \"Correlation map (interactive)\"]:\n self.set_disabled(\"Width:\")\n self.set_disabled(\"Height:\")\n else:\n self.set_enabled(\"Width:\")\n self.set_enabled(\"Height:\")\n\n\n###############################################################################\n##\n## EXTERNAL INTERFACE\n##\n###############################################################################\n\n\ndef correlation_analysis(\n limit_to=None,\n exclude=None,\n years_range=None,\n):\n\n return App(\n limit_to=limit_to,\n exclude=exclude,\n years_range=years_range,\n ).run()\n", "import textwrap\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nTEXTLEN = 35\n\n\ndef collapse_text(w, width=TEXTLEN):\n if not isinstance(w, str):\n return w\n text_begining = \" \".join(w.split(\" \")[:-1])\n text_ending = w.split(\" \")[-1]\n return textwrap.shorten(text=text_begining, width=TEXTLEN) + \" \" + text_ending\n\n\ndef bubble_plot(\n X,\n darkness=None,\n figsize=(6, 6),\n cmap=\"Greys\",\n grid_lw=1.0,\n grid_c=\"gray\",\n grid_ls=\":\",\n fontsize=11,\n **kwargs,\n):\n\n \"\"\"Creates a gant activity plot from a dataframe.\n\n Examples\n ----------------------------------------------------------------------------------------------\n\n >>> import pandas as pd\n >>> df = pd.DataFrame(\n ... {\n ... \"author 0\": [ 1, 2, 3, 4, 5, 6, 7],\n ... \"author 1\": [14, 13, 12, 11, 10, 9, 8],\n ... \"author 2\": [1, 5, 8, 9, 0, 0, 0],\n ... \"author 3\": [0, 0, 1, 1, 1, 0, 0],\n ... \"author 4\": [0, 10, 0, 4, 2, 0, 1],\n ... },\n ... index =[2010, 2011, 2012, 2013, 2014, 2015, 2016]\n ... )\n >>> df\n author 0 author 1 author 2 author 3 author 4\n 2010 1 14 1 0 0\n 2011 2 13 5 0 10\n 2012 3 12 8 1 0\n 2013 4 11 9 1 4\n 2014 5 10 0 1 2\n 2015 6 9 0 0 0\n 2016 7 8 0 0 1\n\n >>> fig = bubble(df, axis=0, alpha=0.5, rmax=150)\n >>> fig.savefig('/workspaces/techminer/sphinx/images/bubbleplot0.png')\n\n .. image:: images/bubbleplot0.png\n :width: 400px\n :align: center\n\n >>> fig = bubble(df, axis=1, alpha=0.5, rmax=150)\n >>> fig.savefig('/workspaces/techminer/sphinx/images/bubbleplot1.png')\n\n .. 
image:: images/bubbleplot1.png\n :width: 400px\n :align: center\n\n\n \"\"\"\n matplotlib.rc(\"font\", size=fontsize)\n fig = plt.Figure(figsize=figsize)\n ax = fig.subplots()\n cmap = plt.cm.get_cmap(cmap)\n\n x = X.copy()\n\n ##\n ## Text wrap\n ##\n x.columns = [collapse_text(w) for w in x.columns.tolist()]\n x.index = [collapse_text(w) for w in x.index.tolist()]\n\n size_max = x.max().max()\n size_min = x.min().min()\n\n if darkness is None:\n darkness = x\n darkness = darkness.loc[:, x.columns]\n\n color_max = darkness.max().max()\n color_min = darkness.min().min()\n\n for idx, row in enumerate(x.index.tolist()):\n\n sizes = [\n 150 + 1000 * (w - size_min) / (size_max - size_min) if w != 0 else 0\n for w in x.loc[row, :]\n ]\n\n colors = [\n cmap(0.2 + 0.8 * (w - color_min) / (color_max - color_min))\n for w in darkness.loc[row, :]\n ]\n\n #  return range(len(x.columns)), [idx] * len(x.columns)\n\n ax.scatter(\n list(range(len(x.columns))),\n [idx] * len(x.columns),\n marker=\"o\",\n s=sizes,\n alpha=1.0,\n c=colors,\n edgecolors=\"k\",\n zorder=11,\n #  **kwargs,\n )\n\n for idx, row in enumerate(x.iterrows()):\n ax.hlines(\n idx, -1, len(x.columns), linewidth=grid_lw, color=grid_c, linestyle=grid_ls,\n )\n\n for idx, col in enumerate(x.columns):\n ax.vlines(\n idx, -1, len(x.index), linewidth=grid_lw, color=grid_c, linestyle=grid_ls,\n )\n\n mean_color = 0.5 * (color_min + color_max)\n for idx_col, col in enumerate(x.columns):\n for idx_row, row in enumerate(x.index):\n\n if x[col][row] != 0:\n if darkness[col][row] >= 0.8 * mean_color:\n text_color = \"w\"\n else:\n text_color = \"k\"\n\n ax.text(\n idx_col,\n idx_row,\n \"{}\".format(x[col][row])\n if x[col][row].dtype == \"int64\"\n else \"{:.2f}\".format(x[col][row]),\n va=\"center\",\n ha=\"center\",\n zorder=12,\n color=text_color,\n )\n\n ax.set_aspect(\"equal\")\n\n ax.set_xlim(-1, len(x.columns))\n ax.set_ylim(-1, len(x.index) + 1)\n\n ax.set_xticks(np.arange(len(x.columns)))\n ax.set_xticklabels(x.columns)\n ax.tick_params(axis=\"x\", labelrotation=90)\n ax.xaxis.tick_top()\n\n ax.invert_yaxis()\n ax.set_yticks(np.arange(len(x.index)))\n ax.set_yticklabels(x.index)\n\n for x in [\"top\", \"right\", \"left\", \"bottom\"]:\n ax.spines[x].set_visible(False)\n\n fig.set_tight_layout(True)\n\n return fig\n" ]
[ [ "numpy.log", "numpy.sqrt" ], [ "pandas.read_csv", "matplotlib.pyplot.cm.get_cmap", "pandas.DataFrame" ], [ "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.Figure", "matplotlib.rc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
botmatic/tacotron2
[ "c2dee4930f6bd1cf707e0565fd0675b8646a51a1" ]
[ "parallel_wavenet_vocoder/tests/test_mixture.py" ]
[ "# coding: utf-8\nfrom __future__ import with_statement, print_function, absolute_import\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport librosa\nimport pysptk\n\nfrom wavenet_vocoder.mixture import discretized_mix_logistic_loss\nfrom wavenet_vocoder.mixture import sample_from_discretized_mix_logistic\n\n\ndef log_prob_from_logits(x):\n \"\"\" numerically stable log_softmax implementation that prevents overflow \"\"\"\n # TF ordering\n axis = len(x.size()) - 1\n m, _ = torch.max(x, dim=-1, keepdim=True)\n return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))\n\n\ndef test_log_softmax():\n x = torch.rand(2, 16000, 30)\n y = log_prob_from_logits(x)\n y_hat = F.log_softmax(x, -1)\n\n y = y.data.cpu().numpy()\n y_hat = y_hat.data.cpu().numpy()\n assert np.allclose(y, y_hat)\n\n\ndef test_mixture():\n np.random.seed(1234)\n\n x, sr = librosa.load(pysptk.util.example_audio_file(), sr=None)\n assert sr == 16000\n\n T = len(x)\n x = x.reshape(1, T, 1)\n y = torch.from_numpy(x).float()\n y_hat = torch.rand(1, 30, T).float()\n\n print(y.shape, y_hat.shape)\n\n loss = discretized_mix_logistic_loss(y_hat, y)\n print(loss)\n\n loss = discretized_mix_logistic_loss(y_hat, y, reduce=False)\n print(loss.size(), y.size())\n assert loss.size() == y.size()\n\n y = sample_from_discretized_mix_logistic(y_hat)\n print(y.shape)\n\n\ndef test_misc():\n # https://en.wikipedia.org/wiki/Logistic_distribution\n # what i have learned\n # m = (x - mu) / s\n m = torch.rand(10, 10)\n log_pdf_mid1 = -2 * torch.log(torch.exp(m / 2) + torch.exp(-m / 2))\n log_pdf_mid2 = m - 2 * F.softplus(m)\n assert np.allclose(log_pdf_mid1.data.numpy(), log_pdf_mid2.data.numpy())\n\n # Edge case for 0\n plus_in = torch.rand(10, 10)\n log_cdf_plus1 = torch.sigmoid(m).log()\n log_cdf_plus2 = m - F.softplus(m)\n assert np.allclose(log_cdf_plus1.data.numpy(), log_cdf_plus2.data.numpy())\n\n # Edge case for 255\n min_in = torch.rand(10, 10)\n log_one_minus_cdf_min1 = (1 - torch.sigmoid(min_in)).log()\n log_one_minus_cdf_min2 = -F.softplus(min_in)\n assert np.allclose(log_one_minus_cdf_min1.data.numpy(), log_one_minus_cdf_min2.data.numpy())\n" ]
[ [ "torch.sigmoid", "numpy.allclose", "numpy.random.seed", "torch.max", "torch.nn.functional.log_softmax", "torch.from_numpy", "torch.exp", "torch.rand", "torch.nn.functional.softplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjbrophy47/tree_deletion
[ "97041d129da335de3018b3243bc81943088abf24", "97041d129da335de3018b3243bc81943088abf24", "97041d129da335de3018b3243bc81943088abf24" ]
[ "scripts/experiments/roar.py", "scripts/postprocess/roar.py", "scripts/experiments/performance.py" ]
[ "\"\"\"\nRemove and Retrain (ROAR) experiment.\n\"\"\"\nimport os\nimport sys\nimport time\nimport argparse\nfrom datetime import datetime\n\nimport numpy as np\n\nhere = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, here + '/../../')\nsys.path.insert(0, here + '/../')\nimport dare\nfrom utility import data_util\nfrom utility import exp_util\nfrom utility import print_util\n\nMAX_SEED_INCREASE = 1000\n\n\ndef _get_model(args):\n \"\"\"\n Return model.\n \"\"\"\n model = dare.Forest(criterion=args.criterion,\n topd=0,\n k=args.k,\n n_estimators=args.n_estimators,\n max_features=args.max_features,\n max_depth=args.max_depth,\n random_state=args.rs)\n\n return model\n\n\ndef measure_performance(sort_indices, percentages, X_test, y_test, X_train, y_train,\n logger=None):\n \"\"\"\n Measures the change in log loss as training instances are removed.\n \"\"\"\n r = {}\n aucs = []\n accs = []\n aps = []\n\n # remove training samples in batches\n for percentage in percentages:\n n_samples = int(X_train.shape[0] * (percentage / 100))\n remove_indices = sort_indices[:n_samples]\n\n new_X_train = np.delete(X_train, remove_indices, axis=0)\n new_y_train = np.delete(y_train, remove_indices)\n\n if len(np.unique(new_y_train)) == 1:\n print(percentage)\n break\n\n # train target model\n model = _get_model(args)\n label = '{}%'.format(percentage)\n model = model.fit(new_X_train, new_y_train)\n\n auc, acc, ap = exp_util.performance(model, X_test, y_test,\n logger=logger, name=label)\n aucs.append(auc)\n accs.append(acc)\n aps.append(ap)\n\n r['auc'] = aucs\n r['acc'] = accs\n r['ap'] = aps\n\n return r\n\n\ndef experiment(args, logger, out_dir):\n \"\"\"\n Obtains data, trains model, and generates instance-attribution explanations.\n \"\"\"\n\n # get data\n X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)\n\n # select a subset of the test data for evaluation\n n_test_samples = args.n_test if args.n_test is not None else int(X_test.shape[0] * args.test_frac)\n np.random.seed(args.rs)\n test_indices = np.random.choice(X_test.shape[0], size=n_test_samples, replace=False)\n X_test_sub, y_test_sub = X_test[test_indices], y_test[test_indices]\n\n # choose new subset if test subset all contain the same label\n new_seed = args.rs\n while y_test_sub.sum() == len(y_test_sub) or y_test_sub.sum() == 0:\n np.random.seed(new_seed)\n new_seed += np.random.randint(MAX_SEED_INCREASE)\n np.random.seed(new_seed)\n test_indices = np.random.choice(X_test.shape[0], size=n_test_samples, replace=False)\n X_test_sub, y_test_sub = X_test[test_indices], y_test[test_indices]\n\n X_test = X_test_sub\n y_test = y_test_sub\n\n # dataset statistics\n logger.info('\\ntrain instances: {:,}'.format(X_train.shape[0]))\n logger.info('test instances: {:,}'.format(X_test.shape[0]))\n logger.info('features: {:,}'.format(X_train.shape[1]))\n\n # experiment settings\n logger.info('\\nrandom state: {}'.format(args.rs))\n logger.info('criterion: {}'.format(args.criterion))\n logger.info('n_estimators: {}'.format(args.n_estimators))\n logger.info('max_depth: {}'.format(args.max_depth))\n logger.info('k: {}'.format(args.k))\n logger.info('max_features: {}'.format(args.max_features))\n logger.info('n_test: {}\\n'.format(args.n_test))\n\n # train target model\n model = _get_model(args)\n name = 'G-DaRE'\n\n start = time.time()\n model = model.fit(X_train, y_train)\n train_time = time.time() - start\n\n logger.info('[{}] train time: {:.3f}s'.format(name, train_time))\n 
exp_util.performance(model, X_test, y_test, logger=logger, name=name)\n\n percentages = list(range(0, 100, 1))\n start = time.time()\n\n # random method\n if args.method == 'random':\n logger.info('\\nordering by random...')\n np.random.seed(args.rs)\n train_order = np.random.choice(np.arange(X_train.shape[0]), size=X_train.shape[0], replace=False)\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 1: ordered from biggest sum increase in positive label confidence to least\n elif args.method == 'dare1':\n logger.info('\\nordering by G-DaRE...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 2: ordered by most positively influential to least positively influential\n elif args.method == 'dare2':\n logger.info('\\nordering by G-DaRE 2...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test, y_test=y_test)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 3: ordered by biggest sum of absolute change in predictions\n elif args.method == 'dart3':\n logger.info('\\nordering by G-DaRE 3...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test, use_abs=True)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n logger.info('time: {:3f}s'.format(time.time() - start))\n\n results['percentage'] = percentages\n np.save(os.path.join(out_dir, 'results.npy'), results)\n\n\ndef main(args):\n\n # create output dir\n out_dir = os.path.join(args.out_dir,\n args.dataset,\n args.criterion,\n args.method,\n 'rs_{}'.format(args.rs))\n\n log_fp = os.path.join(out_dir, 'log.txt')\n os.makedirs(out_dir, exist_ok=True)\n\n # skip experiment if results already exist\n if args.append_results and os.path.exists(os.path.join(out_dir, 'results.npy')):\n return\n\n # create logger\n logger = print_util.get_logger(log_fp)\n logger.info(args)\n logger.info(datetime.now())\n\n # run experiment\n experiment(args, logger, out_dir)\n\n # remove logger\n print_util.remove_logger(logger)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # I/O settings\n parser.add_argument('--out_dir', type=str, default='output/roar/', help='output directory.')\n parser.add_argument('--data_dir', type=str, default='data', help='data directory.')\n parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')\n parser.add_argument('--append_results', action='store_true', default=False, help='add results.')\n\n # experiment settings\n parser.add_argument('--rs', type=int, default=1, help='seed to enhance reproducibility.')\n parser.add_argument('--n_test', type=int, default=50, help='no. 
test instances')\n parser.add_argument('--method', type=str, default='dare1', help='method to use.')\n\n # tree hyperparameters\n parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.')\n parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.')\n parser.add_argument('--k', type=int, default=25, help='number of thresholds to consider.')\n parser.add_argument('--max_features', type=str, default='sqrt', help='maximum features to sample.')\n parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')\n\n args = parser.parse_args()\n main(args)\n", "\"\"\"\nOrganize results into a single CSV.\n\"\"\"\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import sem\nfrom tqdm import tqdm\n\nhere = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, here + '/../')\nfrom utility import print_util\n\n\ndef get_result(template, in_dir):\n \"\"\"\n Obtain results.\n \"\"\"\n result = template.copy()\n\n fp = os.path.join(in_dir, 'results.npy')\n\n if not os.path.exists(fp):\n result = None\n\n else:\n d = np.load(fp, allow_pickle=True)[()]\n result.update(d)\n\n return result\n\n\ndef process_results(df):\n \"\"\"\n Average results over different random states.\n \"\"\"\n groups = ['dataset', 'criterion', 'method']\n\n main_result_list = []\n\n for tup, gf in tqdm(df.groupby(groups)):\n main_result = {k: v for k, v in zip(groups, tup)}\n main_result['num_runs'] = len(gf)\n\n # aggregate list results\n auc_list = []\n acc_list = []\n ap_list = []\n percentage_list = []\n for row in gf.itertuples(index=False):\n auc_list.append(list(row.auc))\n acc_list.append(list(row.acc))\n ap_list.append(list(row.ap))\n percentage_list.append(list(row.percentage))\n\n main_result['auc'] = np.mean(auc_list, axis=0)\n main_result['acc'] = np.mean(acc_list, axis=0)\n main_result['ap'] = np.mean(ap_list, axis=0)\n\n main_result['auc_std'] = sem(auc_list, axis=0)\n main_result['acc_std'] = sem(acc_list, axis=0)\n main_result['ap_std'] = sem(ap_list, axis=0)\n\n main_result['percentage'] = np.mean(percentage_list, axis=0)\n\n main_result_list.append(main_result)\n\n main_df = pd.DataFrame(main_result_list)\n\n return main_df\n\n\ndef create_csv(args, out_dir, logger):\n\n logger.info('\\nGathering results...')\n\n experiment_settings = list(product(*[args.dataset, args.criterion, args.method, args.rs]))\n\n results = []\n for dataset, criterion, method, rs in tqdm(experiment_settings):\n template = {'dataset': dataset, 'criterion': criterion, 'method': method, 'rs': rs}\n experiment_dir = os.path.join(args.in_dir, dataset, criterion, method, 'rs_{}'.format(rs))\n\n # skip empty experiments\n if not os.path.exists(experiment_dir):\n continue\n\n # add results to result dict\n result = get_result(template, experiment_dir)\n if result is not None:\n results.append(result)\n\n # pd.set_option('display.max_columns', 100)\n # pd.set_option('display.width', 180)\n\n df = pd.DataFrame(results)\n logger.info('\\nRaw results:\\n{}'.format(df))\n\n logger.info('\\nProcessing results...')\n main_df = process_results(df)\n logger.info('\\nProcessed results:\\n{}'.format(main_df))\n\n main_df.to_csv(os.path.join(out_dir, 'results.csv'), index=None)\n\n\ndef main(args):\n\n out_dir = os.path.join(args.out_dir)\n\n # create logger\n os.makedirs(out_dir, exist_ok=True)\n logger = 
print_util.get_logger(os.path.join(out_dir, 'log.txt'))\n logger.info(args)\n logger.info(datetime.now())\n\n create_csv(args, out_dir, logger)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # I/O settings\n parser.add_argument('--in_dir', type=str, default='output/roar/', help='input directory.')\n parser.add_argument('--out_dir', type=str, default='output/roar/csv/', help='output directory.')\n\n # experiment settings\n parser.add_argument('--dataset', type=str, nargs='+',\n default=['surgical', 'vaccine', 'adult', 'bank_marketing', 'flight_delays', 'diabetes',\n 'olympics', 'census', 'credit_card', 'synthetic', 'higgs'], help='dataset.')\n parser.add_argument('--criterion', type=str, nargs='+', default=['gini', 'entropy'], help='criterion.')\n parser.add_argument('--rs', type=int, nargs='+', default=list(range(1, 11)), help='random state.')\n parser.add_argument('--method', type=str, nargs='+', default=['random', 'dart1', 'dart2', 'dart3'], help='method.')\n\n args = parser.parse_args()\n main(args)\n", "\"\"\"\nThis experiment tests predictive performance.\n\"\"\"\nimport os\nimport sys\nimport time\nimport argparse\nimport resource\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nhere = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, here + '/../../')\nsys.path.insert(0, here + '/../')\nimport dare\nfrom utility import data_util\nfrom utility import exp_util\nfrom utility import print_util\n\n\ndef _get_model(args):\n \"\"\"\n Return the appropriate model.\n \"\"\"\n\n if args.model in ['dare']:\n model = dare.Forest(criterion=args.criterion,\n max_depth=args.max_depth,\n n_estimators=args.n_estimators,\n max_features=args.max_features,\n topd=args.topd,\n k=args.k,\n verbose=args.verbose,\n random_state=args.rs)\n\n elif args.model == 'extra_trees':\n model = ExtraTreesClassifier(n_estimators=args.n_estimators,\n max_depth=args.max_depth,\n max_features=args.max_features,\n criterion=args.criterion,\n random_state=args.rs)\n\n elif args.model == 'extra_trees_k1':\n model = ExtraTreesClassifier(n_estimators=args.n_estimators,\n max_depth=args.max_depth,\n max_features=1,\n criterion=args.criterion,\n random_state=args.rs)\n\n elif args.model == 'sklearn':\n model = RandomForestClassifier(n_estimators=args.n_estimators,\n max_depth=args.max_depth,\n max_features=args.max_features,\n criterion=args.criterion,\n random_state=args.rs,\n bootstrap=args.bootstrap)\n else:\n raise ValueError('model {} unknown!'.format(args.model))\n\n return model\n\n\ndef _get_model_dict(args, params):\n \"\"\"\n Return the appropriate model.\n \"\"\"\n\n if args.model == 'dare':\n model = dare.Forest(criterion=args.criterion,\n max_depth=params['max_depth'],\n n_estimators=params['n_estimators'],\n max_features=args.max_features,\n topd=args.topd,\n k=params['k'],\n verbose=args.verbose,\n random_state=args.rs)\n\n elif args.model == 'extra_trees':\n model = ExtraTreesClassifier(n_estimators=params['n_estimators'],\n max_depth=params['max_depth'],\n max_features=args.max_features,\n criterion=args.criterion,\n random_state=args.rs)\n\n elif args.model == 'extra_trees_k1':\n model = ExtraTreesClassifier(n_estimators=params['n_estimators'],\n max_depth=params['max_depth'],\n 
max_features=1,\n criterion=args.criterion,\n random_state=args.rs)\n\n elif args.model == 'sklearn':\n model = RandomForestClassifier(n_estimators=params['n_estimators'],\n max_depth=params['max_depth'],\n max_features=args.max_features,\n criterion=args.criterion,\n random_state=args.rs,\n bootstrap=args.bootstrap)\n else:\n raise ValueError('model {} unknown!'.format(args.model))\n\n return model\n\n\ndef _get_best_params(gs, param_grid, keys, logger, tol=1e-3):\n \"\"\"\n Chooses the set of hyperparameters whose `mean_fit_score` is within\n `tol` of the best `mean_fit_score` and has the lowest `mean_fit_time`.\n \"\"\"\n pd.set_option('display.max_columns', 100)\n pd.set_option('display.max_rows', 100)\n\n cols = ['mean_fit_time', 'mean_test_score', 'rank_test_score']\n cols += ['param_{}'.format(param) for param in keys]\n\n df = pd.DataFrame(gs.cv_results_)\n logger.info('gridsearch results:')\n logger.info(df[cols].sort_values('rank_test_score'))\n\n # filter the parameters with the highest performances\n logger.info('tolerance: {}'.format(args.tol))\n df = df[df['mean_test_score'].max() - df['mean_test_score'] <= tol]\n\n best_df = df.sort_values('mean_fit_time').reset_index().loc[0]\n best_ndx = best_df['index']\n best_params = best_df['params']\n logger.info('best_index: {}, best_params: {}'.format(best_ndx, best_params))\n\n return best_params\n\n\ndef performance(args, out_dir, logger):\n\n begin = time.time()\n\n # obtain data\n X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)\n\n # dataset statistics\n logger.info('train instances: {:,}'.format(X_train.shape[0]))\n logger.info('test instances: {:,}'.format(X_test.shape[0]))\n logger.info('attributes: {:,}'.format(X_train.shape[1]))\n logger.info('split criterion: {}'.format(args.criterion))\n\n # tune on a fraction of the training data\n if not args.no_tune:\n\n if args.tune_frac < 1.0:\n sss = StratifiedShuffleSplit(n_splits=1, test_size=2,\n train_size=args.tune_frac,\n random_state=args.rs)\n tune_indices, _ = list(sss.split(X_train, y_train))[0]\n X_train_sub, y_train_sub = X_train[tune_indices], y_train[tune_indices]\n logger.info('tune instances: {:,}'.format(X_train_sub.shape[0]))\n\n else:\n X_train_sub, y_train_sub = X_train, y_train\n else:\n X_train_sub, y_train_sub = X_train, y_train\n\n # hyperparameter values\n n_estimators = [10, 50, 100, 250]\n max_depth = [1, 3, 5, 10, 20]\n\n # set hyperparameter grid\n param_grid = {'max_depth': max_depth,\n 'n_estimators': n_estimators}\n\n # add additional parameter for DaRE\n if args.model == 'dare':\n param_grid['k'] = [5, 10, 25, 50]\n\n # get hyperparameter names\n keys = list(param_grid.keys())\n\n # test model\n logger.info('\\n{}'.format(args.model.capitalize()))\n start = time.time()\n model = _get_model(args)\n\n # tune hyperparameters\n if not args.no_tune:\n logger.info('param_grid: {}'.format(param_grid))\n\n # cross-validation\n skf = StratifiedKFold(n_splits=args.cv, shuffle=True, random_state=args.rs)\n gs = GridSearchCV(model, param_grid, scoring=args.scoring,\n cv=skf, verbose=args.verbose, refit=False)\n gs = gs.fit(X_train_sub, y_train_sub)\n\n best_params = _get_best_params(gs, param_grid, keys, logger, args.tol)\n model = _get_model_dict(args, best_params)\n\n # record time it takes to tune the model\n tune_time = time.time() - start\n\n # train best model\n start = time.time()\n model = model.fit(X_train, y_train)\n train_time = time.time() - start\n logger.info('train time: {:.3f}s'.format(train_time))\n\n 
n_nodes, n_random, n_greedy = model.trees_[0].get_node_statistics()\n print('[Tree 0] no. nodes: {:,}, no. random: {:,}, no. greedy: {:,}'.format(n_nodes, n_random, n_greedy))\n print('[Tree 0] memory usage: {:,} bytes'.format(model.trees_[0].get_memory_usage()))\n print('[Forest] memory usage: {:,} bytes'.format(model.get_memory_usage()))\n print('max_rss: {:,}'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))\n exit(0)\n\n # evaluate\n auc, acc, ap = exp_util.performance(model, X_test, y_test, name=args.model, logger=logger)\n\n # save results\n result = model.get_params()\n result['model'] = args.model\n result['bootstrap'] = args.bootstrap\n result['auc'] = auc\n result['acc'] = acc\n result['ap'] = ap\n result['train_time'] = train_time\n result['tune_train_time'] = tune_time + train_time\n result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n np.save(os.path.join(out_dir, 'results.npy'), result)\n\n logger.info('total time: {:.3f}s'.format(time.time() - begin))\n logger.info('max_rss: {:,}'.format(result['max_rss']))\n\n\ndef main(args):\n\n # create output dir\n out_dir = os.path.join(args.out_dir, args.dataset, args.criterion)\n\n # add tuning to filepath\n if args.no_tune:\n out_dir = os.path.join(out_dir, 'no_tune', 'rs_{}'.format(args.rs))\n else:\n out_dir = os.path.join(out_dir, 'tuned', 'rs_{}'.format(args.rs))\n\n # create filename\n if args.model == 'sklearn':\n out_dir = os.path.join(out_dir, args.model)\n\n if args.bootstrap:\n out_dir = os.path.join(out_dir, 'bootstrap')\n\n elif args.model == 'dare':\n assert args.topd == 0\n out_dir = os.path.join(out_dir, args.model)\n\n elif args.model in ['extra_trees', 'extra_trees_k1', 'borat']:\n out_dir = os.path.join(out_dir, args.model)\n\n else:\n raise ValueError('model {} unknown!'.format(args.model))\n\n # create output directory and clear any previous contents\n os.makedirs(out_dir, exist_ok=True)\n print_util.clear_dir(out_dir)\n\n # create logger\n logger = print_util.get_logger(os.path.join(out_dir, 'log.txt'))\n logger.info(args)\n logger.info(datetime.now())\n\n # write everything printed to stdout to this log file\n logfile, stdout, stderr = print_util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt'))\n\n # run experiment\n performance(args, out_dir, logger)\n\n # restore original stdout and stderr settings\n print_util.reset_stdout_stderr(logfile, stdout, stderr)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # I/O settings\n parser.add_argument('--data_dir', type=str, default='data', help='data directory.')\n parser.add_argument('--out_dir', type=str, default='output/performance/', help='output directory.')\n parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')\n\n # experiment settings\n parser.add_argument('--rs', type=int, default=1, help='random state.')\n parser.add_argument('--model', type=str, default='dare', help='type of model.')\n parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')\n parser.add_argument('--topd', type=int, default=0, help='0 for exact, 1000 for random.')\n parser.add_argument('--k', type=int, default=25, help='no. 
of candidate thresholds to sample.')\n parser.add_argument('--bootstrap', action='store_true', default=False, help='use bootstrapping with sklearn.')\n\n # tuning settings\n parser.add_argument('--no_tune', action='store_true', default=False, help='do not tune.')\n parser.add_argument('--tune_frac', type=float, default=1.0, help='fraction of training to use for tuning.')\n parser.add_argument('--cv', type=int, default=5, help='number of cross-validation folds for tuning.')\n parser.add_argument('--scoring', type=str, default='roc_auc', help='metric for tuning.')\n parser.add_argument('--tol', type=float, default=1e-3, help='allowable accuracy difference from the best.')\n\n # tree/forest hyperparameters\n parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.')\n parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.')\n parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.')\n\n # display settings\n parser.add_argument('--verbose', type=int, default=2, help='verbosity level.')\n\n args = parser.parse_args()\n main(args)\n" ]
[ [ "numpy.random.seed", "numpy.random.choice", "numpy.unique", "numpy.arange", "numpy.delete", "numpy.argsort", "numpy.random.randint" ], [ "numpy.load", "scipy.stats.sem", "numpy.mean", "pandas.DataFrame" ], [ "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.ExtraTreesClassifier", "sklearn.model_selection.StratifiedKFold", "pandas.DataFrame", "pandas.set_option", "sklearn.model_selection.StratifiedShuffleSplit" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iostermann/deeplab2
[ "e0f7eecfac5d35c3e9e66f061098d5f5f15a7152", "e0f7eecfac5d35c3e9e66f061098d5f5f15a7152", "e0f7eecfac5d35c3e9e66f061098d5f5f15a7152", "e0f7eecfac5d35c3e9e66f061098d5f5f15a7152" ]
[ "data/dataset_utils_test.py", "data/preprocessing/input_preprocessing.py", "evaluation/test_utils.py", "model/decoder/panoptic_deeplab_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for dataset_utils.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom deeplab2.data import dataset_utils\n\n\nclass DatasetUtilsTest(tf.test.TestCase):\n\n def _get_test_labels(self, num_classes, shape, label_divisor):\n num_ids_per_class = 35\n semantic_labels = np.random.randint(num_classes, size=shape)\n panoptic_labels = np.random.randint(\n num_ids_per_class, size=shape) + semantic_labels * label_divisor\n\n semantic_labels = tf.convert_to_tensor(semantic_labels, dtype=tf.int32)\n panoptic_labels = tf.convert_to_tensor(panoptic_labels, dtype=tf.int32)\n\n return panoptic_labels, semantic_labels\n\n def setUp(self):\n super().setUp()\n self._first_thing_class = 9\n self._num_classes = 19\n self._dataset_info = {\n 'panoptic_label_divisor': 1000,\n 'class_has_instances_list': tf.range(self._first_thing_class,\n self._num_classes)\n }\n self._num_ids = 37\n self._labels, self._semantic_classes = self._get_test_labels(\n self._num_classes, [2, 33, 33],\n self._dataset_info['panoptic_label_divisor'])\n\n def test_get_panoptic_and_semantic_label(self):\n # Note: self._labels contains one crowd instance per class.\n (returned_sem_labels, returned_pan_labels, returned_thing_mask,\n returned_crowd_region) = (\n dataset_utils.get_semantic_and_panoptic_label(\n self._dataset_info, self._labels, ignore_label=255))\n\n expected_semantic_labels = self._semantic_classes\n condition = self._labels % self._dataset_info['panoptic_label_divisor'] == 0\n condition = tf.logical_and(\n condition,\n tf.math.greater_equal(expected_semantic_labels,\n self._first_thing_class))\n expected_crowd_labels = tf.where(condition, 1.0, 0.0)\n expected_pan_labels = tf.where(\n condition, 255 * self._dataset_info['panoptic_label_divisor'],\n self._labels)\n expected_thing_mask = tf.where(\n tf.math.greater_equal(expected_semantic_labels,\n self._first_thing_class), 1.0, 0.0)\n\n self.assertListEqual(returned_sem_labels.shape.as_list(),\n expected_semantic_labels.shape.as_list())\n self.assertListEqual(returned_pan_labels.shape.as_list(),\n expected_pan_labels.shape.as_list())\n self.assertListEqual(returned_crowd_region.shape.as_list(),\n expected_crowd_labels.shape.as_list())\n self.assertListEqual(returned_thing_mask.shape.as_list(),\n expected_thing_mask.shape.as_list())\n np.testing.assert_equal(returned_sem_labels.numpy(),\n expected_semantic_labels.numpy())\n np.testing.assert_equal(returned_pan_labels.numpy(),\n expected_pan_labels.numpy())\n np.testing.assert_equal(returned_crowd_region.numpy(),\n expected_crowd_labels.numpy())\n np.testing.assert_equal(returned_thing_mask.numpy(),\n expected_thing_mask.numpy())\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2021 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This file contains functions to preprocess images and labels.\"\"\"\n\nimport tensorflow as tf\n\nfrom deeplab2.data.preprocessing import autoaugment_utils\nfrom deeplab2.data.preprocessing import preprocess_utils\n\n# The probability of flipping the images and labels\n# left-right during training\n_PROB_OF_FLIP = 0.5\n\n_MEAN_PIXEL = [127.5, 127.5, 127.5]\n\n\ndef _pad_image_and_label(image,\n label,\n offset_height,\n offset_width,\n target_height,\n target_width,\n ignore_label=None):\n \"\"\"Pads the image and the label to the given size.\n\n Args:\n image: A tf.Tensor of shape [height, width, channels].\n label: A tf.Tensor of shape [height, width, 1] or None.\n offset_height: The number of rows of zeros to add on top of the image and\n label.\n offset_width: The number of columns of zeros to add on the left of the image\n and label.\n target_height: The total height after padding.\n target_width: The total width after padding.\n ignore_label: The ignore_label for the label. Must only be set when label is\n given.\n\n Returns:\n The padded image and label as a tuple (padded_image, padded_label).\n\n Raises:\n tf.errors.InvalidArgumentError: An error occurs if the padding configuration\n is invalid.\n ValueError: An error occurs if label is given without an ignore_label.\n \"\"\"\n height = tf.shape(image)[0]\n width = tf.shape(image)[1]\n original_dtype = image.dtype\n if original_dtype not in (tf.float32, tf.float64):\n image = tf.cast(image, tf.float32)\n\n bottom_padding = target_height - offset_height - height\n right_padding = target_width - offset_width - width\n\n assert_bottom_padding = tf.assert_greater(\n bottom_padding, -1,\n 'The padding configuration is not valid. Please either increase the '\n 'target size or reduce the padding offset.')\n assert_right_padding = tf.assert_greater(\n right_padding, -1, 'The padding configuration is not valid. Please either'\n ' increase the target size or reduce the padding offset.')\n with tf.control_dependencies([assert_bottom_padding, assert_right_padding]):\n paddings = [[offset_height, bottom_padding], [offset_width, right_padding],\n [0, 0]]\n\n image = image - _MEAN_PIXEL\n image = tf.pad(image, paddings)\n image = image + _MEAN_PIXEL\n image = tf.cast(image, original_dtype)\n\n if label is not None:\n if ignore_label is None:\n raise ValueError(\n 'If a label is given, the ignore label must be set too.')\n label = tf.pad(label, paddings, constant_values=ignore_label)\n\n return image, label\n\n\ndef _update_max_resize_value(max_resize_value, crop_size, is_inference=False):\n \"\"\"Checks and may update max_resize_value.\n\n Args:\n max_resize_value: A 2-tuple of (height, width), maximum allowed value after\n resize. If a single element is given, then height and width share the same\n value. 
None, empty or having 0 indicates no maximum value will be used.\n crop_size: A 2-tuple of (height, width), crop size used.\n is_inference: Boolean, whether the model is performing inference or not.\n\n Returns:\n Updated max_resize_value.\n \"\"\"\n max_resize_value = preprocess_utils.process_resize_value(max_resize_value)\n if max_resize_value is None and is_inference:\n # During inference, default max_resize_value to crop size to allow\n # model taking input images with larger sizes.\n max_resize_value = crop_size\n\n if max_resize_value is None:\n return None\n\n if max_resize_value[0] > crop_size[0] or max_resize_value[1] > crop_size[1]:\n raise ValueError(\n 'Maximum resize value provided (%s) exceeds model crop size (%s)' %\n (max_resize_value, crop_size))\n return max_resize_value\n\n\ndef preprocess_image_and_label(image,\n label,\n crop_height,\n crop_width,\n prev_image=None,\n prev_label=None,\n depth=None,\n min_resize_value=None,\n max_resize_value=None,\n resize_factor=None,\n min_scale_factor=1.,\n max_scale_factor=1.,\n scale_factor_step_size=0,\n ignore_label=None,\n ignore_depth=None,\n is_training=True,\n autoaugment_policy_name=None):\n \"\"\"Preprocesses the image and label.\n\n Args:\n image: A tf.Tensor containing the image with shape [height, width, 3].\n label: A tf.Tensor containing the label with shape [height, width, 1] or\n None.\n crop_height: The height value used to crop the image and label.\n crop_width: The width value used to crop the image and label.\n prev_image: An optional tensor of shape [image_height, image_width, 3].\n prev_label: An optional tensor of shape [label_height, label_width, 1].\n depth: An optional tensor of shape [label_height, label_width, 1].\n min_resize_value: A 2-tuple of (height, width), desired minimum value after\n resize. If a single element is given, then height and width share the same\n value. None, empty or having 0 indicates no minimum value will be used.\n max_resize_value: A 2-tuple of (height, width), maximum allowed value after\n resize. If a single element is given, then height and width share the same\n value. None, empty or having 0 indicates no maximum value will be used.\n resize_factor: Resized dimensions are multiple of factor plus one.\n min_scale_factor: Minimum scale factor for random scale augmentation.\n max_scale_factor: Maximum scale factor for random scale augmentation.\n scale_factor_step_size: The step size from min scale factor to max scale\n factor. The input is randomly scaled based on the value of\n (min_scale_factor, max_scale_factor, scale_factor_step_size).\n ignore_label: The label value which will be ignored for training and\n evaluation.\n ignore_depth: The depth value which will be ignored for training and\n evaluation.\n is_training: If the preprocessing is used for training or not.\n autoaugment_policy_name: String, autoaugment policy name. 
See\n autoaugment_policy.py for available policies.\n\n Returns:\n resized_image: The resized input image without other augmentations as a\n tf.Tensor.\n processed_image: The preprocessed image as a tf.Tensor.\n label: The preprocessed groundtruth segmentation label as a tf.Tensor.\n preprocessed_prev_image: The preprocessed prev_image as a tf.Tensor.\n prev_label: The preprocessed prev_label as a tf.Tensor.\n depth: The preprocessed depth as a tf.Tensor.\n\n Raises:\n ValueError: Ground truth label not provided during training.\n ValueError: Setting min_resize_value or max_resize_value for depth dataset.\n \"\"\"\n if is_training and label is None:\n raise ValueError('During training, label must be provided.')\n\n image.get_shape().assert_is_compatible_with(tf.TensorShape([None, None, 3]))\n\n # Keep reference to original image.\n resized_image = image\n if prev_image is not None:\n image = tf.concat([image, prev_image], axis=2)\n processed_image = tf.cast(image, tf.float32)\n processed_prev_image = None\n\n if label is not None:\n label.get_shape().assert_is_compatible_with(tf.TensorShape([None, None, 1]))\n if prev_label is not None:\n label = tf.concat([label, prev_label], axis=2)\n label = tf.cast(label, tf.int32)\n\n if depth is not None:\n if (any(value != 0 for value in min_resize_value) or\n any(value != 0 for value in max_resize_value)):\n raise ValueError(\n 'Depth prediction with non-zero min_resize_value or max_resize_value'\n 'is not supported.')\n depth.get_shape().assert_is_compatible_with(tf.TensorShape([None, None, 1]))\n depth = tf.cast(depth, tf.int32)\n\n # Resize image and label to the desired range.\n if any([min_resize_value, max_resize_value, not is_training]):\n max_resize_value = _update_max_resize_value(\n max_resize_value,\n crop_size=(crop_height, crop_width),\n is_inference=not is_training)\n\n processed_image, label = (\n preprocess_utils.resize_to_range(\n image=processed_image,\n label=label,\n min_size=min_resize_value,\n max_size=max_resize_value,\n factor=resize_factor,\n align_corners=True))\n if prev_image is None:\n resized_image = tf.identity(processed_image)\n else:\n resized_image, _ = tf.split(processed_image, 2, axis=2)\n\n if prev_image is not None:\n processed_image, processed_prev_image = tf.split(processed_image, 2, axis=2)\n\n if prev_label is not None:\n label, prev_label = tf.split(label, 2, axis=2)\n\n if not is_training:\n image_height = tf.shape(processed_image)[0]\n image_width = tf.shape(processed_image)[1]\n\n offset_height = 0\n offset_width = 0\n image_before_padding = processed_image\n processed_image, label = _pad_image_and_label(processed_image, label,\n offset_height, offset_width,\n crop_height, crop_width,\n ignore_label)\n processed_image.set_shape([crop_height, crop_width, 3])\n if label is not None:\n label.set_shape([crop_height, crop_width, 1])\n if prev_image is not None:\n processed_prev_image, prev_label = _pad_image_and_label(\n processed_prev_image, prev_label, offset_height, offset_width,\n crop_height, crop_width, ignore_label)\n processed_prev_image.set_shape([crop_height, crop_width, 3])\n if prev_label is not None:\n prev_label.set_shape([crop_height, crop_width, 1])\n if depth is not None:\n _, depth = _pad_image_and_label(image_before_padding, depth,\n offset_height, offset_width, crop_height,\n crop_width, ignore_depth)\n depth.set_shape([crop_height, crop_width, 1])\n return (resized_image, processed_image, label, processed_prev_image,\n prev_label, depth)\n\n # Data augmentation by randomly scaling the 
inputs.\n scale = preprocess_utils.get_random_scale(min_scale_factor, max_scale_factor,\n scale_factor_step_size)\n image_before_scaling = processed_image\n processed_image, label = preprocess_utils.randomly_scale_image_and_label(\n processed_image, label, scale)\n if processed_prev_image is not None:\n (processed_prev_image,\n prev_label) = preprocess_utils.randomly_scale_image_and_label(\n processed_prev_image, prev_label, scale)\n if depth is not None:\n _, depth = preprocess_utils.randomly_scale_image_and_label(\n image_before_scaling, depth, scale)\n # Scaling depth maps also changes the depth values: the larger, the closer.\n depth = tf.cast(depth, tf.float32)\n depth = depth / scale\n depth = tf.cast(depth, tf.int32)\n\n # Apply autoaugment if any.\n if autoaugment_policy_name:\n processed_image, label = _autoaugment_helper(processed_image, label,\n ignore_label,\n autoaugment_policy_name)\n if processed_prev_image is not None:\n processed_prev_image, prev_label = _autoaugment_helper(\n processed_prev_image, prev_label, ignore_label,\n autoaugment_policy_name)\n\n # Pad image and label to have dimensions >= [crop_height, crop_width].\n image_height = tf.shape(processed_image)[0]\n image_width = tf.shape(processed_image)[1]\n target_height = image_height + tf.maximum(crop_height - image_height, 0)\n target_width = image_width + tf.maximum(crop_width - image_width, 0)\n\n # Randomly crop the image and label.\n def _uniform_offset(margin):\n return tf.random.uniform([],\n minval=0,\n maxval=tf.maximum(margin, 1),\n dtype=tf.int32)\n\n offset_height = _uniform_offset(crop_height - image_height)\n offset_width = _uniform_offset(crop_width - image_width)\n image_before_padding = processed_image\n processed_image, label = _pad_image_and_label(processed_image, label,\n offset_height, offset_width,\n target_height, target_width,\n ignore_label)\n if processed_prev_image is not None:\n processed_prev_image, prev_label = _pad_image_and_label(\n processed_prev_image, prev_label, offset_height, offset_width,\n target_height, target_width, ignore_label)\n\n if depth is not None:\n _, depth = _pad_image_and_label(image_before_padding, depth, offset_height,\n offset_width, target_height, target_width,\n ignore_depth)\n\n if processed_prev_image is not None:\n if depth is not None:\n (processed_image, label, processed_prev_image, prev_label,\n depth) = preprocess_utils.random_crop(\n [processed_image, label, processed_prev_image, prev_label, depth],\n crop_height, crop_width)\n # Randomly left-right flip the image and label.\n (processed_image, label, processed_prev_image, prev_label, depth,\n _) = preprocess_utils.flip_dim(\n [processed_image, label, processed_prev_image, prev_label, depth],\n _PROB_OF_FLIP,\n dim=1)\n else:\n (processed_image, label, processed_prev_image,\n prev_label) = preprocess_utils.random_crop(\n [processed_image, label, processed_prev_image, prev_label],\n crop_height, crop_width)\n # Randomly left-right flip the image and label.\n (processed_image, label, processed_prev_image, prev_label,\n _) = preprocess_utils.flip_dim(\n [processed_image, label, processed_prev_image, prev_label],\n _PROB_OF_FLIP,\n dim=1)\n else:\n processed_image, label = preprocess_utils.random_crop(\n [processed_image, label], crop_height, crop_width)\n # Randomly left-right flip the image and label.\n processed_image, label, _ = preprocess_utils.flip_dim(\n [processed_image, label], _PROB_OF_FLIP, dim=1)\n\n return (resized_image, processed_image, label, processed_prev_image,\n prev_label, 
depth)\n\n\ndef _autoaugment_helper(image, label, ignore_label, policy_name):\n image = tf.cast(image, tf.uint8)\n label = tf.cast(label, tf.int32)\n image, label = autoaugment_utils.distort_image_with_autoaugment(\n image, label, ignore_label, policy_name)\n image = tf.cast(image, tf.float32)\n return image, label\n", "# coding=utf-8\n# Copyright 2021 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions to set up unit tests on Panoptic Segmentation code.\"\"\"\n\nimport os\nfrom typing import Mapping, Optional, Tuple\n\nfrom absl import flags\nimport numpy as np\nfrom PIL import Image\n\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\n_TEST_DATA_DIR = ('deeplab2/'\n 'evaluation/testdata')\n\n\ndef read_test_image(testdata_path: str,\n image_format: Optional[str] = None) -> np.ndarray:\n \"\"\"Loads a test image.\n\n Args:\n testdata_path: Image path relative to panoptic_segmentation/testdata as a\n string.\n image_format: Format of the image. Can be one of 'RGBA', 'RGB', or 'L'.\n\n Returns:\n The image, as a numpy array.\n \"\"\"\n image_path = os.path.join(_TEST_DATA_DIR, testdata_path)\n with tf.io.gfile.GFile(image_path, 'rb') as f:\n image = Image.open(f)\n if image_format is not None:\n image = image.convert(image_format)\n return np.array(image)\n\n\ndef read_segmentation_with_rgb_color_map(\n image_testdata_path: str,\n rgb_to_semantic_label: Mapping[Tuple[int, int, int], int],\n output_dtype: Optional[np.dtype] = None) -> np.ndarray:\n \"\"\"Reads a test segmentation as an image and a map from colors to labels.\n\n Args:\n image_testdata_path: Image path relative to panoptic_segmentation/testdata\n as a string.\n rgb_to_semantic_label: Mapping from RGB colors to integer labels as a\n dictionary.\n output_dtype: Type of the output labels. 
If None, defaults to the type of\n the provided color map.\n\n Returns:\n A 2D numpy array of labels.\n\n Raises:\n ValueError: On an incomplete `rgb_to_semantic_label`.\n \"\"\"\n rgb_image = read_test_image(image_testdata_path, image_format='RGB')\n if len(rgb_image.shape) != 3 or rgb_image.shape[2] != 3:\n raise AssertionError('Expected RGB image, actual shape is %s' %\n (rgb_image.shape,))\n\n num_pixels = rgb_image.shape[0] * rgb_image.shape[1]\n unique_colors = np.unique(np.reshape(rgb_image, [num_pixels, 3]), axis=0)\n if not set(map(tuple, unique_colors)).issubset(rgb_to_semantic_label.keys()):\n raise ValueError('RGB image has colors not in color map.')\n\n output_dtype = output_dtype or type(\n next(iter(rgb_to_semantic_label.values())))\n output_labels = np.empty(rgb_image.shape[:2], dtype=output_dtype)\n for rgb_color, int_label in rgb_to_semantic_label.items():\n color_array = np.array(rgb_color, ndmin=3)\n output_labels[np.all(rgb_image == color_array, axis=2)] = int_label\n return output_labels\n\n\ndef panoptic_segmentation_with_class_map(\n instance_testdata_path: str, instance_label_to_semantic_label: Mapping[int,\n int]\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Reads in a panoptic segmentation with an instance map and a map to classes.\n\n Args:\n instance_testdata_path: Path to a grayscale instance map, given as a string\n and relative to panoptic_segmentation/testdata.\n instance_label_to_semantic_label: A map from instance labels to class\n labels.\n\n Returns:\n A tuple `(instance_labels, class_labels)` of numpy arrays.\n\n Raises:\n ValueError: On a mismatched set of instances in\n the\n `instance_label_to_semantic_label`.\n \"\"\"\n instance_labels = read_test_image(instance_testdata_path, image_format='L')\n if set(np.unique(instance_labels)) != set(\n instance_label_to_semantic_label.keys()):\n raise ValueError('Provided class map does not match present instance ids.')\n\n class_labels = np.empty_like(instance_labels)\n for instance_id, class_id in instance_label_to_semantic_label.items():\n class_labels[instance_labels == instance_id] = class_id\n\n return instance_labels, class_labels\n", "# coding=utf-8\n# Copyright 2021 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for panoptic_deeplab.\"\"\"\n\nimport tensorflow as tf\n\nfrom deeplab2 import common\nfrom deeplab2 import config_pb2\nfrom deeplab2.model.decoder import panoptic_deeplab\nfrom deeplab2.utils import test_utils\n\n\ndef _create_panoptic_deeplab_example_proto(num_classes=19):\n semantic_decoder = config_pb2.DecoderOptions(\n feature_key='res5', atrous_rates=[6, 12, 18])\n semantic_head = config_pb2.HeadOptions(\n output_channels=num_classes, head_channels=256)\n\n instance_decoder = config_pb2.DecoderOptions(\n feature_key='res5', decoder_channels=128, atrous_rates=[6, 12, 18])\n center_head = config_pb2.HeadOptions(\n output_channels=1, head_channels=32)\n regression_head = config_pb2.HeadOptions(\n output_channels=2, head_channels=32)\n\n instance_branch = 
config_pb2.InstanceOptions(\n instance_decoder_override=instance_decoder,\n center_head=center_head,\n regression_head=regression_head)\n\n panoptic_deeplab_options = config_pb2.ModelOptions.PanopticDeeplabOptions(\n semantic_head=semantic_head, instance=instance_branch)\n # Add features from lowest to highest.\n panoptic_deeplab_options.low_level.add(\n feature_key='res3', channels_project=64)\n panoptic_deeplab_options.low_level.add(\n feature_key='res2', channels_project=32)\n\n return config_pb2.ModelOptions(\n decoder=semantic_decoder, panoptic_deeplab=panoptic_deeplab_options)\n\n\ndef _create_expected_shape(input_shape, output_channels):\n output_shape = input_shape.copy()\n output_shape[3] = output_channels\n return output_shape\n\n\nclass PanopticDeeplabTest(tf.test.TestCase):\n\n def test_panoptic_deeplab_single_decoder_init_errors(self):\n with self.assertRaises(ValueError):\n _ = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='test',\n low_level_feature_names=['only_one_name'], # Error: Only one name.\n low_level_channels_project=[64, 32],\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n with self.assertRaises(ValueError):\n _ = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='test',\n low_level_feature_names=['one', 'two'],\n low_level_channels_project=[64], # Error: Only one projection size.\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n def test_panoptic_deeplab_single_decoder_call_errors(self):\n decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='high',\n low_level_feature_names=['low_one', 'low_two'],\n low_level_channels_project=[64, 32],\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n with self.assertRaises(KeyError):\n input_dict = {'not_high': tf.random.uniform(shape=(2, 32, 32, 512)),\n 'low_one': tf.random.uniform(shape=(2, 128, 128, 128)),\n 'low_two': tf.random.uniform(shape=(2, 256, 256, 64))}\n _ = decoder(input_dict)\n with self.assertRaises(KeyError):\n input_dict = {'high': tf.random.uniform(shape=(2, 32, 32, 512)),\n 'not_low_one': tf.random.uniform(shape=(2, 128, 128, 128)),\n 'low_two': tf.random.uniform(shape=(2, 256, 256, 64))}\n _ = decoder(input_dict)\n with self.assertRaises(KeyError):\n input_dict = {'high': tf.random.uniform(shape=(2, 32, 32, 512)),\n 'low_one': tf.random.uniform(shape=(2, 128, 128, 128)),\n 'not_low_two': tf.random.uniform(shape=(2, 256, 256, 64))}\n _ = decoder(input_dict)\n\n def test_panoptic_deeplab_single_decoder_reset_pooling(self):\n decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='high',\n low_level_feature_names=['low_one', 'low_two'],\n low_level_channels_project=[64, 32],\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n pool_size = (None, None)\n decoder.reset_pooling_layer()\n\n self.assertTupleEqual(decoder._aspp._aspp_pool._pool_size,\n pool_size)\n\n def test_panoptic_deeplab_single_decoder_set_pooling(self):\n decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='high',\n low_level_feature_names=['low_one', 'low_two'],\n low_level_channels_project=[64, 32],\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n pool_size = (10, 10)\n 
decoder.set_pool_size(pool_size)\n\n self.assertTupleEqual(decoder._aspp._aspp_pool._pool_size,\n pool_size)\n\n def test_panoptic_deeplab_single_decoder_output_shape(self):\n decoder_channels = 256\n decoder = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='high',\n low_level_feature_names=['low_one', 'low_two'],\n low_level_channels_project=[64, 32],\n aspp_output_channels=256,\n decoder_output_channels=decoder_channels,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n input_shapes_list = [[[2, 128, 128, 128], [2, 256, 256, 64],\n [2, 32, 32, 512]],\n [[2, 129, 129, 128], [2, 257, 257, 64],\n [2, 33, 33, 512]]]\n\n for shapes in input_shapes_list:\n input_dict = {'low_one': tf.random.uniform(shape=shapes[0]),\n 'low_two': tf.random.uniform(shape=shapes[1]),\n 'high': tf.random.uniform(shape=shapes[2])}\n\n expected_shape = _create_expected_shape(shapes[1], decoder_channels)\n\n resulting_tensor = decoder(input_dict)\n self.assertListEqual(resulting_tensor.shape.as_list(), expected_shape)\n\n def test_panoptic_deeplab_single_head_output_shape(self):\n output_channels = 19\n head = panoptic_deeplab.PanopticDeepLabSingleHead(\n intermediate_channels=256,\n output_channels=output_channels,\n pred_key='pred',\n name='test_head')\n\n input_shapes_list = [[2, 256, 256, 48], [2, 257, 257, 48]]\n for shape in input_shapes_list:\n input_tensor = tf.random.uniform(shape=shape)\n expected_shape = _create_expected_shape(shape, output_channels)\n\n resulting_tensor = head(input_tensor)\n self.assertListEqual(resulting_tensor['pred'].shape.as_list(),\n expected_shape)\n\n def test_panoptic_deeplab_decoder_output_shape(self):\n num_classes = 31\n model_options = _create_panoptic_deeplab_example_proto(\n num_classes=num_classes)\n decoder = panoptic_deeplab.PanopticDeepLab(\n panoptic_deeplab_options=model_options.panoptic_deeplab,\n decoder_options=model_options.decoder)\n\n input_shapes_list = [[[2, 256, 256, 64], [2, 128, 128, 128],\n [2, 32, 32, 512]],\n [[2, 257, 257, 64], [2, 129, 129, 128],\n [2, 33, 33, 512]]]\n\n for shapes in input_shapes_list:\n input_dict = {'res2': tf.random.uniform(shape=shapes[0]),\n 'res3': tf.random.uniform(shape=shapes[1]),\n 'res5': tf.random.uniform(shape=shapes[2])}\n\n expected_semantic_shape = _create_expected_shape(shapes[0], num_classes)\n expected_instance_center_shape = _create_expected_shape(shapes[0], 1)\n expected_instance_regression_shape = _create_expected_shape(shapes[0], 2)\n\n resulting_dict = decoder(input_dict)\n self.assertListEqual(\n resulting_dict[common.PRED_SEMANTIC_LOGITS_KEY].shape.as_list(),\n expected_semantic_shape)\n self.assertListEqual(\n resulting_dict[common.PRED_CENTER_HEATMAP_KEY].shape.as_list(),\n expected_instance_center_shape)\n self.assertListEqual(\n resulting_dict[common.PRED_OFFSET_MAP_KEY].shape.as_list(),\n expected_instance_regression_shape)\n\n @test_utils.test_all_strategies\n def test_panoptic_deeplab_sync_bn(self, strategy):\n num_classes = 31\n model_options = _create_panoptic_deeplab_example_proto(\n num_classes=num_classes)\n input_dict = {'res2': tf.random.uniform(shape=[2, 257, 257, 64]),\n 'res3': tf.random.uniform(shape=[2, 129, 129, 128]),\n 'res5': tf.random.uniform(shape=[2, 33, 33, 512])}\n\n with strategy.scope():\n for bn_layer in test_utils.NORMALIZATION_LAYERS:\n decoder = panoptic_deeplab.PanopticDeepLab(\n panoptic_deeplab_options=model_options.panoptic_deeplab,\n decoder_options=model_options.decoder,\n bn_layer=bn_layer)\n _ = decoder(input_dict)\n\n def 
test_panoptic_deeplab_single_decoder_logging_feature_order(self):\n with self.assertLogs(level='WARN'):\n _ = panoptic_deeplab.PanopticDeepLabSingleDecoder(\n high_level_feature_name='high',\n low_level_feature_names=['low_two', 'low_one'],\n low_level_channels_project=[32, 64], # Potentially wrong order.\n aspp_output_channels=256,\n decoder_output_channels=256,\n atrous_rates=[6, 12, 18],\n name='test_decoder')\n\n def test_panoptic_deeplab_decoder_ckpt_tems(self):\n num_classes = 31\n model_options = _create_panoptic_deeplab_example_proto(\n num_classes=num_classes)\n decoder = panoptic_deeplab.PanopticDeepLab(\n panoptic_deeplab_options=model_options.panoptic_deeplab,\n decoder_options=model_options.decoder)\n ckpt_dict = decoder.checkpoint_items\n self.assertIn(common.CKPT_SEMANTIC_DECODER, ckpt_dict)\n self.assertIn(common.CKPT_SEMANTIC_HEAD_WITHOUT_LAST_LAYER, ckpt_dict)\n self.assertIn(common.CKPT_SEMANTIC_LAST_LAYER, ckpt_dict)\n self.assertIn(common.CKPT_INSTANCE_DECODER, ckpt_dict)\n self.assertIn(common.CKPT_INSTANCE_REGRESSION_HEAD_WITHOUT_LAST_LAYER,\n ckpt_dict)\n self.assertIn(common.CKPT_INSTANCE_REGRESSION_HEAD_LAST_LAYER, ckpt_dict)\n self.assertIn(common.CKPT_INSTANCE_CENTER_HEAD_WITHOUT_LAST_LAYER,\n ckpt_dict)\n self.assertIn(common.CKPT_INSTANCE_CENTER_HEAD_LAST_LAYER, ckpt_dict)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.range", "tensorflow.test.main", "tensorflow.math.greater_equal", "tensorflow.where", "numpy.random.randint" ], [ "tensorflow.assert_greater", "tensorflow.TensorShape", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.shape", "tensorflow.maximum", "tensorflow.cast", "tensorflow.identity", "tensorflow.pad", "tensorflow.split" ], [ "numpy.unique", "numpy.reshape", "numpy.empty_like", "tensorflow.io.gfile.GFile", "numpy.all", "numpy.array", "numpy.empty" ], [ "tensorflow.random.uniform", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jwfromm/relax
[ "f120282007778706199243ee88b50697c2b9550c", "f120282007778706199243ee88b50697c2b9550c" ]
[ "python/tvm/relay/frontend/pytorch.py", "tests/python/contrib/test_cmsisnn/test_pooling.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks\n# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except\n# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda\n# pylint: disable=missing-function-docstring\n\"\"\"PT: PyTorch frontend.\"\"\"\nimport functools\nimport itertools\nimport math\nimport sys\nimport logging\n\nimport numpy as np\nimport tvm\nfrom tvm.ir import IRModule\nfrom tvm.topi.utils import get_const_tuple\n\nfrom .. import analysis as _analysis\nfrom .. import expr as _expr\nfrom .. import function as _function\nfrom .. import op as _op\nfrom .. import qnn, transform\nfrom ..expr_functor import ExprMutator\nfrom ..loops import while_loop\nfrom ..prelude import Prelude, StaticTensorArrayOps\nfrom ..ty import Any, TensorType, TupleType\nfrom . import qnn_torch\nfrom .common import AttrCvt, get_relay_op, gru_cell, logger\nfrom .common import infer_shape as _infer_shape\nfrom .common import infer_value as _infer_value\nfrom .common import infer_value_simulated as _infer_value_simulated\nfrom .common import lstm_cell, try_infer_value, unbind\nfrom .pytorch_utils import is_version_greater_than\n\n__all__ = [\"from_pytorch\"]\n\n# This returns a \"subgraph\" which puts variables whenever\n# the type is known. 
It also records things to map the input\n# nodes to the extracted graph's nodes.\n# As Python objects are not round-trippable through C++, and\n# our type annotations only live in Python, we need to map\n# the we need to map the nodes we get in visiting to the nodes\n# we used to construct the graph (they are the same in C++,\n# match each other in dictionary lookups, but are not the same\n# in Python) by using the hint dictionary filled as\n# {node: node for node in nodes} to get the type annotations.\n# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440\nclass _TypeFinder(ExprMutator):\n def __init__(self, types):\n super().__init__()\n self.counter = 0\n self.vars = {}\n self.types = types\n self.leave = set() # some variables are not inputs\n\n def visit_let(self, let):\n self.leave.add(let.var)\n return super().visit_let(let)\n\n def visit_function(self, fn):\n self.leave.update(fn.params)\n return super().visit_function(fn)\n\n def visit(self, expr):\n if expr in self.leave:\n return super().visit(expr)\n if expr in self.vars:\n return self.vars[expr]\n if isinstance(expr, tvm.relay.Var):\n self.vars[expr] = expr\n return expr\n if expr in self.types:\n ty = self.types[expr]\n v = tvm.relay.var(f\"_{self.counter}\", type_annotation=ty)\n self.counter += 1\n self.vars[expr] = v\n return v\n v = super().visit(expr)\n return v\n\n\ndef _should_construct_dynamic_list(list_construct_node):\n # if this list is element-accessed or modified at runtime, generate List ADT\n def inplace_add_to_add(op_name):\n if op_name == \"aten::add_\":\n return \"aten::add\"\n else:\n return op_name\n\n uses = _get_uses(list_construct_node)\n\n for loop_use in filter(lambda use: use.user.kind() == \"prim::Loop\", uses):\n block_input_index = loop_use.offset - 1\n block = list(loop_use.user.blocks())[0]\n list_loop_var = list(block.inputs())[block_input_index]\n uses += _get_uses(list_loop_var.node())\n\n op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))\n\n list_ops = set([\"aten::add\", \"aten::__getitem__\"])\n intersect = list_ops.intersection(op_names)\n\n if len(intersect) > 0 and intersect != set([\"aten::add\"]):\n return True\n\n # if add op outputs list, it is dynamic so we need to construct List ADT\n for use in filter(lambda use: use.user.kind() in [\"aten::add\", \"aten::add_\"], uses):\n output_type = _get_node_type(use.user)\n if output_type == \"ListType\":\n return True\n\n return False\n\n\ndef _is_int_seq(seq):\n # TODO (t-vi): handle non-int constants? 
(like numpy.intXX)\n return len(seq) > 0 and all([isinstance(i, int) for i in seq])\n\n\n# operator implementation\nclass PyTorchOpConverter:\n \"\"\"A helper class for holding PyTorch op converters.\"\"\"\n\n def __init__(self, prelude, default_dtype):\n self.prelude = prelude\n self.default_dtype = default_dtype\n self.create_convert_map()\n self.types = {} # map from nodes to (Relay) type annotations\n\n # this incrementally infers the type, see the comments on the type visitor\n # above.\n def infer_type(self, node, mod=None):\n \"\"\"An incremental method to infer the type of a node in the relay graph.\"\"\"\n\n if node in self.types:\n return self.types[node]\n if isinstance(node, tvm.relay.Var):\n return node.type_annotation\n\n tf = _TypeFinder(types=self.types)\n new_node = tf.visit(node)\n fn = _function.Function(list(tf.vars.values()), new_node)\n new_mod = IRModule({\"main\": fn})\n if mod is not None:\n new_mod.update(mod)\n new_mod = transform.RemoveUnusedFunctions()(new_mod)\n new_mod = transform.InferType()(new_mod)\n entry = new_mod[\"main\"]\n ty = entry.body.checked_type\n self.types[node] = ty\n return self.types[node]\n\n def infer_type_with_prelude(self, val):\n body = self.infer_type(val, self.prelude.mod)\n return body\n\n # list ADT utilities\n def convert_to_list_adt(self, py_lst):\n elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]\n msg = \"List elements should have identical types\"\n assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg\n\n # get_type returns type_name, ctor1, ..., ctorN\n # 1 is nil\n _, cons, nil = self.prelude.mod.get_type(\"List\")\n adt_lst = nil()\n for elem in reversed(py_lst):\n adt_lst = cons(elem, adt_lst)\n return adt_lst\n\n def map_tensor_array_constructor(self, adt_lst, shape):\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", shape)\n static_tensor_array_ops.register()\n tensor_create = self.prelude.get_tensor_ctor_static(\"tensor_constructor\", \"float32\", shape)\n return self.prelude.map(tensor_create, adt_lst)\n\n def convert_to_tensor_array(self, adt_lst):\n _, cons, nil = self.prelude.mod.get_type(\"List\")\n if self.prelude.length(adt_lst) == 0:\n return nil()\n\n checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))\n shape = checked_type.shape\n tensor_array = self.map_tensor_array_constructor(adt_lst, shape)\n return tensor_array, tuple(shape)\n\n def infer_shape(self, inputs, mod=None):\n \"\"\"A method to get the output type of an intermediate node in the graph.\"\"\"\n typ = self.infer_type(inputs, mod=mod)\n if hasattr(typ, \"shape\"):\n # Regular operator that outputs tensors\n return get_const_tuple(typ.shape)\n # The return type is not a tensor, for example List\n return typ\n\n def infer_shape_with_prelude(self, inputs):\n return self.infer_shape(inputs, mod=self.prelude.mod)\n\n def record_output_type(self, output):\n if isinstance(output, tuple):\n cleaned_output = [o for o in output if o is not None]\n types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))\n for o, t in zip(cleaned_output, types.fields):\n self.types[o] = t\n elif isinstance(output, _expr.Expr):\n self.infer_type_with_prelude(output)\n # it can also happen that the type is int or so\n\n def pytorch_promote_types(self, inputs, dtypes):\n \"\"\"This promotes TVM inputs with TVM dtypes passed like PyTorch would\"\"\"\n actual_dtypes = []\n for i, inp in enumerate(inputs):\n if isinstance(inp, _expr.Expr):\n idt = self.infer_type(inp).dtype\n actual_dtypes.append(idt)\n 
else:\n actual_dtypes.append(dtypes[i])\n dtypes = actual_dtypes\n tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]\n non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]\n result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)\n results = []\n for inp, dt in zip(inputs, dtypes):\n if np.isscalar(inp):\n results.append(_expr.const(inp, dtype=result_type))\n elif dt == result_type:\n results.append(inp)\n else:\n results.append(_op.cast(inp, result_type))\n return results\n\n def is_quantized_tensor(self, data):\n # If a quantized Torch module is saved and loaded back, dtype will be dropped\n # Since dtypes from Torch tensors are not reliable in such cases, we use\n # Relay's type inference result to decide if an input tensor is quantized\n ty = self.infer_type_with_prelude(data)\n return ty.dtype == \"uint8\"\n\n # Operator implementations\n def make_elemwise(self, name):\n def elemwise(inputs, input_types):\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n return get_relay_op(name)(data0, data1)\n\n return elemwise\n\n def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):\n if len(inputs) == 1:\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n return get_relay_op(name_reduce)(data[0])\n elif len(inputs) >= 2 and isinstance(inputs[1], int):\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n dim = inputs[1]\n keepdims = inputs[2] if len(inputs) > 2 else False\n # also return dummy indices\n return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None\n else:\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n return get_relay_op(name_elemwise)(data0, data1)\n\n def max(self, inputs, input_types):\n return self.min_max_common(\"maximum\", \"max\", inputs, input_types)\n\n def min(self, inputs, input_types):\n return self.min_max_common(\"minimum\", \"min\", inputs, input_types)\n\n def make_unary(self, name):\n def unary(inputs, input_types):\n # this is just to ensure tensor input\n (data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])\n return get_relay_op(name)(data)\n\n return unary\n\n def log1p(self, inputs, input_types):\n # 1_plus_log x = log(x + 1)\n (dtype,) = input_types\n one = _expr.const(1, dtype=dtype)\n return _op.log(inputs[0] + one)\n\n def arange(self, inputs, input_types):\n def _get_value(val, dtype):\n # dtype is a tvm dtype\n if isinstance(val, _expr.Expr):\n inp = _op.cast(val, dtype)\n ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))\n else:\n ret = _create_typed_const(val, dtype)\n return ret\n\n def _get_type(val, inp_type):\n if isinstance(val, _expr.Expr):\n dtype = str(self.infer_type(val))\n return dtype\n return inp_type\n\n # PyTorch arange uses the following type semantics:\n # - if a dtype is given, start, stop, step are converted to that dtype\n # - if no dtype is given and all args are integral, dtype is int64\n # - if no dtype is given and there is a float arg, dtype is float32\n if len(inputs) == 5:\n dtype0 = _get_type(inputs[0], input_types[0])\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n elif dtype0.startswith(\"float\"):\n dtype = \"float32\"\n else:\n dtype = \"int64\"\n start = _expr.const(0, dtype)\n stop = _get_value(inputs[0], dtype)\n step = _expr.const(1, dtype)\n elif len(inputs) == 7:\n types = [_get_type(inputs[i], input_types[i]) for i in range(3)]\n if inputs[3] is not None:\n dtype = 
_convert_dtype_value(inputs[3])\n elif any([t.startswith(\"float\") for t in types]):\n dtype = \"float32\"\n else:\n dtype = \"int64\"\n start = _get_value(inputs[0], dtype)\n stop = _get_value(inputs[1], dtype)\n step = _get_value(inputs[2], dtype)\n else:\n msg = \"Unknown number of arguments (%d) to parse.\" % (len(inputs))\n raise AssertionError(msg)\n\n return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)\n\n def squeeze(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 1:\n axis = None\n else:\n # TODO (t-vi): why is the cast to int needed? similarly elsewhere\n axis = [int(inputs[1])]\n\n return _op.transform.squeeze(data, axis)\n\n def unsqueeze(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n\n return _op.transform.expand_dims(data, int(axis), 1)\n\n def concatenate(self, inputs, input_types):\n def tensor_array_concat(lst, axis):\n assert axis == 0, \"Tensor array concat supported only for axis 0\"\n tensor_array, shape = self.convert_to_tensor_array(lst)\n concat_shape = (Any(),) + shape[1:]\n concat = self.prelude.get_global_var_static(\"tensor_array_concat\", \"float32\", shape)\n concatenated = concat(tensor_array)\n\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", concat_shape)\n static_tensor_array_ops.register()\n get_tensor = self.prelude.get_global_var_static(\n \"tensor_get_data\", \"float32\", concat_shape\n )\n return get_tensor(concatenated)\n\n data = inputs[0]\n axis = inputs[1]\n\n if not isinstance(data, list):\n return tensor_array_concat(data, axis)\n\n if isinstance(data, _expr.Expr):\n data = [data]\n\n return _op.tensor.concatenate(data, int(axis))\n\n def slice(self, inputs, input_types):\n axis_dtype = \"int64\"\n index_size_limit = sys.maxsize\n data = inputs[0]\n dshape = self.infer_shape(data)\n ndim = len(dshape)\n dim = int(inputs[1])\n stride = inputs[4]\n\n target_begin, is_begin_const = try_infer_value(\n inputs[2], lambda ret: ret.astype(np.int).item(0)\n )\n target_end, is_end_const = try_infer_value(\n inputs[3], lambda ret: ret.astype(np.int).item(0)\n )\n\n # A fast path when slicing is nop.\n if (\n isinstance(target_begin, int)\n and isinstance(target_end, int)\n and target_begin == 0\n and target_end >= index_size_limit\n and stride == 1\n ):\n return data\n\n if target_begin is None and target_end is None:\n return data\n\n # Process begin\n begin = [0] * ndim\n\n if target_begin is not None:\n begin[dim] = target_begin\n\n if target_begin is not None and not isinstance(begin[dim], int):\n tmp = []\n for b in begin:\n if isinstance(b, int):\n tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))\n else:\n tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))\n begin = _op.concatenate(tmp, axis=0)\n btype = self.infer_type(begin).dtype\n if str(btype) != axis_dtype:\n begin = _op.cast(begin, axis_dtype)\n\n # Process end\n if isinstance(target_end, int) and target_end >= index_size_limit:\n target_end = dshape[dim]\n\n if any([isinstance(d, tvm.tir.Any) for d in dshape]):\n end = _op.shape_of(data)\n else:\n end = dshape\n\n if isinstance(target_end, int):\n if isinstance(end, list):\n end[dim] = target_end\n else:\n all_static = True\n for i, shape_dim in enumerate(dshape):\n if i != dim and isinstance(shape_dim, tvm.tir.Any):\n all_static = False\n\n if all_static:\n end = list(get_const_tuple(dshape))\n end[dim] = target_end\n else:\n target_end = _expr.const(target_end)\n end = _op.scatter(\n end,\n _op.expand_dims(_expr.const(dim), 
axis=0),\n _op.expand_dims(target_end, axis=0),\n axis=0,\n )\n else:\n end = _op.cast(_op.shape_of(data), axis_dtype)\n if target_end is not None and not isinstance(target_end, tvm.tir.Any):\n ttype = self.infer_type(target_end).dtype\n if str(ttype) != axis_dtype:\n target_end = _op.cast(target_end, axis_dtype)\n end = _op.scatter(\n end,\n _op.expand_dims(_expr.const(dim), axis=0),\n _op.expand_dims(target_end, axis=0),\n axis=0,\n )\n\n if not isinstance(end, list):\n etype = self.infer_type(end).dtype\n if str(etype) != axis_dtype:\n end = _op.cast(end, axis_dtype)\n\n strides = [1] * ndim\n strides[dim] = stride\n\n return _op.transform.strided_slice(\n data, begin=begin, end=end, strides=strides, slice_mode=\"end\"\n )\n\n def narrow(self, inputs, input_types):\n # Inputs are:\n # 0 - the tensor to narrow\n # 1 - the dimension along which to narrow\n # 2 - the starting dimension\n # 3 - the distance to the ending dimension\n # Lets find the ending dimension\n end = self.add(inputs[2:4], input_types[2:4])\n stride = 1\n slice_input = inputs[:3] + [end, stride]\n slice_types = input_types + [\"int32\"]\n return self.slice(slice_input, slice_types)\n\n def split(self, inputs, input_types):\n data = inputs[0]\n split_size = int(inputs[1])\n dim = int(inputs[2])\n\n split_index = split_size\n indices = []\n while split_index < self.infer_shape(data)[dim]:\n indices.append(split_index)\n split_index += split_size\n\n return _op.split(data, indices, dim)\n\n def split_with_sizes(self, inputs, input_types):\n data = inputs[0]\n sections = inputs[1]\n dim = int(inputs[2])\n\n if len(sections) == 1:\n # a special case used in torchvision detection models\n return _expr.TupleWrapper(_expr.Tuple([data]), 1)\n\n split_index = 0\n indices = []\n for i in range(len(sections) - 1):\n index, _ = try_infer_value(sections[i], lambda ret: int(ret))\n split_index += index\n indices.append(split_index)\n\n return _op.split(data, indices, dim)\n\n def select(self, inputs, input_types):\n data = inputs[0]\n dim = int(inputs[1])\n index = _wrap_const(inputs[2])\n return _op.transform.take(data, index, axis=dim, mode=\"wrap\")\n\n def take(self, inputs, input_types):\n data = inputs[0]\n indices = _op.cast(inputs[1], \"int32\")\n\n return _op.transform.take(data, indices=indices, mode=\"wrap\")\n\n def topk(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[2])\n is_ascend = not bool(inputs[3])\n sort = bool(inputs[4])\n\n if isinstance(inputs[1], _expr.Expr):\n k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())\n else:\n k = inputs[1]\n\n if not sort:\n msg = \"Currently supports only sorted output for topk operator.\"\n raise AssertionError(msg)\n\n outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type=\"both\", dtype=\"int64\")\n\n return outs[0], outs[1]\n\n def reciprocal(self, inputs, input_types):\n data = inputs[0]\n return _expr.const(1.0, dtype=input_types[0]) / data\n\n def repeat(self, inputs, input_types):\n data = inputs[0]\n reps = []\n for r in inputs[1]:\n if isinstance(r, int):\n reps.append(r)\n else:\n reps.append(int(_infer_value(r, {}).numpy()))\n\n return _op.transform.tile(data, reps=reps)\n\n def repeat_interleave(self, inputs, input_types):\n data = inputs[0]\n if isinstance(inputs[1], int):\n repeats = inputs[1]\n axis = inputs[2]\n elif isinstance(inputs[1], _expr.Expr):\n if isinstance(inputs[1], _expr.Constant):\n repeats = int(inputs[1].data.numpy())\n else:\n repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())\n axis = 
inputs[2]\n else:\n msg = \"Only repeat with one value as repeat is currently supported.\"\n raise AssertionError(msg)\n if axis is None: # Flatten the data if no axis is given from torch\n data = _op.transform.reshape(data, [-1])\n axis = 0\n return _op.transform.repeat(data, repeats=repeats, axis=axis)\n\n def addcdiv(self, inputs, input_types):\n data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])\n return data + (c * (t1 / t2))\n\n def addcmul(self, inputs, input_types):\n data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])\n return data + (c * (t1 * t2))\n\n def where(self, inputs, input_types):\n if len(inputs) == 1:\n return self.nonzero([inputs[0], True], input_types)\n\n cond = inputs[0]\n x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])\n return _op.where(cond, x, y)\n\n def full_impl(self, data, fill_value, dtype):\n size = []\n need_reshape = False\n new_shape = []\n for dim in data:\n if isinstance(dim, _expr.Expr):\n if isinstance(dim, _expr.Constant):\n dim = int(dim.data.numpy())\n if isinstance(size, list):\n size.append(dim)\n new_shape.append(dim)\n else:\n dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)\n new_shape.append(dim)\n\n if success:\n if isinstance(size, list):\n size.append(dim)\n else:\n size = None\n need_reshape = True\n else:\n if isinstance(size, list):\n size.append(dim)\n new_shape.append(dim)\n\n if size is None:\n tmp = []\n for dim in data:\n tmp.append(_op.cast(_op.expand_dims(dim, axis=0), \"int64\"))\n size = _op.concatenate(tmp, axis=0)\n\n out = _op.full(_expr.const(fill_value), size, dtype=dtype)\n if need_reshape:\n out = _op.reshape(out, new_shape)\n return out\n\n def ones(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in ones op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n return self.full_impl(data, 1, dtype)\n\n def ones_like(self, inputs, input_types):\n data = inputs[0]\n out = _op.ones_like(data)\n\n # If the input and the output datatype is different, do a cast\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n if input_types[0] != dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def zeros(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in zeros op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n return self.full_impl(data, 0, dtype)\n\n def zeros_like(self, inputs, input_types):\n data = inputs[0]\n out = _op.zeros_like(data)\n\n # If the input and the output datatype is different, do a cast\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n if input_types[0] not in dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def full(self, inputs, input_types):\n data = inputs[0]\n fill_value = inputs[1]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in full op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[2] is not None: # dtype given\n dtype = 
_convert_dtype_value(inputs[2])\n else:\n # if dtype is None, torch uses a global default set by torch.set_default_tensor_type()\n dtype = self.default_dtype\n\n return self.full_impl(data, fill_value, dtype)\n\n def full_like(self, inputs, input_types):\n data = inputs[0]\n fill_value = inputs[1]\n\n out = _op.full_like(data, _expr.const(fill_value))\n\n # If the input and the output datatype is different, do a cast\n if inputs[2] is not None: # dtype given\n dtype = _convert_dtype_value(inputs[2])\n else:\n # if dtype is None, torch uses a global default set by torch.set_default_tensor_type()\n dtype = self.default_dtype\n if input_types[0] not in dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def linspace(self, inputs, input_types):\n start = inputs[0]\n stop = inputs[1]\n step = inputs[2]\n\n # Find the spacing between values as step\n if step != 1:\n step = (stop - start) / (step - 1)\n stop = stop + step\n else:\n stop = start + step\n\n if inputs[3] is None:\n import torch\n\n dtype = _convert_data_type(str(torch.get_default_dtype()))\n else:\n dtype = _convert_dtype_value(inputs[3])\n\n start = _create_typed_const(start, dtype)\n stop = _create_typed_const(stop, dtype)\n step = _create_typed_const(step, dtype)\n\n return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)\n\n def relu(self, inputs, input_types):\n data = inputs[0]\n if self.is_quantized_tensor(data):\n assert len(inputs) == 3, \"Input quant param not found in op inputs\"\n input_zero_point = _expr.const(inputs[2], dtype=\"int32\")\n return qnn_torch.quantized_relu(data, input_zero_point)\n return _op.nn.relu(data)\n\n def prelu(self, inputs, input_types):\n # Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU\n data = inputs[0]\n dim = self.get_dims(data)\n ndims = len(dim)\n axis = 0 if ndims == 1 else 1\n alpha = _op.broadcast_to(inputs[1], (dim[axis]))\n return _op.nn.prelu(data, alpha, axis)\n\n def leaky_relu(self, inputs, input_types):\n data = inputs[0]\n alpha = float(inputs[1])\n return _op.nn.leaky_relu(data, alpha)\n\n def elu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n alpha = _expr.const(-float(inputs[1]), dtype=dtype)\n return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)\n\n def celu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n alpha = _expr.const(float(inputs[1]), dtype=dtype)\n return alpha * _op.nn.relu(\n _expr.const(1, dtype=dtype) - _op.exp(data / alpha)\n ) + _op.nn.relu(data)\n\n def gelu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n # gelu is data * normcdf(data)\n # normcdf expressed as erf because we don't currently have that intrinsic\n # note that there is also a fastgelu variant approximating normcdf\n # with tanh and third order polynomials, but this is \"true\" gelu\n return data * (\n _expr.const(0.5, dtype=dtype)\n + _op.erf(data * _expr.const(0.5 ** 0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)\n )\n\n def selu(self, inputs, input_types):\n data = inputs[0]\n # https://pytorch.org/docs/stable/nn.html#selu\n dtype = input_types[0]\n alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)\n gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)\n return gamma * (\n alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)\n )\n\n def silu(self, inputs, input_types):\n data = inputs[0]\n return data * _op.tensor.sigmoid(data)\n\n def 
log_sigmoid(self, inputs, input_types):\n data = inputs[0]\n return _op.log(_op.tensor.sigmoid(data))\n\n def hard_sigmoid(self, inputs, input_types):\n def _relu6(x):\n return _op.tensor.clip(x, 0.0, 6.0)\n\n def func(x):\n return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)\n\n if self.is_quantized_tensor(inputs[0]):\n input_scale = _expr.const(inputs[1])\n input_zero_point = _expr.const(inputs[2])\n # PyTorch seems to use the following output qparams, but accuracy\n # is broken if we use this.\n # TODO(masahi): Revisit this parameter choice\n #\n # Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp\n # output_scale = _expr.const(0.00390625) # 1.0 / 2^8\n # output_zero_point = _expr.const(-128)\n output_scale = input_scale\n output_zero_point = input_zero_point\n\n data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)\n out = func(data)\n return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype=\"uint8\")\n\n return func(inputs[0])\n\n def hard_swish(self, inputs, input_types):\n data = inputs[0]\n return data * self.hard_sigmoid(inputs, input_types)\n\n def adaptive_avg_pool(self, op, inputs, input_types):\n data = inputs[0]\n output_size = inputs[1]\n\n def func(x):\n return op(x, output_size=output_size)\n\n if self.is_quantized_tensor(data):\n return qnn_torch.apply_with_upcast(data, func)\n\n return func(data)\n\n def adaptive_max_pool(self, op, inputs, input_types):\n data = inputs[0]\n output_size = inputs[1]\n # returns dummy indices too\n return op(data, output_size=output_size), None\n\n @staticmethod\n def convert_const_list(data):\n if isinstance(data, list):\n for i, _ in enumerate(data):\n if isinstance(data[i], _expr.Expr):\n data[i] = int(_infer_value_simulated(data[i], {}).numpy())\n return data\n\n def maxpool_2d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = self.convert_const_list(inputs[1])\n strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool2d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n layout=\"NCHW\",\n ceil_mode=ceil_mode,\n )\n\n def maxpool_2d_with_indices(self, inputs, input_types):\n # returns dummy indices too\n return self.maxpool_2d(inputs, input_types), None\n\n def maxpool_1d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = inputs[1]\n strides = inputs[2] if inputs[2] else pool_size\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool1d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n layout=\"NCW\",\n ceil_mode=ceil_mode,\n )\n\n def maxpool_3d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = inputs[1]\n strides = inputs[2] if inputs[2] else pool_size\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool3d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n ceil_mode=ceil_mode,\n )\n\n def hardtanh(self, inputs, input_types):\n a = inputs[0]\n tanh_min = float(inputs[1])\n tanh_max = float(inputs[2])\n return _op.tensor.clip(a, tanh_min, tanh_max)\n\n def convolution(self, inputs, input_types):\n # Use transpose or normal\n use_transpose = True if inputs[6] == 1 else False\n\n data = inputs[0]\n weight = inputs[1]\n bias = inputs[2]\n strides = tuple(inputs[3])\n padding = tuple(inputs[4])\n dilation = 
tuple(inputs[5])\n\n if isinstance(weight, _expr.Expr):\n inferred_shape = self.infer_shape(weight)\n weight_shape = []\n for infer in inferred_shape:\n weight_shape.append(infer)\n else:\n msg = \"Data type %s could not be parsed in conv op\" % (type(weight))\n raise AssertionError(msg)\n\n # Transposed convolutions have IOHW layout.\n if use_transpose:\n weight_shape[0], weight_shape[1] = weight_shape[1], weight_shape[0]\n\n channels = weight_shape[0]\n groups = int(inputs[8])\n\n # Check if this is depth wise convolution\n # We need to reshape weight so that Relay could recognize this is depth wise\n # weight_shape[1] is always in_channels // groups\n # For depthwise, in_channels == groups, so weight_shape[1] == 1\n # If groups > 1 but weight_shape[1] != 1, this is group convolution\n if groups > 1 and weight_shape[1] == 1:\n channel_multiplier = channels // groups\n new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])\n weight = _op.transform.reshape(weight, new_weight_shape)\n\n kernel_size = weight_shape[2:]\n use_bias = isinstance(bias, _expr.Expr)\n\n # We are trying to invoke various relay operations through a single conv_op variable.\n # However the function signatures for some operations have additional attributes so we\n # pass these in along with the standard ones.\n additional_arguments = dict()\n\n if use_transpose:\n if len(kernel_size) == 3:\n conv_op = _op.nn.conv3d_transpose\n elif len(kernel_size) == 2:\n conv_op = _op.nn.conv2d_transpose\n else:\n conv_op = _op.nn.conv1d_transpose\n output_padding = tuple(inputs[7])\n additional_arguments[\"output_padding\"] = output_padding\n\n else:\n if len(kernel_size) == 3:\n conv_op = _op.nn.conv3d\n elif len(kernel_size) == 2:\n conv_op = _op.nn.conv2d\n else:\n conv_op = _op.nn.conv1d\n\n if len(kernel_size) == 3:\n data_layout = \"NCDHW\"\n kernel_layout = \"OIDHW\"\n elif len(kernel_size) == 2:\n data_layout = \"NCHW\"\n kernel_layout = \"OIHW\"\n if use_transpose:\n # Transposed convolutions have IOHW layout.\n kernel_layout = \"IOHW\"\n else:\n data_layout = \"NCW\"\n kernel_layout = \"OIW\"\n\n # Conv1d does not currently support grouped convolution so we convert it to conv2d\n is_grouped_conv1d = False\n if groups > 1 and len(kernel_size) == 1 and not use_transpose:\n is_grouped_conv1d = True\n conv_op = _op.nn.conv2d\n kernel_size = [1] + kernel_size\n strides = (1,) + strides\n padding = (0,) + padding\n dilation = (1,) + dilation\n data = _op.expand_dims(data, axis=2)\n weight = _op.expand_dims(weight, axis=2)\n data_layout = \"NCHW\"\n kernel_layout = \"OIHW\"\n\n conv_out = conv_op(\n data,\n weight,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n channels=channels,\n kernel_size=kernel_size,\n data_layout=data_layout,\n kernel_layout=kernel_layout,\n out_layout=\"\",\n out_dtype=\"\",\n **additional_arguments,\n )\n if use_bias:\n res = _op.nn.bias_add(conv_out, bias)\n else:\n res = conv_out\n if is_grouped_conv1d:\n # Because we conducted grouped conv1d convolution through conv2d we must\n # squeeze the output to get the correct result.\n res = _op.squeeze(res, axis=[2])\n return res\n\n def softmax(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n if isinstance(axis, str):\n axis = int(axis)\n\n return _op.nn.softmax(data, axis=axis)\n\n def threshold(self, inputs, input_types):\n data = inputs[0]\n return _op.nn.relu(data)\n\n def contiguous(self, inputs, input_types):\n return inputs[0]\n\n def batch_norm(self, inputs, input_types):\n data = 
inputs[0]\n data_type = input_types[0]\n\n channels = self.infer_shape(data)\n\n if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):\n scale = center = True\n weight = inputs[1]\n beta = inputs[2]\n gamma = weight\n else:\n scale = center = False\n\n if not scale:\n gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)\n\n if not center:\n beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)\n\n moving_mean = inputs[3]\n moving_var = inputs[4]\n epsilon = float(inputs[7])\n\n return _op.nn.batch_norm(\n data,\n gamma,\n beta,\n moving_mean,\n moving_var,\n axis=1,\n epsilon=epsilon,\n center=center,\n scale=scale,\n )[0]\n\n def instance_norm(self, inputs, input_types):\n data = inputs[0]\n data_type = input_types[0]\n channels = self.infer_shape(data)\n\n if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):\n scale = center = True\n weight = inputs[1]\n beta = inputs[2]\n gamma = weight\n else:\n scale = center = False\n\n if not scale:\n gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)\n\n if not center:\n beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)\n\n epsilon = float(inputs[7])\n return _op.nn.instance_norm(\n data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale\n )\n\n def get_dims(self, data):\n import torch\n\n if isinstance(data, _expr.Expr):\n dims = self.infer_shape(data)\n elif isinstance(data, list):\n dims = data\n elif isinstance(data, (torch.Tensor, np.ndarray)):\n dims = data.shape\n else:\n msg = \"Data type %s could not be parsed\" % type(data)\n raise AssertionError(msg)\n return dims\n\n def layer_norm(self, inputs, input_types):\n data = inputs[0]\n ndims = len(self.get_dims(inputs[1]))\n assert ndims == 1, \"Support only normalization over last one dimension.\"\n\n return _op.nn.layer_norm(\n data,\n gamma=inputs[2],\n beta=inputs[3],\n axis=-1,\n epsilon=float(inputs[4]),\n center=True,\n scale=True,\n )\n\n def group_norm(self, inputs, input_types):\n data = inputs[0]\n gamma = inputs[2]\n beta = inputs[3]\n num_groups = inputs[1]\n epsilon = float(inputs[4])\n\n return _op.nn.group_norm(\n data,\n gamma=gamma,\n beta=beta,\n num_groups=num_groups,\n axis=1,\n epsilon=epsilon,\n center=True,\n scale=True,\n )\n\n def transpose(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if isinstance(data, _expr.Expr):\n ndims = len(self.infer_shape_with_prelude(data))\n elif isinstance(data, list):\n ndims = data\n elif isinstance(data, (torch.Tensor, np.ndarray)):\n ndims = data.shape\n else:\n msg = \"Data type %s could not be parsed in transpose op\" % (type(data))\n raise AssertionError(msg)\n\n if isinstance(data, tvm.runtime.NDArray):\n ndims = len(data.shape)\n axes = list(range(ndims))\n\n num_inputs = len(inputs)\n\n if num_inputs == 1:\n if ndims >= 2:\n axes[-1] = ndims - 2\n axes[-2] = ndims - 1\n if not isinstance(data, _expr.Expr):\n data = _expr.const(data)\n\n elif num_inputs == 3:\n parse = lambda i: ndims * (i < 0) + i\n src, dst = [parse(int(inputs[i])) for i in [1, 2]]\n axes[src] = dst\n axes[dst] = src\n else:\n axes = inputs[1]\n return _op.transform.transpose(data, axes)\n\n def flatten(self, inputs, input_types):\n data = inputs[0]\n start = int(inputs[1])\n end = int(inputs[2])\n dshape = get_const_tuple(self.infer_shape_with_prelude(data))\n ndim = len(dshape)\n if end < 0:\n end += ndim\n new_shape = [0] * start\n\n new_shape.append(-1)\n squeeze_axes = []\n for i in range(start + 1, end + 1):\n 
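# (editor note) dims start..end are folded into the single -1 entry appended above; the 1s\n # added here keep the trailing 0 (copy-dim) entries aligned with their original axes, and\n # these size-1 placeholder axes are removed again by the squeeze below.\n 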
new_shape.append(1)\n squeeze_axes.append(i)\n for _ in range(end + 1, ndim):\n new_shape.append(0)\n out = _op.reshape(data, new_shape)\n if squeeze_axes:\n out = _op.squeeze(out, axis=squeeze_axes)\n return out\n\n def addmm(self, inputs, input_types):\n input_mat = inputs[0]\n mat1 = inputs[1]\n data_type = input_types[1]\n mat2 = inputs[2]\n\n beta = inputs[3]\n alpha = inputs[4]\n\n if not isinstance(alpha, _expr.Expr) and alpha != 1:\n alpha = _create_typed_const(alpha, data_type)\n mat1 *= alpha\n\n if not isinstance(beta, _expr.Expr) and beta != 1:\n beta = _create_typed_const(beta, data_type)\n mat2 *= beta\n\n transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])\n\n units = self.infer_shape(transposed_mat2)[0]\n dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)\n\n return dense_out + input_mat\n\n def size(self, inputs, input_types):\n shape = self.infer_shape_with_prelude(inputs[0])\n axis = None\n if len(inputs) > 1:\n axis = int(inputs[1])\n\n if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):\n if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):\n shape_dynamic = _op.shape_of(inputs[0], dtype=\"int32\")\n if axis is not None:\n return _op.take(shape_dynamic, _expr.const(axis), 0)\n return shape_dynamic\n\n if axis is not None:\n return _expr.const(shape[axis])\n return _expr.const(shape)\n\n def numtotensor(self, inputs, input_types):\n val = inputs[0]\n dtype = input_types[0]\n\n if isinstance(val, _expr.Expr):\n return val\n\n if isinstance(val, tvm.tir.IntImm):\n val = val.__int__()\n dtype = int\n\n arr = val * np.ones([]).astype(dtype)\n return arr\n\n def tensortonum(self, inputs, input_types):\n return inputs[0]\n\n def view(self, inputs, input_types):\n data = inputs[0]\n\n if len(inputs) == 3:\n shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]\n else:\n if isinstance(inputs[1], list):\n shape_inp = inputs[1]\n else:\n shape_inp = self.infer_shape(inputs[1])\n new_shape = shape_inp\n for i, shape in enumerate(shape_inp):\n if isinstance(shape, _expr.Expr):\n val = _infer_value_simulated(shape, {})\n new_shape[i] = val.numpy().item(0)\n\n return _op.transform.reshape(data, new_shape)\n\n def reshape(self, inputs, input_types):\n data = inputs[0]\n new_shape = inputs[1]\n\n tmp_shape = []\n is_dyn = False\n for s in new_shape:\n if isinstance(s, _expr.Constant):\n tmp_shape.append(int(s.data.numpy()))\n elif isinstance(s, _expr.Expr):\n dim, success = try_infer_value(s, lambda ret: int(ret))\n tmp_shape.append(dim)\n\n if not success:\n is_dyn = True\n else:\n tmp_shape.append(s)\n\n if is_dyn:\n new_shape = []\n for i, s in enumerate(tmp_shape):\n if not isinstance(s, _expr.Expr):\n s = _expr.const(s, \"int64\")\n else:\n s = _op.cast(s, \"int64\")\n new_shape.append(_op.expand_dims(s, axis=0))\n new_shape = _op.concatenate(new_shape, axis=0)\n else:\n new_shape = tmp_shape\n return _op.transform.reshape(data, new_shape)\n\n def pixel_shuffle(self, inputs, input_types):\n data = inputs[0]\n upscale_factor = inputs[1]\n upscale_squared = upscale_factor * upscale_factor\n b, c, h, w = self.infer_shape(data)\n assert (\n c % upscale_squared == 0\n ), \"input channel should be divisible by square of upscale_factor\"\n\n ndims = len(self.infer_shape_with_prelude(data))\n axes = list(range(ndims))\n num_inputs = len(inputs)\n oc = c // upscale_squared\n oh = h * upscale_factor\n ow = w * upscale_factor\n\n new_shape = [b, oc, upscale_factor, upscale_factor, h, w]\n out_shape = [b, oc, oh, ow]\n\n data = 
_op.transform.reshape(data, new_shape)\n # The data will be transposed to\n # [b, oc, h, upscale_factor, w, upscale_factor]\n # for further reshape\n axes = [0, 1, 4, 2, 5, 3]\n data = _op.transform.transpose(data, axes)\n return _op.transform.reshape(data, out_shape)\n\n def clone(self, inputs, input_types):\n data = inputs[0]\n return _op.tensor.copy(data)\n\n def log_softmax(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[1])\n return _op.nn.log_softmax(data, axis)\n\n def sigmoid(self, inputs, input_types):\n data = inputs[0]\n return _op.tensor.sigmoid(data)\n\n def softplus(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n beta = _expr.const(float(inputs[1]), dtype=dtype)\n return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta\n\n def make_avg_pool(self, dim):\n def avg_pool(inputs, input_types):\n data = inputs[0]\n\n pool_size = self.convert_const_list(inputs[1])\n strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)\n padding = inputs[3]\n ceil_mode = int(inputs[4])\n count_include_pad = int(inputs[5])\n\n def func(x):\n if dim == 1:\n return _op.nn.avg_pool1d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1,),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n elif dim == 2:\n return _op.nn.avg_pool2d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1, 1),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n elif dim == 3:\n return _op.nn.avg_pool3d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1, 1, 1),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n else:\n msg = \"Average Pooling dimension should be between 1 and 3\"\n raise RuntimeError(msg)\n\n if self.is_quantized_tensor(data):\n return qnn_torch.apply_with_upcast(data, func)\n\n return func(data)\n\n return avg_pool\n\n def linear(self, inputs, input_types):\n # https://pytorch.org/docs/stable/nn.functional.html#linear\n # 0 - input\n # 1 - weight\n bias = inputs[2]\n a_shape = self.infer_shape_with_prelude(inputs[0])\n b_shape = self.infer_shape_with_prelude(inputs[1])\n if len(a_shape) == 2 and len(b_shape) == 2:\n mm_out = _op.nn.dense(inputs[0], inputs[1])\n elif len(b_shape) == 1:\n mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])\n else:\n mm_out = self.matmul(\n [inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]\n )\n if isinstance(bias, _expr.Expr):\n bias_ndims = len(self.infer_shape_with_prelude(bias))\n if bias_ndims == 1:\n return _op.nn.bias_add(mm_out, bias, axis=-1)\n mm_dtype = self.infer_type_with_prelude(mm_out).dtype\n return self.add([mm_out, bias], [mm_dtype, input_types[2]])\n return mm_out\n\n def dropout(self, inputs, input_types):\n data = inputs[0]\n rate = float(inputs[1])\n\n return _op.nn.dropout(data, rate)\n\n def make_reduce(self, name):\n def reduce(inputs, input_types):\n data = inputs[0]\n axis = None\n keepdims = False\n\n if len(inputs) > 2: # default, torch have only data, axis=None, keepdims=False\n if isinstance(inputs[1], int):\n axis = int(inputs[1])\n elif _is_int_seq(inputs[1]):\n axis = inputs[1]\n else:\n axis = list(self.infer_shape(inputs[1]))\n keepdims = bool(inputs[2])\n\n return get_relay_op(name)(data, axis=axis, keepdims=keepdims)\n\n return reduce\n\n def norm(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n axis = None\n keepdims = False\n if len(inputs) > 3:\n axis = inputs[2]\n 
keepdims = bool(inputs[3])\n\n order = inputs[1]\n if order == np.inf:\n return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)\n elif order == np.NINF:\n return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)\n else:\n reci_order = _expr.const(1.0 / order, dtype=dtype)\n order = _expr.const(order)\n return _op.power(\n _op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),\n reci_order,\n )\n\n def frobenius_norm(self, inputs, input_types):\n data = inputs[0]\n axis = None\n keepdims = False\n if len(inputs) > 2:\n axis = inputs[1] if len(inputs[1]) > 0 else None\n keepdims = bool(inputs[2])\n\n return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))\n\n def std(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 2:\n axis = None\n keepdims = False\n unbiased = bool(inputs[1])\n else:\n axis = inputs[1]\n keepdims = bool(inputs[3])\n unbiased = bool(inputs[2])\n\n return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)\n\n def variance(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 2:\n axis = None\n keepdims = False\n unbiased = bool(inputs[1])\n else:\n axis = inputs[1]\n keepdims = bool(inputs[3])\n unbiased = bool(inputs[2])\n\n return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)\n\n def mean(self, inputs, input_types):\n data = inputs[0]\n\n if inputs[1]:\n axis = inputs[1]\n else:\n axis = None\n\n if len(inputs) > 2 and inputs[2]:\n keepdims = int(inputs[2])\n else:\n keepdims = False\n if len(inputs) > 3 and inputs[3]:\n exclude = int(inputs[3])\n else:\n exclude = False\n\n def func(x):\n return _op.mean(x, axis, keepdims, exclude)\n\n if self.is_quantized_tensor(data):\n assert len(inputs) == 6, \"Input quant param not found in op inputs\"\n input_scale = _expr.const(inputs[4])\n input_zero_point = _expr.const(inputs[5])\n return qnn_torch.quantized_mean(data, input_scale, input_zero_point, func)\n\n return func(data)\n\n def chunk(self, inputs, input_types):\n data = inputs[0]\n\n num_chunks = int(inputs[1])\n axis = int(inputs[2])\n\n if isinstance(data, _expr.Expr):\n inferred_shape = self.infer_shape_with_prelude(data)\n\n shape = []\n for infer in inferred_shape:\n shape.append(infer)\n\n dim = int(shape[axis])\n\n if dim % num_chunks:\n unif_size = int(dim / (num_chunks - 1))\n else:\n unif_size = int(dim / num_chunks)\n\n indeces = []\n for i in range(unif_size, dim, unif_size):\n indeces.append(i)\n\n return _op.split(data, indeces, axis)\n\n def matmul(self, inputs, input_types):\n\n inputs_0 = inputs[0]\n inputs_1 = inputs[1]\n\n # Need to check input shape as batch matmul must be supported.\n a_shape = self.infer_shape_with_prelude(inputs_0)\n b_shape = self.infer_shape_with_prelude(inputs_1)\n\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if len(a_shape) > 2 and len(b_shape) > 2:\n # Convert a into a 3 dimensional tensors.\n need_reshape_output = False\n if len(a_shape) != 3:\n a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]])\n need_reshape_output = True\n else:\n a = inputs_0\n\n # Transpose matrix dimensions of b.\n trans_axes = list(range(len(b_shape)))\n trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]\n b = _op.transpose(inputs_1, trans_axes)\n\n # Convert b into a 3 dimensional tensor. 
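The transpose above is applied because Relay's\n # nn.batch_matmul multiplies its first operand by the transpose of the second.\n # 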
Note that the last two dimensions\n # are transposed.\n if len(b_shape) != 3:\n b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]])\n\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n\n # Reshape output to original dimensions.\n if need_reshape_output:\n return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])\n return output\n elif len(a_shape) > 2:\n inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]])\n\n if len(b_shape) > 2:\n trans_axes = list(range(len(b_shape)))\n trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]\n input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]])\n elif len(b_shape) == 2:\n input_1 = _op.transpose(inputs_1, axes=(1, 0))\n elif len(b_shape) == 1:\n input_1 = _op.expand_dims(inputs_1, 0, 1)\n\n out = _op.nn.dense(inputs_0, input_1)\n\n if len(b_shape) == 1:\n out = _op.squeeze(out, axis=[-1])\n\n # Reshape output into a N dimensional tensor when a or b dim > 2\n if len(a_shape) > 2:\n out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]])\n elif len(b_shape) > 2:\n out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]])\n out = _op.reshape(\n _op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]]\n )\n\n return out\n\n def expand(self, inputs, input_types):\n data_in = inputs[0]\n shape = list(self.infer_shape(data_in))\n\n ndims = len(shape)\n sizes = inputs[1]\n out = data_in\n\n out_dims = len(sizes)\n if ndims < out_dims:\n num_newaxis = out_dims - ndims\n out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)\n shape = [1] * num_newaxis + shape\n\n for i in range(out_dims):\n if sizes[i] != -1 and shape[i] == 1:\n if not isinstance(sizes[i], int):\n sizes[i] = int(_infer_value(sizes[i], {}).numpy())\n out = _op.repeat(out, sizes[i], axis=i)\n\n return out\n\n def int(self, inputs, input_types):\n if isinstance(inputs[0], _expr.Expr):\n return inputs[0]\n return int(inputs[0])\n\n def identity(self, inputs, input_types):\n return inputs[0]\n\n def none(self, inputs, input_types):\n return None\n\n def make_pad(self, mode):\n def pad(inputs, input_types):\n data = inputs[0]\n if isinstance(inputs[1], list):\n pad_list = inputs[1]\n else:\n pad_list = list(self.infer_shape(inputs[1]))\n\n # initialize paddings based on input len\n pad_len = len(self.infer_shape(data)) * 2\n paddings = [0] * pad_len\n\n if len(pad_list) >= 2:\n paddings[-1] = pad_list[1]\n paddings[-2] = pad_list[0]\n if len(pad_list) >= 4:\n paddings[-3] = pad_list[3]\n paddings[-4] = pad_list[2]\n if len(pad_list) >= 6:\n paddings[-5] = pad_list[5]\n paddings[-6] = pad_list[4]\n\n # group into tuple of 2 ints\n paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]\n\n const_paddings = []\n non_zero_found = False\n for pad in paddings:\n const_paddings.append([])\n for p in pad:\n if not isinstance(p, int):\n p = int(_infer_value(p, {}).numpy())\n const_paddings[-1].append(p)\n if p != 0:\n non_zero_found = True\n\n if not non_zero_found:\n return data\n elif mode == \"constant\":\n return _op.nn.pad(data, const_paddings, pad_value=inputs[2], pad_mode=mode)\n else:\n return _op.nn.pad(data, const_paddings, pad_mode=mode)\n\n return pad\n\n def clamp(self, inputs, input_types):\n data = inputs[0]\n\n def get_v(v, default_v):\n if isinstance(v, _expr.Constant):\n return float(v.data.numpy())\n if isinstance(v, _expr.Expr):\n infer_v, success = try_infer_value(v, lambda ret: float(ret))\n if success:\n return infer_v\n if v is not None:\n return v\n return default_v\n\n amin = get_v(inputs[1], 
np.finfo(np.float32).min)\n amax = get_v(inputs[2], np.finfo(np.float32).max)\n return _op.clip(data, amin, amax)\n\n def to(self, inputs, input_types):\n data = inputs[0]\n dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]\n # special handling for aten::to(data, 6, _, _, _) case\n # 6 means dtype = float\n # this happens when converting upsampling with scale factor\n cast_map = {\n 5: \"float16\",\n 6: \"float32\",\n 7: \"float64\",\n 3: \"int32\",\n 4: \"int64\",\n }\n\n cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}\n\n ret = data\n if isinstance(data, _expr.Expr):\n actual_dtype = str(self.infer_type(data).dtype)\n if dtype in cast_map and cast_map[dtype] != actual_dtype:\n ret = _op.cast(data, cast_map[dtype])\n elif dtype in cast_map:\n ret = cast_func[dtype](data)\n\n return ret\n\n def get_upsample_out_size(self, inputs, method):\n # This assumes a static shape\n out_size = []\n if inputs[1] is not None:\n for size in inputs[1]:\n if not isinstance(size, int):\n out_size.append(int(_infer_value(size, {}).numpy()))\n else:\n out_size.append(size)\n else:\n scale_index = 3 if method != \"nearest_neighbor\" else 2\n scales = inputs[scale_index]\n assert scales is not None, \"neither out size nor scale provided\"\n assert isinstance(scales, list)\n ishape = self.infer_shape(inputs[0])\n for i, scale in enumerate(scales):\n out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))\n\n return out_size\n\n def make_upsample(self, method):\n def upsample(inputs, input_types):\n data = inputs[0]\n out_size = self.get_upsample_out_size(inputs, method)\n\n if len(inputs) > 2 and method != \"nearest_neighbor\":\n align_corners = inputs[2]\n else:\n align_corners = False\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n def func(x):\n return _op.image.resize2d(\n x, out_size, None, \"NCHW\", method, coord_trans, cubic_alpha=-0.75\n )\n\n if self.is_quantized_tensor(data):\n # input qparams are manually appended by us\n assert isinstance(inputs[-2], float)\n assert isinstance(inputs[-1], int)\n input_scale = _expr.const(inputs[-2])\n input_zero_point = _expr.const(inputs[-1])\n return qnn_torch.quantized_upsample(data, input_scale, input_zero_point, func)\n\n return func(data)\n\n return upsample\n\n def make_upsample3d(self, method):\n def upsample3d(inputs, input_types):\n data = inputs[0]\n out_size = self.get_upsample_out_size(inputs, method)\n\n if len(inputs) > 2 and method == \"linear\":\n align_corners = inputs[2]\n else:\n align_corners = False\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n return _op.image.resize3d(data, out_size, None, \"NCDHW\", method, coord_trans)\n\n return upsample3d\n\n def expand_as(self, inputs, input_types):\n target = inputs[1]\n t0 = self.infer_type(inputs[0]).dtype\n t1 = self.infer_type(inputs[1]).dtype\n if str(t0) != str(t1):\n target = _op.cast(target, t0)\n return _op.broadcast_to_like(inputs[0], target)\n\n def Bool(self, inputs, input_types):\n assert len(inputs) == 1\n return inputs[0]\n\n def Float(self, inputs, input_types):\n assert len(inputs) == 1\n return _op.cast(inputs[0], \"float32\")\n\n def bitwise_not(self, inputs, input_types):\n data = inputs[0]\n # The input tensor must be of integral or Boolean types.\n # For bool tensors, it computes 
the logical NOT\n if input_types[0] == \"bool\":\n out = _op.logical_not(_op.cast(data, \"bool\"))\n else:\n out = _op.bitwise_not(_op.cast(data, \"int\"))\n\n return out\n\n def bitwise_xor(self, inputs, input_types):\n lhs = inputs[0]\n rhs = inputs[1]\n lhs = _op.cast(lhs, \"bool\") if input_types[0] == \"bool\" else _op.cast(lhs, \"int\")\n rhs = _op.cast(rhs, \"bool\") if input_types[1] == \"bool\" else _op.cast(rhs, \"int\")\n\n return _op.bitwise_xor(lhs, rhs)\n\n def logical_not(self, inputs, input_types):\n data = _wrap_const(inputs[0])\n return _op.logical_not(_op.cast(data, \"bool\"))\n\n def logical_xor(self, inputs, input_types):\n lhs = _op.cast(inputs[0], \"bool\")\n rhs = _op.cast(inputs[1], \"bool\")\n\n return _op.logical_xor(lhs, rhs)\n\n def list_getitem(self, inputs, input_types):\n return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))\n\n def list_len(self, inputs, input_types):\n return self.prelude.length(inputs[0])\n\n def type_as(self, inputs, input_types):\n assert len(inputs) == 2\n assert len(input_types) == 2\n return _op.cast(inputs[0], input_types[1])\n\n def gather(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n indices = inputs[2]\n\n return _op.gather(data, axis, indices)\n\n def add(self, inputs, input_types):\n # add_ is overloaded for tensor add and list concat\n if input_types[0] == \"ListType\":\n return self.prelude.concat(inputs[0], inputs[1])\n return self.make_elemwise(\"add\")(inputs, input_types)\n\n def tensor_array_stack(self, inputs, input_types):\n dim = inputs[1]\n assert dim == 0, \"stacking on a dynamic tensor list only supported on a first axis\"\n tensor_array, shape = self.convert_to_tensor_array(inputs[0])\n\n stacked_shape = (Any(),) + shape\n stack = self.prelude.get_global_var_static(\"tensor_array_stack\", \"float32\", shape)\n stacked = stack(tensor_array)\n\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", stacked_shape)\n static_tensor_array_ops.register()\n get_tensor = self.prelude.get_global_var_static(\"tensor_get_data\", \"float32\", stacked_shape)\n return get_tensor(stacked)\n\n def stack(self, inputs, input_types):\n if isinstance(inputs[0], list):\n # a static python list of tensors\n dim = inputs[1]\n return _op.stack(inputs[0], dim)\n else:\n # List ADT case\n assert isinstance(inputs[0], _expr.Expr)\n ty = self.infer_type_with_prelude(inputs[0])\n list_ty = self.prelude.mod.get_global_type_var(\"List\")\n msg = \"The input list is expected to be List ADT\"\n assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg\n return self.tensor_array_stack(inputs, input_types)\n\n def rsub(self, inputs, input_types):\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n\n # TODO (t-vi): should this also be part of the type promotion?\n alpha = _expr.const(float(inputs[2]))\n\n # note: rsub means data0 and data1 swap places\n return get_relay_op(\"subtract\")(data1, alpha * data0)\n\n def embedding(self, inputs, input_types):\n weight = inputs[0]\n indices = inputs[1]\n\n return _op.take(weight, indices.astype(\"int32\"), axis=0)\n\n def one_hot(self, inputs, input_types):\n indices = inputs[0].astype(\"int32\")\n num_classes = inputs[1]\n if num_classes == -1:\n msg = \"Inferring the number of classes is not yet supported.\"\n raise NotImplementedError(msg)\n\n dtype = \"int32\"\n on_value = tvm.relay.const(1.0, dtype)\n off_value = tvm.relay.const(0.0, dtype)\n\n return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)\n\n def 
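cross_entropy_sketch(self, inputs, input_types):\n # Editor's hedged sketch, NOT part of the original converter and not wired into\n # convert_map; it only illustrates how existing converters compose: cross-entropy\n # decomposes into log_softmax followed by nll_loss (defined further below). The\n # 5-element input layout is assumed for illustration, not taken from the original file.\n predictions, targets, weights, reduction, ignore_index = inputs\n log_prob = _op.nn.log_softmax(predictions, axis=1)\n return self.nll_loss([log_prob, targets, weights, reduction, ignore_index], input_types)\n\n def 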
index(self, inputs, input_types):\n data = inputs[0]\n indices = inputs[1]\n return _op.adv_index([data] + indices)\n\n def meshgrid(self, inputs, input_types):\n data = inputs[0]\n return _op.meshgrid(data, indexing=\"ij\")\n\n def nms(self, inputs, input_types):\n boxes = inputs[0]\n scores = inputs[1]\n iou_threshold = inputs[2]\n\n # TVM NMS assumes score > 0\n scores = scores - _op.min(scores) + _op.const(1.0)\n\n num_boxes = _op.shape_of(scores)\n # PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count\n indices = _op.transform.arange(_op.squeeze(num_boxes), dtype=\"int32\")\n indices = _op.expand_dims(indices, 0, 1)\n\n # Generate data with shape (1, num_anchors, 5)\n scores = AttrCvt(op_name=\"expand_dims\", extras={\"axis\": -1, \"num_newaxis\": 1})([scores], {})\n data = _op.concatenate([scores, boxes], -1)\n data = _op.expand_dims(data, 0, 1)\n\n # Perform Non-Maximum Suppression,\n # PyTorch NMS doesn't have parameter top_k and max_output_size\n score_index = 0\n top_k = max_out_size = -1\n nms_ret = get_relay_op(\"non_max_suppression\")(\n data=data,\n valid_count=num_boxes,\n indices=indices,\n max_output_size=max_out_size,\n iou_threshold=iou_threshold,\n force_suppress=True,\n top_k=top_k,\n coord_start=1,\n score_index=score_index,\n id_index=-1,\n return_indices=True,\n invalid_to_bottom=False,\n )\n\n # squeeze the two outputs of nms for strided_slice\n size = get_relay_op(\"squeeze\")(nms_ret[1], axis=[1])\n data_slice = get_relay_op(\"squeeze\")(nms_ret[0], axis=[0])\n\n # strided slice to get the dynamic result\n ret = get_relay_op(\"strided_slice\")(\n data_slice, begin=_expr.const([0]), end=size, slice_mode=\"size\"\n )\n # in torchvision, indices from nms are int64\n return _op.cast(ret, \"int64\")\n\n def logsumexp(self, inputs, input_types):\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n dim_list = inputs[1]\n keepdim = inputs[2] if len(inputs) > 2 else False\n # dim is output of prim::ListConstruct, even if it is int in python code\n assert isinstance(dim_list, list), \"dim is expected to be a list\"\n return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)\n\n def roi_align(self, inputs, input_types):\n data = inputs[0]\n boxes = inputs[1]\n\n output_size = (inputs[3], inputs[4])\n spatial_scale = inputs[2]\n sample_ratio = inputs[5]\n aligned = False if len(inputs) < 7 else inputs[6]\n\n if aligned:\n boxes -= _expr.const(0.5 / spatial_scale)\n\n return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)\n\n def deform_conv2d(self, inputs, input_types):\n data = inputs[0]\n weight = inputs[1]\n offset = inputs[2]\n\n if len(inputs) > 12:\n strides_offset = 5\n bias = inputs[4]\n logging.warning(\"mask argument in deformable conv2d is not supported and ignored\")\n else:\n strides_offset = 4\n bias = inputs[3]\n\n strides = (inputs[strides_offset], inputs[strides_offset + 1])\n padding = (inputs[strides_offset + 2], inputs[strides_offset + 3])\n dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5])\n groups = inputs[strides_offset + 6]\n deformable_groups = inputs[strides_offset + 7]\n weight_shape = self.infer_shape(weight)\n output_channels = weight_shape[0]\n kernel_size = (weight_shape[2], weight_shape[3])\n\n conv_out = _op.nn.deformable_conv2d(\n data,\n offset,\n weight,\n strides,\n padding,\n dilation,\n deformable_groups,\n groups,\n output_channels,\n kernel_size,\n )\n\n return _op.nn.bias_add(conv_out, bias)\n\n def unbind(self, inputs, input_types):\n data = 
inputs[0]\n axis = int(inputs[1])\n return unbind(data, axis)\n\n def shape_as_tensor(self, inputs, input_types):\n is_symbolic_shape = False\n input_shape = self.infer_shape(inputs[0], self.prelude.mod)\n for axis in input_shape:\n if not isinstance(axis, (int, tvm.tir.IntImm)):\n is_symbolic_shape = True\n break\n\n if is_symbolic_shape:\n ret = _op.shape_of(inputs[0], dtype=\"int64\")\n else:\n ret = _expr.const(np.array(input_shape), dtype=\"int64\")\n\n return ret\n\n def logical_and(self, inputs, input_types):\n lhs = _op.cast(inputs[0], \"bool\")\n rhs = _op.cast(inputs[1], \"bool\")\n\n return _op.logical_and(lhs, rhs)\n\n def nonzero(self, inputs, input_types, is_numpy_style=False):\n data = inputs[0]\n ret = _op.transform.argwhere(data)\n if is_numpy_style or (len(inputs) > 1 and inputs[1]):\n return unbind(ret, 1)\n return ret\n\n def nonzero_numpy(self, inputs, input_types):\n return self.nonzero(inputs, input_types, is_numpy_style=False)\n\n def scatter(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[1])\n index = inputs[2]\n src = inputs[3]\n return _op.transform.scatter(data, index, src, axis)\n\n def index_put(self, inputs, input_types):\n in_tensor = inputs[0]\n indices = inputs[1]\n values = inputs[2]\n accumulate = inputs[3]\n if not accumulate:\n mode = \"update\"\n else:\n mode = \"add\"\n # Combine array of index tensors into one index tensor with shape (N,_)\n index_tensor = _op.stack(indices, axis=0)\n return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)\n\n def scalar_tensor(self, inputs, input_types):\n data = inputs[0]\n cast_map = {\n 6: \"float32\",\n 7: \"float64\",\n 3: \"int32\",\n 4: \"int64\",\n }\n type_key = inputs[1]\n if isinstance(data, _expr.Constant):\n data = data.data.numpy().tolist()\n return _expr.const(data, cast_map[type_key])\n\n def interpolate(self, inputs, input_types):\n if isinstance(inputs[1], _expr.Expr):\n out_size = inputs[1]\n elif isinstance(inputs[1], list):\n out_size = []\n for i in [0, 1]:\n size, _ = try_infer_value(\n inputs[1][i],\n lambda ret: ret.astype(np.int),\n lambda: _op.expand_dims(inputs[1][i], axis=0),\n )\n out_size.append(size)\n out_size = _op.concatenate(out_size, axis=0)\n\n data = inputs[0]\n align_corners = inputs[4]\n method = inputs[3]\n if method.startswith(\"nearest\"):\n method = \"nearest_neighbor\"\n elif method[0:2] == \"bi\":\n method = method[2:]\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n return _op.image.resize2d(\n data, out_size, None, \"NCHW\", method, coord_trans, cubic_alpha=-0.75\n )\n\n def numel(self, inputs, input_types):\n return _op.ndarray_size(inputs[0])\n\n def empty(self, inputs, input_types):\n shape = inputs[0]\n return _op.zeros(shape, _convert_dtype_value(inputs[1]))\n\n def bincount(self, inputs, input_types):\n data = inputs[0]\n weights = inputs[1]\n input_type = self.infer_type(data).dtype\n if input_type == \"int64\":\n logger.warning(\n \"Casting an int64 input to int32, since we do not have int64 atomic add\"\n \"needed for bincount yet.\"\n )\n data = _op.cast(data, \"int32\")\n maximum = _op.max(data)\n dim = maximum + _expr.const(1, dtype=\"int32\")\n if weights:\n weight_type = self.infer_type(weights)\n out_dtype = weight_type.dtype\n updates = weights\n else:\n out_dtype = \"int32\"\n updates = _op.ones_like(data)\n\n counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)\n out = _op.scatter_add(counts, 
data, updates, axis=0)\n if input_type == \"int32\":\n # Torch always outputs int64 results for bincount\n return _op.cast(out, \"int64\")\n return out\n\n def scatter_add(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n index = inputs[2]\n src = inputs[3]\n return _op.scatter_add(data, index, src, axis=axis)\n\n def cumsum(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n dtype = inputs[2]\n\n if inputs[2] is not None:\n dtype = _convert_dtype_value(inputs[2])\n\n return _op.cumsum(data, axis=dim, dtype=dtype)\n\n def masked_fill(self, inputs, input_types):\n mask = inputs[1]\n value = _op.cast(_wrap_const(inputs[2]), input_types[0])\n return _op.where(mask, value, inputs[0])\n\n def masked_select(self, inputs, input_types):\n mask = inputs[1]\n indices = self.nonzero([mask], input_types, is_numpy_style=True)\n return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])\n\n def sort(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n is_descending = inputs[2]\n # pytorch sort returns both sorted indices and values\n indices = _op.argsort(data, dim, not is_descending)\n return _op.gather(data, dim, indices), indices\n\n def argsort(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n is_descending = inputs[2]\n return _op.argsort(data, dim, not is_descending)\n\n def is_floating_point(self, inputs, input_types):\n assert len(inputs) == 1\n\n if isinstance(inputs[0], _expr.Expr):\n input_type = self.infer_type(inputs[0]).dtype\n else:\n input_type = input_types[0]\n\n is_float = input_type in [\"float32\", \"float64\", \"float16\", \"bfloat16\"]\n return _expr.const(is_float)\n\n def unique(self, inputs, input_types):\n assert len(inputs) == 4\n [data, is_sorted, return_inverse, return_counts] = inputs\n if not is_sorted:\n logger.warning(\"TVM always assumes sorted=True for torch.unique\")\n is_sorted = True\n if return_counts:\n [unique, indices, inverse_indices, num_uniq, counts] = _op.unique(\n data, is_sorted=is_sorted, return_counts=True\n )\n unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode=\"size\")\n counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode=\"size\")\n return (unique_sliced, inverse_indices, counts_sliced)\n else:\n [unique, indices, inverse_indices, num_uniq] = _op.unique(\n data, is_sorted=is_sorted, return_counts=False\n )\n unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode=\"size\")\n return (unique_sliced, inverse_indices)\n\n def nll_loss(self, inputs, input_types):\n assert len(inputs) == 5\n [predictions, targets, weights, reduction, ignore_index] = inputs\n num_class = self.infer_shape(predictions)[1]\n if reduction == 0:\n reduction = \"none\"\n elif reduction == 1:\n reduction = \"mean\"\n else:\n reduction = \"sum\"\n if weights is None:\n weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])\n return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)\n\n def flip(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n return _op.transform.reverse(data, axis=axis[0])\n\n def bidir_gru_cell(\n self,\n input_seqs,\n weights_dicts,\n ):\n \"\"\"\n Bidirectional GRU cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t = gru_cell(\n input_seqs,\n **weights_dicts[0],\n )\n\n reverse_outputs, rev_H_t = gru_cell(\n input_seqs,\n **weights_dicts[1],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n 
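# (editor note) the reverse pass emits outputs back-to-front, so index seq_len - 1 - i\n # realigns it with the forward pass before the per-step concatenation along the feature axis.\n 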
final_outputs.append(\n _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)\n )\n\n return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)\n\n def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):\n \"\"\"\n Methods iterates layers for Stacked GRU\n \"\"\"\n layers_num = len(layer_weights_dicts)\n # split input sequence to samples set\n input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]\n output_hiddens = []\n for i in range(layers_num):\n weights_dicts = layer_weights_dicts[i]\n # input_seqs shape = [seq_num, (batch, feature_size)] or\n # [seq_num, (batch, 2*feature_size)] for bidirectional\n if bidirectional:\n input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)\n else:\n input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])\n\n output_hiddens.append(H_t)\n\n # TODO (vvchernov): in pytorch implementation train is also checked\n # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339\n # /aten/src/ATen/native/RNN.cpp#L1054\n if dropout_p != 0 and i < layers_num - 1:\n # for input in input_seqs:\n # input = _op.dropout(input, dropout_p)\n raise NotImplementedError(\"Dropout for GRU has not been supported yet!\")\n\n return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)\n\n def gru(self, inputs, input_types):\n \"\"\"\n Description of GRU in pytorch:\n https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU\n \"\"\"\n # TODO (vvchernov): support dropout\n assert len(inputs) == 9, \"Input of size 9 is expected\"\n # Unpack inputs, note that if optional and not provided then value will be None.\n _X = inputs[0]\n # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)\n\n hidden_state = inputs[1]\n # Hidden state shape (hidden_layers_num, batch, hidden_size)\n\n _weights = inputs[2]\n # Wi layer[0] shape (3 * hidden_size, feature_size)\n # Wh layer[0] shape (3 * hidden_size, hidden_size)\n # Bi layer[0] shape (3 * hidden_size)\n # Bh layer[0] shape (3 * hidden_size)\n\n # Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)\n # Wh layer[>0] shape (3 * hidden_size, hidden_size)\n # Bi layer[>0] shape (3 * hidden_size)\n # Bh layer[>0] shape (3 * hidden_size)\n\n # Scalar inputs\n has_biases = inputs[3]\n num_layers = inputs[4]\n dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout\n # train = inputs[6]\n bidirectional = inputs[7]\n batch_first = inputs[8]\n\n num_directions = 1\n if bidirectional:\n num_directions = 2\n\n rsd = len(_weights) % num_layers\n assert rsd == 0, \"The number of weights must be a multiple of the number of layers!\"\n rsd = (len(_weights) / num_layers) % num_directions\n assert (\n rsd == 0\n ), \"The number of weights in layer must be a multiple of the number of directions!\"\n\n weights_num = int(len(_weights) / num_layers / num_directions)\n if has_biases:\n assert weights_num == 4, \"The weights number in layer is expected equal to 4\"\n else:\n assert weights_num == 2, \"The weights number in layer is expected equal to 2\"\n\n X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X\n # TODO (vvchernov): Which data type should be used? 
from input or weights?\n # Instead of it _infer_type(X).checked_type.dtype can be used\n X_dtype = input_types[0]\n X_shape = _infer_shape(X) # (seq_num, batch, feature_size)\n\n hidden_size = int(_infer_shape(_weights[0])[0] / 3)\n batch_size = X_shape[1]\n\n # Initialize hidden states if not provided.\n layers_h = []\n hidden_layers_num = num_directions * num_layers\n if hidden_state is None:\n h_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_h.append(h_0)\n else:\n layers_h = unbind(hidden_state, 0)\n\n layer_weights_dicts = []\n k = 0 # layer counter\n if has_biases:\n names = [\"hidden_state\", \"w_inp\", \"w_hid\", \"b_inp\", \"b_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n else:\n names = [\"hidden_state\", \"w_inp\", \"w_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n assert (\n len(layer_weights_dicts) == num_layers and k == num_layers\n ), \"For stacked GRU number of weights sets should be the same as number of layers!\"\n\n output, out_hidden_state = self.gru_layers(\n X,\n layer_weights_dicts,\n bidirectional,\n dropout_p=dropout_p,\n )\n\n # output shape = (seq_num, batch, hidden_size) or\n # (seq_num, batch, 2*feature_size) for bidirectional\n if batch_first:\n output = _op.transpose(output, (1, 0, 2))\n\n return (output, out_hidden_state)\n\n def bidir_lstm_cell(\n self,\n input_seqs,\n weights_dicts,\n ):\n \"\"\"\n Bidirectional LSTM cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t, fw_C_t = lstm_cell(\n input_seqs,\n **weights_dicts[0],\n )\n\n reverse_outputs, rev_H_t, rev_C_t = lstm_cell(\n input_seqs,\n **weights_dicts[1],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n final_outputs.append(\n _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)\n )\n\n return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)\n\n def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):\n \"\"\"\n Methods iterates layers for Stacked LSTM\n \"\"\"\n 
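# (editor note) layer_weights_dicts holds one weights dict per direction for each layer;\n # each layer's per-step outputs become the next layer's input sequence, and the collected\n # (H_t, C_t) pairs let lstm() below stack the final hidden and cell states.\n 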
layers_num = len(layer_weights_dicts)\n # split input sequence to samples set\n input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]\n output_hiddens = []\n for i in range(layers_num):\n weights_dicts = layer_weights_dicts[i]\n # input_seqs shape = [seq_num, (batch, feature_size)] or\n # [seq_num, (batch, 2*feature_size)] for bidirectional\n if bidirectional:\n input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)\n else:\n input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])\n\n output_hiddens.append((H_t, C_t))\n\n # TODO (vvchernov): in pytorch implementation train is also checked\n # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339\n # /aten/src/ATen/native/RNN.cpp#L1054\n if dropout_p != 0 and i < layers_num - 1:\n # for input in input_seqs:\n # input = _op.dropout(input, dropout_p)\n raise NotImplementedError(\"Dropout for LSTM has not been supported yet!\")\n final_hiddens = []\n if bidirectional:\n for output_hidden in output_hiddens:\n final_hiddens.append(output_hidden[0])\n final_hiddens.append(output_hidden[1])\n else:\n final_hiddens = output_hiddens\n\n return _op.stack(input_seqs, 0), final_hiddens\n\n def lstm(self, inputs, input_types):\n \"\"\"\n Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html\n Native implementation for torch version less than 1.8.0 (projection is unsupported):\n https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \\\n src/ATen/native/RNN.cpp#L1396\n Native implementation for torch version from 1.8.0 and higher (projection is supported):\n https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483\n \"\"\"\n # TODO (vvchernov): support dropout\n assert len(inputs) == 9, \"Input of size 9 is expected\"\n # Unpack inputs, note that if optional and not provided then value will be None.\n _X = inputs[0]\n # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)\n\n hidden_states = inputs[1]\n assert len(hidden_states) == 2, \"lstm expects two hidden states\"\n h_0 = hidden_states[0]\n c_0 = hidden_states[1]\n # H0 shape (hidden_layers_num, batch, proj_size) if projection\n # else (hidden_layers_num, batch, hidden_size)\n # C0 shape (hidden_layers_num, batch, hidden_size)\n\n _weights = inputs[2]\n # If no projection\n # Wi layer[0] shape (4 * hidden_size, feature_size)\n # Wh layer[0] shape (4 * hidden_size, hidden_size)\n # Bi layer[0] shape (4 * hidden_size)\n # Bh layer[0] shape (4 * hidden_size)\n\n # Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)\n # Wh layer[>0] shape (4 * hidden_size, hidden_size)\n # Bi layer[>0] shape (4 * hidden_size)\n # Bh layer[>0] shape (4 * hidden_size)\n\n # If projection\n # Wi layer[0] shape (4 * hidden_size, feature_size)\n # Wh layer[0] shape (4 * hidden_size, proj_size)\n # Bi layer[0] shape (4 * hidden_size)\n # Bh layer[0] shape (4 * hidden_size)\n # P layer[0] shape (proj_size, hidden_size)\n\n # Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)\n # Wh layer[>0] shape (4 * hidden_size, proj_size)\n # Bi layer[>0] shape (4 * hidden_size)\n # Bh layer[>0] shape (4 * hidden_size)\n # P layer[>0] shape (proj_size, hidden_size)\n\n # Scalar inputs\n has_biases = inputs[3]\n num_layers = inputs[4]\n dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout\n # train = inputs[6]\n bidirectional = inputs[7]\n batch_first = inputs[8]\n\n num_directions = 1\n if 
bidirectional:\n num_directions = 2\n\n rsd = len(_weights) % num_layers\n assert rsd == 0, \"The number of weights must be a multiple of the number of layers!\"\n rsd = (len(_weights) / num_layers) % num_directions\n assert (\n rsd == 0\n ), \"The number of weights in layer must be a multiple of the number of directions!\"\n has_proj = False\n proj_size = 0\n weights_num = int(len(_weights) / num_layers / num_directions)\n if has_biases:\n if weights_num == 5:\n has_proj = True\n proj_size = _infer_shape(_weights[4])[0]\n else:\n assert weights_num == 4, \"The weights number in layer is expected equal to 4\"\n else:\n if weights_num == 3:\n has_proj = True\n proj_size = _infer_shape(_weights[2])[0]\n else:\n assert weights_num == 2, \"The weights number in layer is expected equal to 2\"\n\n X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X\n # TODO (vvchernov): Which data type should be used? from input or weights?\n # Instead of it _infer_type(X).checked_type.dtype can be used\n X_dtype = input_types[0]\n X_shape = _infer_shape(X) # (seq_num, batch, feature_size)\n\n hidden_size = _infer_shape(_weights[0])[0] / 4\n batch_size = X_shape[1]\n\n # Initialize hidden states if not provided.\n layers_h = []\n layers_c = []\n hidden_layers_num = num_directions * num_layers\n if h_0 is None:\n if has_proj:\n h_0 = _op.zeros((batch_size, proj_size), X_dtype)\n else:\n h_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_h.append(h_0)\n else:\n layers_h = unbind(h_0, 0)\n if c_0 is None:\n c_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_c.append(c_0)\n else:\n layers_c = unbind(c_0, 0)\n\n layer_weights_dicts = []\n k = 0 # layer counter\n if has_biases:\n names = [\"hidden_state\", \"cell_state\", \"w_inp\", \"w_hid\", \"b_inp\", \"b_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 4]\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n if has_proj:\n rev_weights_dict[\"proj\"] = _weights[j + 4]\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 4]\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n else:\n names = [\"hidden_state\", \"cell_state\", \"w_inp\", \"w_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 2]\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n if has_proj:\n rev_weights_dict[\"proj\"] = _weights[j + 2]\n 
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 2]\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n assert (\n len(layer_weights_dicts) == num_layers and k == num_layers\n ), \"For stacked LSTM number of weights sets should be the same as number of layers!\"\n\n outputs = self.lstm_layers(\n X,\n layer_weights_dicts,\n bidirectional,\n dtype=X_dtype,\n dropout_p=dropout_p,\n )\n\n # output shape = (seq_num, batch, hidden_size) or\n # (seq_num, batch, 2*feature_size) for bidirectional\n output = outputs[0]\n\n hy = []\n cy = []\n for hidden in outputs[1]:\n hy.append(hidden[0])\n cy.append(hidden[1])\n\n if batch_first:\n output = _op.transpose(output, (1, 0, 2))\n\n return (output, _op.stack(hy, 0), _op.stack(cy, 0))\n\n def all_any_common(self, op, inputs, input_types):\n dim = inputs[1]\n keepdim = inputs[2]\n if self.infer_type(inputs[0]).dtype != \"bool\":\n # The input dtype can be uint8.\n inp = _op.cast(inputs[0], \"bool\")\n else:\n inp = inputs[0]\n return op(inp, axis=dim, keepdims=keepdim)\n\n def searchsorted_common(self, sorted_sequence, values, out_int32, right):\n dtype = \"int32\" if out_int32 else \"int64\"\n values_shape = _infer_shape(values)\n\n if len(values_shape) == 0:\n values = _op.expand_dims(values, 0)\n\n out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)\n\n if len(values_shape) == 0:\n return _op.squeeze(out)\n\n return out\n\n def searchsorted(self, inputs, input_types):\n return self.searchsorted_common(*inputs)\n\n def bucketize(self, inputs, input_types):\n return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])\n\n def roll(self, inputs, input_types):\n def slide_axes(inp, shape, ax):\n axes = list(range(len(shape)))\n axes = axes[:ax] + [-1] + axes[ax:-1]\n return _op.transpose(inp, axes)\n\n x = inputs[0]\n shifts = inputs[1]\n dims = inputs[2]\n shape = self.infer_shape(x)\n start = _expr.const(0, \"int64\")\n step = _expr.const(1, \"int64\")\n\n out = x\n for i, dim in enumerate(dims):\n roll_dim = _expr.const(shape[dim], \"int64\")\n indices_1d = _op.mod(\n _op.transform.arange(start, roll_dim, step, \"int64\")\n - _expr.const(shifts[i], \"int64\")\n + roll_dim,\n roll_dim,\n )\n # First fill in the last axis with roll indices, and then do transpose to\n # bring the roll indices into the desired axis.\n indices = slide_axes(\n _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)),\n shape,\n dim,\n )\n out = _op.gather(out, dim, indices)\n\n return out\n\n def einsum(self, inputs, input_types):\n equation, data = inputs\n return _op.einsum(data, equation)\n\n def dot(self, inputs, _):\n lhs, rhs = inputs\n return _op.sum(_op.multiply(lhs, rhs))\n\n def mv(self, inputs, _):\n lhs, rhs = inputs\n\n # Convert the 1D matrix (vector) into a 2D matrix with the extra\n # dimension=1\n rhs_matrix = _op.transform.expand_dims(rhs, 0)\n\n # Run multiplication\n dense_result = _op.nn.dense(lhs, rhs_matrix, units=None)\n\n # Chop off the extra result dimension\n return _op.transform.squeeze(dense_result)\n\n # Operator mappings\n def create_convert_map(self):\n self.convert_map = {\n \"aten::is_floating_point\": self.is_floating_point,\n \"aten::pixel_shuffle\": 
self.pixel_shuffle,\n \"aten::device\": self.none,\n \"prim::device\": self.none,\n \"aten::sub\": self.make_elemwise(\"subtract\"),\n \"aten::max\": self.max,\n \"aten::min\": self.min,\n \"aten::mul\": self.make_elemwise(\"multiply\"),\n \"aten::pow\": self.make_elemwise(\"power\"),\n \"aten::arange\": self.arange,\n \"aten::meshgrid\": self.meshgrid,\n \"aten::div\": self.make_elemwise(\"divide\"),\n \"aten::floor_divide\": self.make_elemwise(\"floor_divide\"),\n \"aten::true_divide\": self.make_elemwise(\"divide\"),\n \"aten::addcdiv\": self.addcdiv,\n \"aten::addcmul\": self.addcmul,\n \"aten::ones\": self.ones,\n \"aten::ones_like\": self.ones_like,\n \"aten::zeros\": self.zeros,\n \"aten::zeros_like\": self.zeros_like,\n \"aten::full\": self.full,\n \"aten::full_like\": self.full_like,\n \"aten::linspace\": self.linspace,\n \"aten::reciprocal\": self.reciprocal,\n \"aten::repeat\": self.repeat,\n \"aten::repeat_interleave\": self.repeat_interleave,\n \"aten::to\": self.to,\n \"aten::squeeze\": self.squeeze,\n \"aten::unsqueeze\": self.unsqueeze,\n \"aten::cat\": self.concatenate,\n \"aten::slice\": self.slice,\n \"aten::narrow\": self.narrow,\n \"aten::split\": self.split,\n \"aten::split_with_sizes\": self.split_with_sizes,\n \"aten::select\": self.select,\n \"aten::take\": self.take,\n \"aten::where\": self.where,\n \"aten::topk\": self.topk,\n \"aten::relu\": self.relu,\n \"aten::prelu\": self.prelu,\n \"aten::leaky_relu\": self.leaky_relu,\n \"aten::elu\": self.elu,\n \"aten::celu\": self.celu,\n \"aten::gelu\": self.gelu,\n \"aten::selu\": self.selu,\n \"aten::silu\": self.silu,\n \"aten::log_sigmoid\": self.log_sigmoid,\n \"aten::adaptive_avg_pool1d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d\n ),\n \"aten::adaptive_avg_pool2d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d\n ),\n \"aten::adaptive_avg_pool3d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d\n ),\n \"aten::adaptive_max_pool1d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool1d\n ),\n \"aten::adaptive_max_pool2d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool2d\n ),\n \"aten::adaptive_max_pool3d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool3d\n ),\n \"aten::max_pool2d\": self.maxpool_2d,\n \"aten::max_pool2d_with_indices\": self.maxpool_2d_with_indices,\n \"aten::max_pool1d\": self.maxpool_1d,\n \"aten::max_pool3d\": self.maxpool_3d,\n \"aten::hardtanh\": self.hardtanh,\n \"aten::_convolution\": self.convolution,\n \"aten::softmax\": self.softmax,\n \"aten::threshold\": self.threshold,\n \"aten::contiguous\": self.contiguous,\n \"aten::batch_norm\": self.batch_norm,\n \"aten::instance_norm\": self.instance_norm,\n \"aten::layer_norm\": self.layer_norm,\n \"aten::group_norm\": self.group_norm,\n \"aten::transpose\": self.transpose,\n \"aten::t\": self.transpose,\n \"aten::flatten\": self.flatten,\n \"aten::addmm\": self.addmm,\n \"aten::size\": self.size,\n \"aten::view\": self.view,\n \"aten::reshape\": self.reshape,\n \"aten::clone\": self.clone,\n \"aten::log_softmax\": self.log_softmax,\n \"aten::sigmoid\": self.sigmoid,\n \"aten::softplus\": self.softplus,\n \"aten::avg_pool1d\": self.make_avg_pool(1),\n \"aten::avg_pool2d\": self.make_avg_pool(2),\n \"aten::avg_pool3d\": self.make_avg_pool(3),\n \"aten::linear\": self.linear,\n \"aten::dropout\": self.dropout,\n \"aten::feature_dropout\": self.dropout,\n \"aten::alpha_dropout\": self.dropout,\n 
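# (editor note) in-place variants are not listed separately: report_missing_conversion()\n # below already treats names like \"aten::relu_\" as known when the out-of-place form is mapped here.\n 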
\"aten::mean\": self.mean,\n \"aten::chunk\": self.chunk,\n \"aten::unsafe_chunk\": self.chunk,\n \"aten::matmul\": self.matmul,\n \"aten::bmm\": self.matmul,\n \"aten::expand\": self.expand,\n \"aten::Int\": self.int,\n \"prim::NumToTensor\": self.numtotensor,\n \"prim::ImplicitTensorToNum\": self.tensortonum,\n \"aten::ScalarImplicit\": self.tensortonum,\n \"aten::constant_pad_nd\": self.make_pad(\"constant\"),\n \"aten::reflection_pad1d\": self.make_pad(\"reflect\"),\n \"aten::reflection_pad2d\": self.make_pad(\"reflect\"),\n \"aten::replication_pad1d\": self.make_pad(\"edge\"),\n \"aten::replication_pad2d\": self.make_pad(\"edge\"),\n \"aten::replication_pad3d\": self.make_pad(\"edge\"),\n \"aten::permute\": self.transpose,\n \"aten::sum\": self.make_reduce(\"sum\"),\n \"aten::prod\": self.make_reduce(\"prod\"),\n \"aten::argmin\": self.make_reduce(\"argmin\"),\n \"aten::argmax\": self.make_reduce(\"argmax\"),\n \"aten::norm\": self.norm,\n \"aten::frobenius_norm\": self.frobenius_norm,\n \"aten::std\": self.std,\n \"aten::var\": self.variance,\n \"aten::abs\": self.make_unary(\"abs\"),\n \"aten::neg\": self.make_unary(\"negative\"),\n \"aten::cos\": self.make_unary(\"cos\"),\n \"aten::cosh\": self.make_unary(\"cosh\"),\n \"aten::sin\": self.make_unary(\"sin\"),\n \"aten::sinh\": self.make_unary(\"sinh\"),\n \"aten::tan\": self.make_unary(\"tan\"),\n \"aten::tanh\": self.make_unary(\"tanh\"),\n \"aten::acos\": self.make_unary(\"acos\"),\n \"aten::asin\": self.make_unary(\"asin\"),\n \"aten::atan\": self.make_unary(\"atan\"),\n \"aten::log\": self.make_unary(\"log\"),\n \"aten::log2\": self.make_unary(\"log2\"),\n \"aten::log10\": self.make_unary(\"log10\"),\n \"aten::log1p\": self.log1p,\n \"aten::exp\": self.make_unary(\"exp\"),\n \"aten::erf\": self.make_unary(\"erf\"),\n \"aten::trunc\": self.make_unary(\"trunc\"),\n \"aten::sign\": self.make_unary(\"sign\"),\n \"aten::sqrt\": self.make_unary(\"sqrt\"),\n \"aten::rsqrt\": self.make_unary(\"rsqrt\"),\n \"aten::ceil\": self.make_unary(\"ceil\"),\n \"aten::floor\": self.make_unary(\"floor\"),\n \"aten::round\": self.make_unary(\"round\"),\n \"aten::isfinite\": self.make_unary(\"isfinite\"),\n \"aten::isinf\": self.make_unary(\"isinf\"),\n \"aten::isnan\": self.make_unary(\"isnan\"),\n \"aten::clamp\": self.clamp,\n \"aten::detach\": self.identity,\n \"aten::upsample_bilinear2d\": self.make_upsample(\"linear\"),\n \"aten::upsample_bicubic2d\": self.make_upsample(\"cubic\"),\n \"aten::upsample_nearest2d\": self.make_upsample(\"nearest_neighbor\"),\n \"aten::upsample_trilinear3d\": self.make_upsample3d(\"linear\"),\n \"aten::upsample_nearest3d\": self.make_upsample3d(\"nearest_neighbor\"),\n \"aten::expand_as\": self.expand_as,\n \"aten::lt\": self.make_elemwise(\"less\"),\n \"aten::gt\": self.make_elemwise(\"greater\"),\n \"aten::le\": self.make_elemwise(\"less_equal\"),\n \"aten::ge\": self.make_elemwise(\"greater_equal\"),\n \"aten::ne\": self.make_elemwise(\"not_equal\"),\n \"aten::eq\": self.make_elemwise(\"equal\"),\n \"aten::logical_not\": self.logical_not,\n \"aten::logical_xor\": self.logical_xor,\n \"aten::bitwise_not\": self.bitwise_not,\n \"aten::bitwise_xor\": self.bitwise_xor,\n \"aten::Bool\": self.Bool,\n \"aten::Float\": self.Float,\n \"aten::rsub\": self.rsub,\n \"aten::embedding\": self.embedding,\n \"aten::one_hot\": self.one_hot,\n \"aten::mm\": self.matmul,\n \"aten::add\": self.add,\n \"aten::stack\": self.stack,\n \"aten::__getitem__\": self.list_getitem,\n \"aten::len\": self.list_len,\n \"aten::type_as\": 
self.type_as,\n \"aten::gather\": self.gather,\n \"aten::index_select\": self.select,\n \"aten::index\": self.index,\n \"torchvision::nms\": self.nms,\n \"aten::logsumexp\": self.logsumexp,\n \"torchvision::roi_align\": self.roi_align,\n \"torchvision::deform_conv2d\": self.deform_conv2d,\n \"aten::unbind\": self.unbind,\n \"aten::__and__\": self.logical_and,\n \"aten::logical_and\": self.logical_and,\n \"aten::_shape_as_tensor\": self.shape_as_tensor,\n \"aten::nonzero\": self.nonzero,\n \"aten::nonzero_numpy\": self.nonzero_numpy,\n \"aten::scatter\": self.scatter,\n \"aten::index_put\": self.index_put,\n \"aten::scalar_tensor\": self.scalar_tensor,\n \"aten::__interpolate\": self.interpolate,\n \"aten::IntImplicit\": self.identity,\n \"aten::tensor\": self.identity, # used for example in tensor(1.0)\n \"aten::numel\": self.numel,\n \"aten::empty\": self.empty,\n \"aten::bincount\": self.bincount,\n \"aten::scatter_add\": self.scatter_add,\n \"aten::__not__\": self.logical_not,\n \"aten::hardswish\": self.hard_swish,\n \"aten::hardsigmoid\": self.hard_sigmoid,\n \"aten::cumsum\": self.cumsum,\n \"aten::masked_fill\": self.masked_fill,\n \"aten::masked_select\": self.masked_select,\n \"aten::argsort\": self.argsort,\n \"aten::sort\": self.sort,\n \"aten::_unique2\": self.unique,\n \"aten::nll_loss\": self.nll_loss,\n \"aten::nll_loss2d\": self.nll_loss,\n \"aten::nll_loss_nd\": self.nll_loss,\n \"aten::flip\": self.flip,\n \"aten::gru\": self.gru,\n \"aten::lstm\": self.lstm,\n \"aten::all\": functools.partial(self.all_any_common, _op.all),\n \"aten::any\": functools.partial(self.all_any_common, _op.any),\n \"aten::searchsorted\": self.searchsorted,\n \"aten::bucketize\": self.bucketize,\n \"aten::roll\": self.roll,\n \"aten::einsum\": self.einsum,\n \"aten::dot\": self.dot,\n \"aten::mv\": self.mv,\n }\n\n def update_convert_map(self, custom_map):\n self.convert_map.update(custom_map)\n\n def report_missing_conversion(self, op_names):\n \"\"\"Check if all ops in an input graph are supported by TVM\"\"\"\n known_ops = [\n \"prim::Constant\",\n \"prim::GetAttr\",\n \"prim::ListConstruct\",\n \"prim::ListUnpack\",\n \"prim::TupleConstruct\",\n \"prim::TupleUnpack\",\n \"prim::RaiseException\",\n \"prim::If\",\n \"prim::Loop\",\n ]\n known_ops += list(self.convert_map.keys())\n known_ops += list(qnn_torch.convert_map.keys())\n\n missing = []\n\n for op_name in op_names:\n # Also take care of in-place variant ops like aten::relu_\n if op_name not in known_ops and not (\n op_name.endswith(\"_\") and op_name[:-1] in known_ops\n ):\n missing.append(op_name)\n\n if missing:\n msg = \"The following operators are not implemented: {}\".format(missing)\n raise NotImplementedError(msg)\n\n def convert_block(self, block, outputs):\n \"\"\"Translate Torch \"Block\", used for prim::If and prim::Loop\"\"\"\n ops = _get_operator_nodes(block.nodes())\n ret_names = _get_input_names(block.returnNode())\n return self.convert_operators(ops, outputs, ret_names)\n\n def convert_if(self, if_node, outputs):\n \"\"\"Translate Torch prim::If to Relay If\"\"\"\n cond = outputs[if_node.inputsAt(0).debugName()]\n blocks = list(if_node.blocks())\n true_branch = self.convert_block(blocks[0], outputs)\n false_branch = self.convert_block(blocks[1], outputs)\n assert len(true_branch) == 1 and len(false_branch) == 1\n return _expr.If(cond, true_branch[0], false_branch[0])\n\n def convert_loop(self, loop_node, outputs):\n \"\"\"Translate Torch prim::Loop to Relay while_loop\"\"\"\n\n def get_input(index):\n ivalue = 
loop_node.inputsAt(index)\n inode = ivalue.node()\n if inode.kind() == \"prim::Constant\":\n return _expr.const(_get_constant(inode))\n var_name = ivalue.debugName()\n assert var_name in outputs\n return _wrap_const(outputs[var_name])\n\n # Refer to the spec for prim::Loop below\n # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops\n # The first input: %max_trip_count\n # The second input: %initial_condition\n # The rest of input: loop variables\n max_loop_count = get_input(0)\n init_cond = get_input(1)\n num_loop_var = len(list(loop_node.inputs())) - 2\n init_vals = [get_input(i + 2) for i in range(num_loop_var)]\n\n # while loop has always max_loop_count being int64 max\n # max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again\n is_while_loop = (\n isinstance(max_loop_count, _expr.Constant)\n and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize\n )\n\n if is_while_loop:\n loop_iter_dtype = \"bool\"\n # while loop with non input dependent condition such as while i < 10:\n # init_cond is int, need to cast to bool to type check\n if isinstance(init_cond, _expr.Constant):\n init_cond = _op.cast(init_cond, \"bool\")\n init_loop_iter_val = init_cond\n else:\n loop_iter_dtype = \"int32\"\n # always count from 0\n init_loop_iter_val = _expr.const(0, dtype=\"int32\")\n\n body_block = list(loop_node.blocks())[0]\n block_input_names = _get_input_names(body_block)\n num_block_inputs = len(block_input_names)\n name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))\n outputs.update(name_val_pairs)\n\n def get_var(name, val):\n if val:\n checked_type = self.infer_type_with_prelude(val)\n if hasattr(checked_type, \"shape\"):\n shape = get_const_tuple(checked_type.shape)\n actual_shape = []\n for dim in shape:\n if isinstance(dim, int) and dim == 0:\n actual_shape.append(Any())\n else:\n actual_shape.append(dim)\n return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)\n else:\n return _expr.var(name, type_annotation=checked_type)\n return _expr.var(name)\n\n loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype)\n loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]]\n\n # Add non constant free variables to loop variables to prevent code blow up\n # Without this, if there are two for loops in a row, which often happens\n # if the outer loop is unrolled, the computation corresponding to the first for loop\n # is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).\n # This issue was found when converting from Stacked LSTM test. 
Torch does not add the\n # outputof the eariler loop into loop variables of the next loop.\n # So the variable corresponding to the first loop output appears free in the second\n # loop body.\n free_vars = [\n var\n for var in _get_free_vars_from_block(body_block)\n if var in outputs\n and not isinstance(outputs[var], (_expr.Constant, int, float, str))\n and outputs[var]\n ]\n\n prev_outputs = {}\n for name in free_vars:\n prev_output = outputs[name]\n new_loop_var = get_var(name, prev_output)\n prev_outputs[name] = prev_output\n outputs[name] = new_loop_var\n loop_vars.append(new_loop_var)\n init_vals.append(prev_output)\n\n def cond(*current_vals):\n i = current_vals[0]\n\n if is_while_loop:\n return _op.equal(i, _expr.const(True, \"bool\"))\n\n return _op.less(i, max_loop_count)\n\n def body(*current_vals):\n # Update loop variables using the prev iteration outputs\n assert len(current_vals) == num_block_inputs + len(free_vars)\n\n for (i, val) in enumerate(current_vals):\n if i < num_block_inputs:\n outputs[block_input_names[i]] = val\n else:\n outputs[free_vars[i - num_block_inputs]] = val\n\n block_outputs = self.convert_block(body_block, outputs)\n block_outputs += [outputs[name] for name in free_vars]\n\n if not is_while_loop:\n # iter var increment implicit in torch, so do it manually\n # for while loop, block_outputs[0] is already a boolean,\n # the result of termination check\n incr = _expr.const(1, dtype=\"int32\")\n block_outputs[0] = current_vals[0] + incr\n\n return block_outputs\n\n loop = while_loop(cond, [loop_iter_var] + loop_vars, body)\n loop_val = loop(init_loop_iter_val, *init_vals)\n\n # restore original output values for free vars\n outputs.update(prev_outputs)\n\n # The first element is a loop counter or boolean condition, ignore it\n return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]\n\n def convert_operators(self, operators, outputs, ret_names):\n \"\"\"Convert each Torch IR operators to Relay equivalent\"\"\"\n for node_name, op_node in operators:\n operator = op_node.kind()\n inputs = _get_op_inputs(op_node, outputs)\n\n if operator == \"prim::Constant\":\n outputs[node_name] = _get_constant(op_node)\n elif operator == \"prim::ListConstruct\" and _should_construct_dynamic_list(op_node):\n outputs[node_name] = self.convert_to_list_adt(inputs)\n elif operator == \"prim::ListConstruct\":\n # This assumes that no more elements will be appended to this list\n # In this case, we keep the Python list\n outputs[node_name] = inputs\n elif operator == \"prim::TupleConstruct\":\n\n def _handel_nested_input(inputs):\n inputs_list = []\n for i, _ in enumerate(inputs):\n if isinstance(inputs[i], list):\n inputs_list.append(_handel_nested_input(inputs[i]))\n else:\n assert isinstance(inputs[i], _expr.Expr)\n inputs_list.append(inputs[i])\n return _expr.Tuple(inputs_list)\n\n outputs[node_name] = _handel_nested_input(inputs)\n elif operator in [\"prim::ListUnpack\", \"prim::TupleUnpack\"]:\n assert len(inputs) == 1\n if isinstance(inputs[0], (list, _expr.TupleWrapper)):\n unpacked = inputs[0]\n else:\n unpacked = _unpack_tuple(inputs[0])\n outputs.update(zip(_get_output_names(op_node), unpacked))\n elif operator == \"prim::prim::RaiseException\":\n logger.warning(\"raising exceptions is ignored\")\n outputs[node_name] = None\n elif operator == \"prim::If\":\n if_out = self.convert_if(op_node, outputs)\n outputs[node_name] = if_out\n elif operator == \"prim::Loop\":\n loop_out = self.convert_loop(op_node, outputs)\n unpacked_names = 
_get_output_names(op_node)\n assert len(loop_out) == len(unpacked_names)\n outputs.update(zip(unpacked_names, loop_out))\n else:\n if operator not in self.convert_map:\n # At this point, the only possible ops that are not in convert_map are\n # in-place variant of ops like aten::relu_\n assert operator.endswith(\"_\")\n logger.warning(\n \"An in-place op %s found, the result will not be correct \"\n \"if the model depends on side-effects by this op.\",\n operator,\n )\n relay_op = self.convert_map[operator[:-1]]\n else:\n relay_op = self.convert_map[operator]\n\n relay_out = relay_op(\n inputs, _get_input_types(op_node, outputs, default_dtype=self.default_dtype)\n )\n self.record_output_type(relay_out)\n\n if isinstance(relay_out, tuple):\n # This is for torch operators that return multiple outputs\n # See _adaptive_max_2d above for example\n out_names = _get_output_names(op_node)\n outputs.update(zip(out_names, relay_out))\n else:\n assert op_node.outputsSize() == 1\n outputs[node_name] = relay_out\n\n return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]\n\n\ndef _pytorch_result_type(dtypes, non_tensor_inputs):\n \"\"\"This promotes TVM dtypes like PyTorch would\"\"\"\n import torch\n\n dtype_map = {\n \"float64\": torch.float64,\n \"float32\": torch.float32,\n \"float16\": torch.float16,\n \"bfloat16\": torch.bfloat16,\n \"int64\": torch.int64,\n \"int32\": torch.int32,\n \"int16\": torch.int16,\n \"int8\": torch.int8,\n \"uint8\": torch.uint8,\n \"bool\": torch.bool,\n }\n if len(dtypes) > 0:\n result_type = dtypes[0]\n for dt in dtypes[1:]:\n if dt != result_type: # we don't want to work with same types as we\n # don't do quantized here (which cannot be promoted?)\n result_type = _convert_data_type(\n str(\n torch.result_type(\n torch.zeros((), dtype=dtype_map[result_type]),\n torch.zeros((), dtype=dtype_map[dt]),\n )\n )\n )\n else:\n result_type = \"bool\" # this is the smallest type...\n for inp in non_tensor_inputs:\n result_type = _convert_data_type(\n str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp))\n )\n return result_type\n\n\n# Helper functions for operator implementation\ndef _convert_dtype_value(val):\n \"\"\"converts a PyTorch the PyTorch numeric type id to a torch scalar type.\"\"\"\n convert_torch_dtype_map = {\n 7: \"torch.float64\",\n 6: \"torch.float32\",\n 5: \"torch.float16\",\n 4: \"torch.int64\",\n 3: \"torch.int32\",\n 2: \"torch.int16\",\n 1: \"torch.int8\",\n 0: \"torch.unit8\",\n None: \"torch.int64\",\n } # Default is torch.int64\n if val in convert_torch_dtype_map:\n return _convert_data_type(convert_torch_dtype_map[val])\n else:\n msg = \"Torch data type value %d is not handled yet.\" % (val)\n raise NotImplementedError(msg)\n\n\ndef _convert_data_type(input_type, default_dtype=None):\n \"\"\"converts the PyTorch scalar type input_type to a TVM dtype.\n optionally, default_dtype can be a TVM dtype that is used\n if input_type is None (but not when it is unknown)\"\"\"\n if input_type is None and default_dtype is not None:\n return default_dtype\n\n input_type = input_type.lower()\n if input_type in [\"double\", \"float64\", \"torch.float64\"]:\n return \"float64\"\n elif input_type in [\"float\", \"float32\", \"torch.float32\"]:\n return \"float32\"\n elif input_type in [\"half\", \"float16\", \"torch.float16\"]:\n return \"float16\"\n elif input_type in [\"long\", \"int64\", \"torch.int64\"]:\n return \"int64\"\n elif input_type in [\"int\", \"int32\", \"torch.int32\"]:\n return \"int32\"\n elif input_type in 
[\"short\", \"int16\", \"torch.int16\"]:\n return \"int16\"\n elif input_type in [\"char\", \"int8\", \"torch.int8\"]:\n return \"int8\"\n elif input_type in [\"byte\", \"uint8\", \"torch.uint8\"]:\n return \"uint8\"\n elif input_type in [\"quint8\", \"torch.quint8\"]:\n return \"quint8\"\n elif input_type in [\"qint8\", \"torch.qint8\"]:\n return \"qint8\"\n elif input_type in [\"qint32\", \"torch.qint32\"]:\n return \"qint32\"\n elif input_type in [\"bool\", \"torch.bool\"]:\n return \"bool\"\n elif input_type in [\"str\"]:\n return \"str\"\n else:\n raise NotImplementedError(\"input_type {} is not handled yet\".format(input_type))\n return \"float32\" # Never reached\n\n\ndef _create_typed_const(data, dtype):\n \"\"\"create a (scalar) constant of given value and dtype.\n dtype should be a TVM dtype\"\"\"\n\n if dtype == \"float64\":\n typed_data = _expr.const(np.float64(data), dtype=dtype)\n elif dtype == \"float32\":\n typed_data = _expr.const(np.float32(data), dtype=dtype)\n elif dtype == \"float16\":\n typed_data = _expr.const(np.float16(data), dtype=dtype)\n elif dtype == \"int64\":\n typed_data = _expr.const(np.int64(data), dtype=dtype)\n elif dtype == \"int32\":\n typed_data = _expr.const(np.int32(data), dtype=dtype)\n elif dtype == \"int16\":\n typed_data = _expr.const(np.int16(data), dtype=dtype)\n elif dtype == \"int8\":\n typed_data = _expr.const(np.int8(data), dtype=dtype)\n elif dtype == \"uint8\":\n typed_data = _expr.const(np.uint8(data), dtype=dtype)\n else:\n raise NotImplementedError(\"input_type {} is not handled yet\".format(dtype))\n return typed_data\n\n\ndef _wrap_const(c):\n if not isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):\n return _expr.const(c)\n return c\n\n\ndef _run_jit_passes(graph):\n \"\"\"The inline pass is necessary to unwrap prim::CallMethod\"\"\"\n # pylint: disable=c-extension-no-member\n import torch\n\n if is_version_greater_than(\"1.5.1\"):\n # This is required for torchvision detection models from 1.6 above\n # It is the same as _jit_pass_inline, except that it has some special\n # case behaviors for some ops such as aten::__interpolate()\n torch._C._jit_pass_onnx_function_substitution(graph)\n else:\n torch._C._jit_pass_inline(graph)\n\n\ndef _get_tensor_and_var(torch_tensor, name):\n tensor = tvm.nd.array(torch_tensor.cpu().numpy())\n var = _expr.var(name, shape=tensor.shape, dtype=tensor.dtype)\n return tensor, var\n\n\ndef _get_output_name(node):\n assert node.outputsSize() == 1\n return node.output().debugName()\n\n\ndef _get_output_names(node):\n return [output.debugName() for output in node.outputs()]\n\n\ndef _get_input_names(node_or_graph):\n return [inp.debugName() for inp in node_or_graph.inputs()]\n\n\ndef _get_op_inputs(op_node, outputs):\n return [outputs[name] for name in _get_input_names(op_node)]\n\n\ndef _get_node_type(node):\n assert node.outputsSize() == 1\n return node.output().type().kind()\n\n\ndef _get_uses(node):\n uses = []\n for output in node.outputs():\n uses += output.uses()\n return uses\n\n\ndef _get_users(node):\n return [use.user for use in _get_uses(node)]\n\n\ndef _getattr_attr_name(node):\n attribute_names = node.attributeNames()\n assert len(attribute_names) == 1\n attr_name = node.s(attribute_names[0])\n return attr_name\n\n\ndef _getattr_full_name(getattrs, sep=\".\"):\n return sep.join([_getattr_attr_name(node) for node in getattrs])\n\n\ndef _get_pytorch_value_type(typ, default_dtype=\"float32\"):\n kind = typ.kind()\n if kind == \"TensorType\":\n if typ.scalarType() is None:\n # Tensor's type 
can be unknown if we use torch.jit.script(...)\n # Defaults can be passed in, if not it is float32\n logger.warning(\"Untyped Tensor found, assume it is %s\", default_dtype)\n return default_dtype\n else:\n return _convert_data_type(typ.scalarType())\n\n elif kind == \"ListType\":\n return \"ListType\"\n elif kind in [\"IntType\", \"FloatType\", \"BoolType\", \"StringType\", \"OptionalType\"]:\n pt_dtype = str(typ).lower()\n dtype = pt_dtype if pt_dtype == \"OptionalType\" else _convert_data_type(pt_dtype)\n return dtype\n else:\n return \"UnsupportedType\"\n\n\ndef _get_input_types(op_node, outputs, default_dtype=\"float32\"):\n \"\"\"Returns a TVM dtype for each input nodes derived from the torch type\"\"\"\n in_types = []\n for inp in op_node.inputs():\n if inp.node().kind() == \"prim::GetAttr\":\n # GetAttr nodes always return None when we call scalarType() on it\n name = inp.debugName()\n assert name in outputs\n if isinstance(outputs[name], _expr.Var):\n in_types.append(outputs[name].type_annotation.dtype)\n else:\n # For quantized modules with parameters, here we would get\n # \"prim::GetAttr[name=\"_packed_params\"]\". Since the dtype corresponding to\n # _packed_params is not needed by quantized ops, we return an arbitrary type.\n in_types.append(default_dtype)\n else:\n in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))\n\n return in_types\n\n\ndef _get_constant(node):\n \"\"\"Retrieve a constant associated with this prim::Constant node\"\"\"\n attribute_names = node.attributeNames()\n num_attributes = len(attribute_names)\n\n if num_attributes == 1:\n attr_name = attribute_names[0]\n ty = node.output().type().kind()\n\n if ty == \"IntType\":\n return node.i(attr_name)\n elif ty == \"BoolType\":\n return bool(node.i(attr_name))\n elif ty in [\"FloatType\", \"LongType\"]:\n return node.f(attr_name)\n elif ty in [\"TensorType\", \"CompleteTensorType\"]:\n tensor = node.t(attr_name)\n if tensor.is_cuda:\n tensor = tensor.cpu()\n if len(tensor.shape) == 0: # tensor(0.1)\n # TODO(t-vi): When is this needed?\n return tensor.item()\n return _wrap_const(tensor.numpy())\n elif ty in [\"DeviceObjType\", \"StringType\"]:\n return node.s(attr_name)\n elif ty == \"FunctionType\":\n return None\n else:\n raise NotImplementedError(\"Unsupported type: %s\" % ty)\n else:\n assert num_attributes == 0\n return None\n\n\ndef _get_operator_nodes(nodes):\n \"\"\"Returns torch IR nodes that need conversion to Relay\"\"\"\n ops = []\n # Traverse nodes and add to graph\n for node in nodes:\n if node.outputsSize() == 0:\n continue\n if node.outputsSize() > 1:\n node_name = \"_\".join(_get_output_names(node))\n else:\n node_name = _get_output_name(node)\n\n if node.kind() != \"prim::GetAttr\":\n ops.append((node_name, node))\n\n return ops\n\n\ndef _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype=\"float32\"):\n \"\"\"\n Return Relay vars from input shapes and create entries based on\n expected graph inputs - to allow translation\n \"\"\"\n\n graph_inputs = list(graph.inputs())\n if is_module:\n # a module has \"self\" as first input, which we do not need/want\n graph_inputs = graph_inputs[1:]\n\n if not isinstance(input_infos, list):\n msg = \"Graph inputs input_infos should be a list\"\n raise RuntimeError(msg)\n\n if len(graph_inputs) != len(input_infos):\n msg = \"PyTorch has {} inputs and input_infos lists {}.\".format(\n len(graph_inputs), len(input_infos)\n )\n raise RuntimeError(msg)\n\n def get_relay_ty(ishape, itype, pt_type):\n if 
pt_type.kind() == \"TensorType\":\n if not (_is_int_seq(ishape) or len(ishape) == 0):\n msg = \"Shape for Tensors must be lists of ints\"\n raise RuntimeError(msg)\n if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (\n pt_type.sizes() is not None\n and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])\n ):\n msg = \"Shapes of input list and information in the graph do not match\"\n raise RuntimeError(msg)\n pt_dtype = pt_type.scalarType()\n if not pt_dtype and itype:\n pt_dtype = itype\n dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)\n return TensorType(ishape, dtype)\n elif pt_type.kind() == \"TupleType\":\n if not isinstance(ishape, tuple):\n msg = \"Shapes for tuples must be tuples\"\n raise RuntimeError(msg)\n return TupleType(\n [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]\n )\n elif pt_type.kind() == \"ListType\":\n if not isinstance(ishape, list):\n msg = \"Shapes for lists must be lists\"\n raise RuntimeError(msg)\n pt_elemtype = pt_type.getElementType()\n elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]\n if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):\n msg = \"List elements need have identical types\"\n raise RuntimeError(msg)\n rlist, _, _ = prelude.mod.get_type(\"List\")\n return rlist(elem_tys[0])\n elif pt_type.kind() == \"OptionalType\":\n # we do not support None yet, so we fill in the type\n return get_relay_ty(ishape, itype, pt_type.getElementType())\n # TODO: scalar inputs\n raise NotImplementedError(\"unsupported input type\")\n\n input_vars = {}\n\n new_input_infos = []\n for num, inp in enumerate(input_infos):\n if not isinstance(inp, tuple):\n msg = \"Graph input {} is not a tuple\".format(num)\n raise RuntimeError(msg)\n if len(inp) != 2 or not isinstance(inp[0], str):\n msg = (\n \"Graph input {} is not valid,\"\n \" expected ('name', shape) or ('name', (shape, dtype))\".format(inp)\n )\n raise RuntimeError(msg)\n if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):\n new_input_infos.append((inp[0], (inp[1], default_dtype)))\n else:\n new_input_infos.append(inp)\n\n input_types = [\n (name, get_relay_ty(info[0], info[1], gi.type()))\n for (name, info), gi in zip(new_input_infos, graph_inputs)\n ]\n\n ir_inputs = [i.debugName() for i in graph_inputs]\n for ir_input, (name, itype) in zip(ir_inputs, input_types):\n inp = _expr.var(name, type_annotation=itype)\n # Translate from graph input to user input name\n input_vars[ir_input] = inp\n\n return input_vars\n\n\ndef _unpack_tuple(tup):\n def unpack(tup, num_fields):\n return [_expr.TupleGetItem(tup, i) for i in range(num_fields)]\n\n if isinstance(tup, _expr.Tuple):\n return unpack(tup, len(tup.fields))\n elif isinstance(tup.type_annotation, TupleType):\n return unpack(tup, len(tup.type_annotation.fields))\n # shouldn't happen\n assert False\n\n\ndef _get_free_vars_from_block(block):\n block_inp_names = _get_input_names(block)\n bound_names = block_inp_names\n free_vars = set()\n\n for node in block.nodes():\n inp_names = _get_input_names(node)\n list_diff = [name for name in inp_names if name not in bound_names]\n free_vars.update(list_diff)\n bound_names += _get_output_names(node)\n\n return free_vars\n\n\ndef get_use_chains(root_node, terminate=lambda _: False):\n \"\"\"\n Track a chain of users of this node forward, returning a list of chains\n See get_attr_chains below for its usage\n \"\"\"\n\n def concat_lists(lists):\n return 
itertools.chain.from_iterable(lists)\n\n def inner(current, accum):\n users = _get_users(current)\n\n if not users or terminate(users):\n return [accum]\n\n return concat_lists([inner(nxt, accum + [nxt]) for nxt in users])\n\n return inner(root_node, [root_node])\n\n\ndef get_attr_chains(root_getattr_node):\n \"\"\"Returns chains of attribute access starting from root_getattr_node\n\n For example, given attribute \"block\", as in \"self.block\" when \"self\" points\n to the top level torch.nn.Module, it returns lists of attribute \"chains\",\n e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']\n\n These sets of attributes form full attribute accessors. For example,\n \"self.block.1\", \"self.block.2\" will return the second and third submodule,\n and \"self.block.0._packed_params\" will return the parameters of the first\n submodule.\n \"\"\"\n\n def terminate(users):\n next_attrs = [user for user in users if user.kind() == \"prim::GetAttr\"]\n return len(next_attrs) == 0\n\n return get_use_chains(root_getattr_node, terminate)\n\n\ndef convert_params(graph, state_dict, use_parser_friendly_name=False):\n \"\"\"\n Return Relay vars and TVM NDArrays for input parameters\n A chain of prim::GetAttr nodes is processed one at a time\n \"\"\"\n getattr_nodes = graph.findAllNodes(\"prim::GetAttr\", recurse=True)\n params = {}\n param_tensors = {}\n packed_param_map = {}\n vars_by_name = {}\n seen = set()\n attr_name_sep = \"_\" if use_parser_friendly_name else \".\"\n\n for node in getattr_nodes:\n if _get_output_name(node) in seen:\n continue\n\n for getattrs in get_attr_chains(node):\n seen.update(map(_get_output_name, getattrs))\n\n full_attr = _getattr_full_name(getattrs, attr_name_sep)\n full_attr_node_name = _get_output_name(getattrs[-1])\n\n if full_attr.endswith(\"_packed_params\"): # for quantized models\n packed_param_map[full_attr_node_name] = full_attr\n elif full_attr in state_dict:\n if full_attr in vars_by_name:\n var = vars_by_name[full_attr]\n else:\n torch_tensor = state_dict[full_attr]\n tensor, var = _get_tensor_and_var(torch_tensor, full_attr)\n param_tensors[full_attr] = tensor\n vars_by_name[full_attr] = var\n params[full_attr_node_name] = var\n\n return params, param_tensors, packed_param_map\n\n\ndef get_all_op_names(graph):\n \"\"\"Return all operator names in the input graph\"\"\"\n nodes = list(graph.nodes())\n prim_with_blocks = [\"prim::If\", \"prim::Loop\"]\n for prim in prim_with_blocks:\n prim_nodes = graph.findAllNodes(prim, recurse=True)\n for prim_node in prim_nodes:\n for block in prim_node.blocks():\n nodes += block.nodes()\n return set(node.kind() for node in nodes)\n\n\ndef from_pytorch(\n script_module,\n input_infos,\n custom_convert_map=None,\n default_dtype=\"float32\",\n use_parser_friendly_name=False,\n keep_quantized_weight=False,\n):\n \"\"\"Load PyTorch model in the form of a scripted PyTorch model and convert into relay.\n The companion parameters will be handled automatically.\n\n Parameters\n ----------\n script_module : TopLevelTracedModule object\n TorchScripted PyTorch graph\n Note: We currently only support traces (ie: torch.jit.trace(model, input))\n\n input_infos : List of tuples\n Can be (input name, input shape) or (input name, (input shape, input types))\n Graph level input shape and type list\n The same input names need to be used for deployment, so choose easy to\n remember names (such as: input0, input1)\n e.g.\n [('input0', (1, 2)), ('input1', (3, 4))]\n or\n [('input0', ((1, 2), 'int')), ('input1', ((3, 4), 
'float'))]\n\n custom_convert_map : Dictionary of str to Relay op\n A custom op conversion map in the same format as _convert_map above\n\n default_type : str\n The default dtype to use when type information is not provided by PyTorch.\n\n use_parser_friendly_name : bool\n When True, replace '.' with `_' in a original parameter name.\n The Relay text parser treats a variable name followed by a period as a tuple element access,\n so a variable name like \"dense.weight\" cannot be parsed correctly.\n Use this option when you want to run the AnnotateSpans pass on the imported module.\n\n keep_quantized_weight : bool\n Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights\n in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use\n a PyTorch function to unpack quantized weights into float32 arrays and quantization\n parameters. By default, we return float32 weights and rely on the QNN lowering and the\n Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,\n we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,\n we quantize weights in the frontend using a function that is equivalent to\n qnn.op.quantize(...) operating on Numpy arrays.\n\n Returns\n -------\n mod : tvm.IRModule\n The module that optimizations will be performed on.\n\n params : dict of str to tvm.runtime.NDArray\n Dict of converted parameters stored in tvm.runtime.ndarray format\n \"\"\"\n import torch\n\n mod = tvm.IRModule()\n prelude = Prelude(mod)\n\n converter = PyTorchOpConverter(prelude, default_dtype)\n\n graph = script_module.graph.copy()\n _run_jit_passes(graph)\n\n if custom_convert_map:\n converter.update_convert_map(custom_convert_map)\n\n op_names = get_all_op_names(graph)\n converter.report_missing_conversion(op_names)\n\n is_module = isinstance(script_module, torch.jit.ScriptModule)\n params = script_module.state_dict() if is_module else {}\n outputs = _get_relay_input_vars(\n graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module\n )\n\n if use_parser_friendly_name:\n new_names = [key.replace(\".\", \"_\") for key in params.keys()]\n params = dict(zip(new_names, params.values()))\n\n param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name)\n\n tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}\n\n outputs.update(param_vars)\n ret_name = _get_input_names(graph.return_node())\n\n # For quantized models\n quantized_ops = set([\"aten::quantize_per_tensor\", \"quantized::linear_dynamic\"])\n if len(quantized_ops.intersection(set(op_names))) > 0:\n weight_quant_params = qnn_torch.get_weight_quant_params(\n script_module, packed_param_map.values()\n )\n input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)\n qnn_torch.add_quant_params_to_outputs(\n outputs,\n packed_param_map,\n weight_quant_params,\n input_scales_for_bias,\n keep_quantized_weight,\n )\n qnn_torch.add_quant_params(tvm_params, weight_quant_params)\n converter.update_convert_map(qnn_torch.convert_map)\n\n ret = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)[0]\n if isinstance(ret, list):\n # ListConstruct kept original python list. 
Convert to tuple.\n ret = _expr.Tuple(ret)\n\n # Separate data inputs and parameters to make sure data inputs come first.\n func_args = []\n data_inputs = []\n for arg in _analysis.free_vars(ret):\n if arg.name_hint not in tvm_params.keys():\n data_inputs.append(arg)\n else:\n func_args.append(arg)\n func_args = data_inputs + func_args\n\n mod[\"main\"] = tvm.relay.Function(func_args, ret)\n\n return transform.RemoveUnusedFunctions()(mod), tvm_params\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"CMSIS-NN integration tests: Conv2D\"\"\"\nimport itertools\nimport numpy as np\nimport pytest\nimport tvm\nfrom tvm import relay\nfrom tvm.relay.op.contrib import cmsisnn\n\n\nfrom tests.python.relay.aot.aot_test_utils import (\n AOTTestModel,\n AOT_CORSTONE300_RUNNER,\n AOT_DEFAULT_RUNNER,\n generate_ref_data,\n compile_and_run,\n)\nfrom utils import (\n skip_if_no_reference_system,\n make_module,\n count_num_calls,\n get_range_for_dtype_str,\n get_same_padding,\n get_conv2d_qnn_params,\n make_qnn_relu,\n)\n\n\ndef make_model(pool_op, shape, pool_size, strides, padding, dtype, scale, zero_point, relu_type):\n \"\"\"Return a model and any parameters it may have\"\"\"\n op = relay.var(\"input\", shape=shape, dtype=dtype)\n pad_ = (0, 0, 0, 0)\n if padding == \"SAME\":\n dilation = (1, 1)\n pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation, strides)\n op = relay.nn.pad(\n op,\n pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],\n pad_value=zero_point,\n pad_mode=\"constant\",\n )\n if pool_op == relay.nn.avg_pool2d:\n op = relay.cast(op, \"int32\")\n op = pool_op(\n op, pool_size=pool_size, strides=strides, padding=pad_, ceil_mode=True, layout=\"NHWC\"\n )\n if pool_op == relay.nn.avg_pool2d:\n op = relay.cast(op, dtype)\n op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)\n return op\n\n\[email protected]_cmsisnn\[email protected](\"in_shape\", [(1, 28, 28, 12), (1, 64, 100, 4)])\[email protected](\n \"pool_size, strides, padding\", [((3, 3), (2, 2), \"SAME\"), ((2, 2), (1, 1), \"VALID\")]\n)\[email protected](\"relu_type\", [\"RELU\"])\[email protected](\"pool_type\", [relay.nn.max_pool2d, relay.nn.avg_pool2d])\[email protected](\"zero_point, scale\", [(-34, 0.0256)])\ndef test_op_int8(\n in_shape,\n pool_size,\n strides,\n padding,\n relu_type,\n pool_type,\n zero_point,\n scale,\n):\n interface_api = \"c\"\n use_unpacked_api = True\n test_runner = AOT_CORSTONE300_RUNNER\n\n dtype = \"int8\"\n\n model = make_model(\n pool_type,\n in_shape,\n pool_size,\n strides,\n padding,\n dtype,\n scale,\n zero_point,\n relu_type,\n )\n orig_mod = make_module(model)\n\n cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)\n\n # validate pattern matching\n attrs = [\n cmsisnn_mod[var.name_hint].attrs\n for var in 
cmsisnn_mod.get_global_vars()\n if cmsisnn_mod[var.name_hint].attrs\n ]\n assert any(attrs), \"At least one function with external attributes was expected.\"\n\n compilers = [\n key == \"Compiler\" and value == \"cmsis-nn\" for attr in attrs for key, value in attr.items()\n ]\n assert any(compilers), \"Module does not contain function for cmsisnn target.\"\n\n assert count_num_calls(orig_mod) == count_num_calls(\n cmsisnn_mod\n ), \"Number of calls changed during partitioning\"\n\n # validate the output\n in_min, in_max = get_range_for_dtype_str(dtype)\n np.random.seed(0)\n inputs = {\n \"input\": np.random.randint(in_min, high=in_max, size=in_shape, dtype=\"int8\"),\n }\n output_list = generate_ref_data(orig_mod[\"main\"], inputs)\n compile_and_run(\n AOTTestModel(\n module=cmsisnn_mod,\n inputs=inputs,\n outputs=output_list,\n params=None,\n output_tolerance=1,\n ),\n test_runner,\n interface_api,\n use_unpacked_api,\n )\n\n\[email protected]_cmsisnn\ndef test_invalid_parameters():\n model = make_model(\n pool_op=relay.nn.avg_pool2d,\n shape=(1, 28, 28, 12),\n pool_size=(1, 1),\n strides=(1, 1),\n padding=\"VALID\",\n dtype=\"uint8\",\n scale=1,\n zero_point=-33,\n relu_type=\"RELU\",\n )\n\n orig_mod = make_module(model)\n cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)\n\n # validate pattern matching\n attrs = [\n cmsisnn_mod[var.name_hint].attrs\n for var in cmsisnn_mod.get_global_vars()\n if cmsisnn_mod[var.name_hint].attrs\n ]\n assert not any(attrs), \"No function should have an external attribute.\"\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n" ]
[ [ "torch.zeros", "torch._C._jit_pass_onnx_function_substitution", "numpy.float16", "numpy.int32", "numpy.int8", "numpy.uint8", "torch._C._jit_pass_inline", "numpy.finfo", "numpy.ones", "numpy.int64", "numpy.int16", "numpy.float64", "numpy.isscalar", "numpy.float32", "torch.get_default_dtype", "numpy.array" ], [ "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexandresalvatierra/python-data-preprocessing
[ "48c3f465db0b5b5785d217d0f8c9524bb3980c48" ]
[ "missing-data-one-hot-encoding.py" ]
[ "import pandas as pd\npd.set_option( 'display.max_columns', 24 )\n\nfile_path = 'C:/Users/profAlexandre/Desktop/inteligencia artificial/data-preprocessing/dataset/'\nfile_name = 'traffic-collision-data-from-2010-to-present.csv'\n\ndf = pd.read_csv( file_path + file_name )\n\n# show head from dataset\n#print( df.head() )\n\n# show total rows and columns from dataset\n#print( df.shape )\n\n# encoding 'Area Name'\nencoding = pd.get_dummies( df['Area Name'] )\n\n#print( encoding.head() )\n\n# concat encoding to dataset\nconcat = pd.concat( [df, encoding], axis = 1 )\n\n# remove column Area Name\nconcat.drop( 'Area Name', axis = 1 )\n\nprint( concat.head() )" ]
[ [ "pandas.set_option", "pandas.read_csv", "pandas.concat", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
giprayogo/binyard
[ "c1cfa880cb9907416da2363fa0e4ca2de920543e" ]
[ "binyard/plot_column.py" ]
[ "#!/usr/bin/env python\n# for all to often task: text data file, plot single column\n\nimport matplotlib\nmatplotlib.rc('axes.formatter', useoffset=False)\nimport matplotlib.pyplot as plt\nimport argparse\nfrom numpy import arange\nfrom numpy import loadtxt\nfrom numpy import transpose\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--file', required=True, nargs='+')\nparser.add_argument('-d', '--domain-column')\nparser.add_argument('-c', '--columns', nargs='+', required=True)\nargs = parser.parse_args()\n\ndatafiles = args.file\n\nfig, ax = plt.subplots()\nfor datafile in datafiles:\n columns = map(int, args.columns)\n data = transpose(loadtxt(datafile, comments='#', usecols=columns))\n\n try:\n domain_column = int(args.domain_column)\n domain = loadtxt(datafile, comments='#', usecols=domain_column)\n except TypeError:\n domain = arange(0, data.shape[-1])\n\n if len(data.shape) > 1:\n for _ in data:\n ax.plot(domain, _)\n else:\n ax.plot(domain, data)\n\nfig.tight_layout()\nplt.show()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.rc", "matplotlib.pyplot.show", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
c-hydro/hmc
[ "66470234e126e4a727e1faf2fe64bd58547220ed" ]
[ "tools/processing_tool_ensemble_maker/lib_utils_time.py" ]
[ "\"\"\"\nLibrary Features:\n\nName: lib_utils_time\nAuthor(s): Fabio Delogu ([email protected])\nDate: '20211208'\nVersion: '1.0.0'\n\"\"\"\n\n#######################################################################################\n# Libraries\nimport logging\nimport pandas as pd\nfrom datetime import date\n\nfrom tools.processing_tool_datasets_merger.lib_info_args import logger_name\n\n# Logging\nlog_stream = logging.getLogger(logger_name)\n#######################################################################################\n\n# -------------------------------------------------------------------------------------\n# Method to set time run\ndef set_time(time_run_args=None, time_run_file=None, time_format='%Y-%m-%d %H:$M',\n time_period=1, time_frequency='H', time_rounding='H', time_reverse=True):\n\n logging.info(' ---> Set time run ... ')\n if time_run_args is not None:\n time_run = time_run_args\n logging.info(' ----> Time ' + time_run + ' set by argument')\n elif (time_run_args is None) and (time_run_file is not None):\n time_run = time_run_file\n logging.info(' ----> Time ' + time_run + ' set by user')\n elif (time_run_args is None) and (time_run_file is None):\n time_now = date.today()\n time_run = time_now.strftime(time_format)\n logging.info(' ----> Time ' + time_run + ' set by system')\n else:\n logging.info(' ---> Set time run ... FAILED')\n logging.error(' ===> Time is not correctly set')\n raise IOError('Time type or format is wrong')\n\n logging.info(' ---> Set time run ... DONE')\n\n time_tmp = pd.Timestamp(time_run)\n time_run = time_tmp.floor(time_rounding)\n\n time_now = time_tmp.floor('H')\n\n if time_period > 0:\n time_range = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)\n else:\n logging.warning(' ===> TimePeriod must be greater then 0. TimePeriod is set automatically to 1')\n time_range = pd.DatetimeIndex([time_now], freq=time_frequency)\n\n if time_reverse:\n time_range = time_range[::-1]\n\n return time_now, time_run, time_range\n\n# -------------------------------------------------------------------------------------\n\n" ]
[ [ "pandas.Timestamp", "pandas.DatetimeIndex", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
JurriaanBerger/AdventOfCode2021
[ "666d9b36d87bfaec3da8b700832bf0a5791b840a" ]
[ "day11/flash.py" ]
[ "import numpy as np\n\n\ndef read_octo():\n octo_map = []\n\n file = open('octo.txt', \"r\")\n f1 = file.read()\n file.close()\n\n f2 = f1.split(\"\\n\")\n\n for h in f2:\n list_h = []\n list_h[:0] = h\n octo_map.append([int(l) for l in list_h])\n\n return np.array(octo_map)\n\n\ndef energy_increase(cur_energy_map):\n new_energy_map = cur_energy_map + np.ones([len(cur_energy_map), len(cur_energy_map)])\n return new_energy_map\n\n\ndef find_flash(cur_energy_map,flashed_map):\n i = 0\n\n while i < len(cur_energy_map[0]):\n flashed = False\n for j in range(len(cur_energy_map)):\n if flashed_map[i, j] == 0 and cur_energy_map[i, j] > 9:\n flashed = True\n flashed_map[i, j] = 1\n for p in range(max(i - 1, 0), min(i + 2,len(cur_energy_map))):\n for q in range(max(j - 1, 0), min(j + 2, len(cur_energy_map))):\n if flashed_map[p, q] == 0: cur_energy_map[p, q] += 1\n break\n\n if flashed: i = 0\n else: i += 1\n\n return cur_energy_map, flashed_map\n\n\ndef reset_flashed(cur_energy_map, flashed_map):\n to_subtract = cur_energy_map*flashed_map\n new_energy_map = cur_energy_map-to_subtract\n return new_energy_map\n\n\n# Part 1\nenergy_map = read_octo()\n\ntotal_sum = 0\nfor e in range(0, 100):\n energy_map_increase = energy_increase(energy_map)\n did_flash_map = np.zeros([len(energy_map), len(energy_map)])\n energy_map, did_flash_map = find_flash(energy_map_increase, did_flash_map)\n energy_map = reset_flashed(energy_map, did_flash_map)\n total_sum += np.sum(did_flash_map)\n\n\nprint('ANSWER 1:', total_sum)\n\n\n# Part 2\nenergy_map = read_octo()\ndid_flash_map = np.zeros([len(energy_map), len(energy_map)])\n\nstep_counter = 0\nwhile np.sum(did_flash_map) < 100:\n energy_map_increase = energy_increase(energy_map)\n did_flash_map = np.zeros([len(energy_map), len(energy_map)])\n energy_map, did_flash_map = find_flash(energy_map_increase, did_flash_map)\n energy_map = reset_flashed(energy_map, did_flash_map)\n step_counter += 1\n\nprint('ANSWER 2:', step_counter)\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
7D24H/testGPU
[ "d9341e1a60d51c895c7e5090990bc13920ba7257" ]
[ "ops.py" ]
[ "\nfrom glob import glob\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport cv2\n\n\ndef block_patch(input, patch_size=15, margin=5):\n\tshape = input.get_shape().as_list()\n\n\t#create patch in random size\n\tpad_size = tf.random_uniform([2], minval=int(patch_size*0.1), maxval=patch_size, dtype=tf.int32)\n\tpatch = tf.zeros([pad_size[0], pad_size[1], shape[-1]], dtype=tf.float32)\n\n\th_ = tf.random_uniform([1], minval=margin, maxval=shape[0]-pad_size[0]-margin, dtype=tf.int32)[0]\n\tw_ = tf.random_uniform([1], minval=margin, maxval=shape[1]-pad_size[1]-margin, dtype=tf.int32)[0]\n\n\tpadding = [[h_, shape[0]-h_-pad_size[0]], [w_, shape[1]-w_-pad_size[1]], [0, 0]]\n\tpadded = tf.pad(patch, padding, \"CONSTANT\", constant_values=1)\n\n\tcoord = h_, w_\n\n\tres = tf.multiply(input, padded)\n\n\treturn res, padded, coord, pad_size\n\n#function to get training data\ndef load_train_data(args):\n\tpaths = os.path.join(args.data, \"img_align_celeba/*.jpg\")\n\tdata_count = len(glob(paths))\n\n\tfilename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(paths))\n\n\timage_reader = tf.WholeFileReader()\n\t_, image_file = image_reader.read(filename_queue)\n\timages = tf.image.decode_jpeg(image_file, channels=3)\n\n\n\t#input image range from -1 to 1\n\t#center crop 32x32 since raw images are not center cropped.\n\timages = tf.image.central_crop(images, 0.75)\n\timages = tf.image.resize_images(images ,[args.input_height, args.input_width])\n\timages = tf.image.convert_image_dtype(images, dtype=tf.float32) / 127.5 - 1\n\n\torig_images = images\n\timages, mask, coord, pad_size = block_patch(images, patch_size=args.patch_size, margin=args.margin)\n\tmask = tf.reshape(mask, [args.input_height, args.input_height, 3])\n\n\t#flip mask values\n\tmask = -(mask - 1)\n\timages += mask\n\n\torig_imgs, perturbed_imgs, mask, coord, pad_size = tf.train.shuffle_batch([orig_images, images, mask, coord, pad_size],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t batch_size=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t capacity=args.batch_size*2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t min_after_dequeue=args.batch_size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\n\n\treturn orig_imgs, perturbed_imgs, mask, coord, pad_size, data_count\n\ndef load_test_data(args):\n\tpaths = glob(\"./data/test/*.jpg\")\n\tdata_count = len(paths)\n\n\tfilename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(paths))\n\n\timage_reader = tf.WholeFileReader()\n\t_, image_file = image_reader.read(filename_queue)\n\timages = tf.image.decode_jpeg(image_file, channels=3)\n\n\n\t#input image range from -1 to 1\n\t# uncomment to center crop\n\t# images = tf.image.central_crop(images, 0.5)\n\timages = tf.image.resize_images(images ,[args.input_height, args.input_width])\n\timages = tf.image.convert_image_dtype(images, dtype=tf.float32) / 127.5 - 1\n\n\torig_images = images\n\timages, mask, coord, pad_size = block_patch(images, margin=args.margin)\n\tmask = tf.reshape(mask, [args.input_height, args.input_height, 3])\n\n\t#flip mask values\n\tmask = -(mask - 1)\n\timages += mask\n\n\torig_imgs, mask, test_imgs = tf.train.batch([orig_images, mask, images],\n\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\tcapacity=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t )\n\n\n\treturn orig_imgs, test_imgs, mask, data_count\n\n\n#function to save images in tile\n#comment this function block if you don't have opencv\ndef img_tile(epoch, args, imgs, aspect_ratio=1.0, tile_shape=None, 
border=1, border_color=0):\n\tif imgs.ndim != 3 and imgs.ndim != 4:\n\t\traise ValueError('imgs has wrong number of dimensions.')\n\tn_imgs = imgs.shape[0]\n\n\ttile_shape = None\n\t# Grid shape\n\timg_shape = np.array(imgs.shape[1:3])\n\tif tile_shape is None:\n\t\timg_aspect_ratio = img_shape[1] / float(img_shape[0])\n\t\taspect_ratio *= img_aspect_ratio\n\t\ttile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))\n\t\ttile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))\n\t\tgrid_shape = np.array((tile_height, tile_width))\n\telse:\n\t\tassert len(tile_shape) == 2\n\t\tgrid_shape = np.array(tile_shape)\n\n\t# Tile image shape\n\ttile_img_shape = np.array(imgs.shape[1:])\n\ttile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border\n\n\t# Assemble tile image\n\ttile_img = np.empty(tile_img_shape)\n\ttile_img[:] = border_color\n\tfor i in range(grid_shape[0]):\n\t\tfor j in range(grid_shape[1]):\n\t\t\timg_idx = j + i*grid_shape[1]\n\t\t\tif img_idx >= n_imgs:\n\t\t\t\t# No more images - stop filling out the grid.\n\t\t\t\tbreak\n\t\t\timg = imgs[img_idx]\n\t\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\t\t\tyoff = (img_shape[0] + border) * i\n\t\t\txoff = (img_shape[1] + border) * j\n\t\t\ttile_img[yoff:yoff+img_shape[0], xoff:xoff+img_shape[1], ...] = img\n\n\tcv2.imwrite(args.images_path+\"/img_\"+str(epoch) + \".jpg\", (tile_img + 1)*127.5)\n" ]
[ [ "tensorflow.image.central_crop", "tensorflow.multiply", "numpy.sqrt", "tensorflow.train.match_filenames_once", "tensorflow.zeros", "tensorflow.WholeFileReader", "tensorflow.image.resize_images", "tensorflow.reshape", "tensorflow.pad", "tensorflow.image.convert_image_dtype", "tensorflow.train.batch", "numpy.array", "tensorflow.random_uniform", "tensorflow.train.shuffle_batch", "numpy.empty", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Lakoc/SFC_project
[ "00b98f0d9651138c30f567ae4775a624e511a392" ]
[ "gui/utils.py" ]
[ "import numpy as np\n\n\ndef remove_values_from_list(the_list, val):\n return [value for value in the_list if value != val]\n\n\ndef remove_values_from_list_to_float(the_list, val):\n return [float(value) for value in the_list if value != val]\n\n\ndef load_3d_arr_from_string(arr):\n arr = arr.replace('[', '').replace(']', '').split('\\n')\n count = arr.count('') + 1\n\n arr = remove_values_from_list(arr, '')\n group_size = len(arr) // count\n groups = [remove_values_from_list_to_float(val.split(' '), '') for group in range(count) for val in\n arr[group * group_size: (group + 1) * group_size]]\n groups = [groups[group * group_size: (group + 1) * group_size] for group in range(count)]\n return np.array(groups)\n\n\ndef normalize_config(config):\n config['variances'] = load_3d_arr_from_string(config['variances'])\n config['means'] = load_3d_arr_from_string(config['means'])[0, :]\n config['counts'] = load_3d_arr_from_string(config['counts'])[0, 0, :]\n config['layers'] = load_3d_arr_from_string(config['layers'])[0, 0, :]\n config['layer'] = int(config['layer'])\n config['batch_size'] = int(config['batch_size'])\n config['iterations'] = int(config['iterations'])\n config['epsilon'] = float(config['epsilon'])\n config['eta'] = float(config['eta'])\n config['beta1'] = float(config['beta1'])\n config['beta2'] = float(config['beta2'])\n config['a_func'] = config['a_func'][0].casefold()\n config['optimizer'] = config['optimizer'][0]\n return config\n\n\ndef validate_config(config):\n errors = []\n n_clusters = config['counts'].shape[0]\n if config['means'].shape[0] != n_clusters or config['variances'].shape[0] != n_clusters:\n errors.append(\n f\"Count of clusters differ in mean, count and variance field - {n_clusters}, {config['means'].shape[0]}, \"\n f\"{config['variances'].shape[0]}.\")\n cluster_dimensionality = config['means'].shape[1]\n if config['variances'].shape[1] != cluster_dimensionality or config['variances'].shape[2] != cluster_dimensionality:\n errors.append(\n f\"Clusters differ in mean, and variance field - {cluster_dimensionality}, {config['variances'].shape[1:]}.\")\n if len(config['layers']) < 3:\n errors.append(\n f\"Ensure to have at least 3 layers.\")\n if config['layer'] >= len(config['layers']):\n errors.append(\n f\"Layer index out of range.\")\n elif config['layers'][config['layer']] != 2:\n errors.append(\n f\"Selected layer does not have specified dimensionality (2).\")\n if config['layers'][0] != config['layers'][-1]:\n errors.append(\n f\"Input and output layer dimensionality differs.\")\n for index, layer in enumerate(config['layers']):\n if layer < 1:\n errors.append(\n f\"Layer {index} has invalid dimensionality - {layer}.\")\n for key in ['layer', 'batch_size', 'iterations', 'epsilon', 'beta1', 'beta2', 'eta']:\n if config[key] < 0:\n errors.append(\n f\"Invalid option for {key} - {config[key]}.\")\n return errors\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ASMDS/PATREC
[ "091df6ec20e0736340a2b2ff9a25ac81bec48259" ]
[ "utils/Dataset.py" ]
[ "\nimport pandas as pd\nimport numpy as np\nfrom utils.DatasetFilter import DatasetFilter\nfrom utils.DatasetSplitter import DatasetSplitter\n\n\nclass Dataset:\n \n def __init__(self, dataset_options):\n self.options = dataset_options;\n self.df = None;\n self.columns_df = None;\n self.data = None;\n self.columns_data = None;\n return;\n\n\n # maybe stop creating separate files for filtered datasets and just create the df on the fly\n def _filterData(self):\n diseases = self.options.getDiseaseNames();\n filter = DatasetFilter(self.options)\n options_filtering = self.options.getOptionsFiltering();\n if options_filtering in diseases:\n self.df = filter.filterDataDisease()\n elif options_filtering.split(\"_\")[0] in self.options.getCategoricalFeatures() and not self.options.getEncodingScheme() == 'categorical':\n self.df = filter.filterCategoricalColumn(options_filtering)\n else:\n self.df = filter.filterDataBinaryColumns(options_filtering)\n\n\n def _getDf(self):\n if self.options.getOptionsFiltering() is not None:\n self._filterData();\n else:\n filename = self.options.getFilename()\n df = pd.read_csv(filename);\n self.df = df;\n \n\n def _getColumnsDf(self):\n cols = list(self.df.columns);\n self.columns_df = cols;\n\n def _getColumnsData(self):\n if self.data is None:\n self._getData();\n cols = list(self.data.columns);\n self.columns_data = cols;\n\n\n def _removeNotNeededColumns(self):\n not_needed_columns = self.options.getColumnsToRemove();\n columns_data = list(self.data.columns);\n for col in not_needed_columns:\n if col in columns_data:\n try:\n self.data = self.data.drop(col, axis=1);\n except ValueError or KeyError:\n pass;\n\n\n def _normalizeNumericalColumns(self):\n if self.columns_data is None:\n self._getColumnsData();\n for feat in self.columns_data:\n max_value = self.data[feat].max()\n min_value = self.data[feat].min()\n if not max_value == min_value:\n self.data[feat] = (self.data[feat] - min_value) / (max_value - min_value)\n\n\n def _getData(self):\n if self.df is None:\n self._getDf();\n self.data = self.df.copy();\n self.data = self.data.fillna(0.0);\n self._removeNotNeededColumns();\n if self.options.getEncodingScheme() == 'categorical':\n self._normalizeNumericalColumns();\n\n\n def _splitData(self):\n if self.data is None:\n self.getData();\n\n early_readmission_flagname = self.options.getEarlyReadmissionFlagname();\n df_pos = self.data.loc[self.data[early_readmission_flagname] == 1]\n df_neg = self.data.loc[self.data[early_readmission_flagname] == 0]\n df_pos = df_pos.sample(frac=1);\n df_neg = df_neg.sample(frac=1);\n return [df_pos, df_neg];\n\n\n def _getBalancedSubset(self):\n [df_pos, df_neg] = self._splitData();\n num_pos_samples = df_pos.shape[0];\n num_neg_samples = df_neg.shape[0];\n min_num_samples = int(np.min([num_pos_samples, num_neg_samples]));\n df_pos_balanced = df_pos[:min_num_samples];\n df_neg_balanced = df_neg[:min_num_samples];\n return [df_pos_balanced, df_neg_balanced];\n\n\n def _getTrainingTesting(self):\n ratio_training_samples = self.options.getRatioTrainingSamples();\n\n [df_pos, df_neg] = self._splitData();\n num_pos_samples = df_pos.shape[0];\n num_pos_samples_training = int(round(ratio_training_samples * num_pos_samples));\n num_pos_samples_testing = num_pos_samples - num_pos_samples_training;\n\n df_pos_training = df_pos.iloc[:num_pos_samples_training, :];\n df_pos_testing = df_pos.iloc[num_pos_samples_training:, :];\n print('df_pos_training: ' + str(df_pos_training.shape))\n print('df_pos_testing: ' + 
str(df_pos_testing.shape))\n df_neg_testing = df_neg.iloc[:num_pos_samples_testing, :];\n df_neg_training = df_neg.iloc[num_pos_samples_testing:, :];\n print('df_neg_training: ' + str(df_neg_training.shape))\n print('df_neg_testing: ' + str(df_neg_testing.shape))\n training = [df_pos_training, df_neg_training];\n testing = [df_pos_testing, df_neg_testing];\n return [training, testing]\n\n\n def getColumnsDf(self):\n if self.df is None:\n self._getDf();\n if self.columns_df is None:\n self._getColumnsDf();\n return self.columns_df;\n\n\n def getColumnsData(self):\n if self.data is None:\n self._getData();\n if self.columns_data is None:\n self._getColumnsData();\n return self.columns_data;\n\n\n def getDf(self):\n if self.df is None:\n self._getDf();\n return self.df;\n\n\n def getData(self):\n if self.data is None:\n self._getData();\n return self.data;\n\n\n def getFilename(self, filteroptions=False):\n return self.options.getFilename(filteroptions);\n\n\n def getFilenameOptions(self, filteroptions=False):\n return self.options.getFilenameOptions(filteroptions);\n\n\n def getBalancedSubsetTrainingAndTesting(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n ratio_training_samples = self.options.getRatioTrainingSamples();\n num_pos_samples = df_pos.shape[0];\n num_pos_samples_training = int(round(ratio_training_samples * num_pos_samples));\n num_pos_samples_testing = num_pos_samples - num_pos_samples_training;\n\n df_pos_training = df_pos.iloc[:num_pos_samples_training, :];\n df_neg_training = df_neg.iloc[:num_pos_samples_training, :];\n df_pos_testing = df_pos.iloc[-num_pos_samples_testing:, :];\n df_neg_testing = df_neg.iloc[-num_pos_samples_testing:, :];\n\n df_balanced_training = df_pos_training.append(df_neg_training);\n df_balanced_training = df_balanced_training.sample(frac=1);\n df_balanced_testing = df_pos_testing.append(df_neg_testing);\n df_balanced_testing = df_balanced_testing.sample(frac=1);\n\n return [df_balanced_training, df_balanced_testing];\n\n\n def getTrainingAndTestingSet(self):\n [training, testing] = self._getTrainingTesting();\n return [training, testing]\n\n\n def getBalancedSubSet(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n df_balanced = df_pos.append(df_neg);\n df_balanced = df_balanced.sample(frac=1);\n return df_balanced;\n\n\n def splitDatasetIntoTrainingTestingSet(self):\n datasplitter = DatasetSplitter(self.options)\n datasplitter.splitDatasetIntoTrainingTesting();\n\n\n def getNumSamplesBalancedSubset(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n df_balanced = df_pos.append(df_neg);\n num_samples = df_balanced.shape[0];\n return num_samples;\n\n def getNumSamples(self):\n if self.df is None:\n self._getDf();\n num_samples = self.df.shape[0];\n return num_samples;\n\n" ]
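The `_normalizeNumericalColumns` method in the code above applies per-column min-max scaling and skips constant columns so the (max - min) denominator never becomes zero. A minimal standalone sketch of the same idea, with hypothetical column names rather than the dataset's real features:

import pandas as pd

def min_max_normalize(df: pd.DataFrame) -> pd.DataFrame:
    # Scale each numeric column into [0, 1]; constant columns are left
    # unchanged to avoid dividing by zero.
    out = df.copy()
    for col in out.select_dtypes(include="number").columns:
        col_min, col_max = out[col].min(), out[col].max()
        if col_max != col_min:
            out[col] = (out[col] - col_min) / (col_max - col_min)
    return out

# Hypothetical usage: "age" gets scaled, the constant "num_visits" stays as-is.
print(min_max_normalize(pd.DataFrame({"age": [30, 45, 60], "num_visits": [1, 1, 1]})))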
[ [ "pandas.read_csv", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
shiningsunnyday/ssd_keras
[ "3a2ea0ceaa676f59b1787ad5f5e8337520c7c056", "3a2ea0ceaa676f59b1787ad5f5e8337520c7c056" ]
[ "data_generator/object_detection_2d_data_generator_custom.py", "misc_utils/sample_new_model.py" ]
[ "'''\nA data generator for 2D object detection.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport inspect\nfrom collections import defaultdict\nimport warnings\nimport sklearn.utils\nfrom copy import deepcopy\nfrom PIL import Image\nimport cv2\nimport csv\nimport os\nimport sys\nfrom tqdm import tqdm, trange\ntry:\n import h5py\nexcept ImportError:\n warnings.warn(\"'h5py' module is missing. The fast HDF5 dataset option will be unavailable.\")\ntry:\n import json\nexcept ImportError:\n warnings.warn(\"'json' module is missing. The JSON-parser will be unavailable.\")\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n warnings.warn(\"'BeautifulSoup' module is missing. The XML-parser will be unavailable.\")\ntry:\n import pickle\nexcept ImportError:\n warnings.warn(\"'pickle' module is missing. You won't be able to save parsed file lists and annotations as pickled files.\")\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter\n\nclass DegenerateBatchError(Exception):\n '''\n An exception class to be raised if a generated batch ends up being degenerate,\n e.g. if a generated batch is empty.\n '''\n pass\n\nclass DatasetError(Exception):\n '''\n An exception class to be raised if a anything is wrong with the dataset,\n in particular if you try to generate batches when no dataset was loaded.\n '''\n pass\n\nimport pdb\n\nclass DataGenerator:\n '''\n A generator to generate batches of samples and corresponding labels indefinitely.\n\n Can shuffle the dataset consistently after each complete pass.\n\n Currently provides three methods to parse annotation data: A general-purpose CSV parser,\n an XML parser for the Pascal VOC datasets, and a JSON parser for the MS COCO datasets.\n If the annotations of your dataset are in a format that is not supported by these parsers,\n you could just add another parser method and still use this generator.\n\n Can perform image transformations for data conversion and data augmentation,\n for details please refer to the documentation of the `generate()` method.\n '''\n\n def __init__(self,\n load_images_into_memory=False,\n hdf5_dataset_path=None,\n filenames=None,\n filenames_type='text',\n images_dir=None,\n labels=None,\n image_ids=None,\n eval_neutral=None,\n labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),\n verbose=True):\n self.class_counts = None\n '''\n Initializes the data generator. You can either load a dataset directly here in the constructor,\n e.g. 
an HDF5 dataset, or you can use one of the parser methods to read in a dataset.\n\n Arguments:\n load_images_into_memory (bool, optional): If `True`, the entire dataset will be loaded into memory.\n This enables noticeably faster data generation than loading batches of images into memory ad hoc.\n Be sure that you have enough memory before you activate this option.\n hdf5_dataset_path (str, optional): The full file path of an HDF5 file that contains a dataset in the\n format that the `create_hdf5_dataset()` method produces. If you load such an HDF5 dataset, you\n don't need to use any of the parser methods anymore, the HDF5 dataset already contains all relevant\n data.\n filenames (string or list, optional): `None` or either a Python list/tuple or a string representing\n a filepath. If a list/tuple is passed, it must contain the file names (full paths) of the\n images to be used. Note that the list/tuple must contain the paths to the images,\n not the images themselves. If a filepath string is passed, it must point either to\n (1) a pickled file containing a list/tuple as described above. In this case the `filenames_type`\n argument must be set to `pickle`.\n Or\n (2) a text file. Each line of the text file contains the file name (basename of the file only,\n not the full directory path) to one image and nothing else. In this case the `filenames_type`\n argument must be set to `text` and you must pass the path to the directory that contains the\n images in `images_dir`.\n filenames_type (string, optional): In case a string is passed for `filenames`, this indicates what\n type of file `filenames` is. It can be either 'pickle' for a pickled file or 'text' for a\n plain text file.\n images_dir (string, optional): In case a text file is passed for `filenames`, the full paths to\n the images will be composed from `images_dir` and the names in the text file, i.e. this\n should be the directory that contains the images to which the text file refers.\n If `filenames_type` is not 'text', then this argument is irrelevant.\n labels (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain Numpy arrays\n that represent the labels of the dataset.\n image_ids (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain the image\n IDs of the images in the dataset.\n eval_neutral (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain for each image\n a list that indicates for each ground truth object in the image whether that object is supposed\n to be treated as neutral during an evaluation.\n labels_output_format (list, optional): A list of five strings representing the desired order of the five\n items class ID, xmin, ymin, xmax, ymax in the generated ground truth data (if any). 
The expected\n strings are 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.\n verbose (bool, optional): If `True`, prints out the progress for some constructor operations that may\n take a bit longer.\n '''\n self.labels_output_format = labels_output_format\n self.labels_format={'class_id': labels_output_format.index('class_id'),\n 'xmin': labels_output_format.index('xmin'),\n 'ymin': labels_output_format.index('ymin'),\n 'xmax': labels_output_format.index('xmax'),\n 'ymax': labels_output_format.index('ymax')} # This dictionary is for internal use.\n\n self.dataset_size = 0 # As long as we haven't loaded anything yet, the dataset size is zero.\n self.load_images_into_memory = load_images_into_memory\n self.images = None # The only way that this list will not stay `None` is if `load_images_into_memory == True`.\n\n # `self.filenames` is a list containing all file names of the image samples (full paths).\n # Note that it does not contain the actual image files themselves. This list is one of the outputs of the parser methods.\n # In case you are loading an HDF5 dataset, this list will be `None`.\n if not filenames is None:\n if isinstance(filenames, (list, tuple)):\n self.filenames = filenames\n elif isinstance(filenames, str):\n with open(filenames, 'rb') as f:\n if filenames_type == 'pickle':\n self.filenames = pickle.load(f)\n elif filenames_type == 'text':\n self.filenames = [os.path.join(images_dir, line.strip()) for line in f]\n else:\n raise ValueError(\"`filenames_type` can be either 'text' or 'pickle'.\")\n else:\n raise ValueError(\"`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.\")\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n else:\n self.filenames = None\n\n # In case ground truth is available, `self.labels` is a list containing for each image a list (or NumPy array)\n # of ground truth bounding boxes for that image.\n if not labels is None:\n if isinstance(labels, str):\n with open(labels, 'rb') as f:\n self.labels = pickle.load(f)\n elif isinstance(labels, (list, tuple)):\n self.labels = labels\n else:\n raise ValueError(\"`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.labels = None\n\n if not image_ids is None:\n if isinstance(image_ids, str):\n with open(image_ids, 'rb') as f:\n self.image_ids = pickle.load(f)\n elif isinstance(image_ids, (list, tuple)):\n self.image_ids = image_ids\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.image_ids = None\n\n if not eval_neutral is None:\n if isinstance(eval_neutral, str):\n with open(eval_neutral, 'rb') as f:\n self.eval_neutral = pickle.load(f)\n elif isinstance(eval_neutral, (list, tuple)):\n self.eval_neutral = eval_neutral\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. 
The value you passed is neither of the two.\")\n else:\n self.eval_neutral = None\n\n if not hdf5_dataset_path is None:\n self.hdf5_dataset_path = hdf5_dataset_path\n self.load_hdf5_dataset(verbose=verbose)\n else:\n self.hdf5_dataset = None\n\n\n def load_hdf5_dataset(self, verbose=True):\n '''\n Loads an HDF5 dataset that is in the format that the `create_hdf5_dataset()` method\n produces.\n\n Arguments:\n verbose (bool, optional): If `True`, prints out the progress while loading\n the dataset.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.\n\n if self.load_images_into_memory:\n self.images = []\n if verbose: tr = trange(self.dataset_size, desc='Loading images into memory', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n\n if self.hdf5_dataset.attrs['has_labels']:\n self.labels = []\n labels = self.hdf5_dataset['labels']\n label_shapes = self.hdf5_dataset['label_shapes']\n if verbose: tr = trange(self.dataset_size, desc='Loading labels', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.labels.append(labels[i].reshape(label_shapes[i]))\n\n if self.hdf5_dataset.attrs['has_image_ids']:\n self.image_ids = []\n image_ids = self.hdf5_dataset['image_ids']\n if verbose: tr = trange(self.dataset_size, desc='Loading image IDs', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.image_ids.append(image_ids[i])\n\n if self.hdf5_dataset.attrs['has_eval_neutral']:\n self.eval_neutral = []\n eval_neutral = self.hdf5_dataset['eval_neutral']\n if verbose: tr = trange(self.dataset_size, desc='Loading evaluation-neutrality annotations', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.eval_neutral.append(eval_neutral[i])\n\n def parse_csv(self,\n images_dir,\n labels_filename,\n input_format,\n include_classes='all',\n random_sample=False,\n ret=False,\n verbose=True):\n '''\n Arguments:\n images_dir (str): The path to the directory that contains the images.\n labels_filename (str): The filepath to a CSV file that contains one ground truth bounding box per line\n and each line contains the following six items: image file name, class ID, xmin, xmax, ymin, ymax.\n The six items do not have to be in a specific order, but they must be the first six columns of\n each line. The order of these items in the CSV file must be specified in `input_format`.\n The class ID is an integer greater than zero. Class ID 0 is reserved for the background class.\n `xmin` and `xmax` are the left-most and right-most absolute horizontal coordinates of the box,\n `ymin` and `ymax` are the top-most and bottom-most absolute vertical coordinates of the box.\n The image name is expected to be just the name of the image file without the directory path\n at which the image is located.\n input_format (list): A list of six strings representing the order of the six items\n image file name, class ID, xmin, xmax, ymin, ymax in the input CSV file. The expected strings\n are 'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. 
If 'all', all ground truth boxes will be included in the dataset.\n random_sample (float, optional): Either `False` or a float in `[0,1]`. If this is `False`, the\n full dataset will be used by the generator. If this is a float in `[0,1]`, a randomly sampled\n fraction of the dataset will be used, where `random_sample` is the fraction of the dataset\n to be used. For example, if `random_sample = 0.2`, 20 precent of the dataset will be randomly selected,\n the rest will be ommitted. The fraction refers to the number of images, not to the number\n of boxes, i.e. each image that will be added to the dataset will always be added with all\n of its boxes.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, and image IDs.\n '''\n\n # Set class members.\n self.images_dir = images_dir\n self.labels_filename = labels_filename\n self.input_format = input_format\n self.include_classes = include_classes\n\n # Before we begin, make sure that we have a labels_filename and an input_format\n if self.labels_filename is None or self.input_format is None:\n raise ValueError(\"`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.\")\n\n # Erase data that might have been parsed before\n self.filenames = []\n self.image_ids = []\n self.labels = []\n\n # First, just read in the CSV file lines and sort them.\n\n data = []\n\n with open(self.labels_filename,mode='r') as csvfile:\n csvread = csv.reader(csvfile, delimiter=',')\n next(csvread) # Skip the header row.\n for row in csvread: # For every line (i.e for every bounding box) in the CSV file...\n \n if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...\n box = [] # Store the box class and coordinates here\n box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`\n for element in self.labels_output_format: # For each element in the output format (where the elements are the class ID and the four box coordinates)...\n box.append(int(row[self.input_format.index(element)].strip())) # ...select the respective column in the input format and append it to `box`.\n data.append(box)\n\n data = sorted(data) # The data needs to be sorted, otherwise the next step won't give the correct result\n\n # Now that we've made sure that the data is sorted by file names,\n # we can compile the actual samples and labels lists\n\n current_file = data[0][0] # The current image for which we're collecting the ground truth boxes\n current_image_id = data[0][0].split('.')[0] # The image ID will be the portion of the image name before the first dot.\n current_labels = [] # The list where we collect all ground truth boxes for a given image\n add_to_dataset = False\n for i, box in enumerate(data):\n\n if box[0] == current_file: # If this box (i.e. 
this line of the CSV file) belongs to the current image file\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else: # If this box belongs to a new image file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n current_labels = [] # Reset the labels list because this is a new file.\n current_file = box[0]\n current_image_id = box[0].split('.')[0]\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret: # In case we want to return these\n return self.images, self.filenames, self.labels, self.image_ids\n\n def parse_xml(self,\n images_dirs,\n image_set_filenames,\n annotations_dirs=[],\n classes=['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'],\n include_classes = 'all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False,\n verbose=True):\n self.class_counts = np.zeros(len(classes))\n '''\n This is an XML parser for the Pascal VOC datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the data format and XML tags of the Pascal VOC datasets.\n\n Arguments:\n images_dirs (list): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. 
one directory that contains the images for Pascal VOC 2007, another that contains\n the images for Pascal VOC 2012, etc.).\n image_set_filenames (list): A list of strings, where each string is the path of the text file with the image\n set to be loaded. Must be one file per image directory given. These text files define what images in the\n respective image directories are to be part of the dataset and simply contains one image ID per line\n and nothing else.\n annotations_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains the annotations (XML files) that belong to the images in the respective image directories given.\n The directories must contain one XML file per image and the name of an XML file must be the image ID\n of the image it belongs to. The content of the XML files must be in the Pascal VOC format.\n classes (list, optional): A list containing the names of the object classes as found in the\n `name` XML tags. Must include the class `background` as the first list item. The order of this list\n defines the class IDs.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n exclude_truncated (bool, optional): If `True`, excludes boxes that are labeled as 'truncated'.\n exclude_difficult (bool, optional): If `True`, excludes boxes that are labeled as 'difficult'.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, image IDs,\n and a list indicating which boxes are annotated with the label \"difficult\".\n '''\n # Set class members.\n self.images_dirs = images_dirs\n self.annotations_dirs = annotations_dirs\n self.image_set_filenames = image_set_filenames\n self.classes = classes\n self.include_classes = include_classes\n\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n self.eval_neutral = []\n if not annotations_dirs:\n self.labels = None\n self.eval_neutral = None\n annotations_dirs = [None] * len(images_dirs)\n\n for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):\n # Read the image set file that so that we know all the IDs of all the images to be included in the dataset.\n with open(image_set_filename) as f:\n image_ids = [line.strip() for line in f] # Note: These are strings, not integers.\n self.image_ids += image_ids\n\n if verbose: it = tqdm(image_ids, desc=\"Processing image set '{}'\".format(os.path.basename(image_set_filename)), file=sys.stdout)\n else: it = image_ids\n\n # Loop over all images in this dataset.\n for image_id in it:\n\n filename = '{}'.format(image_id) + '.jpg'\n self.filenames.append(os.path.join(images_dir, filename))\n\n if not annotations_dir is None:\n # Parse the XML file for this image.\n with open(os.path.join(annotations_dir, image_id + '.xml')) as f:\n soup = BeautifulSoup(f, 'xml')\n\n folder = soup.folder.text # In case we want to return the folder in addition to the image file name. 
Relevant for determining which dataset an image belongs to.\n #filename = soup.filename.text\n\n boxes = [] # We'll store all boxes for this image here.\n eval_neutr = [] # We'll store whether a box is annotated as \"difficult\" here.\n objects = soup.find_all('object') # Get a list of all objects in this image.\n\n # Parse the data for each object.\n for obj in objects:\n class_name = obj.find('name', recursive=False).text\n try:\n class_id = self.classes.index(class_name)\n class_name = self.classes[class_id]\n except ValueError:\n class_id = -1\n for c in classes:\n if class_name.find(c) > -1:\n class_id = self.classes.index(c)\n class_name = self.classes[class_id]\n if class_id < 0:\n class_id = 0;\n class_name = '0samples'\n\n self.class_counts[class_id] += 1\n # Check whether this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue\n pose = obj.find('pose', recursive=False).text\n truncated = int(obj.find('truncated', recursive=False).text)\n if exclude_truncated and (truncated == 1): continue\n difficult = int(obj.find('difficult', recursive=False).text)\n if exclude_difficult and (difficult == 1): continue\n # Get the bounding box coordinates.\n bndbox = obj.find('bndbox', recursive=False)\n xmin = int(bndbox.xmin.text)\n ymin = int(bndbox.ymin.text)\n xmax = int(bndbox.xmax.text)\n ymax = int(bndbox.ymax.text)\n item_dict = {'folder': folder,\n 'image_name': filename,\n 'image_id': image_id,\n 'class_name': class_name,\n 'class_id': class_id,\n 'pose': pose,\n 'truncated': truncated,\n 'difficult': difficult,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n if difficult: eval_neutr.append(True)\n else: eval_neutr.append(False)\n\n self.labels.append(boxes)\n self.eval_neutral.append(eval_neutr)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def parse_json(self,\n images_dirs,\n annotations_filenames,\n ground_truth_available=False,\n include_classes='all',\n ret=False,\n verbose=True):\n '''\n This is an JSON parser for the MS COCO datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the JSON format of the MS COCO datasets.\n\n Arguments:\n images_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. one directory that contains the images for MS COCO Train 2014, another one for MS COCO\n Val 2014, another one for MS COCO Train 2017 etc.).\n annotations_filenames (list): A list of strings, where each string is the path of the JSON file\n that contains the annotations for the images in the respective image directories given, i.e. one\n JSON file per image directory that contains the annotations for all images in that directory.\n The content of the JSON files must be in MS COCO object detection format. 
Note that these annotations\n files do not necessarily need to contain ground truth information. MS COCO also provides annotations\n files without ground truth information for the test datasets, called `image_info_[...].json`.\n ground_truth_available (bool, optional): Set `True` if the annotations files contain ground truth information.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels and image IDs.\n '''\n self.images_dirs = images_dirs\n self.annotations_filenames = annotations_filenames\n self.include_classes = include_classes\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n if not ground_truth_available:\n self.labels = None\n\n # Build the dictionaries that map between class names and class IDs.\n with open(annotations_filenames[0], 'r') as f:\n annotations = json.load(f)\n # Unfortunately the 80 MS COCO class IDs are not all consecutive. They go\n # from 1 to 90 and some numbers are skipped. Since the IDs that we feed\n # into a neural network must be consecutive, we'll save both the original\n # (non-consecutive) IDs as well as transformed maps.\n # We'll save both the map between the original\n self.cats_to_names = {} # The map between class names (values) and their original IDs (keys)\n self.classes_to_names = [] # A list of the class names with their indices representing the transformed IDs\n self.classes_to_names.append('background') # Need to add the background class first so that the indexing is right.\n self.cats_to_classes = {} # A dictionary that maps between the original (keys) and the transformed IDs (values)\n self.classes_to_cats = {} # A dictionary that maps between the transformed (keys) and the original IDs (values)\n for i, cat in enumerate(annotations['categories']):\n self.cats_to_names[cat['id']] = cat['name']\n self.classes_to_names.append(cat['name'])\n self.cats_to_classes[cat['id']] = i + 1\n self.classes_to_cats[i + 1] = cat['id']\n\n # Iterate over all datasets.\n for images_dir, annotations_filename in zip(self.images_dirs, self.annotations_filenames):\n # Load the JSON file.\n with open(annotations_filename, 'r') as f:\n annotations = json.load(f)\n\n if ground_truth_available:\n # Create the annotations map, a dictionary whose keys are the image IDs\n # and whose values are the annotations for the respective image ID.\n image_ids_to_annotations = defaultdict(list)\n for annotation in annotations['annotations']:\n image_ids_to_annotations[annotation['image_id']].append(annotation)\n\n if verbose: it = tqdm(annotations['images'], desc=\"Processing '{}'\".format(os.path.basename(annotations_filename)), file=sys.stdout)\n else: it = annotations['images']\n\n # Loop over all images in this dataset.\n for img in it:\n\n self.filenames.append(os.path.join(images_dir, img['file_name']))\n self.image_ids.append(img['id'])\n\n if ground_truth_available:\n # Get all annotations for this image.\n annotations = image_ids_to_annotations[img['id']]\n boxes = []\n for annotation in annotations:\n cat_id = annotation['category_id']\n # Check if this class is 
supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not cat_id in self.include_classes): continue\n # Transform the original class ID to fit in the sequence of consecutive IDs.\n class_id = self.cats_to_classes[cat_id]\n xmin = annotation['bbox'][0]\n ymin = annotation['bbox'][1]\n width = annotation['bbox'][2]\n height = annotation['bbox'][3]\n # Compute `xmax` and `ymax`.\n xmax = xmin + width\n ymax = ymin + height\n item_dict = {'image_name': img['file_name'],\n 'image_id': img['id'],\n 'class_id': class_id,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n self.labels.append(boxes)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids\n\n def create_hdf5_dataset(self,\n file_path='dataset.h5',\n resize=False,\n variable_image_size=True,\n verbose=True):\n '''\n Converts the currently loaded dataset into a HDF5 file. This HDF5 file contains all\n images as uncompressed arrays in a contiguous block of memory, which allows for them\n to be loaded faster. Such an uncompressed dataset, however, may take up considerably\n more space on your hard drive than the sum of the source images in a compressed format\n such as JPG or PNG.\n\n It is recommended that you always convert the dataset into an HDF5 dataset if you\n have enugh hard drive space since loading from an HDF5 dataset accelerates the data\n generation noticeably.\n\n Note that you must load a dataset (e.g. via one of the parser methods) before creating\n an HDF5 dataset from it.\n\n The created HDF5 dataset will remain open upon its creation so that it can be used right\n away.\n\n Arguments:\n file_path (str, optional): The full file path under which to store the HDF5 dataset.\n You can load this output file via the `DataGenerator` constructor in the future.\n resize (tuple, optional): `False` or a 2-tuple `(height, width)` that represents the\n target size for the images. All images in the dataset will be resized to this\n target size before they will be written to the HDF5 file. 
If `False`, no resizing\n will be performed.\n variable_image_size (bool, optional): The only purpose of this argument is that its\n value will be stored in the HDF5 dataset in order to be able to quickly find out\n whether the images in the dataset all have the same size or not.\n verbose (bool, optional): Whether or not prit out the progress of the dataset creation.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset_path = file_path\n\n dataset_size = len(self.filenames)\n\n # Create the HDF5 file.\n hdf5_dataset = h5py.File(file_path, 'w')\n\n # Create a few attributes that tell us what this dataset contains.\n # The dataset will obviously always contain images, but maybe it will\n # also contain labels, image IDs, etc.\n hdf5_dataset.attrs.create(name='has_labels', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_image_ids', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_eval_neutral', data=False, shape=None, dtype=np.bool_)\n # It's useful to be able to quickly check whether the images in a dataset all\n # have the same size or not, so add a boolean attribute for that.\n if variable_image_size and not resize:\n hdf5_dataset.attrs.create(name='variable_image_size', data=True, shape=None, dtype=np.bool_)\n else:\n hdf5_dataset.attrs.create(name='variable_image_size', data=False, shape=None, dtype=np.bool_)\n\n # Create the dataset in which the images will be stored as flattened arrays.\n # This allows us, among other things, to store images of variable size.\n hdf5_images = hdf5_dataset.create_dataset(name='images',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.uint8))\n\n # Create the dataset that will hold the image heights, widths and channels that\n # we need in order to reconstruct the images from the flattened arrays later.\n hdf5_image_shapes = hdf5_dataset.create_dataset(name='image_shapes',\n shape=(dataset_size, 3),\n maxshape=(None, 3),\n dtype=np.int32)\n\n if not (self.labels is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_labels = hdf5_dataset.create_dataset(name='labels',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.int32))\n\n # Create the dataset that will hold the dimensions of the labels arrays for\n # each image so that we can restore the labels from the flattened arrays later.\n hdf5_label_shapes = hdf5_dataset.create_dataset(name='label_shapes',\n shape=(dataset_size, 2),\n maxshape=(None, 2),\n dtype=np.int32)\n\n hdf5_dataset.attrs.modify(name='has_labels', value=True)\n\n if not (self.image_ids is None):\n\n hdf5_image_ids = hdf5_dataset.create_dataset(name='image_ids',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=str))\n\n hdf5_dataset.attrs.modify(name='has_image_ids', value=True)\n\n if not (self.eval_neutral is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_eval_neutral = hdf5_dataset.create_dataset(name='eval_neutral',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.bool_))\n\n hdf5_dataset.attrs.modify(name='has_eval_neutral', value=True)\n\n if verbose:\n tr = trange(dataset_size, desc='Creating HDF5 dataset', file=sys.stdout)\n else:\n tr = range(dataset_size)\n\n # Iterate over all images in the dataset.\n for i in tr:\n\n # Store the image.\n with Image.open(self.filenames[i]) as image:\n\n image = np.asarray(image, dtype=np.uint8)\n\n # Make sure all images end up having 
three channels.\n if image.ndim == 2:\n image = np.stack([image] * 3, axis=-1)\n elif image.ndim == 3:\n if image.shape[2] == 1:\n image = np.concatenate([image] * 3, axis=-1)\n elif image.shape[2] == 4:\n image = image[:,:,:3]\n\n if resize:\n image = cv2.resize(image, dsize=(resize[1], resize[0]))\n\n # Flatten the image array and write it to the images dataset.\n hdf5_images[i] = image.reshape(-1)\n # Write the image's shape to the image shapes dataset.\n hdf5_image_shapes[i] = image.shape\n\n # Store the ground truth if we have any.\n if not (self.labels is None):\n\n labels = np.asarray(self.labels[i])\n # Flatten the labels array and write it to the labels dataset.\n hdf5_labels[i] = labels.reshape(-1)\n # Write the labels' shape to the label shapes dataset.\n hdf5_label_shapes[i] = labels.shape\n\n # Store the image ID if we have one.\n if not (self.image_ids is None):\n\n hdf5_image_ids[i] = self.image_ids[i]\n\n # Store the evaluation-neutrality annotations if we have any.\n if not (self.eval_neutral is None):\n\n hdf5_eval_neutral[i] = self.eval_neutral[i]\n\n hdf5_dataset.close()\n self.hdf5_dataset = h5py.File(file_path, 'r')\n self.hdf5_dataset_path = file_path\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset, we will shuffle this index list.\n\n def generate(self,\n batch_size=32,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images', 'encoded_labels'},\n keep_images_without_gt=False,\n degenerate_box_handling='remove'):\n '''\n Generates batches of samples and (optionally) corresponding labels indefinitely.\n\n Can shuffle the samples consistently after each complete pass.\n\n Optionally takes a list of arbitrary image transformations to apply to the\n samples ad hoc.\n\n Arguments:\n batch_size (int, optional): The size of the batches to be generated.\n shuffle (bool, optional): Whether or not to shuffle the dataset before each pass.\n This option should always be `True` during training, but it can be useful to turn shuffling off\n for debugging or if you're using the generator for prediction.\n transformations (list, optional): A list of transformations that will be applied to the images and labels\n in the given order. Each transformation is a callable that takes as input an image (as a Numpy array)\n and optionally labels (also as a Numpy array) and returns an image and optionally labels in the same\n format.\n label_encoder (callable, optional): Only relevant if labels are given. A callable that takes as input the\n labels of a batch (as a list of Numpy arrays) and returns some structure that represents those labels.\n The general use case for this is to convert labels from their input format to a format that a given object\n detection model needs as its training targets.\n returns (set, optional): A set of strings that determines what outputs the generator yields. The generator's output\n is always a tuple that contains the outputs specified in this set and only those. If an output is not available,\n it will be `None`. The output tuple can contain the following outputs according to the specified keyword strings:\n * 'processed_images': An array containing the processed images. Will always be in the outputs, so it doesn't\n matter whether or not you include this keyword in the set.\n * 'encoded_labels': The encoded labels tensor. 
Will always be in the outputs if a label encoder is given,\n so it doesn't matter whether or not you include this keyword in the set if you pass a label encoder.\n * 'matched_anchors': Only available if `labels_encoder` is an `SSDInputEncoder` object. The same as 'encoded_labels',\n but containing anchor box coordinates for all matched anchor boxes instead of ground truth coordinates.\n This can be useful to visualize what anchor boxes are being matched to each ground truth box. Only available\n in training mode.\n * 'processed_labels': The processed, but not yet encoded labels. This is a list that contains for each\n batch image a Numpy array with all ground truth boxes for that image. Only available if ground truth is available.\n * 'filenames': A list containing the file names (full paths) of the images in the batch.\n * 'image_ids': A list containing the integer IDs of the images in the batch. Only available if there\n are image IDs available.\n * 'evaluation-neutral': A nested list of lists of booleans. Each list contains `True` or `False` for every ground truth\n bounding box of the respective image depending on whether that bounding box is supposed to be evaluation-neutral (`True`)\n or not (`False`). May return `None` if there exists no such concept for a given dataset. An example for\n evaluation-neutrality are the ground truth boxes annotated as \"difficult\" in the Pascal VOC datasets, which are\n usually treated to be neutral in a model evaluation.\n * 'inverse_transform': A nested list that contains a list of \"inverter\" functions for each item in the batch.\n These inverter functions take (predicted) labels for an image as input and apply the inverse of the transformations\n that were applied to the original image to them. This makes it possible to let the model make predictions on a\n transformed image and then convert these predictions back to the original image. This is mostly relevant for\n evaluation: If you want to evaluate your model on a dataset with varying image sizes, then you are forced to\n transform the images somehow (e.g. by resizing or cropping) to make them all the same size. Your model will then\n predict boxes for those transformed images, but for the evaluation you will need predictions with respect to the\n original images, not with respect to the transformed images. This means you will have to transform the predicted\n box coordinates back to the original image sizes. Note that for each image, the inverter functions for that\n image need to be applied in the order in which they are given in the respective list for that image.\n * 'original_images': A list containing the original images in the batch before any processing.\n * 'original_labels': A list containing the original ground truth boxes for the images in this batch before any\n processing. Only available if ground truth is available.\n The order of the outputs in the tuple is the order of the list above. If `returns` contains a keyword for an\n output that is unavailable, that output omitted in the yielded tuples and a warning will be raised.\n keep_images_without_gt (bool, optional): If `False`, images for which there aren't any ground truth boxes before\n any transformations have been applied will be removed from the batch. If `True`, such images will be kept\n in the batch.\n degenerate_box_handling (str, optional): How to handle degenerate boxes, which are boxes that have `xmax <= xmin` and/or\n `ymax <= ymin`. 
Degenerate boxes can sometimes be in the dataset, or non-degenerate boxes can become degenerate\n after they were processed by transformations. Note that the generator checks for degenerate boxes after all\n transformations have been applied (if any), but before the labels were passed to the `label_encoder` (if one was given).\n Can be one of 'warn' or 'remove'. If 'warn', the generator will merely print a warning to let you know that there\n are degenerate boxes in a batch. If 'remove', the generator will remove degenerate boxes from the batch silently.\n\n Yields:\n The next batch as a tuple of items as defined by the `returns` argument.\n '''\n\n if self.dataset_size == 0:\n raise DatasetError(\"Cannot generate batches because you did not load a dataset.\")\n\n #############################################################################################\n # Warn if any of the set returns aren't possible.\n #############################################################################################\n\n if self.labels is None:\n if any([ret in returns for ret in ['original_labels', 'processed_labels', 'encoded_labels', 'matched_anchors', 'evaluation-neutral']]):\n warnings.warn(\"Since no labels were given, none of 'original_labels', 'processed_labels', 'evaluation-neutral', 'encoded_labels', and 'matched_anchors' \" +\n \"are possible returns, but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif label_encoder is None:\n if any([ret in returns for ret in ['encoded_labels', 'matched_anchors']]):\n warnings.warn(\"Since no label encoder was given, 'encoded_labels' and 'matched_anchors' aren't possible returns, \" +\n \"but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif not isinstance(label_encoder, SSDInputEncoder):\n if 'matched_anchors' in returns:\n warnings.warn(\"`label_encoder` is not an `SSDInputEncoder` object, therefore 'matched_anchors' is not a possible return, \" +\n \"but you set `returns = {}`. 
The impossible returns will be `None`.\".format(returns))\n\n #############################################################################################\n # Do a few preparatory things like maybe shuffling the dataset initially.\n #############################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n if degenerate_box_handling == 'remove':\n box_filter = BoxFilter(check_overlap=False,\n check_min_area=False,\n check_degenerate=True,\n labels_format=self.labels_format)\n\n # Override the labels formats of all the transformations to make sure they are set correctly.\n if not (self.labels is None):\n for transform in transformations:\n transform.labels_format = self.labels_format\n\n #############################################################################################\n # Generate mini batches.\n #############################################################################################\n\n current = 0\n\n while True:\n\n batch_X, batch_y = [], []\n\n if current >= self.dataset_size:\n current = 0\n\n #########################################################################################\n # Maybe shuffle the dataset if a full pass over the dataset has finished.\n #########################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n #########################################################################################\n # Get the images, (maybe) image IDs, (maybe) labels, etc. 
for this batch.\n #########################################################################################\n\n # We prioritize our options in the following order:\n # 1) If we have the images already loaded in memory, get them from there.\n # 2) Else, if we have an HDF5 dataset, get the images from there.\n # 3) Else, if we have neither of the above, we'll have to load the individual image\n # files from disk.\n batch_indices = self.dataset_indices[current:current+batch_size]\n if not (self.images is None):\n for i in batch_indices:\n batch_X.append(self.images[i])\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n elif not (self.hdf5_dataset is None):\n for i in batch_indices:\n batch_X.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n else:\n batch_filenames = self.filenames[current:current+batch_size]\n for filename in batch_filenames:\n with Image.open(filename) as image:\n batch_X.append(np.array(image, dtype=np.uint8))\n\n # Get the labels for this batch (if there are any).\n if not (self.labels is None):\n batch_y = deepcopy(self.labels[current:current+batch_size])\n else:\n batch_y = None\n\n if not (self.eval_neutral is None):\n batch_eval_neutral = self.eval_neutral[current:current+batch_size]\n else:\n batch_eval_neutral = None\n\n # Get the image IDs for this batch (if there are any).\n if not (self.image_ids is None):\n batch_image_ids = self.image_ids[current:current+batch_size]\n else:\n batch_image_ids = None\n\n if 'original_images' in returns:\n batch_original_images = deepcopy(batch_X) # The original, unaltered images\n if 'original_labels' in returns:\n batch_original_labels = deepcopy(batch_y) # The original, unaltered labels\n\n current += batch_size\n\n #########################################################################################\n # Maybe perform image transformations.\n #########################################################################################\n\n batch_items_to_remove = [] # In case we need to remove any images from the batch, store their indices in this list.\n batch_inverse_transforms = []\n\n for i in range(len(batch_X)):\n\n if not (self.labels is None):\n # Convert the labels for this image to an array (in case they aren't already).\n batch_y[i] = np.array(batch_y[i])\n # If this image has no ground truth boxes, maybe we don't want to keep it in the batch.\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n # Apply any image transformations we may have received.\n if transformations:\n\n inverse_transforms = []\n\n for transform in transformations:\n\n if not (self.labels is None):\n \n if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):\n batch_X[i], batch_y[i], inverse_transform = transform(batch_X[i], batch_y[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i], batch_y[i] = transform(batch_X[i], batch_y[i])\n\n if batch_X[i] is None: # In case the transform failed to produce an output image, which is possible for some random transforms.\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n else:\n\n if ('inverse_transform' in returns) and ('return_inverter' in 
inspect.signature(transform).parameters):\n batch_X[i], inverse_transform = transform(batch_X[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i] = transform(batch_X[i])\n\n batch_inverse_transforms.append(inverse_transforms[::-1])\n\n #########################################################################################\n # Check for degenerate boxes in this batch item.\n #########################################################################################\n\n if not (self.labels is None):\n\n xmin = self.labels_format['xmin']\n ymin = self.labels_format['ymin']\n xmax = self.labels_format['xmax']\n ymax = self.labels_format['ymax']\n\n if np.any(batch_y[i][:,xmax] - batch_y[i][:,xmin] <= 0) or np.any(batch_y[i][:,ymax] - batch_y[i][:,ymin] <= 0):\n if degenerate_box_handling == 'warn':\n warnings.warn(\"Detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, \".format(i, batch_y[i]) +\n \"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. \" +\n \"This could mean that your dataset contains degenerate ground truth boxes, or that any image transformations you may apply might \" +\n \"result in degenerate ground truth boxes, or that you are parsing the ground truth in the wrong coordinate format.\" +\n \"Degenerate ground truth bounding boxes may lead to NaN errors during the training.\")\n elif degenerate_box_handling == 'remove':\n batch_y[i] = box_filter(batch_y[i])\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n\n #########################################################################################\n # Remove any items we might not want to keep from the batch.\n #########################################################################################\n\n if batch_items_to_remove:\n for j in sorted(batch_items_to_remove, reverse=True):\n # This isn't efficient, but it hopefully shouldn't need to be done often anyway.\n batch_X.pop(j)\n batch_filenames.pop(j)\n if batch_inverse_transforms: batch_inverse_transforms.pop(j)\n if not (self.labels is None): batch_y.pop(j)\n if not (self.image_ids is None): batch_image_ids.pop(j)\n if not (self.eval_neutral is None): batch_eval_neutral.pop(j)\n if 'original_images' in returns: batch_original_images.pop(j)\n if 'original_labels' in returns and not (self.labels is None): batch_original_labels.pop(j)\n\n #########################################################################################\n\n # CAUTION: Converting `batch_X` into an array will result in an empty batch if the images have varying sizes\n # or varying numbers of channels. At this point, all images must have the same size and the same\n # number of channels.\n batch_X = np.array(batch_X)\n if (batch_X.size == 0):\n raise DegenerateBatchError(\"You produced an empty batch. This might be because the images in the batch vary \" +\n \"in their size and/or number of channels. 
Note that after all transformations \" +\n \"(if any were given) have been applied to all images in the batch, all images \" +\n \"must be homogenous in size along all axes.\")\n\n #########################################################################################\n # If we have a label encoder, encode our labels.\n #########################################################################################\n\n if not (label_encoder is None or self.labels is None):\n\n if ('matched_anchors' in returns) and isinstance(label_encoder, SSDInputEncoder):\n batch_y_encoded, batch_matched_anchors = label_encoder(batch_y, diagnostics=True)\n else:\n batch_y_encoded = label_encoder(batch_y, diagnostics=False)\n batch_matched_anchors = None\n\n else:\n batch_y_encoded = None\n batch_matched_anchors = None\n\n #########################################################################################\n # Compose the output.\n #########################################################################################\n\n ret = []\n\n if 'bankofamerica/img00002' in batch_image_ids:\n pdb.set_trace()\n if 'processed_images' in returns: ret.append(batch_X)\n if 'encoded_labels' in returns: ret.append(batch_y_encoded)\n if 'matched_anchors' in returns: ret.append(batch_matched_anchors)\n if 'processed_labels' in returns: ret.append(batch_y)\n if 'filenames' in returns: ret.append(batch_filenames)\n if 'image_ids' in returns: ret.append(batch_image_ids)\n if 'evaluation-neutral' in returns: ret.append(batch_eval_neutral)\n if 'inverse_transform' in returns: ret.append(batch_inverse_transforms)\n if 'original_images' in returns: ret.append(batch_original_images)\n if 'original_labels' in returns: ret.append(batch_original_labels)\n\n yield ret\n\n def save_dataset(self,\n filenames_path='filenames.pkl',\n labels_path=None,\n image_ids_path=None,\n eval_neutral_path=None):\n '''\n Writes the current `filenames`, `labels`, and `image_ids` lists to the specified files.\n This is particularly useful for large datasets with annotations that are\n parsed from XML files, which can take quite long. 
If you'll be using the\n same dataset repeatedly, you don't want to have to parse the XML label\n files every time.\n\n Arguments:\n filenames_path (str): The path under which to save the filenames pickle.\n labels_path (str): The path under which to save the labels pickle.\n image_ids_path (str, optional): The path under which to save the image IDs pickle.\n eval_neutral_path (str, optional): The path under which to save the pickle for\n the evaluation-neutrality annotations.\n '''\n with open(filenames_path, 'wb') as f:\n pickle.dump(self.filenames, f)\n if not labels_path is None:\n with open(labels_path, 'wb') as f:\n pickle.dump(self.labels, f)\n if not image_ids_path is None:\n with open(image_ids_path, 'wb') as f:\n pickle.dump(self.image_ids, f)\n if not eval_neutral_path is None:\n with open(eval_neutral_path, 'wb') as f:\n pickle.dump(self.eval_neutral, f)\n\n def get_dataset(self):\n '''\n Returns:\n 4-tuple containing lists and/or `None` for the filenames, labels, image IDs,\n and evaluation-neutrality annotations.\n '''\n return self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def get_dataset_size(self):\n '''\n Returns:\n The number of images in the dataset.\n '''\n return self.dataset_size\n", "import h5py\nimport numpy as np\nimport shutil\n\nfrom tensor_sampling_utils import sample_tensors\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--output\",required=True)\nparser.add_argument(\"--num_classes\",required=True)\nparser.add_argument(\"--source\",default='../base_models/VGG_coco_SSD_300x300_iter_400000.h5')\nflags = parser.parse_args()\nweights_source_path = flags.source\nweights_destination_path = flags.output\n\n# Make a copy of the weights file.\nshutil.copy(weights_source_path, weights_destination_path)\n\n\n# In[4]:\n\n\n# Load both the source weights file and the copy we made.\n# We will load the original weights file in read-only mode so that we can't mess up anything.\nweights_source_file = h5py.File(weights_source_path, 'r')\nweights_destination_file = h5py.File(weights_destination_path)\n\n\n# ## 2. Figure out which weight tensors we need to sub-sample\n# \n# Next, we need to figure out exactly which weight tensors we need to sub-sample. As mentioned above, the weights for all layers except the classification layers are fine, we don't need to change anything about those.\n# \n# So which are the classification layers in SSD300? Their names are:\n\n# In[5]:\n\n\nclassifier_names = ['conv4_3_norm_mbox_conf',\n 'fc7_mbox_conf',\n 'conv6_2_mbox_conf',\n 'conv7_2_mbox_conf',\n 'conv8_2_mbox_conf',\n 'conv9_2_mbox_conf']\n\n\n# ## 3. Figure out which slices to pick\n# \n# The following section is optional. I'll look at one classification layer and explain what we want to do, just for your understanding. If you don't care about that, just skip ahead to the next section.\n# \n# We know which weight tensors we want to sub-sample, but we still need to decide which (or at least how many) elements of those tensors we want to keep. Let's take a look at the first of the classifier layers, \"`conv4_3_norm_mbox_conf`\". 
Its two weight tensors, the kernel and the bias, have the following shapes:\n\n# In[6]:\n\n\nconv4_3_norm_mbox_conf_kernel = weights_source_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_source_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\n\n# So the last axis has 324 elements. Why is that?\n# \n# - MS COCO has 80 classes, but the model also has one 'backgroud' class, so that makes 81 classes effectively.\n# - The 'conv4_3_norm_mbox_loc' layer predicts 4 boxes for each spatial position, so the 'conv4_3_norm_mbox_conf' layer has to predict one of the 81 classes for each of those 4 boxes.\n# \n# That's why the last axis has 4 * 81 = 324 elements.\n# \n# So how many elements do we want in the last axis for this layer?\n# \n# Let's do the same calculation as above:\n# \n# - Our dataset has 8 classes, but our model will also have a 'background' class, so that makes 9 classes effectively.\n# - We need to predict one of those 9 classes for each of the four boxes at each spatial position.\n# \n# That makes 4 * 9 = 36 elements.\n# \n# Now we know that we want to keep 36 elements in the last axis and leave all other axes unchanged. But which 36 elements out of the original 324 elements do we want?\n# \n# Should we just pick them randomly? If the object classes in our dataset had absolutely nothing to do with the classes in MS COCO, then choosing those 36 elements randomly would be fine (and the next section covers this case, too). But in our particular example case, choosing these elements randomly would be a waste. Since MS COCO happens to contain exactly the 8 classes that we need, instead of sub-sampling randomly, we'll just take exactly those elements that were trained to predict our 8 classes.\n# \n# Here are the indices of the 9 classes in MS COCO that we are interested in:\n# \n# `[0, 1, 2, 3, 4, 6, 8, 10, 12]`\n# \n# The indices above represent the following classes in the MS COCO datasets:\n# \n# `['background', 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign']`\n# \n# How did I find out those indices? I just looked them up in the annotations of the MS COCO dataset.\n# \n# While these are the classes we want, we don't want them in this order. In our dataset, the classes happen to be in the following order as stated at the top of this notebook:\n# \n# `['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'traffic_light', 'motorcycle', 'bus', 'stop_sign']`\n# \n# For example, '`traffic_light`' is class ID 5 in our dataset but class ID 10 in the SSD300 MS COCO model. So the order in which I actually want to pick the 9 indices above is this:\n# \n# `[0, 3, 8, 1, 2, 10, 4, 6, 12]`\n# \n# So out of every 81 in the 324 elements, I want to pick the 9 elements above. 
This gives us the following 36 indices:\n\n# In[7]:\n\n\nn_classes_source = 81\nclasses_of_interest = int(flags.num_classes)\n\nsubsampling_indices = []\n# for i in range(int(324/n_classes_source)):\n# indices = np.array(classes_of_interest) + i * n_classes_source\n# subsampling_indices.append(indices)\n# subsampling_indices = list(np.concatenate(subsampling_indices))\n\n# print(subsampling_indices)\n\n\n# These are the indices of the 36 elements that we want to pick from both the bias vector and from the last axis of the kernel tensor.\n# \n# This was the detailed example for the '`conv4_3_norm_mbox_conf`' layer. And of course we haven't actually sub-sampled the weights for this layer yet, we have only figured out which elements we want to keep. The piece of code in the next section will perform the sub-sampling for all the classifier layers.\n\n# ## 4. Sub-sample the classifier weights\n# \n# The code in this section iterates over all the classifier layers of the source weights file and performs the following steps for each classifier layer:\n# \n# 1. Get the kernel and bias tensors from the source weights file.\n# 2. Compute the sub-sampling indices for the last axis. The first three axes of the kernel remain unchanged.\n# 3. Overwrite the corresponding kernel and bias tensors in the destination weights file with our newly created sub-sampled kernel and bias tensors.\n# \n# The second step does what was explained in the previous section.\n# \n# In case you want to **up-sample** the last axis rather than sub-sample it, simply set the `classes_of_interest` variable below to the length you want it to have. The added elements will be initialized either randomly or optionally with zeros. Check out the documentation of `sample_tensors()` for details.\n\n# In[39]:\n\n\n# TODO: Set the number of classes in the source weights file. Note that this number must include\n# the background class, so for MS COCO's 80 classes, this must be 80 + 1 = 81.\nn_classes_source = 81\n# TODO: Set the indices of the classes that you want to pick for the sub-sampled weight tensors.\n# In case you would like to just randomly sample a certain number of classes, you can just set\n# `classes_of_interest` to an integer instead of the list below. Either way, don't forget to\n# include the background class. That is, if you set an integer, and you want `n` positive classes,\n# then you must set `classes_of_interest = n + 1`.\n# classes_of_interest = [0, 3, 8, 1, 2, 10, 4, 6, 12]\nclasses_of_interest = int(flags.num_classes) # Uncomment this in case you want to just randomly sub-sample the last axis instead of providing a list of indices.\n\nfor name in classifier_names:\n # Get the trained weights for this layer from the source HDF5 weights file.\n kernel = weights_source_file[name][name]['kernel:0'].value\n bias = weights_source_file[name][name]['bias:0'].value\n\n # Get the shape of the kernel. 
We're interested in sub-sampling\n # the last dimension, 'o'.\n height, width, in_channels, out_channels = kernel.shape\n \n # Compute the indices of the elements we want to sub-sample.\n # Keep in mind that each classification predictor layer predicts multiple\n # bounding boxes for every spatial location, so we want to sub-sample\n # the relevant classes for each of these boxes.\n if isinstance(classes_of_interest, (list, tuple)):\n subsampling_indices = []\n for i in range(int(out_channels/n_classes_source)):\n indices = np.array(classes_of_interest) + i * n_classes_source\n subsampling_indices.append(indices)\n subsampling_indices = list(np.concatenate(subsampling_indices))\n elif isinstance(classes_of_interest, int):\n subsampling_indices = int(classes_of_interest * (out_channels/n_classes_source))\n else:\n raise ValueError(\"`classes_of_interest` must be either an integer or a list/tuple.\")\n \n # Sub-sample the kernel and bias.\n # The `sample_tensors()` function used below provides extensive\n # documentation, so don't hesitate to read it if you want to know\n # what exactly is going on here.\n new_kernel, new_bias = sample_tensors(weights_list=[kernel, bias],\n sampling_instructions=[height, width, in_channels, subsampling_indices],\n axes=[[3]], # The one bias dimension corresponds to the last kernel dimension.\n init=['gaussian', 'zeros'],\n mean=0.0,\n stddev=0.005)\n \n # Delete the old weights from the destination file.\n del weights_destination_file[name][name]['kernel:0']\n del weights_destination_file[name][name]['bias:0']\n # Create new datasets for the sub-sampled weights.\n weights_destination_file[name][name].create_dataset(name='kernel:0', data=new_kernel)\n weights_destination_file[name][name].create_dataset(name='bias:0', data=new_bias)\n\n# Make sure all data is written to our output file before this sub-routine exits.\nweights_destination_file.flush()\n\n\n# That's it, we're done.\n# \n# Let's just quickly inspect the shapes of the weights of the '`conv4_3_norm_mbox_conf`' layer in the destination weights file:\n\n# In[44]:\n\n\nconv4_3_norm_mbox_conf_kernel = weights_destination_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_destination_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.stack", "numpy.concatenate", "numpy.any", "numpy.random.uniform", "numpy.array" ], [ "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
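The sub-sampling walkthrough embedded in the second file of the record above reduces to a short piece of index arithmetic: the source SSD300 classifier head emits 4 boxes x 81 MS COCO classes = 324 confidence channels per spatial position, and only the 9 classes of interest per box (4 x 9 = 36 channels) are kept, in the target dataset's class order. A minimal sketch of that calculation, assuming only NumPy and the class indices [0, 3, 8, 1, 2, 10, 4, 6, 12] quoted in the script:

import numpy as np

n_classes_source = 81                                # 80 MS COCO classes + background
classes_of_interest = [0, 3, 8, 1, 2, 10, 4, 6, 12]  # kept classes, in the target dataset's order
n_boxes = 324 // n_classes_source                    # 4 default boxes per spatial position

# For each of the 4 boxes, keep the same 9 class channels, offset by i * 81.
subsampling_indices = np.concatenate(
    [np.array(classes_of_interest) + i * n_classes_source for i in range(n_boxes)]
).tolist()

print(len(subsampling_indices))  # 36
print(subsampling_indices[:9])   # [0, 3, 8, 1, 2, 10, 4, 6, 12]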
Yeachan-Heo/ray
[ "a73c488c74b1e01da3961db2eb538c43c29753f5", "a73c488c74b1e01da3961db2eb538c43c29753f5", "a73c488c74b1e01da3961db2eb538c43c29753f5", "a73c488c74b1e01da3961db2eb538c43c29753f5" ]
[ "rllib/agents/dqn/dqn_torch_model.py", "python/ray/serve/master.py", "rllib/tests/test_rollout_worker.py", "python/ray/parameter.py" ]
[ "import numpy as np\n\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils import try_import_torch\n\ntorch, nn = try_import_torch()\n\n\nclass DQNTorchModel(TorchModelV2):\n \"\"\"Extension of standard TorchModelV2 to provide dueling-Q functionality.\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n *,\n dueling=False,\n q_hiddens=(256, ),\n dueling_activation=\"relu\",\n use_noisy=False,\n sigma0=0.5,\n # TODO(sven): Move `add_layer_norm` into ModelCatalog as\n # generic option, then error if we use ParameterNoise as\n # Exploration type and do not have any LayerNorm layers in\n # the net.\n add_layer_norm=False):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n dueling (bool): Whether to build the advantage(A)/value(V) heads\n for DDQN. If True, Q-values are calculated as:\n Q = (A - mean[A]) + V. If False, raw NN output is interpreted\n as Q-values.\n q_hiddens (List[int]): List of layer-sizes after(!) the\n Advantages(A)/Value(V)-split. Hence, each of the A- and V-\n branches will have this structure of Dense layers. To define\n the NN before this A/V-split, use - as always -\n config[\"model\"][\"fcnet_hiddens\"].\n dueling_activation (str): The activation to use for all dueling\n layers (A- and V-branch). One of \"relu\", \"tanh\", \"linear\".\n use_noisy (bool): use noisy nets\n sigma0 (float): initial value of noisy nets\n add_layer_norm (bool): Enable layer norm (for param noise).\n \"\"\"\n\n super(DQNTorchModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n\n self.dueling = dueling\n ins = num_outputs\n\n # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n value_module = None\n if self.dueling:\n value_module = nn.Sequential()\n for i, n in enumerate(q_hiddens):\n advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n value_module.add_module(\"dueling_V_{}\".format(i),\n nn.Linear(ins, n))\n # Add activations if necessary.\n if dueling_activation == \"relu\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.ReLU())\n elif dueling_activation == \"tanh\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.Tanh())\n\n # Add LayerNorm after each Dense.\n if add_layer_norm:\n advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n value_module.add_module(\"LayerNorm_V_{}\".format(i),\n nn.LayerNorm(n))\n ins = n\n # Actual Advantages layer (nodes=num-actions) and\n # value layer (nodes=1).\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n value_module.add_module(\"V\", nn.Linear(ins, 1))\n # Non-dueling:\n # Q-value layer (use main module's outputs as Q-values).\n else:\n pass\n\n self.advantage_module = advantage_module\n self.value_module = value_module\n\n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\n\n Override this in your custom model to customize the Q output head.\n\n Arguments:\n model_out (Tensor): embedding from the model layers\n\n Returns:\n (action_scores, logits, dist) if num_atoms == 1, otherwise\n (action_scores, z, support_logits_per_action, logits, dist)\n \"\"\"\n\n return self.advantage_module(model_out)\n\n def get_state_value(self, model_out):\n 
\"\"\"Returns the state value prediction for the given state embedding.\"\"\"\n\n return self.value_module(model_out)\n\n def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):\n \"\"\"\n a common dense layer: y = w^{T}x + b\n a noisy layer: y = (w + \\\\epsilon_w*\\\\sigma_w)^{T}x +\n (b+\\\\epsilon_b*\\\\sigma_b)\n where \\epsilon are random variables sampled from factorized normal\n distributions and \\\\sigma are trainable variables which are expected to\n vanish along the training procedure\n \"\"\"\n in_size = int(action_in.shape[1])\n\n epsilon_in = torch.normal(\n mean=torch.zeros([in_size]), std=torch.ones([in_size]))\n epsilon_out = torch.normal(\n mean=torch.zeros([out_size]), std=torch.ones([out_size]))\n epsilon_in = self._f_epsilon(epsilon_in)\n epsilon_out = self._f_epsilon(epsilon_out)\n epsilon_w = torch.matmul(\n torch.unsqueeze(epsilon_in, -1),\n other=torch.unsqueeze(epsilon_out, 0))\n epsilon_b = epsilon_out\n\n sigma_w = torch.Tensor(\n data=np.random.uniform(\n low=-1.0 / np.sqrt(float(in_size)),\n high=1.0 / np.sqrt(float(in_size)),\n size=[in_size, out_size]),\n dtype=torch.float32,\n requires_grad=True)\n # TF noise generation can be unreliable on GPU\n # If generating the noise on the CPU,\n # lowering sigma0 to 0.1 may be helpful\n sigma_b = torch.Tensor(\n data=np.full(\n shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),\n requires_grad=True)\n w = torch.Tensor(\n data=np.full(\n shape=[in_size, out_size],\n fill_value=6 / np.sqrt(float(in_size) + float(out_size))),\n requires_grad=True)\n b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)\n action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \\\n + b + sigma_b * epsilon_b\n\n if not non_linear:\n return action_activation\n return nn.functional.relu(action_activation)\n\n def _f_epsilon(self, x):\n return torch.sign(x) * torch.pow(torch.abs(x), 0.5)\n", "import asyncio\nfrom collections import defaultdict\nimport os\nimport random\nimport time\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.serve.backend_worker import create_backend_worker\nfrom ray.serve.constants import (ASYNC_CONCURRENCY, SERVE_ROUTER_NAME,\n SERVE_PROXY_NAME, SERVE_METRIC_SINK_NAME)\nfrom ray.serve.http_proxy import HTTPProxyActor\nfrom ray.serve.kv_store import RayInternalKVStore\nfrom ray.serve.metric.exporter import MetricExporterActor\nfrom ray.serve.router import Router\nfrom ray.serve.utils import (async_retryable, format_actor_name,\n get_random_letters, logger)\n\nimport numpy as np\n\n# Used for testing purposes only. If this is set, the master actor will crash\n# after writing each checkpoint with the specified probability.\n_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0.0\nCHECKPOINT_KEY = \"serve-master-checkpoint\"\n\n\[email protected]\nclass ServeMaster:\n \"\"\"Responsible for managing the state of the serving system.\n\n The master actor implements fault tolerance by persisting its state in\n a new checkpoint each time a state change is made. If the actor crashes,\n the latest checkpoint is loaded and the state is recovered. Checkpoints\n are written/read using a provided KV-store interface.\n\n All hard state in the system is maintained by this actor and persisted via\n these checkpoints. 
Soft state required by other components is fetched by\n those actors from this actor on startup and updates are pushed out from\n this actor.\n\n All other actors started by the master actor are named, detached actors\n so they will not fate share with the master if it crashes.\n\n The following guarantees are provided for state-changing calls to the\n master actor:\n - If the call succeeds, the change was made and will be reflected in\n the system even if the master actor or other actors die unexpectedly.\n - If the call fails, the change may have been made but isn't guaranteed\n to have been. The client should retry in this case. Note that this\n requires all implementations here to be idempotent.\n \"\"\"\n\n async def __init__(self, cluster_name, start_http_proxy, http_node_id,\n http_proxy_host, http_proxy_port,\n metric_exporter_class):\n # Unique name of the serve cluster managed by this actor. Used to\n # namespace child actors and checkpoints.\n self.cluster_name = cluster_name\n # Used to read/write checkpoints.\n self.kv_store = RayInternalKVStore()\n # path -> (endpoint, methods).\n self.routes = {}\n # backend -> (backend_worker, backend_config, replica_config).\n self.backends = {}\n # backend -> replica_tags.\n self.replicas = defaultdict(list)\n # replicas that should be started if recovering from a checkpoint.\n self.replicas_to_start = defaultdict(list)\n # replicas that should be stopped if recovering from a checkpoint.\n self.replicas_to_stop = defaultdict(list)\n # backends that should be removed from the router if recovering from a\n # checkpoint.\n self.backends_to_remove = list()\n # endpoints that should be removed from the router if recovering from a\n # checkpoint.\n self.endpoints_to_remove = list()\n # endpoint -> traffic_dict\n self.traffic_policies = dict()\n # Dictionary of backend tag to dictionaries of replica tag to worker.\n # TODO(edoakes): consider removing this and just using the names.\n self.workers = defaultdict(dict)\n\n # Used to ensure that only a single state-changing operation happens\n # at any given time.\n self.write_lock = asyncio.Lock()\n\n # Cached handles to actors in the system.\n self.router = None\n self.http_proxy = None\n self.metric_exporter = None\n\n # If starting the actor for the first time, starts up the other system\n # components. If recovering, fetches their actor handles.\n self._get_or_start_metric_exporter(metric_exporter_class)\n self._get_or_start_router()\n if start_http_proxy:\n self._get_or_start_http_proxy(http_node_id, http_proxy_host,\n http_proxy_port)\n\n # NOTE(edoakes): unfortunately, we can't completely recover from a\n # checkpoint in the constructor because we block while waiting for\n # other actors to start up, and those actors fetch soft state from\n # this actor. Because no other tasks will start executing until after\n # the constructor finishes, if we were to run this logic in the\n # constructor it could lead to deadlock between this actor and a child.\n # However we do need to guarantee that we have fully recovered from a\n # checkpoint before any other state-changing calls run. We address this\n # by acquiring the write_lock and then posting the task to recover from\n # a checkpoint to the event loop. 
Other state-changing calls acquire\n # this lock and will be blocked until recovering from the checkpoint\n # finishes.\n checkpoint_key = CHECKPOINT_KEY\n if self.cluster_name is not None:\n checkpoint_key = \"{}:{}\".format(self.cluster_name, checkpoint_key)\n checkpoint = self.kv_store.get(checkpoint_key)\n if checkpoint is None:\n logger.debug(\"No checkpoint found\")\n else:\n await self.write_lock.acquire()\n asyncio.get_event_loop().create_task(\n self._recover_from_checkpoint(checkpoint))\n\n def _get_or_start_router(self):\n \"\"\"Get the router belonging to this serve cluster.\n\n If the router does not already exist, it will be started.\n \"\"\"\n router_name = format_actor_name(SERVE_ROUTER_NAME, self.cluster_name)\n try:\n self.router = ray.util.get_actor(router_name)\n except ValueError:\n logger.info(\"Starting router with name '{}'\".format(router_name))\n self.router = async_retryable(ray.remote(Router)).options(\n detached=True,\n name=router_name,\n max_concurrency=ASYNC_CONCURRENCY,\n max_restarts=-1,\n ).remote(cluster_name=self.cluster_name)\n\n def get_router(self):\n \"\"\"Returns a handle to the router managed by this actor.\"\"\"\n return [self.router]\n\n def _get_or_start_http_proxy(self, node_id, host, port):\n \"\"\"Get the HTTP proxy belonging to this serve cluster.\n\n If the HTTP proxy does not already exist, it will be started.\n \"\"\"\n proxy_name = format_actor_name(SERVE_PROXY_NAME, self.cluster_name)\n try:\n self.http_proxy = ray.util.get_actor(proxy_name)\n except ValueError:\n logger.info(\n \"Starting HTTP proxy with name '{}' on node '{}'\".format(\n proxy_name, node_id))\n self.http_proxy = async_retryable(HTTPProxyActor).options(\n detached=True,\n name=proxy_name,\n max_concurrency=ASYNC_CONCURRENCY,\n max_restarts=-1,\n resources={\n node_id: 0.01\n },\n ).remote(\n host, port, cluster_name=self.cluster_name)\n\n def get_http_proxy(self):\n \"\"\"Returns a handle to the HTTP proxy managed by this actor.\"\"\"\n return [self.http_proxy]\n\n def get_http_proxy_config(self):\n \"\"\"Called by the HTTP proxy on startup to fetch required state.\"\"\"\n return self.routes, self.get_router()\n\n def _get_or_start_metric_exporter(self, metric_exporter_class):\n \"\"\"Get the metric exporter belonging to this serve cluster.\n\n If the metric exporter does not already exist, it will be started.\n \"\"\"\n metric_sink_name = format_actor_name(SERVE_METRIC_SINK_NAME,\n self.cluster_name)\n try:\n self.metric_exporter = ray.util.get_actor(metric_sink_name)\n except ValueError:\n logger.info(\"Starting metric exporter with name '{}'\".format(\n metric_sink_name))\n self.metric_exporter = MetricExporterActor.options(\n detached=True,\n name=metric_sink_name).remote(metric_exporter_class)\n\n def get_metric_exporter(self):\n \"\"\"Returns a handle to the metric exporter managed by this actor.\"\"\"\n return [self.metric_exporter]\n\n def _checkpoint(self):\n \"\"\"Checkpoint internal state and write it to the KV store.\"\"\"\n logger.debug(\"Writing checkpoint\")\n start = time.time()\n checkpoint = pickle.dumps(\n (self.routes, self.backends, self.traffic_policies, self.replicas,\n self.replicas_to_start, self.replicas_to_stop,\n self.backends_to_remove, self.endpoints_to_remove))\n\n self.kv_store.put(CHECKPOINT_KEY, checkpoint)\n logger.debug(\"Wrote checkpoint in {:.2f}\".format(time.time() - start))\n\n if random.random() < _CRASH_AFTER_CHECKPOINT_PROBABILITY:\n logger.warning(\"Intentionally crashing after checkpoint\")\n os._exit(0)\n\n async def 
_recover_from_checkpoint(self, checkpoint_bytes):\n \"\"\"Recover the cluster state from the provided checkpoint.\n\n Performs the following operations:\n 1) Deserializes the internal state from the checkpoint.\n 2) Pushes the latest configuration to the HTTP proxy and router\n in case we crashed before updating them.\n 3) Starts/stops any worker replicas that are pending creation or\n deletion.\n\n NOTE: this requires that self.write_lock is already acquired and will\n release it before returning.\n \"\"\"\n assert self.write_lock.locked()\n\n start = time.time()\n logger.info(\"Recovering from checkpoint\")\n\n # Load internal state from the checkpoint data.\n (\n self.routes,\n self.backends,\n self.traffic_policies,\n self.replicas,\n self.replicas_to_start,\n self.replicas_to_stop,\n self.backends_to_remove,\n self.endpoints_to_remove,\n ) = pickle.loads(checkpoint_bytes)\n\n # Fetch actor handles for all of the backend replicas in the system.\n # All of these workers are guaranteed to already exist because they\n # would not be written to a checkpoint in self.workers until they\n # were created.\n for backend_tag, replica_tags in self.replicas.items():\n for replica_tag in replica_tags:\n replica_name = format_actor_name(replica_tag,\n self.cluster_name)\n self.workers[backend_tag][replica_tag] = ray.util.get_actor(\n replica_name)\n\n # Push configuration state to the router.\n # TODO(edoakes): should we make this a pull-only model for simplicity?\n for endpoint, traffic_policy in self.traffic_policies.items():\n await self.router.set_traffic.remote(endpoint, traffic_policy)\n\n for backend_tag, replica_dict in self.workers.items():\n for replica_tag, worker in replica_dict.items():\n await self.router.add_new_worker.remote(\n backend_tag, replica_tag, worker)\n\n for backend, (_, backend_config, _) in self.backends.items():\n await self.router.set_backend_config.remote(\n backend, backend_config)\n\n # Push configuration state to the HTTP proxy.\n await self.http_proxy.set_route_table.remote(self.routes)\n\n # Start/stop any pending backend replicas.\n await self._start_pending_replicas()\n await self._stop_pending_replicas()\n\n # Remove any pending backends and endpoints.\n await self._remove_pending_backends()\n await self._remove_pending_endpoints()\n\n logger.info(\n \"Recovered from checkpoint in {:.3f}s\".format(time.time() - start))\n\n self.write_lock.release()\n\n def get_backend_configs(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n backend_configs = {}\n for backend, (_, backend_config, _) in self.backends.items():\n backend_configs[backend] = backend_config\n return backend_configs\n\n def get_traffic_policies(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n return self.traffic_policies\n\n def _list_replicas(self, backend_tag):\n \"\"\"Used only for testing.\"\"\"\n return self.replicas[backend_tag]\n\n def get_traffic_policy(self, endpoint):\n \"\"\"Fetched by serve handles.\"\"\"\n return self.traffic_policies[endpoint]\n\n async def _start_backend_worker(self, backend_tag, replica_tag):\n \"\"\"Creates a backend worker and waits for it to start up.\n\n Assumes that the backend configuration has already been registered\n in self.backends.\n \"\"\"\n logger.debug(\"Starting worker '{}' for backend '{}'.\".format(\n replica_tag, backend_tag))\n (backend_worker, backend_config,\n replica_config) = self.backends[backend_tag]\n\n replica_name = format_actor_name(replica_tag, self.cluster_name)\n worker_handle = 
async_retryable(ray.remote(backend_worker)).options(\n detached=True,\n name=replica_name,\n max_restarts=-1,\n **replica_config.ray_actor_options).remote(\n backend_tag,\n replica_tag,\n replica_config.actor_init_args,\n cluster_name=self.cluster_name)\n # TODO(edoakes): we should probably have a timeout here.\n await worker_handle.ready.remote()\n return worker_handle\n\n async def _start_pending_replicas(self):\n \"\"\"Starts the pending backend replicas in self.replicas_to_start.\n\n Starts the worker, then pushes an update to the router to add it to\n the proper backend. If the worker has already been started, only\n updates the router.\n\n Clears self.replicas_to_start.\n \"\"\"\n for backend_tag, replicas_to_create in self.replicas_to_start.items():\n for replica_tag in replicas_to_create:\n # NOTE(edoakes): the replicas may already be created if we\n # failed after creating them but before writing a checkpoint.\n try:\n worker_handle = ray.util.get_actor(replica_tag)\n except ValueError:\n worker_handle = await self._start_backend_worker(\n backend_tag, replica_tag)\n\n self.replicas[backend_tag].append(replica_tag)\n self.workers[backend_tag][replica_tag] = worker_handle\n\n # Register the worker with the router.\n await self.router.add_new_worker.remote(\n backend_tag, replica_tag, worker_handle)\n\n self.replicas_to_start.clear()\n\n async def _stop_pending_replicas(self):\n \"\"\"Stops the pending backend replicas in self.replicas_to_stop.\n\n Stops workers by telling the router to remove them.\n\n Clears self.replicas_to_stop.\n \"\"\"\n for backend_tag, replicas_to_stop in self.replicas_to_stop.items():\n for replica_tag in replicas_to_stop:\n # NOTE(edoakes): the replicas may already be stopped if we\n # failed after stopping them but before writing a checkpoint.\n try:\n # Remove the replica from router.\n # This will also submit __ray_terminate__ on the worker.\n # NOTE(edoakes): we currently need to kill the worker from\n # the router to guarantee that the router won't submit any\n # more requests to it.\n await self.router.remove_worker.remote(\n backend_tag, replica_tag)\n except ValueError:\n pass\n\n self.replicas_to_stop.clear()\n\n async def _remove_pending_backends(self):\n \"\"\"Removes the pending backends in self.backends_to_remove.\n\n Clears self.backends_to_remove.\n \"\"\"\n for backend_tag in self.backends_to_remove:\n await self.router.remove_backend.remote(backend_tag)\n self.backends_to_remove.clear()\n\n async def _remove_pending_endpoints(self):\n \"\"\"Removes the pending endpoints in self.endpoints_to_remove.\n\n Clears self.endpoints_to_remove.\n \"\"\"\n for endpoint_tag in self.endpoints_to_remove:\n await self.router.remove_endpoint.remote(endpoint_tag)\n self.endpoints_to_remove.clear()\n\n def _scale_replicas(self, backend_tag, num_replicas):\n \"\"\"Scale the given backend to the number of replicas.\n\n NOTE: this does not actually start or stop the replicas, but instead\n adds the intention to start/stop them to self.workers_to_start and\n self.workers_to_stop. 
The caller is responsible for then first writing\n a checkpoint and then actually starting/stopping the intended replicas.\n This avoids inconsistencies with starting/stopping a worker and then\n crashing before writing a checkpoint.\n \"\"\"\n logger.debug(\"Scaling backend '{}' to {} replicas\".format(\n backend_tag, num_replicas))\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n assert num_replicas >= 0, (\"Number of replicas must be\"\n \" greater than or equal to 0.\")\n\n current_num_replicas = len(self.replicas[backend_tag])\n delta_num_replicas = num_replicas - current_num_replicas\n\n if delta_num_replicas > 0:\n logger.debug(\"Adding {} replicas to backend {}\".format(\n delta_num_replicas, backend_tag))\n for _ in range(delta_num_replicas):\n replica_tag = \"{}#{}\".format(backend_tag, get_random_letters())\n self.replicas_to_start[backend_tag].append(replica_tag)\n\n elif delta_num_replicas < 0:\n logger.debug(\"Removing {} replicas from backend {}\".format(\n -delta_num_replicas, backend_tag))\n assert len(self.replicas[backend_tag]) >= delta_num_replicas\n for _ in range(-delta_num_replicas):\n replica_tag = self.replicas[backend_tag].pop()\n if len(self.replicas[backend_tag]) == 0:\n del self.replicas[backend_tag]\n del self.workers[backend_tag][replica_tag]\n if len(self.workers[backend_tag]) == 0:\n del self.workers[backend_tag]\n\n self.replicas_to_stop[backend_tag].append(replica_tag)\n\n def get_all_worker_handles(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n return self.workers\n\n def get_all_backends(self):\n \"\"\"Used for validation by the API client.\"\"\"\n return list(self.backends.keys())\n\n def get_all_endpoints(self):\n \"\"\"Used for validation by the API client.\"\"\"\n return [endpoint for endpoint, methods in self.routes.values()]\n\n async def set_traffic(self, endpoint_name, traffic_policy_dictionary):\n \"\"\"Sets the traffic policy for the specified endpoint.\"\"\"\n async with self.write_lock:\n if endpoint_name not in self.get_all_endpoints():\n raise ValueError(\n \"Attempted to assign traffic for an endpoint '{}'\"\n \" that is not registered.\".format(endpoint_name))\n\n assert isinstance(traffic_policy_dictionary,\n dict), \"Traffic policy must be dictionary\"\n prob = 0\n for backend, weight in traffic_policy_dictionary.items():\n if weight < 0:\n raise ValueError(\n \"Attempted to assign a weight of {} to backend '{}'. 
\"\n \"Weights cannot be negative.\".format(weight, backend))\n prob += weight\n if backend not in self.backends:\n raise ValueError(\n \"Attempted to assign traffic to a backend '{}' that \"\n \"is not registered.\".format(backend))\n\n # These weights will later be plugged into np.random.choice, which\n # uses a tolerance of 1e-8.\n assert np.isclose(\n prob, 1, atol=1e-8\n ), \"weights must sum to 1, currently they sum to {}\".format(prob)\n\n self.traffic_policies[endpoint_name] = traffic_policy_dictionary\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n await self.router.set_traffic.remote(endpoint_name,\n traffic_policy_dictionary)\n\n async def create_endpoint(self, route, endpoint, methods):\n \"\"\"Create a new endpoint with the specified route and methods.\n\n If the route is None, this is a \"headless\" endpoint that will not\n be added to the HTTP proxy (can only be accessed via a handle).\n \"\"\"\n async with self.write_lock:\n # If this is a headless endpoint with no route, key the endpoint\n # based on its name.\n # TODO(edoakes): we should probably just store routes and endpoints\n # separately.\n if route is None:\n route = endpoint\n\n # TODO(edoakes): move this to client side.\n err_prefix = \"Cannot create endpoint.\"\n if route in self.routes:\n if self.routes[route] == (endpoint, methods):\n return\n else:\n raise ValueError(\n \"{} Route '{}' is already registered.\".format(\n err_prefix, route))\n\n if endpoint in self.get_all_endpoints():\n raise ValueError(\n \"{} Endpoint '{}' is already registered.\".format(\n err_prefix, endpoint))\n\n logger.info(\n \"Registering route {} to endpoint {} with methods {}.\".format(\n route, endpoint, methods))\n\n self.routes[route] = (endpoint, methods)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n await self.http_proxy.set_route_table.remote(self.routes)\n\n async def delete_endpoint(self, endpoint):\n \"\"\"Delete the specified endpoint.\n\n Does not modify any corresponding backends.\n \"\"\"\n logger.info(\"Deleting endpoint '{}'\".format(endpoint))\n async with self.write_lock:\n # This method must be idempotent. 
We should validate that the\n # specified endpoint exists on the client.\n for route, (route_endpoint, _) in self.routes.items():\n if route_endpoint == endpoint:\n route_to_delete = route\n break\n else:\n logger.info(\"Endpoint '{}' doesn't exist\".format(endpoint))\n return\n\n # Remove the routing entry.\n del self.routes[route_to_delete]\n\n # Remove the traffic policy entry if it exists.\n if endpoint in self.traffic_policies:\n del self.traffic_policies[endpoint]\n\n self.endpoints_to_remove.append(endpoint)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # updates to the HTTP proxy and router to avoid inconsistent state\n # if we crash after pushing the update.\n self._checkpoint()\n\n # Update the HTTP proxy first to ensure no new requests for the\n # endpoint are sent to the router.\n await self.http_proxy.set_route_table.remote(self.routes)\n await self._remove_pending_endpoints()\n\n async def create_backend(self, backend_tag, backend_config,\n replica_config):\n \"\"\"Register a new backend under the specified tag.\"\"\"\n async with self.write_lock:\n backend_worker = create_backend_worker(\n replica_config.func_or_class)\n\n # Save creator that starts replicas, the arguments to be passed in,\n # and the configuration for the backends.\n self.backends[backend_tag] = (backend_worker, backend_config,\n replica_config)\n\n self._scale_replicas(backend_tag, backend_config.num_replicas)\n\n # NOTE(edoakes): we must write a checkpoint before starting new\n # or pushing the updated config to avoid inconsistent state if we\n # crash while making the change.\n self._checkpoint()\n await self._start_pending_replicas()\n\n # Set the backend config inside the router\n # (particularly for max-batch-size).\n await self.router.set_backend_config.remote(\n backend_tag, backend_config)\n\n async def delete_backend(self, backend_tag):\n async with self.write_lock:\n # This method must be idempotent. We should validate that the\n # specified backend exists on the client.\n if backend_tag not in self.backends:\n return\n\n # Check that the specified backend isn't used by any endpoints.\n for endpoint, traffic_dict in self.traffic_policies.items():\n if backend_tag in traffic_dict:\n raise ValueError(\"Backend '{}' is used by endpoint '{}' \"\n \"and cannot be deleted. Please remove \"\n \"the backend from all endpoints and try \"\n \"again.\".format(backend_tag, endpoint))\n\n # Scale its replicas down to 0. 
This will also remove the backend\n # from self.backends and self.replicas.\n self._scale_replicas(backend_tag, 0)\n\n # Remove the backend's metadata.\n del self.backends[backend_tag]\n\n # Add the intention to remove the backend from the router.\n self.backends_to_remove.append(backend_tag)\n\n # NOTE(edoakes): we must write a checkpoint before removing the\n # backend from the router to avoid inconsistent state if we crash\n # after pushing the update.\n self._checkpoint()\n await self._stop_pending_replicas()\n await self._remove_pending_backends()\n\n async def update_backend_config(self, backend_tag, config_options):\n \"\"\"Set the config for the specified backend.\"\"\"\n async with self.write_lock:\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n assert isinstance(config_options, dict)\n backend_worker, backend_config, replica_config = self.backends[\n backend_tag]\n\n backend_config.update(config_options)\n self.backends[backend_tag] = (backend_worker, backend_config,\n replica_config)\n\n # Scale the replicas with the new configuration.\n self._scale_replicas(backend_tag, backend_config.num_replicas)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n\n # Inform the router about change in configuration\n # (particularly for setting max_batch_size).\n await self.router.set_backend_config.remote(\n backend_tag, backend_config)\n\n await self._start_pending_replicas()\n await self._stop_pending_replicas()\n\n def get_backend_config(self, backend_tag):\n \"\"\"Get the current config for the specified backend.\"\"\"\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n return self.backends[backend_tag][2]\n", "from collections import Counter\nimport gym\nimport numpy as np\nimport os\nimport random\nimport time\nimport unittest\n\nimport ray\nfrom ray.rllib.agents.pg import PGTrainer\nfrom ray.rllib.agents.a3c import A2CTrainer\nfrom ray.rllib.env.vector_env import VectorEnv\nfrom ray.rllib.evaluation.rollout_worker import RolloutWorker\nfrom ray.rllib.evaluation.metrics import collect_metrics\nfrom ray.rllib.evaluation.postprocessing import compute_advantages\nfrom ray.rllib.examples.policy.random_policy import RandomPolicy\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch\nfrom ray.rllib.utils.test_utils import check\nfrom ray.tune.registry import register_env\n\n\nclass MockPolicy(RandomPolicy):\n def compute_actions(self,\n obs_batch,\n state_batches=None,\n prev_action_batch=None,\n prev_reward_batch=None,\n episodes=None,\n explore=None,\n timestep=None,\n **kwargs):\n return np.array([random.choice([0, 1])] * len(obs_batch)), [], {}\n\n def postprocess_trajectory(self,\n batch,\n other_agent_batches=None,\n episode=None):\n assert episode is not None\n return compute_advantages(\n batch, 100.0, 0.9, use_gae=False, use_critic=False)\n\n\nclass BadPolicy(RandomPolicy):\n def compute_actions(self,\n obs_batch,\n state_batches=None,\n prev_action_batch=None,\n prev_reward_batch=None,\n episodes=None,\n explore=None,\n timestep=None,\n **kwargs):\n raise Exception(\"intentional error\")\n\n\nclass FailOnStepEnv(gym.Env):\n def __init__(self):\n self.observation_space = gym.spaces.Discrete(1)\n self.action_space = gym.spaces.Discrete(2)\n\n def reset(self):\n raise ValueError(\"kaboom\")\n\n def step(self, action):\n raise 
ValueError(\"kaboom\")\n\n\nclass MockEnv(gym.Env):\n def __init__(self, episode_length, config=None):\n self.episode_length = episode_length\n self.config = config\n self.i = 0\n self.observation_space = gym.spaces.Discrete(1)\n self.action_space = gym.spaces.Discrete(2)\n\n def reset(self):\n self.i = 0\n return self.i\n\n def step(self, action):\n self.i += 1\n return 0, 1, self.i >= self.episode_length, {}\n\n\nclass MockEnv2(gym.Env):\n def __init__(self, episode_length):\n self.episode_length = episode_length\n self.i = 0\n self.observation_space = gym.spaces.Discrete(100)\n self.action_space = gym.spaces.Discrete(2)\n\n def reset(self):\n self.i = 0\n return self.i\n\n def step(self, action):\n self.i += 1\n return self.i, 100, self.i >= self.episode_length, {}\n\n\nclass MockVectorEnv(VectorEnv):\n def __init__(self, episode_length, num_envs):\n self.envs = [MockEnv(episode_length) for _ in range(num_envs)]\n self.observation_space = gym.spaces.Discrete(1)\n self.action_space = gym.spaces.Discrete(2)\n self.num_envs = num_envs\n\n def vector_reset(self):\n return [e.reset() for e in self.envs]\n\n def reset_at(self, index):\n return self.envs[index].reset()\n\n def vector_step(self, actions):\n obs_batch, rew_batch, done_batch, info_batch = [], [], [], []\n for i in range(len(self.envs)):\n obs, rew, done, info = self.envs[i].step(actions[i])\n obs_batch.append(obs)\n rew_batch.append(rew)\n done_batch.append(done)\n info_batch.append(info)\n return obs_batch, rew_batch, done_batch, info_batch\n\n def get_unwrapped(self):\n return self.envs\n\n\nclass TestRolloutWorker(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n ray.init(num_cpus=5)\n\n @classmethod\n def tearDownClass(cls):\n ray.shutdown()\n\n def test_basic(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"), policy=MockPolicy)\n batch = ev.sample()\n for key in [\n \"obs\", \"actions\", \"rewards\", \"dones\", \"advantages\",\n \"prev_rewards\", \"prev_actions\"\n ]:\n self.assertIn(key, batch)\n self.assertGreater(np.abs(np.mean(batch[key])), 0)\n\n def to_prev(vec):\n out = np.zeros_like(vec)\n for i, v in enumerate(vec):\n if i + 1 < len(out) and not batch[\"dones\"][i]:\n out[i + 1] = v\n return out.tolist()\n\n self.assertEqual(batch[\"prev_rewards\"].tolist(),\n to_prev(batch[\"rewards\"]))\n self.assertEqual(batch[\"prev_actions\"].tolist(),\n to_prev(batch[\"actions\"]))\n self.assertGreater(batch[\"advantages\"][0], 1)\n\n def test_batch_ids(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"), policy=MockPolicy)\n batch1 = ev.sample()\n batch2 = ev.sample()\n self.assertEqual(len(set(batch1[\"unroll_id\"])), 1)\n self.assertEqual(len(set(batch2[\"unroll_id\"])), 1)\n self.assertEqual(\n len(set(SampleBatch.concat(batch1, batch2)[\"unroll_id\"])), 2)\n\n def test_global_vars_update(self):\n # Allow for Unittest run.\n ray.init(num_cpus=5, ignore_reinit_error=True)\n agent = A2CTrainer(\n env=\"CartPole-v0\",\n config={\n \"num_workers\": 1,\n \"lr_schedule\": [[0, 0.1], [100000, 0.000001]],\n })\n result = agent.train()\n for i in range(10):\n result = agent.train()\n print(\"num_steps_sampled={}\".format(\n result[\"info\"][\"num_steps_sampled\"]))\n print(\"num_steps_trained={}\".format(\n result[\"info\"][\"num_steps_trained\"]))\n print(\"num_steps_sampled={}\".format(\n result[\"info\"][\"num_steps_sampled\"]))\n print(\"num_steps_trained={}\".format(\n result[\"info\"][\"num_steps_trained\"]))\n if i == 0:\n 
self.assertGreater(result[\"info\"][\"learner\"][\"cur_lr\"], 0.01)\n if result[\"info\"][\"learner\"][\"cur_lr\"] < 0.07:\n break\n self.assertLess(result[\"info\"][\"learner\"][\"cur_lr\"], 0.07)\n\n def test_no_step_on_init(self):\n # Allow for Unittest run.\n ray.init(num_cpus=5, ignore_reinit_error=True)\n register_env(\"fail\", lambda _: FailOnStepEnv())\n pg = PGTrainer(env=\"fail\", config={\"num_workers\": 1})\n self.assertRaises(Exception, lambda: pg.train())\n\n def test_callbacks(self):\n counts = Counter()\n pg = PGTrainer(\n env=\"CartPole-v0\", config={\n \"num_workers\": 0,\n \"rollout_fragment_length\": 50,\n \"train_batch_size\": 50,\n \"callbacks\": {\n \"on_episode_start\": lambda x: counts.update({\"start\": 1}),\n \"on_episode_step\": lambda x: counts.update({\"step\": 1}),\n \"on_episode_end\": lambda x: counts.update({\"end\": 1}),\n \"on_sample_end\": lambda x: counts.update({\"sample\": 1}),\n },\n })\n pg.train()\n pg.train()\n pg.train()\n pg.train()\n self.assertGreater(counts[\"sample\"], 0)\n self.assertGreater(counts[\"start\"], 0)\n self.assertGreater(counts[\"end\"], 0)\n self.assertGreater(counts[\"step\"], 0)\n\n def test_query_evaluators(self):\n # Allow for Unittest run.\n ray.init(num_cpus=5, ignore_reinit_error=True)\n register_env(\"test\", lambda _: gym.make(\"CartPole-v0\"))\n pg = PGTrainer(\n env=\"test\",\n config={\n \"num_workers\": 2,\n \"rollout_fragment_length\": 5,\n \"num_envs_per_worker\": 2,\n })\n results = pg.workers.foreach_worker(\n lambda ev: ev.rollout_fragment_length)\n results2 = pg.workers.foreach_worker_with_index(\n lambda ev, i: (i, ev.rollout_fragment_length))\n results3 = pg.workers.foreach_worker(\n lambda ev: ev.foreach_env(lambda env: 1))\n self.assertEqual(results, [10, 10, 10])\n self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])\n self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])\n\n def test_reward_clipping(self):\n # clipping on\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy=MockPolicy,\n clip_rewards=True,\n batch_mode=\"complete_episodes\")\n self.assertEqual(max(ev.sample()[\"rewards\"]), 1)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episode_reward_mean\"], 1000)\n\n # clipping off\n ev2 = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy=MockPolicy,\n clip_rewards=False,\n batch_mode=\"complete_episodes\")\n self.assertEqual(max(ev2.sample()[\"rewards\"]), 100)\n result2 = collect_metrics(ev2, [])\n self.assertEqual(result2[\"episode_reward_mean\"], 1000)\n\n def test_hard_horizon(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=4,\n soft_horizon=False)\n samples = ev.sample()\n # Three logical episodes and correct episode resets (always after 4\n # steps).\n self.assertEqual(len(set(samples[\"eps_id\"])), 3)\n for i in range(4):\n self.assertEqual(np.argmax(samples[\"obs\"][i]), i)\n self.assertEqual(np.argmax(samples[\"obs\"][4]), 0)\n # 3 done values.\n self.assertEqual(sum(samples[\"dones\"]), 3)\n\n # A gym env's max_episode_steps is smaller than Trainer's horizon.\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=6,\n soft_horizon=False)\n samples = ev.sample()\n # 12 steps due to `complete_episodes` batch_mode.\n 
self.assertEqual(len(samples[\"eps_id\"]), 12)\n # Two logical episodes and correct episode resets (always after 6(!)\n # steps).\n self.assertEqual(len(set(samples[\"eps_id\"])), 2)\n # 2 done values after 6 and 12 steps.\n check(samples[\"dones\"], [\n False, False, False, False, False, True, False, False, False,\n False, False, True\n ])\n\n def test_soft_horizon(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=4,\n soft_horizon=True)\n samples = ev.sample()\n # three logical episodes\n self.assertEqual(len(set(samples[\"eps_id\"])), 3)\n # only 1 hard done value\n self.assertEqual(sum(samples[\"dones\"]), 1)\n\n def test_metrics(self):\n # Allow for Unittest run.\n ray.init(num_cpus=5, ignore_reinit_error=True)\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy=MockPolicy,\n batch_mode=\"complete_episodes\")\n remote_ev = RolloutWorker.as_remote().remote(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy=MockPolicy,\n batch_mode=\"complete_episodes\")\n ev.sample()\n ray.get(remote_ev.sample.remote())\n result = collect_metrics(ev, [remote_ev])\n self.assertEqual(result[\"episodes_this_iter\"], 20)\n self.assertEqual(result[\"episode_reward_mean\"], 10)\n\n def test_async(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n sample_async=True,\n policy=MockPolicy)\n batch = ev.sample()\n for key in [\"obs\", \"actions\", \"rewards\", \"dones\", \"advantages\"]:\n self.assertIn(key, batch)\n self.assertGreater(batch[\"advantages\"][0], 1)\n\n def test_auto_vectorization(self):\n ev = RolloutWorker(\n env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),\n policy=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=2,\n num_envs=8)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 8)\n indices = []\n for env in ev.async_env.vector_env.envs:\n self.assertEqual(env.unwrapped.config.worker_index, 0)\n indices.append(env.unwrapped.config.vector_index)\n self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])\n\n def test_batches_larger_when_vectorized(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(episode_length=8),\n policy=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=4,\n num_envs=4)\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n batch = ev.sample()\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 4)\n\n def test_vector_env_support(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),\n policy=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=10)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 8)\n\n def test_truncate_episodes(self):\n ev = 
RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy=MockPolicy,\n rollout_fragment_length=15,\n batch_mode=\"truncate_episodes\")\n batch = ev.sample()\n self.assertEqual(batch.count, 15)\n\n def test_complete_episodes(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy=MockPolicy,\n rollout_fragment_length=5,\n batch_mode=\"complete_episodes\")\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n\n def test_complete_episodes_packing(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy=MockPolicy,\n rollout_fragment_length=15,\n batch_mode=\"complete_episodes\")\n batch = ev.sample()\n self.assertEqual(batch.count, 20)\n self.assertEqual(\n batch[\"t\"].tolist(),\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n def test_filter_sync(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n time.sleep(2)\n ev.sample()\n filters = ev.get_filters(flush_after=True)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertNotEqual(obs_f.rs.n, 0)\n self.assertNotEqual(obs_f.buffer.n, 0)\n\n def test_get_filters(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n self.sample_and_flush(ev)\n filters = ev.get_filters(flush_after=False)\n time.sleep(2)\n filters2 = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n obs_f2 = filters2[DEFAULT_POLICY_ID]\n self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)\n self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)\n\n def test_sync_filter(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n obs_f = self.sample_and_flush(ev)\n\n # Current State\n filters = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n\n self.assertLessEqual(obs_f.buffer.n, 20)\n\n new_obsf = obs_f.copy()\n new_obsf.rs._n = 100\n ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})\n filters = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertGreaterEqual(obs_f.rs.n, 100)\n self.assertLessEqual(obs_f.buffer.n, 20)\n\n def sample_and_flush(self, ev):\n time.sleep(2)\n ev.sample()\n filters = ev.get_filters(flush_after=True)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertNotEqual(obs_f.rs.n, 0)\n self.assertNotEqual(obs_f.buffer.n, 0)\n return obs_f\n\n def test_extra_python_envs(self):\n extra_envs = {\"env_key_1\": \"env_value_1\", \"env_key_2\": \"env_value_2\"}\n self.assertFalse(\"env_key_1\" in os.environ)\n self.assertFalse(\"env_key_2\" in os.environ)\n RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy=MockPolicy,\n extra_python_environs=extra_envs)\n self.assertTrue(\"env_key_1\" in os.environ)\n self.assertTrue(\"env_key_2\" in os.environ)\n\n # reset to original\n del os.environ[\"env_key_1\"]\n del os.environ[\"env_key_2\"]\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n", "import logging\n\nimport numpy as np\n\nimport ray.ray_constants as ray_constants\n\nlogger = logging.getLogger(__name__)\n\n\nclass RayParams:\n \"\"\"A class used to store the parameters used by Ray.\n\n Attributes:\n redis_address (str): The address of the Redis server to connect to. 
If\n this address is not provided, then this command will start Redis, a\n raylet, a plasma store, a plasma manager, and some workers.\n It will also kill these processes when Python exits.\n redis_port (int): The port that the primary Redis shard should listen\n to. If None, then a random port will be chosen.\n redis_shard_ports: A list of the ports to use for the non-primary Redis\n shards.\n num_cpus (int): Number of CPUs to configure the raylet with.\n num_gpus (int): Number of GPUs to configure the raylet with.\n resources: A dictionary mapping the name of a resource to the quantity\n of that resource available.\n memory: Total available memory for workers requesting memory.\n object_store_memory: The amount of memory (in bytes) to start the\n object store with.\n redis_max_memory: The max amount of memory (in bytes) to allow redis\n to use, or None for no limit. Once the limit is exceeded, redis\n will start LRU eviction of entries. This only applies to the\n sharded redis tables (task and object tables).\n object_manager_port int: The port to use for the object manager.\n node_manager_port: The port to use for the node manager.\n node_ip_address (str): The IP address of the node that we are on.\n raylet_ip_address (str): The IP address of the raylet that this node\n connects to.\n object_id_seed (int): Used to seed the deterministic generation of\n object IDs. The same value can be used across multiple runs of the\n same job in order to generate the object IDs in a consistent\n manner. However, the same ID should not be used for different jobs.\n redirect_worker_output: True if the stdout and stderr of worker\n processes should be redirected to files.\n redirect_output (bool): True if stdout and stderr for non-worker\n processes should be redirected to files and false otherwise.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n redis_max_clients: If provided, attempt to configure Redis with this\n maxclients number.\n redis_password (str): Prevents external clients without the password\n from connecting to Redis if provided.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n worker_path (str): The path of the source code that will be run by the\n worker.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n include_webui: Boolean flag indicating whether to start the web\n UI, which displays the status of the Ray cluster. If this value is\n None, then the UI will be started if the relevant dependencies are\n present.\n webui_host: The host to bind the web UI server to. Can either be\n localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).\n By default, this is set to localhost to prevent access from\n external machines.\n logging_level: Logging level, default will be logging.INFO.\n logging_format: Logging format, default contains a timestamp,\n filename, line number, and message. 
See ray_constants.py.\n plasma_store_socket_name (str): If provided, it will specify the socket\n name used by the plasma store.\n raylet_socket_name (str): If provided, it will specify the socket path\n used by the raylet process.\n temp_dir (str): If provided, it will specify the root temporary\n directory for the Ray process.\n include_log_monitor (bool): If True, then start a log monitor to\n monitor the log files for all processes on this node and push their\n contents to Redis.\n autoscaling_config: path to autoscaling config file.\n include_java (bool): If True, the raylet backend can also support\n Java worker.\n java_worker_options (list): The command options for Java worker.\n load_code_from_local: Whether load code from local file or from GCS.\n _internal_config (str): JSON configuration for overriding\n RayConfig defaults. For testing purposes ONLY.\n \"\"\"\n\n def __init__(self,\n redis_address=None,\n num_cpus=None,\n num_gpus=None,\n resources=None,\n memory=None,\n object_store_memory=None,\n redis_max_memory=None,\n redis_port=None,\n redis_shard_ports=None,\n object_manager_port=None,\n node_manager_port=None,\n node_ip_address=None,\n raylet_ip_address=None,\n object_id_seed=None,\n driver_mode=None,\n redirect_worker_output=None,\n redirect_output=None,\n num_redis_shards=None,\n redis_max_clients=None,\n redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,\n plasma_directory=None,\n worker_path=None,\n huge_pages=False,\n include_webui=None,\n webui_host=\"localhost\",\n logging_level=logging.INFO,\n logging_format=ray_constants.LOGGER_FORMAT,\n plasma_store_socket_name=None,\n raylet_socket_name=None,\n temp_dir=None,\n include_log_monitor=None,\n autoscaling_config=None,\n include_java=False,\n java_worker_options=None,\n load_code_from_local=False,\n _internal_config=None):\n self.object_id_seed = object_id_seed\n self.redis_address = redis_address\n self.num_cpus = num_cpus\n self.num_gpus = num_gpus\n self.memory = memory\n self.object_store_memory = object_store_memory\n self.resources = resources\n self.redis_max_memory = redis_max_memory\n self.redis_port = redis_port\n self.redis_shard_ports = redis_shard_ports\n self.object_manager_port = object_manager_port\n self.node_manager_port = node_manager_port\n self.node_ip_address = node_ip_address\n self.raylet_ip_address = raylet_ip_address\n self.driver_mode = driver_mode\n self.redirect_worker_output = redirect_worker_output\n self.redirect_output = redirect_output\n self.num_redis_shards = num_redis_shards\n self.redis_max_clients = redis_max_clients\n self.redis_password = redis_password\n self.plasma_directory = plasma_directory\n self.worker_path = worker_path\n self.huge_pages = huge_pages\n self.include_webui = include_webui\n self.webui_host = webui_host\n self.plasma_store_socket_name = plasma_store_socket_name\n self.raylet_socket_name = raylet_socket_name\n self.temp_dir = temp_dir\n self.include_log_monitor = include_log_monitor\n self.autoscaling_config = autoscaling_config\n self.include_java = include_java\n self.java_worker_options = java_worker_options\n self.load_code_from_local = load_code_from_local\n self._internal_config = _internal_config\n self._check_usage()\n\n def update(self, **kwargs):\n \"\"\"Update the settings according to the keyword arguments.\n\n Args:\n kwargs: The keyword arguments to set corresponding fields.\n \"\"\"\n for arg in kwargs:\n if hasattr(self, arg):\n setattr(self, arg, kwargs[arg])\n else:\n raise ValueError(\"Invalid RayParams parameter in\"\n \" update: 
%s\" % arg)\n\n self._check_usage()\n\n def update_if_absent(self, **kwargs):\n \"\"\"Update the settings when the target fields are None.\n\n Args:\n kwargs: The keyword arguments to set corresponding fields.\n \"\"\"\n for arg in kwargs:\n if hasattr(self, arg):\n if getattr(self, arg) is None:\n setattr(self, arg, kwargs[arg])\n else:\n raise ValueError(\"Invalid RayParams parameter in\"\n \" update_if_absent: %s\" % arg)\n\n self._check_usage()\n\n def _check_usage(self):\n if self.resources is not None:\n assert \"CPU\" not in self.resources, (\n \"'CPU' should not be included in the resource dictionary. Use \"\n \"num_cpus instead.\")\n assert \"GPU\" not in self.resources, (\n \"'GPU' should not be included in the resource dictionary. Use \"\n \"num_gpus instead.\")\n\n if self.redirect_worker_output is not None:\n raise DeprecationWarning(\n \"The redirect_worker_output argument is deprecated. To \"\n \"control logging to the driver, use the 'log_to_driver' \"\n \"argument to 'ray.init()'\")\n\n if self.redirect_output is not None:\n raise DeprecationWarning(\n \"The redirect_output argument is deprecated.\")\n\n # Parse the numpy version.\n numpy_version = np.__version__.split(\".\")\n numpy_major, numpy_minor = int(numpy_version[0]), int(numpy_version[1])\n if numpy_major <= 1 and numpy_minor < 16:\n logger.warning(\"Using ray with numpy < 1.16.0 will result in slow \"\n \"serialization. Upgrade numpy if using with ray.\")\n" ]
[ [ "numpy.zeros" ], [ "numpy.isclose" ], [ "numpy.argmax", "numpy.zeros_like", "numpy.mean" ], [ "numpy.__version__.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
huynhngoc/deoxys
[ "b2e9936b723807e129fda36d8d6131ca00db558f", "b2e9936b723807e129fda36d8d6131ca00db558f" ]
[ "src/deoxys/experiment/postprocessor.py", "src/deoxys/model/callbacks.py" ]
[ "from ..loaders import load_data\nfrom ..utils import load_json_config\n\n\nfrom deoxys_image.patch_sliding import get_patch_indice\nfrom deoxys_vis import read_csv\n\nimport numpy as np\nimport h5py\nimport pandas as pd\nimport os\nfrom time import time\nimport shutil\nimport matplotlib.pyplot as plt\nimport warnings\n\n\nclass H5Metric:\n def __init__(self, ref_file, save_file, metric_name='score',\n predicted_dataset='predicted',\n target_dataset='y', batch_size=4,\n map_file=None, map_column=None):\n self.metric_name = metric_name\n self.ref_file = ref_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if target_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n\n self.batch_size = batch_size\n\n self.res_file = save_file\n self.map_file = map_file\n self.map_column = map_column\n\n def get_img_batch(self):\n self.scores = []\n\n if self.map_file is None:\n if type(self.predicted) == str:\n with h5py.File(self.ref_file, 'r') as f:\n size = f[self.target].shape[0]\n\n for i in range(0, size, self.batch_size):\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[self.predicted][i:i+self.batch_size]\n targets = f[self.target][i:i+self.batch_size]\n yield targets, predicted\n else:\n for pred, target in zip(self.predicted, self.target):\n with h5py.File(self.ref_file, 'r') as f:\n size = f[target].shape[0]\n\n for i in range(0, size, self.batch_size):\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[pred][i:i+self.batch_size]\n targets = f[target][i:i+self.batch_size]\n yield targets, predicted\n else: # handle 3d with different sizes\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n for idx in map_data:\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[self.predicted][str(idx)][:]\n targets = f[self.target][str(idx)][:]\n yield np.expand_dims(targets, axis=0), np.expand_dims(\n predicted, axis=0)\n\n def update_score(self, scores):\n self.scores.extend(scores)\n\n def save_score(self):\n if os.path.isfile(self.res_file):\n df = pd.read_csv(self.res_file)\n df[f'{self.metric_name}'] = self.scores\n else:\n df = pd.DataFrame(self.scores, columns=[f'{self.metric_name}'])\n\n df.to_csv(self.res_file, index=False)\n\n def post_process(self, **kwargs):\n for targets, prediction in self.get_img_batch():\n scores = self.calculate_metrics(\n targets, prediction, **kwargs)\n self.update_score(scores)\n\n self.save_score()\n\n def calculate_metrics(targets, predictions, **kwargs):\n raise NotImplementedError\n\n\nclass H5CalculateFScore(H5Metric):\n def __init__(self, ref_file, save_file, metric_name='f1_score',\n predicted_dataset='predicted',\n target_dataset='y', batch_size=4, beta=1, threshold=None,\n map_file=None, map_column=None):\n super().__init__(ref_file, save_file, metric_name,\n predicted_dataset,\n target_dataset, batch_size,\n map_file, map_column)\n self.threshold = 0.5 if threshold is None else threshold\n self.beta = beta\n\n def calculate_metrics(self, y_true, y_pred, **kwargs):\n assert len(y_true) == len(y_pred), \"Shape not match\"\n eps = 1e-8\n size = len(y_true.shape)\n reduce_ax = tuple(range(1, size))\n\n y_pred = (y_pred > self.threshold).astype(y_pred.dtype)\n if y_pred.ndim - y_true.ndim == 1 and y_pred.shape[-1] == 1:\n y_pred = y_pred[..., 0]\n\n true_positive = np.sum(y_pred * y_true, axis=reduce_ax)\n target_positive = 
np.sum(y_true, axis=reduce_ax)\n predicted_positive = np.sum(y_pred, axis=reduce_ax)\n\n fb_numerator = (1 + self.beta ** 2) * true_positive + eps\n fb_denominator = (\n (self.beta ** 2) * target_positive + predicted_positive + eps\n )\n\n return fb_numerator / fb_denominator\n\n\nclass H5MetaDataMapping:\n def __init__(self, ref_file, save_file, folds, fold_prefix='fold',\n dataset_names=None):\n self.ref_file = ref_file\n self.save_file = save_file\n if fold_prefix:\n self.folds = ['{}_{}'.format(\n fold_prefix, fold) for fold in folds]\n else:\n self.folds = folds\n\n self.dataset_names = dataset_names\n\n def post_process(self, *args, **kwargs):\n data = {dataset_name: [] for dataset_name in self.dataset_names}\n for fold in self.folds:\n with h5py.File(self.ref_file, 'r') as f:\n for dataset_name in self.dataset_names:\n meta_data = f[fold][dataset_name][:]\n dtype = meta_data.dtype.name\n if 'int' not in dtype and 'float' not in dtype:\n meta_data = meta_data.astype(str)\n data[dataset_name].extend(meta_data)\n\n df = pd.DataFrame(data)\n df.to_csv(self.save_file, index=False)\n\n\nclass H5Merge2dSlice:\n def __init__(self, ref_file, map_file, map_column, merge_file, save_file,\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'):\n self.ref_file = ref_file\n self.map_file = map_file\n self.map_column = map_column\n self.merge_file = merge_file\n self.save_file = save_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if input_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n self.inputs = [f'{key}/{input_dataset}' for key in keys]\n\n def post_process(self):\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n unique_val = []\n\n first, last = map_data[0], map_data[-1]\n\n tmp = np.concatenate([[first], map_data, [last]])\n indice = np.where(tmp[1:] != tmp[:-1])[0]\n indice = np.concatenate([[0], indice, [len(map_data)]])\n\n if type(self.inputs) == str:\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n for i in range(len(indice) - 1):\n start = indice[i]\n end = indice[i+1]\n\n unique_val.append(map_data[start])\n\n assert map_data[start] == map_data[end-1], \"id not match\"\n\n curr_name = str(map_data[start])\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n else:\n inputs = self.inputs[0].split('/')[-1]\n target = self.target[0].split('/')[-1]\n predicted = self.predicted[0].split('/')[-1]\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(inputs)\n mf.create_group(target)\n mf.create_group(predicted)\n\n offset = 0\n curr_data_idx = 0\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n for i in 
range(len(indice) - 1):\n if indice[i] - offset >= total:\n offset = indice[i]\n curr_data_idx += 1\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n map_start, map_end = indice[i], indice[i+1]\n\n start = indice[i] - offset\n end = indice[i+1] - offset\n\n unique_val.append(map_data[map_start])\n\n assert map_data[map_start] == map_data[map_end -\n 1], \"id not match\"\n\n curr_name = str(map_data[map_start])\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n df = pd.DataFrame(data=unique_val, columns=[self.map_column])\n df.to_csv(self.save_file, index=False)\n\n\nclass H5Transform3d:\n def __init__(self, ref_file, map_file, map_column, merge_file,\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'):\n self.ref_file = ref_file\n self.map_file = map_file\n self.map_column = map_column\n self.merge_file = merge_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if input_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n self.inputs = [f'{key}/{input_dataset}' for key in keys]\n\n def post_process(self):\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n first, last = map_data[0], map_data[-1]\n\n tmp = np.concatenate([[first], map_data, [last]])\n indice = np.where(tmp[1:] != tmp[:-1])[0]\n indice = np.concatenate([[0], indice, [len(map_data)]])\n\n if type(self.inputs) == str:\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n for i in range(len(map_data)):\n curr_name = str(map_data[i])\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n else: # pragma: no cover\n inputs = self.inputs[0].split('/')[-1]\n target = self.target[0].split('/')[-1]\n predicted = self.predicted[0].split('/')[-1]\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(inputs)\n mf.create_group(target)\n mf.create_group(predicted)\n\n offset = 0\n curr_data_idx = 0\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n for i in range(len(map_data)):\n if i - offset >= total:\n offset = i\n curr_data_idx += 1\n\n with 
h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n curr_name = str(map_data[i])\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n # df = pd.DataFrame(data=unique_val, columns=[self.map_column])\n # df.to_csv(self.save_file, index=False)\n\n\nclass H5MergePatches: # pragma: no cover\n def __init__(self, ref_file, predicted_file,\n map_column, merge_file, save_file,\n patch_size, overlap,\n folds, fold_prefix='fold',\n original_input_dataset='x',\n original_target_dataset='y',\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'\n ):\n\n self.ref_file = ref_file\n self.predicted_file = predicted_file\n self.map_column = map_column\n self.merge_file = merge_file\n self.save_file = save_file\n\n self.ref_inputs = original_input_dataset\n self.ref_targets = original_target_dataset\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n if fold_prefix:\n self.folds = ['{}_{}'.format(\n fold_prefix, fold) for fold in folds]\n else:\n self.folds = folds\n\n self.patch_size = patch_size\n self.overlap = overlap\n\n print('merge images of patch', patch_size)\n\n def _save_inputs_target_to_merge_file(self, fold, meta, index):\n with h5py.File(self.ref_file, 'r') as f:\n inputs = f[fold][self.ref_inputs][index]\n targets = f[fold][self.ref_targets][index]\n\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n meta, data=inputs, compression=\"gzip\")\n mf[self.target].create_dataset(\n meta, data=targets, compression=\"gzip\")\n\n def _merge_patches_to_merge_file(self, meta, start_cursor):\n with h5py.File(self.merge_file, 'r') as mf:\n shape = mf[self.target][meta].shape[:-1]\n\n # fix patch size\n if '__iter__' not in dir(self.patch_size):\n self.patch_size = [self.patch_size] * len(shape)\n\n indice = get_patch_indice(shape, self.patch_size, self.overlap)\n next_cursor = start_cursor + len(indice)\n\n with h5py.File(self.predicted_file, 'r') as f:\n data = f[self.predicted][start_cursor: next_cursor]\n\n predicted = np.zeros(shape)\n weight = np.zeros(shape)\n\n for i in range(len(indice)):\n x, y, z = indice[i]\n w, h, d = self.patch_size\n predicted[x:x+w, y:y+h, z:z+d] = predicted[x:x+w, y:y+h, z:z+d] \\\n + data[i][..., 0]\n weight[x:x+w, y:y+h, z:z+d] = weight[x:x+w, y:y+h, z:z+d] \\\n + np.ones(self.patch_size)\n\n predicted = (predicted/weight)[..., np.newaxis]\n\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n meta, data=predicted, compression=\"gzip\")\n\n return next_cursor\n\n def post_process(self):\n # create merge file\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n data = []\n start_cursor = 0\n for fold in self.folds:\n with h5py.File(self.ref_file, 'r') as f:\n meta_data = f[fold][self.map_column][:]\n data.extend(meta_data)\n for 
index, meta in enumerate(meta_data):\n self._save_inputs_target_to_merge_file(\n fold, str(meta), index)\n start_cursor = self._merge_patches_to_merge_file(\n str(meta), start_cursor)\n\n # create map file\n df = pd.DataFrame(data, columns=[self.map_column])\n df.to_csv(self.save_file, index=False)\n\n\nclass AnalysisPerEpoch: # pragma: no cover\n _markers = ['o-', 'v-', '^-', '<-', '>-',\n '1-', '2-', 's-', 'p-', 'P-',\n '*-', '+-', 'x-', 'D-', 'd-'] * 10 + ['--']\n\n def __init__(self, save_path, log_file_templates, epochs,\n map_column='patient idx', monitor='', model_name=''):\n self.save_path = save_path\n self.log_file_templates = log_file_templates\n self.epochs = epochs\n self.map_column = map_column\n self.monitor = monitor\n self.model_name = model_name or save_path.split('/')[-2]\n\n def post_process(self):\n patient_dice_per_epoch = []\n monitor = self.monitor\n epochs = self.epochs\n map_column = self.map_column\n for epoch in epochs:\n # load each log file\n data = pd.read_csv(self.log_file_templates.format(epoch))\n\n # metric column\n if not monitor:\n monitor = data.columns[-1]\n\n patient_dice_per_epoch.append(data[monitor].values)\n\n # Plot dice per epoch\n patient_idx = data[map_column].values\n\n # print(patient_dice_per_epoch)\n all_data = np.vstack(patient_dice_per_epoch)\n\n df = pd.DataFrame(all_data, columns=patient_idx)\n df.index = epochs\n df.index.name = 'epoch'\n # df['mean'] = df.mean(axis=1)\n df['mean'] = df[[pid for pid in patient_idx]].mean(axis=1)\n best_epoch = df['mean'].idxmax()\n best_metric = df['mean'].max()\n\n plt.figure(figsize=(10, 8))\n df.plot(style=self._markers[:len(patient_idx) + 1], ax=plt.gca())\n plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n plt.title(\n f'Model {self.model_name}' +\n f'\\nBest Epoch {best_epoch} - Mean {monitor} {best_metric:.6f}')\n plt.savefig(self.save_path + '/dice_per_epoch.png')\n plt.savefig(self.save_path + '/dice_per_epoch.pdf')\n plt.close('all')\n\n # save to csv\n df.to_csv(self.save_path + '/dice_per_epoch.csv')\n\n violin_df = df[df.columns[:-1]]\n group_df = violin_df.reset_index().melt(\n id_vars=violin_df.columns[:-len(patient_idx)],\n var_name=map_column, value_name=monitor)\n\n def Q1(x):\n return x.quantile(0.25)\n\n def Q3(x):\n return x.quantile(0.75)\n\n def to_int(x):\n return x.astype(int)\n\n group_df.groupby('epoch').agg(\n {monitor: ['min', Q1, 'median', Q3, 'max', 'mean', 'std']})\n\n with open(self.save_path + '/val_summary.txt') as f:\n f.write(str(group_df))\n\n\nclass PostProcessor:\n MODEL_PATH = '/model'\n MODEL_NAME = '/model.{epoch:03d}.h5'\n BEST_MODEL_PATH = '/best'\n PREDICTION_PATH = '/prediction'\n PREDICTION_NAME = '/prediction.{epoch:03d}.h5'\n LOG_FILE = '/logs.csv'\n PERFORMANCE_PATH = '/performance'\n TEST_OUTPUT_PATH = '/test'\n PREDICT_TEST_NAME = '/prediction_test.h5'\n\n def __init__(self, log_base_path='logs',\n temp_base_path='',\n analysis_base_path='',\n run_test=False, new_dataset_params=None):\n self.temp_base_path = temp_base_path\n self.log_base_path = log_base_path\n\n self.update_data_reader(new_dataset_params)\n\n try:\n model_path = log_base_path + self.MODEL_PATH\n model_files = os.listdir(model_path)\n\n self.epochs = [int(filename[-6:-3])\n for filename in model_files]\n except Exception as e: # pragma: no cover\n print('No saved models', e)\n warnings.warn('load_best_model does not work')\n\n if len(self.epochs) == 0:\n print('No saved models in', model_path)\n warnings.warn('load_best_model does not work')\n\n self.run_test = 
run_test\n\n def update_data_reader(self, new_dataset_params):\n model_path = self.log_base_path + self.MODEL_PATH\n\n sample_model_filename = model_path + '/' + os.listdir(model_path)[0]\n\n with h5py.File(sample_model_filename, 'r') as f:\n config = f.attrs['deoxys_config']\n config = load_json_config(config)\n dataset_params = config['dataset_params']\n # update until level 2\n if new_dataset_params is not None:\n for key in new_dataset_params:\n if key in dataset_params:\n dataset_params[key].update(new_dataset_params[key])\n else:\n dataset_params[key] = new_dataset_params[key]\n\n self.dataset_filename = dataset_params['config']['filename']\n self.data_reader = load_data(dataset_params)\n self.dataset_params = dataset_params\n\n def _best_epoch_from_raw_log(self, monitor='', mode='max'):\n print(F'Finding best model based on the {mode}imum {monitor} from '\n 'raw logs')\n\n epochs = self.epochs\n if len(epochs) == 0:\n print('No saved models in', self.log_base_path)\n raise Exception('load_best_model does not work')\n\n logger_path = self.log_base_path + self.LOG_FILE\n if os.path.isfile(logger_path):\n df = read_csv(logger_path, usecols=['epoch', monitor])\n df['epoch'] = df['epoch'] + 1\n # only compare models that were saved\n min_df = df[df['epoch'].isin(epochs)].min()\n min_epoch = df[df['epoch'].isin(epochs)].idxmin()\n max_df = df[df['epoch'].isin(epochs)].max()\n max_epoch = df[df['epoch'].isin(epochs)].idxmax()\n if mode == 'min':\n val = min_df[monitor]\n best_epoch = min_epoch[monitor] + 1\n else:\n val = max_df[monitor]\n best_epoch = max_epoch[monitor] + 1\n else:\n warnings.warn('No log files to check for best model')\n\n print('Best epoch:', best_epoch, f', with {monitor}={val}')\n\n return best_epoch\n\n def get_best_model(self, monitor='', mode='max',\n keep_best_only=True): # pragma: no cover\n best_epoch = self._best_epoch_from_raw_log(monitor, mode)\n\n epochs = self.epochs\n\n for epoch in epochs:\n if epoch == best_epoch or not keep_best_only:\n shutil.copy(\n self.temp_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n\n return self.log_base_path + self.MODEL_PATH + \\\n self.MODEL_NAME.format(epoch=best_epoch)\n\n\nclass SegmentationPostProcessor(PostProcessor):\n MODEL_PATH = '/model'\n MODEL_NAME = '/model.{epoch:03d}.h5'\n BEST_MODEL_PATH = '/best'\n PREDICTION_PATH = '/prediction'\n PREDICTION_NAME = '/prediction.{epoch:03d}.h5'\n LOG_FILE = '/logs.csv'\n PERFORMANCE_PATH = '/performance'\n PREDICTED_IMAGE_PATH = '/images'\n TEST_OUTPUT_PATH = '/test'\n PREDICT_TEST_NAME = '/prediction_test.h5'\n SINGLE_MAP_PATH = '/single_map'\n SINGLE_MAP_NAME = '/logs.{epoch:03d}.csv'\n\n MAP_PATH = '/logs'\n MAP_NAME = '/logs.{epoch:03d}.csv'\n\n TEST_SINGLE_MAP_NAME = '/single_result.csv'\n TEST_MAP_NAME = '/result.csv'\n\n def __init__(self, log_base_path='logs',\n temp_base_path='',\n analysis_base_path='',\n map_meta_data=None, main_meta_data='',\n run_test=False, new_dataset_params=None):\n self.temp_base_path = temp_base_path\n self.log_base_path = log_base_path\n self.analysis_base_path = analysis_base_path or log_base_path\n\n if not os.path.exists(self.analysis_base_path):\n os.mkdir(self.analysis_base_path)\n\n if not os.path.exists(self.analysis_base_path + self.PREDICTION_PATH):\n os.mkdir(self.analysis_base_path + self.PREDICTION_PATH)\n\n self.update_data_reader(new_dataset_params)\n try:\n temp_prediction_path = temp_base_path + 
self.PREDICTION_PATH\n predicted_files = os.listdir(temp_prediction_path)\n\n self.epochs = [int(filename[-6:-3])\n for filename in predicted_files]\n except Exception as e: # pragma: no cover\n print(\"Error while getting epochs by temp folder:\", e)\n print(\"Using post-process log files as alternative\")\n try:\n log_files = os.listdir(self.log_base_path + self.MAP_PATH)\n self.epochs = [int(filename[-7:-4])\n for filename in log_files]\n except Exception as e:\n print(\"Error while getting epochs by log files:\", e)\n print(\"Using dummy epochs as alternative.\")\n self.epochs = [5]\n print(\"Post-process only works on test data.\")\n\n if map_meta_data:\n if type(map_meta_data) == str:\n self.map_meta_data = map_meta_data.split(',')\n else:\n self.map_meta_data = map_meta_data\n else:\n self.map_meta_data = ['patient_idx', 'slice_idx']\n\n if main_meta_data:\n self.main_meta_data = main_meta_data\n else:\n self.main_meta_data = self.map_meta_data[0]\n\n self.run_test = run_test\n\n # def update_data_reader(self, new_dataset_params):\n # model_path = self.log_base_path + self.MODEL_PATH\n\n # sample_model_filename = model_path + '/' + os.listdir(model_path)[0]\n\n # with h5py.File(sample_model_filename, 'r') as f:\n # config = f.attrs['deoxys_config']\n # config = load_json_config(config)\n # dataset_params = config['dataset_params']\n # # update until level 2\n # if new_dataset_params is not None:\n # for key in new_dataset_params:\n # if key in dataset_params:\n # dataset_params[key].update(new_dataset_params[key])\n # else:\n # dataset_params[key] = new_dataset_params[key]\n\n # self.dataset_filename = dataset_params['config']['filename']\n # self.data_reader = load_data(dataset_params)\n # self.dataset_params = dataset_params\n\n def map_2d_meta_data(self):\n print('mapping 2d meta data')\n if not self.run_test:\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n\n if not os.path.exists(map_folder):\n os.makedirs(map_folder)\n map_filename = map_folder + self.SINGLE_MAP_NAME\n\n for epoch in self.epochs:\n H5MetaDataMapping(\n ref_file=self.dataset_filename,\n save_file=map_filename.format(epoch=epoch),\n folds=self.data_reader.val_folds,\n fold_prefix='',\n dataset_names=self.map_meta_data).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n H5MetaDataMapping(\n ref_file=self.dataset_filename,\n save_file=map_filename,\n folds=self.data_reader.test_folds,\n fold_prefix='',\n dataset_names=self.map_meta_data).post_process()\n\n return self\n\n def calculate_fscore_single(self):\n if not self.run_test:\n print('calculating dice score per items in val set')\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n map_filename = map_folder + self.SINGLE_MAP_NAME\n for epoch in self.epochs:\n H5CalculateFScore(\n predicted_path.format(epoch=epoch),\n map_filename.format(epoch=epoch)\n ).post_process()\n else:\n print('calculating dice score per items in test set')\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n\n H5CalculateFScore(\n predicted_path,\n map_filename\n ).post_process()\n\n return self\n\n def calculate_fscore_single_3d(self):\n self.calculate_fscore_single()\n 
if not self.run_test:\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n try:\n os.rename(map_folder, main_log_folder)\n except Exception as e:\n print(\"Files exist:\", e)\n print(\"Copying new logs file\")\n os.rename(main_log_folder,\n main_log_folder + '-' + str(time()))\n os.rename(map_folder, main_log_folder)\n\n for epoch in self.epochs:\n H5Transform3d(\n ref_file=self.temp_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n map_file=main_log_folder +\n self.MAP_NAME.format(epoch=epoch),\n map_column=self.main_meta_data,\n merge_file=self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n ).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n try:\n os.rename(map_filename, main_result_file_name)\n except Exception as e:\n print(\"Files exist:\", e)\n print(\"Copying new result file\")\n os.rename(main_result_file_name,\n main_result_file_name + '-' + str(time()) + '.csv')\n os.rename(map_filename, main_result_file_name)\n\n H5Transform3d(\n ref_file=self.temp_base_path + self.TEST_OUTPUT_PATH +\n self.PREDICT_TEST_NAME,\n map_file=main_result_file_name,\n map_column=self.main_meta_data,\n merge_file=test_folder + self.PREDICT_TEST_NAME,\n ).post_process()\n\n def merge_2d_slice(self):\n print('merge 2d slice to 3d images')\n if not self.run_test:\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n map_filename = map_folder + self.SINGLE_MAP_NAME\n\n merge_path = self.log_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n\n if not os.path.exists(main_log_folder):\n os.makedirs(main_log_folder)\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5Merge2dSlice(\n predicted_path.format(epoch=epoch),\n map_filename.format(epoch=epoch),\n self.main_meta_data,\n merge_path.format(epoch=epoch),\n main_log_filename.format(epoch=epoch)\n ).post_process()\n else:\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n H5Merge2dSlice(\n predicted_path,\n map_filename,\n self.main_meta_data,\n merge_path,\n main_result_file_name\n ).post_process()\n\n return self\n\n def merge_3d_patches(self): # pragma: no cover\n print('merge 3d patches to 3d images')\n if not self.run_test:\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n # map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n # map_filename = map_folder + self.SINGLE_MAP_NAME\n\n merge_path = self.analysis_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n\n if not os.path.exists(main_log_folder):\n os.makedirs(main_log_folder)\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5MergePatches(\n ref_file=self.dataset_filename,\n predicted_file=predicted_path.format(epoch=epoch),\n map_column=self.main_meta_data,\n merge_file=merge_path.format(epoch=epoch),\n 
save_file=main_log_filename.format(epoch=epoch),\n patch_size=self.data_reader.patch_size,\n overlap=self.data_reader.overlap,\n folds=self.data_reader.val_folds,\n fold_prefix='',\n original_input_dataset=self.data_reader.x_name,\n original_target_dataset=self.data_reader.y_name,\n ).post_process()\n else:\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n\n H5MergePatches(\n ref_file=self.dataset_filename,\n predicted_file=predicted_path,\n map_column=self.main_meta_data,\n merge_file=merge_path,\n save_file=main_result_file_name,\n patch_size=self.data_reader.patch_size,\n overlap=self.data_reader.overlap,\n folds=self.data_reader.test_folds,\n fold_prefix='',\n original_input_dataset=self.data_reader.x_name,\n original_target_dataset=self.data_reader.y_name,\n ).post_process()\n\n return self\n\n def calculate_fscore(self):\n print('calculating dice score per 3d image')\n if not self.run_test:\n merge_path = self.analysis_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5CalculateFScore(\n merge_path.format(epoch=epoch),\n main_log_filename.format(epoch=epoch),\n map_file=main_log_filename.format(epoch=epoch),\n map_column=self.main_meta_data\n ).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n H5CalculateFScore(\n merge_path,\n main_result_file_name,\n map_file=main_result_file_name,\n map_column=self.main_meta_data\n ).post_process()\n\n return self\n\n def get_best_model(self, monitor='', mode='max', keep_best_only=True,\n use_raw_log=False):\n print('finding best model')\n\n epochs = self.epochs\n\n if use_raw_log:\n best_epoch = self._best_epoch_from_raw_log(monitor, mode)\n\n else:\n res_df = pd.DataFrame(epochs, columns=['epochs'])\n\n results = []\n results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME\n\n for epoch in epochs:\n df = pd.read_csv(results_path.format(epoch=epoch))\n if not monitor:\n monitor = df.columns[-1]\n\n results.append(df[monitor].mean())\n\n res_df[monitor] = results\n if mode == 'max':\n best_epoch = epochs[res_df[monitor].argmax()]\n else:\n best_epoch = epochs[res_df[monitor].argmin()]\n\n res_df.to_csv(self.log_base_path + '/log_new.csv', index=False)\n\n print('Best epoch:', best_epoch)\n\n if keep_best_only:\n print('Keep best results only. 
Deleting prediction files...')\n for epoch in epochs:\n if epoch != best_epoch:\n predicted_file = self.analysis_base_path + \\\n self.PREDICTION_PATH + \\\n self.PREDICTION_NAME.format(epoch=epoch)\n if os.path.exists(predicted_file):\n os.remove(predicted_file)\n elif self.log_base_path != self.analysis_base_path:\n # move the best prediction to main folder\n if os.path.exists(self.analysis_base_path +\n self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch)\n ):\n shutil.copy(\n self.analysis_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n\n os.remove(self.analysis_base_path +\n self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n elif self.log_base_path != self.analysis_base_path:\n # Copy the best prediction to the main folder\n shutil.copy(self.analysis_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=best_epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=best_epoch))\n\n return self.log_base_path + self.MODEL_PATH + \\\n self.MODEL_NAME.format(epoch=best_epoch)\n\n def get_best_performance_images(self, monitor='', best_num=2, worst_num=2):\n epochs = self.epochs\n results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME\n\n results = []\n for epoch in epochs:\n # only plot things in prediction\n if os.path.exists(self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch)):\n df = pd.read_csv(results_path.format(epoch=epoch))\n\n if not monitor:\n monitor = df.columns[-1]\n largest_indice = df[monitor].nlargest(best_num, keep='all')\n smallest_indice = df[monitor].nsmallest(\n worst_num, keep='all')\n\n indice = list(largest_indice.index) + \\\n list(smallest_indice.index)\n\n # `values` will implicitly cast all item to the same type\n # take out each column first, then use `values`\n results.append(\n {'file_name': self.PREDICTION_NAME.format(epoch=epoch),\n 'ids': df[self.main_meta_data].values[indice],\n 'values': df[monitor].values[indice]})\n\n return results\n\n def get_best_performance_images_test_set(\n self, monitor='', best_num=2, worst_num=2):\n\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n df = pd.read_csv(main_result_file_name)\n\n if not monitor:\n monitor = df.columns[-1]\n largest_indice = df[monitor].nlargest(best_num, keep='all')\n smallest_indice = df[monitor].nsmallest(\n worst_num, keep='all')\n\n indice = list(largest_indice.index) + \\\n list(smallest_indice.index)\n\n # `values` will implicitly cast all item to the same type\n # take out each column first, then use `values`\n return {'ids': df[self.main_meta_data].values[indice],\n 'values': df[monitor].values[indice]}\n", "# -*- coding: utf-8 -*-\n\n__author__ = \"Ngoc Huynh Bao\"\n__email__ = \"[email protected]\"\n\n\nfrom ..keras.utils import deserialize_keras_object\nfrom ..keras.callbacks import *\n\nimport warnings\nimport numpy as np\nimport io\nimport csv\nimport os\nimport h5py\nfrom collections import OrderedDict, Iterable\n\nfrom ..utils import Singleton\nfrom ..database import Tables, HDF5Attr, LogAttr\n\n\nclass DeoxysModelCallback(Callback): # noqa: F405\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.deoxys_model = None\n\n def set_deoxys_model(self, deoxys_model):\n if not self.deoxys_model:\n self.deoxys_model = deoxys_model\n\n\nclass 
EvaluationCheckpoint(DeoxysModelCallback): # pragma: no cover\n \"\"\"\n Evaluate test after some epochs. Only use when cross validation\n to avoid data leakage.\n \"\"\"\n\n def __init__(self, filename=None, period=1,\n separator=',', append=False):\n\n self.period = period\n self.epochs_since_last_save = 0\n\n self.sep = separator\n self.filename = filename\n self.append = append\n self.writer = None\n self.keys = None\n self.append_header = True\n\n self.file_flags = ''\n self._open_args = {'newline': '\\n'}\n super().__init__()\n\n def on_train_begin(self, logs=None):\n if self.append:\n if os.path.exists(self.filename):\n with open(self.filename, 'r' + self.file_flags) as f:\n self.append_header = not bool(len(f.readline()))\n mode = 'a'\n else:\n mode = 'w'\n self.csv_file = io.open(self.filename,\n mode + self.file_flags,\n **self._open_args)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epochs_since_last_save += 1\n\n if self.epochs_since_last_save >= self.period:\n\n print('\\nEvaluating test set...')\n self.epochs_since_last_save = 0\n score = self.deoxys_model.evaluate_test(verbose=1)\n\n def handle_value(k):\n is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n if isinstance(k, str):\n return k\n elif isinstance(k, Iterable) and not is_zero_dim_ndarray:\n if k.ndim == 1:\n return k[0]\n else:\n return '\"[%s]\"' % (', '.join(map(str, k)))\n else:\n return k\n\n if self.keys is None:\n self.keys = [key for key in list(logs.keys())\n if 'val_' not in key]\n\n if self.model.stop_training:\n # We set NA so that csv parsers do not fail in this last epoch.\n logs = dict([(k, logs[k] if k in logs else 'NA')\n for k in self.keys])\n\n if not self.writer:\n class CustomDialect(csv.excel):\n delimiter = self.sep\n fieldnames = ['epoch'] + self.keys\n\n self.writer = csv.DictWriter(self.csv_file,\n fieldnames=fieldnames,\n dialect=CustomDialect)\n if self.append_header:\n self.writer.writeheader()\n\n row_dict = OrderedDict({'epoch': epoch})\n row_dict.update(\n (key, handle_value(score[i]))\n for i, key in enumerate(self.keys) if i < len(score))\n self.writer.writerow(row_dict)\n self.csv_file.flush()\n\n\nclass DBLogger(Callback): # noqa: F405 # pragma: no cover\n\n def __init__(self, dbclient, session):\n \"\"\"\n Log performance to database\n\n Parameters\n ----------\n dbclient : deoxys.database.DBClient\n The database client that stores all data\n session : str, int, or ObjectId, depending of the provider of DBClient\n Session id\n \"\"\"\n self.dbclient = dbclient\n self.session = session\n\n self.keys = None\n\n super().__init__()\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n\n def handle_value(k):\n is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n if isinstance(k, str):\n return k\n elif isinstance(k, Iterable) and not is_zero_dim_ndarray:\n if k.ndim == 1:\n if isinstance(k[0], np.generic):\n return np.asscalar(k[0])\n else:\n return k[0]\n else:\n return '\"[%s]\"' % (', '.join(map(str, k)))\n else:\n if isinstance(k, np.generic):\n return np.asscalar(k)\n else:\n return k\n\n if self.keys is None:\n self.keys = sorted(logs.keys())\n\n if self.model.stop_training:\n # We set NA so that it won't fail in this last epoch.\n logs = dict([(k, logs[k] if k in logs else 'NA')\n for k in self.keys])\n\n identifier = {LogAttr.SESSION_ID: self.session,\n LogAttr.EPOCH: epoch + 1}\n perf_log = OrderedDict(identifier)\n perf_log.update((key, handle_value(logs[key])) for key in self.keys)\n\n 
self.dbclient.update_insert(Tables.LOGS, identifier, perf_log)\n\n\nclass PredictionCheckpoint(DeoxysModelCallback):\n \"\"\"\n Predict test in every number of epochs\n \"\"\"\n\n _max_size = 1\n\n def __init__(self, filepath=None, period=1, use_original=False,\n dbclient=None, session=None):\n self.period = period\n self.epochs_since_last_save = 0\n\n self.filepath = filepath\n self.use_original = use_original\n\n self.dbclient = dbclient\n self.session = session\n\n self._data_description = None\n\n super().__init__()\n\n @property\n def data_information(self):\n if self._data_description is None:\n dr = self.deoxys_model.data_reader\n\n self._data_description = dr.val_generator.description\n\n return self._data_description\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epochs_since_last_save += 1\n\n if self.epochs_since_last_save >= self.period:\n self.epochs_since_last_save = 0\n\n data_info = self.data_information\n total_size = np.product(\n data_info[0]['shape']) * data_info[0]['total'] / 1e9\n\n print('\\nPredicting validation data...')\n\n # Get file name\n filepath = self.filepath.format(epoch=epoch + 1, **logs)\n\n # predict directly for data of size < max_size (1GB)\n if len(data_info) == 1 and total_size < self._max_size:\n # Predict all data\n predicted = self.deoxys_model.predict_val(verbose=1)\n\n # Create the h5 file\n hf = h5py.File(filepath, 'w')\n hf.create_dataset('predicted', data=predicted,\n compression=\"gzip\")\n hf.close()\n\n if self.use_original:\n original_data = self.deoxys_model.data_reader.original_val\n\n for key, val in original_data.items():\n hf = h5py.File(filepath, 'a')\n hf.create_dataset(key, data=val, compression=\"gzip\")\n hf.close()\n else:\n # Create data from val_generator\n x = None\n y = None\n\n val_gen = self.deoxys_model.data_reader.val_generator\n data_gen = val_gen.generate()\n\n for _ in range(val_gen.total_batch):\n next_x, next_y = next(data_gen)\n # handle multiple inputs\n if type(next_x) == list:\n next_x = next_x[0]\n if x is None:\n x = next_x\n y = next_y\n else:\n x = np.concatenate((x, next_x))\n y = np.concatenate((y, next_y))\n\n hf = h5py.File(filepath, 'a')\n hf.create_dataset('x', data=x, compression=\"gzip\")\n hf.create_dataset('y', data=y, compression=\"gzip\")\n hf.close()\n\n # for large data of same size, predict each chunk\n elif len(data_info) == 1:\n val_gen = self.deoxys_model.data_reader.val_generator\n data_gen = val_gen.generate()\n\n next_x, next_y = next(data_gen)\n predicted = self.deoxys_model.predict(next_x, verbose=1)\n\n input_shape = (data_info[0]['total'],) + data_info[0]['shape']\n input_chunks = (1,) + data_info[0]['shape']\n target_shape = (data_info[0]['total'],) + next_y.shape[1:]\n target_chunks = (1,) + next_y.shape[1:]\n\n with h5py.File(filepath, 'w') as hf:\n hf.create_dataset('x',\n shape=input_shape, chunks=input_chunks,\n compression='gzip')\n hf.create_dataset('y',\n shape=target_shape, chunks=target_chunks,\n compression='gzip')\n\n hf.create_dataset('predicted',\n shape=target_shape, chunks=target_chunks,\n compression='gzip')\n # handle multiple inputs\n if type(next_x) == list:\n next_x = next_x[0]\n with h5py.File(filepath, 'a') as hf:\n next_index = len(next_x)\n hf['x'][:next_index] = next_x\n hf['y'][:next_index] = next_y\n hf['predicted'][:next_index] = predicted\n\n for _ in range(val_gen.total_batch - 1):\n next_x, next_y = next(data_gen)\n predicted = self.deoxys_model.predict(next_x, verbose=1)\n\n # handle multiple inputs\n if type(next_x) == 
list:\n next_x = next_x[0]\n\n curr_index = next_index\n next_index = curr_index + len(next_x)\n\n with h5py.File(filepath, 'a') as hf:\n hf['x'][curr_index:next_index] = next_x\n hf['y'][curr_index:next_index] = next_y\n hf['predicted'][curr_index:next_index] = predicted\n\n # data of different size\n else:\n val_gen = self.deoxys_model.data_reader.val_generator\n data_gen = val_gen.generate()\n\n for curr_info_idx, info in enumerate(data_info):\n next_x, next_y = next(data_gen)\n predicted = self.deoxys_model.predict(next_x, verbose=1)\n\n input_shape = (info['total'],) + info['shape']\n input_chunks = (1,) + info['shape']\n target_shape = (info['total'],) + next_y.shape[1:]\n target_chunks = (1,) + next_y.shape[1:]\n if curr_info_idx == 0:\n mode = 'w'\n else:\n mode = 'a'\n with h5py.File(filepath, mode) as hf:\n hf.create_dataset(f'{curr_info_idx:02d}/x',\n shape=input_shape,\n chunks=input_chunks,\n compression='gzip')\n hf.create_dataset(f'{curr_info_idx:02d}/y',\n shape=target_shape,\n chunks=target_chunks,\n compression='gzip')\n\n hf.create_dataset(f'{curr_info_idx:02d}/predicted',\n shape=target_shape,\n chunks=target_chunks,\n compression='gzip')\n\n # handle multiple inputs\n if type(next_x) == list:\n next_x = next_x[0]\n with h5py.File(filepath, 'a') as hf:\n next_index = len(next_x)\n hf[f'{curr_info_idx:02d}/x'][:next_index] = next_x\n hf[f'{curr_info_idx:02d}/y'][:next_index] = next_y\n hf[f'{curr_info_idx:02d}/predicted'][\n :next_index] = predicted\n\n while next_index < info['total']:\n next_x, next_y = next(data_gen)\n predicted = self.deoxys_model.predict(\n next_x, verbose=1)\n\n # handle multiple inputs\n if type(next_x) == list:\n next_x = next_x[0]\n\n curr_index = next_index\n next_index = curr_index + len(next_x)\n\n with h5py.File(filepath, 'a') as hf:\n hf[f'{curr_info_idx:02d}/x'][\n curr_index:next_index] = next_x\n hf[f'{curr_info_idx:02d}/y'][\n curr_index:next_index] = next_y\n hf[f'{curr_info_idx:02d}/predicted'][\n curr_index:next_index] = predicted\n\n if self.dbclient:\n item = OrderedDict(\n {HDF5Attr.SESSION_ID: self.session,\n HDF5Attr.EPOCH: epoch + 1})\n item.update(\n {HDF5Attr.FILE_LOCATION: os.path.abspath(filepath)})\n\n self.dbclient.insert(Tables.PREDICTIONS, item)\n\n\nclass DeoxysModelCheckpoint(DeoxysModelCallback,\n ModelCheckpoint): # noqa: F405\n\n def __init__(self, filepath, monitor='val_loss', verbose=0,\n save_best_only=False, save_weights_only=False,\n mode='auto', period=1,\n dbclient=None, session=None):\n super().__init__(filepath=filepath,\n monitor=monitor, verbose=verbose,\n save_best_only=save_best_only,\n save_weights_only=save_weights_only,\n mode=mode, period=period)\n\n self.dbclient = dbclient\n self.session = session\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epochs_since_last_save += 1\n if self.epochs_since_last_save >= self.period:\n self.epochs_since_last_save = 0\n filepath = self.filepath.format(epoch=epoch + 1, **logs)\n abs_path = os.path.abspath(filepath)\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn('Can save best model '\n ' only with % s available, '\n 'skipping.' 
% (self.monitor), RuntimeWarning)\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n print('\\nEpoch %05d: %s improved from '\n '%0.5f to %0.5f,'\n ' saving model to %s'\n % (epoch + 1, self.monitor, self.best,\n current, filepath))\n self.best = current\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True)\n else:\n self.deoxys_model.save(filepath, overwrite=True)\n\n if self.dbclient:\n item = OrderedDict(\n {HDF5Attr.SESSION_ID: self.session,\n HDF5Attr.EPOCH: epoch + 1})\n item.update(\n {HDF5Attr.FILE_LOCATION: abs_path})\n\n self.dbclient.insert(Tables.MODELS, item)\n else:\n if self.verbose > 0:\n print('\\nEpoch %05d: %s did not improve from '\n '%0.5f' %\n (epoch + 1, self.monitor, self.best))\n else:\n if self.verbose > 0:\n print('\\nEpoch %05d: saving model to %s' %\n (epoch + 1, filepath))\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True)\n else:\n self.deoxys_model.save(filepath, overwrite=True)\n\n if self.dbclient:\n item = OrderedDict(\n {HDF5Attr.SESSION_ID: self.session,\n HDF5Attr.EPOCH: epoch + 1})\n item.update({HDF5Attr.FILE_LOCATION: abs_path})\n self.dbclient.insert(Tables.MODELS, item)\n\n\nclass Callbacks(metaclass=Singleton):\n \"\"\"\n A singleton that contains all the registered customized callbacks\n \"\"\"\n\n def __init__(self):\n self._callbacks = {\n }\n\n def register(self, key, callback):\n if not issubclass(callback, Callback): # noqa: F405\n raise ValueError(\n \"The customized callback has to be a subclass\"\n + \" of keras.callbacks.Callback\"\n )\n\n if key in self._callbacks:\n raise KeyError(\n \"Duplicated key, please use another key for this callback\"\n )\n else:\n self._callbacks[key] = callback\n\n def unregister(self, key):\n if key in self._callbacks:\n del self._callbacks[key]\n\n @property\n def callbacks(self):\n return self._callbacks\n\n\ndef register_callback(key, callback):\n \"\"\"\n Register the customized callback.\n If the key name is already registered, it will raise a KeyError exception\n\n Parameters\n ----------\n key: str\n The unique key-name of the callback\n callback: tensorflow.keras.callbacks.Callback\n the customized callback class\n \"\"\"\n Callbacks().register(key, callback)\n\n\ndef unregister_callback(key):\n \"\"\"\n Remove the registered callback with the key-name\n\n Parameters\n ----------\n key: str\n The key-name of the callback to be removed\n \"\"\"\n Callbacks().unregister(key)\n\n\ndef callback_from_config(config):\n if 'class_name' not in config:\n raise ValueError('class_name is needed to define callback')\n\n if 'config' not in config:\n # auto add empty config for callback with only class_name\n config['config'] = {}\n return deserialize_keras_object(config,\n module_objects=globals(),\n custom_objects=Callbacks().callbacks,\n printable_module_name='callback')\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "pandas.read_csv", "numpy.expand_dims", "matplotlib.pyplot.title", "pandas.DataFrame", "matplotlib.pyplot.savefig", "numpy.concatenate", "numpy.ones", "numpy.where", "matplotlib.pyplot.close", "numpy.zeros", "numpy.sum", "numpy.vstack", "matplotlib.pyplot.figure" ], [ "numpy.concatenate", "numpy.product", "numpy.asscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jimilee/image-classification
[ "abd07abbbf3ed2e38bb7fda6f4bfeb28dd7ffaae" ]
[ "utils/losses.py" ]
[ "from torch import nn, Tensor\nfrom typing import Union\nfrom torch.nn import CrossEntropyLoss\n\n\nclass LabelSmoothCrossEntropy(nn.Module):\n def __init__(self, smoothing=0.1):\n super().__init__()\n assert smoothing < 1.0\n self.smoothing = smoothing\n self.confidence = 1. - smoothing\n self.log_softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n pred = self.log_softmax(pred)\n nll_loss = -pred.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)\n smooth_loss = -pred.mean(dim=-1)\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.mean()\n\n\nclass DistillationLoss(nn.Module):\n \"\"\"Distilling the Knowledge in a Neural Network\n https://arxiv.org/pdf/1503.02531.pdf\n \"\"\"\n def __init__(self, alpha: float = 0.95, temp: Union[float, int] = 6) -> None:\n super().__init__()\n self.alpha = alpha\n self.temp = temp\n self.kd_loss = nn.KLDivLoss()\n self.entropy_loss = nn.CrossEntropyLoss()\n self.log_softmax = nn.LogSoftmax(dim=1)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, pred_student: Tensor, pred_teacher: Tensor, target: Tensor) -> Tensor:\n loss = self.kd_loss(self.log_softmax(pred_student / self.temp), self.softmax(pred_teacher / self.temp)) * (self.alpha * self.temp * self.temp)\n loss += self.entropy_loss(pred_student, target) * (1. - self.alpha)\n return loss\n" ]
[ [ "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "torch.nn.LogSoftmax", "torch.nn.KLDivLoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kasoju2712/Science_of_success
[ "fbaa92a8d035f7869162bf45338fc9e174492b98" ]
[ "bibmatch/parse_wos.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\nimport os\nimport numpy as np\nimport pandas as pd\nfrom bibmatch.authorclass import author\n\ndef adf2author(aid, adf):\n author_dict = {}\n author_dict['all_names'] = set(adf['FullName'])\n author_dict['prefered_name'] = sorted(author_dict['all_names'], key = len)[-1]\n author_dict['articles'] = set([t for t in adf['Title'].dropna()])\n author_dict['co_authors'] = set([name.strip() for namelist in adf['CoAuthors'].dropna() for name in namelist.split('|') if len(name.strip()) > 0])\n author_dict['institutions'] = set([t for t in adf['Organization'].dropna()])\n a = author(author_dict)\n a.set_id(aid)\n a.process_names()\n return a\n\ndef parse_wos_authors(full_df, groupby_col='AuthorDAIS'):\n alist = [adf2author(aid, adf) for aid, adf in full_df.groupby(groupby_col)]\n return alist\n\ndef load_wos_data(name = 'article', year_list = None, columns = None,\n duplicate_subset = ['ArticleID'], path2rawdata = '',\n dropna = None, isindict = None, verbose = False):\n\n if year_list is None:\n year_list = [1900] + list(range(1945, 2017))\n year_list = map(str, year_list)\n\n file_df_list = []\n ifile = 0\n for year in year_list:\n for df_file in os.listdir(os.path.join(path2rawdata, name)):\n if \"WR_\" + year in df_file:\n\n fname = os.path.join(path2rawdata, name, df_file)\n subdf = pd.read_hdf(fname, mode = 'r')\n\n if type(columns) is list:\n subdf = subdf[columns]\n\n if type(dropna) is list:\n subdf.dropna(subset = dropna, inplace = True, how = 'any')\n\n if type(isindict) is dict:\n for isinkey, isinlist in isindict.items():\n subdf = subdf[isin_sorted(subdf[isinkey], isinlist)]\n\n # date tag to keep most recent entry\n filetag = df_file.split('_')[2]\n subdf['filetag'] = filetag\n\n file_df_list.append(subdf)\n ifile += 1\n if verbose and ifile % verbose == 0:\n print(ifile)\n\n df = pd.concat(file_df_list)\n\n # take most recent entries according to filetag\n df.sort_values(by = 'filetag', inplace = True)\n df.drop_duplicates(subset = duplicate_subset, keep = 'last', inplace = True)\n del df['filetag']\n\n if verbose:\n print(\"Final DF Shape\", df.shape)\n\n return df\n\ndef isin_sorted(values2check, masterlist):\n index = np.searchsorted(masterlist, values2check, side = 'left')\n index[index >= masterlist.shape[0]] = masterlist.shape[0] - 1\n return values2check == masterlist[index]" ]
[ [ "pandas.read_hdf", "pandas.concat", "numpy.searchsorted" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
lynnmunday/neml
[ "2c0e3db9f849345dba01d64fc8488e2b97e477dd" ]
[ "test/test_sliprules.py" ]
[ "#!/usr/bin/env python3\n\nfrom neml import history, interpolate\nfrom neml.math import tensors, rotations\nfrom neml.cp import crystallography, slipharden, sliprules\n\nfrom common import differentiate\nfrom nicediff import *\n\nimport unittest\nimport numpy as np\nimport numpy.linalg as la\n\nclass CommonSlipRule(object):\n def test_d_slip_d_stress(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n d = self.model.d_slip_d_s(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed)\n nd = diff_scalar_symmetric(lambda s: self.model.slip(g, i, s, self.Q, self.H, \n self.L, self.T, self.fixed), self.S)\n self.assertEqual(d, nd)\n\n def test_d_slip_d_hist(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n d = np.array(self.model.d_slip_d_h(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = np.array(diff_history_scalar(lambda h: self.model.slip(g, i, self.S, self.Q, h,\n self.L, self.T, self.fixed), self.H))\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\n def test_d_hist_rate_d_stress(self):\n d = np.array(self.model.d_hist_rate_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = diff_history_symmetric(lambda s: self.model.hist_rate(s, self.Q, self.H, self.L,\n self.T, self.fixed), self.S)\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\n def test_d_hist_rate_d_hist(self):\n d = np.array(self.model.d_hist_rate_d_hist(self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = diff_history_history(lambda h: self.model.hist_rate(self.S, self.Q, h, self.L,\n self.T, self.fixed), self.H)\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\nclass CommonSlipStrengthSlipRule(object):\n def test_init_hist(self):\n H1 = history.History()\n self.model.populate_history(H1)\n self.model.init_history(H1)\n\n H2 = history.History()\n self.strengthmodel.populate_history(H2)\n self.strengthmodel.init_history(H2)\n\n self.assertTrue(np.allclose(np.array(H1),\n np.array(H2)))\n\n def test_slip(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n rs = self.L.shear(g, i, self.Q, self.S)\n strength = self.strength + self.static\n self.assertTrue(np.isclose(self.model.slip(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed),\n self.model.sslip(g, i, rs, strength, self.T)))\n\n def test_d_hist_rate(self):\n self.assertTrue(np.allclose(\n np.array(self.model.hist_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)),\n np.array(self.strengthmodel.hist(self.S, self.Q, self.H, self.L, self.T, self.model, self.fixed))))\n\n def test_d_sslip_d_tau(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n nd = differentiate(lambda t: self.model.sslip(g, i, t, self.strength, self.T),\n self.tau)\n d = self.model.d_sslip_dtau(g, i, self.tau, self.strength, self.T)\n self.assertTrue(np.isclose(nd,d))\n\n def test_d_sslip_d_strength(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n nd = differentiate(lambda s: self.model.sslip(g, i, self.tau, s, self.T), self.strength)\n d = self.model.d_sslip_dstrength(g, i, self.tau, self.strength, self.T)\n print(nd)\n print(d)\n self.assertTrue(np.isclose(nd, d))\n\nclass TestPowerLawSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):\n def setUp(self):\n self.L = crystallography.CubicLattice(1.0)\n self.L.add_slip_system([1,1,0],[1,1,1])\n \n self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = \"degrees\")\n self.S = tensors.Symmetric(np.array([\n [100.0,-25.0,10.0],\n 
[-25.0,-17.0,15.0],\n [10.0, 15.0,35.0]]))\n self.strength = 35.0\n self.H = history.History()\n self.H.add_scalar(\"strength\")\n self.H.set_scalar(\"strength\", self.strength)\n\n self.T = 300.0\n\n self.tau0 = 10.0\n self.tau_sat = 50.0\n self.b = 2.5\n\n self.strengthmodel = slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0)\n\n self.static = self.tau0\n \n self.g0 = 1.0\n self.n = 3.0\n self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)\n\n self.tau = 33.0\n\n self.fixed = history.History()\n\n def test_scalar_rate(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n self.assertTrue(np.isclose(self.model.sslip(g, i, self.tau, self.strength, self.T),\n self.g0 * np.abs(self.tau/self.strength)**(self.n-1.0) * self.tau/self.strength))\n\n\nclass TestBiVoceSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):\n def setUp(self):\n self.L = crystallography.CubicLattice(1.0)\n self.L.add_slip_system([1,1,0],[1,1,1])\n \n self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = \"degrees\")\n self.S = tensors.Symmetric(np.array([\n [100.0,-25.0,10.0],\n [-25.0,-17.0,15.0],\n [10.0, 15.0,35.0]]))\n self.strength_1 = 35.0\n self.strength_2 = 25.0\n self.strength = self.strength_1 + self.strength_2\n self.H = history.History()\n self.H.add_scalar(\"strength0\")\n self.H.set_scalar(\"strength0\", self.strength_1)\n self.H.add_scalar(\"strength1\")\n self.H.set_scalar(\"strength1\", self.strength_2)\n\n self.T = 300.0\n\n self.tau0 = 10.0\n self.tau_sat = 50.0\n self.b = 2.5\n\n self.strengthmodel = slipharden.SumSlipSingleStrengthHardening(\n [slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0),\n slipharden.VoceSlipHardening(self.tau_sat/2, self.b/2, self.tau0/2)])\n\n self.static = self.tau0 + self.tau0 / 2\n \n self.g0 = 1.0\n self.n = 3.0\n self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)\n\n self.tau = 33.0\n\n self.fixed = history.History()\n" ]
[ [ "numpy.array", "numpy.abs", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hrk7531/scipy
[ "a62bf66b2a485fbb3e08fe52feecaca765bead1f" ]
[ "scipy/optimize/_linprog_util.py" ]
[ "\"\"\"\nMethod agnostic utility functions for linear progamming\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom warnings import warn\nfrom .optimize import OptimizeWarning\nfrom scipy.optimize._remove_redundancy import (\n _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense\n )\n\n\ndef _check_sparse_inputs(options, A_ub, A_eq):\n \"\"\"\n Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified\n optional sparsity variables.\n\n Parameters\n ----------\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n\n Returns\n -------\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n \"\"\"\n # This is an undocumented option for unit testing sparse presolve\n _sparse_presolve = options.pop('_sparse_presolve', False)\n if _sparse_presolve and A_eq is not None:\n A_eq = sps.coo_matrix(A_eq)\n if _sparse_presolve and A_ub is not None:\n A_ub = sps.coo_matrix(A_ub)\n\n sparse = options.get('sparse', False)\n if not sparse and (sps.issparse(A_eq) or sps.issparse(A_ub)):\n options['sparse'] = True\n warn(\"Sparse constraint matrix detected; setting 'sparse':True.\",\n OptimizeWarning)\n return options, A_ub, A_eq\n\n\ndef _format_A_constraints(A, n_x, sparse_lhs=False):\n \"\"\"Format the left hand side of the constraints to a 2D array\n\n Parameters\n ----------\n A : 2D array\n 2D array such that ``A @ x`` gives the values of the upper-bound\n (in)equality constraints at ``x``.\n n_x : int\n The number of variables in the linear programming problem.\n sparse_lhs : bool\n Whether either of `A_ub` or `A_eq` are sparse. 
If true return a\n coo_matrix instead of a numpy array.\n\n Returns\n -------\n np.ndarray or sparse.coo_matrix\n 2D array such that ``A @ x`` gives the values of the upper-bound\n (in)equality constraints at ``x``.\n\n \"\"\"\n if sparse_lhs:\n return sps.coo_matrix(\n (0, n_x) if A is None else A, dtype=float, copy=True\n )\n elif A is None:\n return np.zeros((0, n_x), dtype=float)\n else:\n return np.array(A, dtype=float, copy=True)\n\n\ndef _format_b_constraints(b):\n \"\"\"Format the upper bounds of the constraints to a 1D array\n\n Parameters\n ----------\n b : 1D array\n 1D array of values representing the upper-bound of each (in)equality\n constraint (row) in ``A``.\n\n Returns\n -------\n 1D np.array\n 1D array of values representing the upper-bound of each (in)equality\n constraint (row) in ``A``.\n\n \"\"\"\n if b is None:\n return np.array([], dtype=float)\n b = np.array(b, dtype=float, copy=True).squeeze()\n return b if b.size != 1 else b.reshape((-1))\n\n\ndef _clean_inputs(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n x0=None):\n \"\"\"\n Given user inputs for a linear programming problem, return the\n objective vector, upper bound constraints, equality constraints,\n and simple bounds in a preferred format.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence, optional\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative).\n If a sequence containing a single tuple is provided, then ``min`` and\n ``max`` will be applied to all variables in the problem.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. 
By default\n bounds are ``(0, None)`` (non-negative).\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n \"\"\"\n if c is None:\n raise TypeError\n\n try:\n c = np.array(c, dtype=np.float, copy=True).squeeze()\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: c must be a 1D array of numerical \"\n \"coefficients\")\n else:\n # If c is a single value, convert it to a 1D array.\n if c.size == 1:\n c = c.reshape((-1))\n\n n_x = len(c)\n if n_x == 0 or len(c.shape) != 1:\n raise ValueError(\n \"Invalid input for linprog: c must be a 1D array and must \"\n \"not have more than one non-singleton dimension\")\n if not(np.isfinite(c).all()):\n raise ValueError(\n \"Invalid input for linprog: c must not contain values \"\n \"inf, nan, or None\")\n\n sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)\n try:\n A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: A_ub must be a 2D array \"\n \"of numerical values\")\n else:\n n_ub = A_ub.shape[0]\n if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:\n raise ValueError(\n \"Invalid input for linprog: A_ub must have exactly two \"\n \"dimensions, and the number of columns in A_ub must be \"\n \"equal to the size of c\")\n if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()\n or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):\n raise ValueError(\n \"Invalid input for linprog: A_ub must not contain values \"\n \"inf, nan, or None\")\n\n try:\n b_ub = _format_b_constraints(b_ub)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: b_ub must be a 1D array of \"\n \"numerical values, each representing the upper bound of an \"\n \"inequality constraint (row) in A_ub\")\n else:\n if b_ub.shape != (n_ub,):\n raise ValueError(\n \"Invalid input for linprog: b_ub must be a 1D array; b_ub \"\n \"must not have more than one non-singleton dimension and \"\n \"the number of rows in A_ub must equal the number of values \"\n \"in b_ub\")\n if not(np.isfinite(b_ub).all()):\n raise ValueError(\n \"Invalid input for linprog: b_ub must not contain values \"\n \"inf, nan, or None\")\n\n try:\n A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: A_eq must be a 2D array \"\n \"of numerical values\")\n else:\n n_eq = A_eq.shape[0]\n if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:\n raise ValueError(\n \"Invalid input for linprog: A_eq must have exactly two \"\n \"dimensions, and the number of columns in A_eq must be \"\n \"equal to the size of c\")\n\n if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()\n or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):\n raise ValueError(\n \"Invalid input for linprog: A_eq must not contain values \"\n \"inf, nan, or None\")\n\n try:\n b_eq = _format_b_constraints(b_eq)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: b_eq must be a 1D array of \"\n \"numerical values, each representing the upper bound of an \"\n \"inequality constraint (row) in A_eq\")\n else:\n if b_eq.shape != (n_eq,):\n raise ValueError(\n \"Invalid input for linprog: b_eq must be a 1D array; b_eq \"\n \"must not have more than one non-singleton dimension and \"\n \"the number of rows in A_eq must equal the number of values \"\n \"in b_eq\")\n if not(np.isfinite(b_eq).all()):\n raise ValueError(\n \"Invalid input for linprog: b_eq must 
not contain values \"\n \"inf, nan, or None\")\n\n # x0 gives a (optional) starting solution to the solver. If x0 is None,\n # skip the checks. Initial solution will be generated automatically.\n if x0 is not None:\n try:\n x0 = np.array(x0, dtype=float, copy=True).squeeze()\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: x0 must be a 1D array of \"\n \"numerical coefficients\")\n if x0.ndim == 0:\n x0 = x0.reshape((-1))\n if len(x0) == 0 or x0.ndim != 1:\n raise ValueError(\n \"Invalid input for linprog: x0 should be a 1D array; it \"\n \"must not have more than one non-singleton dimension\")\n if not x0.size == c.size:\n raise ValueError(\n \"Invalid input for linprog: x0 and c should contain the \"\n \"same number of elements\")\n if not np.isfinite(x0).all():\n raise ValueError(\n \"Invalid input for linprog: x0 must not contain values \"\n \"inf, nan, or None\")\n\n # \"If a sequence containing a single tuple is provided, then min and max\n # will be applied to all variables in the problem.\"\n # linprog doesn't treat this right: it didn't accept a list with one tuple\n # in it\n try:\n if isinstance(bounds, str):\n raise TypeError\n if bounds is None or len(bounds) == 0:\n bounds = [(0, None)] * n_x\n elif len(bounds) == 1:\n b = bounds[0]\n if len(b) != 2:\n raise ValueError(\n \"Invalid input for linprog: exactly one lower bound and \"\n \"one upper bound must be specified for each element of x\")\n bounds = [b] * n_x\n elif len(bounds) == n_x:\n try:\n len(bounds[0])\n except BaseException:\n bounds = [(bounds[0], bounds[1])] * n_x\n for i, b in enumerate(bounds):\n if len(b) != 2:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": exactly one lower bound and one upper bound must \"\n \"be specified for each element of x\")\n elif (len(bounds) == 2 and np.isreal(bounds[0])\n and np.isreal(bounds[1])):\n bounds = [(bounds[0], bounds[1])] * n_x\n else:\n raise ValueError(\n \"Invalid input for linprog: exactly one lower bound and one \"\n \"upper bound must be specified for each element of x\")\n\n clean_bounds = [] # also creates a copy so user's object isn't changed\n for i, b in enumerate(bounds):\n if b[0] is not None and b[1] is not None and b[0] > b[1]:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": a lower bound must be less than or equal to the \"\n \"corresponding upper bound\")\n if b[0] == np.inf:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": infinity is not a valid lower bound\")\n if b[1] == -np.inf:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": negative infinity is not a valid upper bound\")\n lb = float(b[0]) if b[0] is not None and b[0] != -np.inf else None\n ub = float(b[1]) if b[1] is not None and b[1] != np.inf else None\n clean_bounds.append((lb, ub))\n bounds = clean_bounds\n except ValueError as e:\n if \"could not convert string to float\" in e.args[0]:\n raise TypeError\n else:\n raise e\n except TypeError as e:\n print(e)\n raise TypeError(\n \"Invalid input for linprog: bounds must be a sequence of \"\n \"(min,max) pairs, each defining bounds on an element of x \")\n\n return c, A_ub, b_ub, A_eq, b_eq, bounds, x0\n\n\ndef _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, rr, tol=1e-9):\n \"\"\"\n Given inputs for a linear programming problem in preferred format,\n presolve the problem: identify trivial infeasibilities, 
redundancies,\n and unboundedness, tighten bounds where possible, and eliminate fixed\n variables.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n rr : bool\n If ``True`` attempts to eliminate any redundant rows in ``A_eq``.\n Set False if ``A_eq`` is known to be of full row rank, or if you are\n looking for a potential speedup (at the expense of reliability).\n tol : float\n The tolerance which determines when a solution is \"close enough\" to\n zero in Phase 1 to be considered a basic feasible solution or close\n enough to positive to serve as an optimal solution.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n c0 : 1D array\n Constant term in objective function due to fixed (and eliminated)\n variables.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. Bounds have been\n tightened where possible.\n x : 1D array\n Solution vector (when the solution is trivial and can be determined\n in presolve)\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm (if solution is not determined in presolve)\n undo: list of tuples\n (index, value) pairs that record the original index and fixed value\n for each variable removed from the problem\n complete: bool\n Whether the solution is complete (solved or determined to be infeasible\n or unbounded in presolve)\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n\n References\n ----------\n .. [5] Andersen, Erling D. \"Finding all linearly dependent rows in\n large-scale linear programming.\" Optimization Methods and Software\n 6.3 (1995): 219-227.\n .. [8] Andersen, Erling D., and Knud D. Andersen. 
\"Presolving in linear\n programming.\" Mathematical Programming 71.2 (1995): 221-245.\n\n \"\"\"\n # ideas from Reference [5] by Andersen and Andersen\n # however, unlike the reference, this is performed before converting\n # problem to standard form\n # There are a few advantages:\n # * artificial variables have not been added, so matrices are smaller\n # * bounds have not been converted to constraints yet. (It is better to\n # do that after presolve because presolve may adjust the simple bounds.)\n # There are many improvements that can be made, namely:\n # * implement remaining checks from [5]\n # * loop presolve until no additional changes are made\n # * implement additional efficiency improvements in redundancy removal [2]\n\n undo = [] # record of variables eliminated from problem\n # constant term in cost function may be added if variables are eliminated\n c0 = 0\n complete = False # complete is True if detected infeasible/unbounded\n x = np.zeros(c.shape) # this is solution vector if completed in presolve\n\n status = 0 # all OK unless determined otherwise\n message = \"\"\n\n # Standard form for bounds (from _clean_inputs) is list of tuples\n # but numpy array is more convenient here\n # In retrospect, numpy array should have been the standard\n bounds = np.array(bounds)\n lb = bounds[:, 0]\n ub = bounds[:, 1]\n lb[np.equal(lb, None)] = -np.inf\n ub[np.equal(ub, None)] = np.inf\n bounds = bounds.astype(float)\n lb = lb.astype(float)\n ub = ub.astype(float)\n\n m_eq, n = A_eq.shape\n m_ub, n = A_ub.shape\n\n if (sps.issparse(A_eq)):\n A_eq = A_eq.tolil()\n A_ub = A_ub.tolil()\n\n def where(A):\n return A.nonzero()\n\n vstack = sps.vstack\n else:\n where = np.where\n vstack = np.vstack\n\n # zero row in equality constraints\n zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()\n if np.any(zero_row):\n if np.any(\n np.logical_and(\n zero_row,\n np.abs(b_eq) > tol)): # test_zero_row_1\n # infeasible if RHS is not zero\n status = 2\n message = (\"The problem is (trivially) infeasible due to a row \"\n \"of zeros in the equality constraint matrix with a \"\n \"nonzero corresponding constraint value.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else: # test_zero_row_2\n # if RHS is zero, we can eliminate this equation entirely\n A_eq = A_eq[np.logical_not(zero_row), :]\n b_eq = b_eq[np.logical_not(zero_row)]\n\n # zero row in inequality constraints\n zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()\n if np.any(zero_row):\n if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1\n # infeasible if RHS is less than zero (because LHS is zero)\n status = 2\n message = (\"The problem is (trivially) infeasible due to a row \"\n \"of zeros in the equality constraint matrix with a \"\n \"nonzero corresponding constraint value.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else: # test_zero_row_2\n # if LHS is >= 0, we can eliminate this constraint entirely\n A_ub = A_ub[np.logical_not(zero_row), :]\n b_ub = b_ub[np.logical_not(zero_row)]\n\n # zero column in (both) constraints\n # this indicates that a variable isn't constrained and can be removed\n A = vstack((A_eq, A_ub))\n if A.shape[0] > 0:\n zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()\n # variable will be at upper or lower bound, depending on objective\n x[np.logical_and(zero_col, c < 0)] = ub[\n np.logical_and(zero_col, c < 0)]\n x[np.logical_and(zero_col, 
c > 0)] = lb[\n np.logical_and(zero_col, c > 0)]\n if np.any(np.isinf(x)): # if an unconstrained variable has no bound\n status = 3\n message = (\"If feasible, the problem is (trivially) unbounded \"\n \"due to a zero column in the constraint matrices. If \"\n \"you wish to check whether the problem is infeasible, \"\n \"turn presolve off.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n # variables will equal upper/lower bounds will be removed later\n lb[np.logical_and(zero_col, c < 0)] = ub[\n np.logical_and(zero_col, c < 0)]\n ub[np.logical_and(zero_col, c > 0)] = lb[\n np.logical_and(zero_col, c > 0)]\n\n # row singleton in equality constraints\n # this fixes a variable and removes the constraint\n singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()\n rows = where(singleton_row)[0]\n cols = where(A_eq[rows, :])[1]\n if len(rows) > 0:\n for row, col in zip(rows, cols):\n val = b_eq[row] / A_eq[row, col]\n if not lb[col] - tol <= val <= ub[col] + tol:\n # infeasible if fixed value is not within bounds\n status = 2\n message = (\"The problem is (trivially) infeasible because a \"\n \"singleton row in the equality constraints is \"\n \"inconsistent with the bounds.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else:\n # sets upper and lower bounds at that fixed value - variable\n # will be removed later\n lb[col] = val\n ub[col] = val\n A_eq = A_eq[np.logical_not(singleton_row), :]\n b_eq = b_eq[np.logical_not(singleton_row)]\n\n # row singleton in inequality constraints\n # this indicates a simple bound and the constraint can be removed\n # simple bounds may be adjusted here\n # After all of the simple bound information is combined here, get_Abc will\n # turn the simple bounds into constraints\n singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()\n cols = where(A_ub[singleton_row, :])[1]\n rows = where(singleton_row)[0]\n if len(rows) > 0:\n for row, col in zip(rows, cols):\n val = b_ub[row] / A_ub[row, col]\n if A_ub[row, col] > 0: # upper bound\n if val < lb[col] - tol: # infeasible\n complete = True\n elif val < ub[col]: # new upper bound\n ub[col] = val\n else: # lower bound\n if val > ub[col] + tol: # infeasible\n complete = True\n elif val > lb[col]: # new lower bound\n lb[col] = val\n if complete:\n status = 2\n message = (\"The problem is (trivially) infeasible because a \"\n \"singleton row in the upper bound constraints is \"\n \"inconsistent with the bounds.\")\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n A_ub = A_ub[np.logical_not(singleton_row), :]\n b_ub = b_ub[np.logical_not(singleton_row)]\n\n # identical bounds indicate that variable can be removed\n i_f = np.abs(lb - ub) < tol # indices of \"fixed\" variables\n i_nf = np.logical_not(i_f) # indices of \"not fixed\" variables\n\n # test_bounds_equal_but_infeasible\n if np.all(i_f): # if bounds define solution, check for consistency\n residual = b_eq - A_eq.dot(lb)\n slack = b_ub - A_ub.dot(lb)\n if ((A_ub.size > 0 and np.any(slack < 0)) or\n (A_eq.size > 0 and not np.allclose(residual, 0))):\n status = 2\n message = (\"The problem is (trivially) infeasible because the \"\n \"bounds fix all variables to values inconsistent with \"\n \"the constraints\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n ub_mod = ub\n lb_mod = lb\n if np.any(i_f):\n c0 += 
c[i_f].dot(lb[i_f])\n b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])\n b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])\n c = c[i_nf]\n x = x[i_nf]\n # user guess x0 stays separate from presolve solution x\n if x0 is not None:\n x0 = x0[i_nf]\n A_eq = A_eq[:, i_nf]\n A_ub = A_ub[:, i_nf]\n # record of variables to be added back in\n undo = [np.nonzero(i_f)[0], lb[i_f]]\n # don't remove these entries from bounds; they'll be used later.\n # but we _also_ need a version of the bounds with these removed\n lb_mod = lb[i_nf]\n ub_mod = ub[i_nf]\n\n # no constraints indicates that problem is trivial\n if A_eq.size == 0 and A_ub.size == 0:\n b_eq = np.array([])\n b_ub = np.array([])\n # test_empty_constraint_1\n if c.size == 0:\n status = 0\n message = (\"The solution was determined in presolve as there are \"\n \"no non-trivial constraints.\")\n elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or\n np.any(np.logical_and(c > 0, lb_mod == -np.inf))):\n # test_no_constraints()\n # test_unbounded_no_nontrivial_constraints_1\n # test_unbounded_no_nontrivial_constraints_2\n status = 3\n message = (\"The problem is (trivially) unbounded \"\n \"because there are no non-trivial constraints and \"\n \"a) at least one decision variable is unbounded \"\n \"above and its corresponding cost is negative, or \"\n \"b) at least one decision variable is unbounded below \"\n \"and its corresponding cost is positive. \")\n else: # test_empty_constraint_2\n status = 0\n message = (\"The solution was determined in presolve as there are \"\n \"no non-trivial constraints.\")\n complete = True\n x[c < 0] = ub_mod[c < 0]\n x[c > 0] = lb_mod[c > 0]\n # where c is zero, set x to a finite bound or zero\n x_zero_c = ub_mod[c == 0]\n x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)]\n x_zero_c[np.isinf(x_zero_c)] = 0\n x[c == 0] = x_zero_c\n # if this is not the last step of presolve, should convert bounds back\n # to array and return here\n\n # *sigh* - convert bounds back to their standard form (list of tuples)\n # again, in retrospect, numpy array would be standard form\n lb[np.equal(lb, -np.inf)] = None\n ub[np.equal(ub, np.inf)] = None\n bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis]))\n bounds = bounds.tolist()\n for i, row in enumerate(bounds):\n for j, col in enumerate(row):\n if str(col) == \"nan\":\n # comparing col to float(\"nan\") and np.nan doesn't work.\n # should use np.isnan\n bounds[i][j] = None\n\n # remove redundant (linearly dependent) rows from equality constraints\n n_rows_A = A_eq.shape[0]\n redundancy_warning = (\"A_eq does not appear to be of full row rank. To \"\n \"improve performance, check the problem formulation \"\n \"for redundant equality constraints.\")\n if (sps.issparse(A_eq)):\n if rr and A_eq.size > 0: # TODO: Fast sparse rank check?\n A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)\n if A_eq.shape[0] < n_rows_A:\n warn(redundancy_warning, OptimizeWarning)\n if status != 0:\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n # This is a wild guess for which redundancy removal algorithm will be\n # faster. 
More testing would be good.\n small_nullspace = 5\n if rr and A_eq.size > 0:\n try: # TODO: instead use results of first SVD in _remove_redundancy\n rank = np.linalg.matrix_rank(A_eq)\n except Exception: # oh well, we'll have to go with _remove_redundancy_dense\n rank = 0\n if rr and A_eq.size > 0 and rank < A_eq.shape[0]:\n warn(redundancy_warning, OptimizeWarning)\n dim_row_nullspace = A_eq.shape[0]-rank\n if dim_row_nullspace <= small_nullspace:\n A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq)\n if dim_row_nullspace > small_nullspace or status == 4:\n A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq)\n if A_eq.shape[0] < rank:\n message = (\"Due to numerical issues, redundant equality \"\n \"constraints could not be removed automatically. \"\n \"Try providing your constraint matrices as sparse \"\n \"matrices to activate sparse presolve, try turning \"\n \"off redundancy removal, or try turning off presolve \"\n \"altogether.\")\n status = 4\n if status != 0:\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n\ndef _parse_linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, options, x0):\n \"\"\"\n Parse the provided linear programming problem\n\n ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and\n ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the\n provided constraints (``A_ub`` and ``A_eq) and if these match the provided\n sparsity optional values.\n\n ``_clean inputs`` checks of the provided inputs. If no violations are\n identified the objective vector, upper bound constraints, equality\n constraints, and simple bounds are returned in the expected format.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative). If a sequence containing a\n single tuple is provided, then ``min`` and ``max`` will be applied to\n all variables in the problem.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm. 
Currently compatible only with the\n 'revised simplex' method, and only if x0 is a basic feasible solution\n of the problem.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence, optional\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative).\n If a sequence containing a single tuple is provided, then ``min`` and\n ``max`` will be applied to all variables in the problem.\n options : dict, optional\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm. Currently compatible only with the\n 'revised simplex' method, and only if x0 is a basic feasible solution\n of the problem.\n \"\"\"\n if options is None:\n options = {}\n\n solver_options = {k: v for k, v in options.items()}\n solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, A_ub, A_eq)\n # Convert lists to numpy arrays, etc...\n c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = _clean_inputs(\n c, A_ub, b_ub, A_eq, b_eq, bounds, x0)\n return c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options, x0\n\n\ndef _get_Abc(c, c0=0, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n x0=None, undo=[]):\n \"\"\"\n Given a linear programming problem of the form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A_ub @ x <= b_ub\n A_eq @ x == b_eq\n lb <= x <= ub\n\n where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.\n\n Return the problem in standard form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A @ x == b\n x >= 0\n\n by adding slack variables and making variable substitutions as necessary.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n Components corresponding with fixed variables have been eliminated.\n c0 : float\n Constant term in objective function due to fixed (and eliminated)\n variables.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. 
Bounds have been\n tightened where possible.\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n\n Returns\n -------\n A : 2D array\n 2D array such that ``A`` @ ``x``, gives the values of the equality\n constraints at ``x``.\n b : 1D array\n 1D array of values representing the RHS of each equality constraint\n (row) in A (for standard form problem).\n c : 1D array\n Coefficients of the linear objective function to be minimized (for\n standard form problem).\n c0 : float\n Constant term in objective function due to fixed (and eliminated)\n variables.\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm\n\n References\n ----------\n .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. \"Introduction to linear\n programming.\" Athena Scientific 1 (1997): 997.\n\n \"\"\"\n\n if sps.issparse(A_eq):\n sparse = True\n A_eq = sps.lil_matrix(A_eq)\n A_ub = sps.lil_matrix(A_ub)\n\n def hstack(blocks):\n return sps.hstack(blocks, format=\"lil\")\n\n def vstack(blocks):\n return sps.vstack(blocks, format=\"lil\")\n\n zeros = sps.lil_matrix\n eye = sps.eye\n else:\n sparse = False\n hstack = np.hstack\n vstack = np.vstack\n zeros = np.zeros\n eye = np.eye\n\n fixed_x = set()\n if len(undo) > 0:\n # these are indices of variables removed from the problem\n # however, their bounds are still part of the bounds list\n fixed_x = set(undo[0])\n # they are needed elsewhere, but not here\n bounds = [bounds[i] for i in range(len(bounds)) if i not in fixed_x]\n # in retrospect, the standard form of bounds should have been an n x 2\n # array. maybe change it someday.\n\n # modify problem such that all variables have only non-negativity bounds\n\n bounds = np.array(bounds)\n lbs = bounds[:, 0]\n ubs = bounds[:, 1]\n m_ub, n_ub = A_ub.shape\n\n lb_none = np.equal(lbs, None)\n ub_none = np.equal(ubs, None)\n lb_some = np.logical_not(lb_none)\n ub_some = np.logical_not(ub_none)\n\n # if preprocessing is on, lb == ub can't happen\n # if preprocessing is off, then it would be best to convert that\n # to an equality constraint, but it's tricky to make the other\n # required modifications from inside here.\n\n # unbounded below: substitute xi = -xi' (unbounded above)\n l_nolb_someub = np.logical_and(lb_none, ub_some)\n i_nolb = np.nonzero(l_nolb_someub)[0]\n lbs[l_nolb_someub], ubs[l_nolb_someub] = (\n -ubs[l_nolb_someub], lbs[l_nolb_someub])\n lb_none = np.equal(lbs, None)\n ub_none = np.equal(ubs, None)\n lb_some = np.logical_not(lb_none)\n ub_some = np.logical_not(ub_none)\n c[i_nolb] *= -1\n if x0 is not None:\n x0[i_nolb] *= -1\n if len(i_nolb) > 0:\n if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird\n A_ub[:, i_nolb] *= -1\n if A_eq.shape[0] > 0:\n A_eq[:, i_nolb] *= -1\n\n # upper bound: add inequality constraint\n i_newub = np.nonzero(ub_some)[0]\n ub_newub = ubs[ub_some]\n n_bounds = np.count_nonzero(ub_some)\n A_ub = vstack((A_ub, zeros((n_bounds, A_ub.shape[1]))))\n b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))\n A_ub[range(m_ub, A_ub.shape[0]), i_newub] = 1\n b_ub[m_ub:] = ub_newub\n\n A1 = vstack((A_ub, A_eq))\n b = np.concatenate((b_ub, b_eq))\n c = np.concatenate((c, np.zeros((A_ub.shape[0],))))\n if x0 is not None:\n x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))\n # unbounded: substitute xi = xi+ + xi-\n l_free = np.logical_and(lb_none, ub_none)\n i_free = np.nonzero(l_free)[0]\n n_free = len(i_free)\n A1 = hstack((A1, zeros((A1.shape[0], n_free))))\n c = np.concatenate((c, np.zeros(n_free)))\n if x0 is not None:\n x0 = np.concatenate((x0, np.zeros(n_free)))\n A1[:, range(n_ub, A1.shape[1])] = -A1[:, i_free]\n c[np.arange(n_ub, A1.shape[1])] = -c[i_free]\n if x0 is not None:\n i_free_neg = x0[i_free] < 0\n x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]\n x0[i_free[i_free_neg]] = 0\n\n # add slack variables\n A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])\n A = hstack([A1, A2])\n\n # lower bound: substitute xi = xi' + lb\n # now there is a constant term in objective\n i_shift = np.nonzero(lb_some)[0]\n lb_shift = lbs[lb_some].astype(float)\n c0 += np.sum(lb_shift * c[i_shift])\n if sparse:\n b = b.reshape(-1, 1)\n A = A.tocsc()\n b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)\n b = b.ravel()\n else:\n b -= (A[:, i_shift] * lb_shift).sum(axis=1)\n if x0 is not None:\n x0[i_shift] -= lb_shift\n\n return A, b, c, c0, x0\n\n\ndef _display_summary(message, status, fun, iteration):\n \"\"\"\n Print the termination summary of the linear program\n\n Parameters\n ----------\n message : str\n A string descriptor of the exit status of the optimization.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n fun : float\n Value of the objective function.\n iteration : iteration\n The number of iterations performed.\n \"\"\"\n print(message)\n if status in (0, 1):\n print(\" Current function value: {0: <12.6f}\".format(fun))\n print(\" Iterations: {0:d}\".format(iteration))\n\n\ndef _postsolve(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n complete=False, undo=[], tol=1e-8):\n \"\"\"\n Given solution x to presolved, standard form linear program x, add\n fixed variables back into the problem and undo the variable substitutions\n to get solution to original linear program. 
Also, calculate the objective\n function value, slack in original upper bound constraints, and residuals\n in original equality constraints.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to the standard-form problem.\n c : 1D array\n Original coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n Bounds, as modified in presolve\n complete : bool\n Whether the solution is was determined in presolve (``True`` if so)\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n \"\"\"\n # note that all the inputs are the ORIGINAL, unmodified versions\n # no rows, columns have been removed\n # the only exception is bounds; it has been modified\n # we need these modified values to undo the variable substitutions\n # in retrospect, perhaps this could have been simplified if the \"undo\"\n # variable also contained information for undoing variable substitutions\n\n n_x = len(c)\n\n # we don't have to undo variable substitutions for fixed variables that\n # were removed from the problem\n no_adjust = set()\n\n # if there were variables removed from the problem, add them back into the\n # solution vector\n if len(undo) > 0:\n no_adjust = set(undo[0])\n x = x.tolist()\n for i, val in zip(undo[0], undo[1]):\n x.insert(i, val)\n x = np.array(x)\n\n # now undo variable substitutions\n # if \"complete\", problem was solved in presolve; don't do anything here\n if not complete and bounds is not None: # bounds are never none, probably\n n_unbounded = 0\n for i, b in enumerate(bounds):\n if i in no_adjust:\n continue\n lb, ub = b\n if lb is None and ub is None:\n n_unbounded += 1\n x[i] = x[i] - x[n_x + n_unbounded - 1]\n else:\n if lb is None:\n x[i] = ub - x[i]\n else:\n x[i] += lb\n\n n_x = len(c)\n x = x[:n_x] # all the rest of the variables were artificial\n fun = x.dot(c)\n slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints\n # report residuals of ORIGINAL EQ constraints\n con = b_eq - A_eq.dot(x)\n\n # Patch for bug #8664. 
Detecting this sort of issue earlier\n # (via abnormalities in the indicators) would be better.\n bounds = np.array(bounds) # again, this should have been the standard form\n lb = bounds[:, 0]\n ub = bounds[:, 1]\n lb[np.equal(lb, None)] = -np.inf\n ub[np.equal(ub, None)] = np.inf\n\n return x, fun, slack, con, lb, ub\n\n\ndef _check_result(x, fun, status, slack, con, lb, ub, tol, message):\n \"\"\"\n Check the validity of the provided solution.\n\n A valid (optimal) solution satisfies all bounds, all slack variables are\n negative and all equality constraint residuals are strictly non-zero.\n Further, the lower-bounds, upper-bounds, slack and residuals contain\n no nan values.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n \"\"\"\n # Somewhat arbitrary, but status 5 is very unusual\n tol = np.sqrt(tol) * 10\n\n contains_nans = (\n np.isnan(x).any()\n or np.isnan(fun)\n or np.isnan(slack).any()\n or np.isnan(con).any()\n )\n\n if contains_nans:\n is_feasible = False\n else:\n invalid_bounds = (x < lb - tol).any() or (x > ub + tol).any()\n invalid_slack = status != 3 and (slack < -tol).any()\n invalid_con = status != 3 and (np.abs(con) > tol).any()\n is_feasible = not (invalid_bounds or invalid_slack or invalid_con)\n\n if status == 0 and not is_feasible:\n status = 4\n message = (\"The solution does not satisfy the constraints within the \"\n \"required tolerance of \" + \"{:.2E}\".format(tol) + \", yet \"\n \"no errors were raised and there is no certificate of \"\n \"infeasibility or unboundedness. This is known to occur \"\n \"if the `presolve` option is False and the problem is \"\n \"infeasible. This can also occur due to the limited \"\n \"accuracy of the `interior-point` method. Check whether \"\n \"the slack and constraint residuals are acceptable; \"\n \"if not, consider enabling presolve, reducing option \"\n \"`tol`, and/or using method `revised simplex`. \"\n \"If you encounter this message under different \"\n \"circumstances, please submit a bug report.\")\n elif status == 0 and contains_nans:\n status = 4\n message = (\"Numerical difficulties were encountered but no errors \"\n \"were raised. This is known to occur if the 'presolve' \"\n \"option is False, 'sparse' is True, and A_eq includes \"\n \"redundant rows. 
If you encounter this under different \"\n \"circumstances, please submit a bug report. Otherwise, \"\n \"remove linearly dependent equations from your equality \"\n \"constraints or enable presolve.\")\n elif status == 2 and is_feasible:\n # Occurs if the simplex method exits after phase one with a very\n # nearly basic feasible solution. Postsolving can make the solution\n # basic, however, this solution is NOT optimal\n raise ValueError(message)\n\n return status, message\n\n\ndef _postprocess(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n complete=False, undo=[], status=0, message=\"\", tol=1e-8,\n iteration=None, disp=False):\n \"\"\"\n Given solution x to presolved, standard form linear program x, add\n fixed variables back into the problem and undo the variable substitutions\n to get solution to original linear program. Also, calculate the objective\n function value, slack in original upper bound constraints, and residuals\n in original equality constraints.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to the standard-form problem.\n c : 1D array\n Original coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n Bounds, as modified in presolve\n complete : bool\n Whether the solution is was determined in presolve (``True`` if so)\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n\n \"\"\"\n\n x, fun, slack, con, lb, ub = _postsolve(\n x, c, A_ub, b_ub, A_eq, b_eq,\n bounds, complete, undo, tol\n )\n\n status, message = _check_result(\n x, fun, status, slack, con,\n lb, ub, tol, message\n )\n\n if disp:\n _display_summary(message, status, fun, iteration)\n\n return x, fun, slack, con, status, message\n" ]
[ [ "numpy.sqrt", "numpy.linalg.matrix_rank", "scipy.optimize._remove_redundancy._remove_redundancy_sparse", "numpy.all", "numpy.concatenate", "numpy.any", "scipy.sparse.vstack", "scipy.optimize._remove_redundancy._remove_redundancy_dense", "numpy.hstack", "scipy.sparse.coo_matrix", "scipy.sparse.issparse", "numpy.allclose", "numpy.arange", "scipy.sparse.diags", "numpy.count_nonzero", "numpy.zeros", "numpy.logical_not", "numpy.nonzero", "numpy.isnan", "scipy.optimize._remove_redundancy._remove_redundancy", "numpy.equal", "scipy.sparse.hstack", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.isreal", "numpy.abs", "numpy.isfinite", "numpy.isinf", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Francis777/agents
[ "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d", "24e878a697be418307cfbff69724d86be767719d" ]
[ "tf_agents/networks/nest_map_test.py", "tf_agents/bandits/environments/random_bandit_environment_test.py", "tf_agents/agents/qtopt/qtopt_agent_test.py", "tf_agents/policies/py_tf_policy.py", "tf_agents/bandits/policies/boltzmann_reward_prediction_policy.py", "tf_agents/replay_buffers/table_test.py", "tf_agents/metrics/py_metric.py", "tf_agents/agents/sac/tanh_normal_projection_network.py", "tf_agents/replay_buffers/episodic_replay_buffer_driver_test.py", "tf_agents/examples/cql_sac/kumar20/cql_sac_train_eval_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.networks.nest_map.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import flags\nimport tensorflow.compat.v2 as tf\n\nfrom tf_agents.keras_layers import inner_reshape\nfrom tf_agents.networks import nest_map\nfrom tf_agents.networks import sequential\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import test_utils\n\nFLAGS = flags.FLAGS\n\n\nclass MyPolicy(tf_policy.TFPolicy):\n\n def __init__(self, time_step_spec, net):\n super(MyPolicy, self).__init__(\n time_step_spec,\n action_spec=tf.TensorSpec((None,), tf.float32))\n self._net = net\n\n def _action(self, time_step, policy_state=(), seed=None):\n out, _ = self._net(time_step.observation)\n out = tf.math.add(*tf.nest.flatten(out))\n return policy_step.PolicyStep(out, (), ())\n\n\nclass NestFlattenTest(test_utils.TestCase):\n\n def testNestFlatten(self):\n layer = nest_map.NestFlatten()\n outputs = layer({'a': 1, 'b': 2})\n self.assertEqual(self.evaluate(outputs), [1, 2])\n\n\nclass NestMapTest(test_utils.TestCase):\n\n def setUp(self):\n if not common.has_eager_been_enabled():\n self.skipTest('Only supported in TF2.x.')\n super(NestMapTest, self).setUp()\n\n def testCreateAndCall(self):\n net = sequential.Sequential([\n nest_map.NestMap(\n {'inp1': tf.keras.layers.Dense(8),\n 'inp2': sequential.Sequential([\n tf.keras.layers.Conv2D(2, 3),\n # Convert 3 inner dimensions to [8] for RNN.\n inner_reshape.InnerReshape([None] * 3, [8]),\n ]),\n 'inp3': tf.keras.layers.LSTM(\n 8, return_state=True, return_sequences=True)}),\n nest_map.NestFlatten(),\n tf.keras.layers.Add()])\n self.assertEqual(\n net.state_spec,\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorSpec(shape=(8,), dtype=tf.float32),),),\n },))\n output_spec = net.create_variables(\n {\n 'inp1': tf.TensorSpec(shape=(3,), dtype=tf.float32),\n 'inp2': tf.TensorSpec(shape=(4, 4, 2,), dtype=tf.float32),\n 'inp3': tf.TensorSpec(shape=(3,), dtype=tf.float32),\n })\n self.assertEqual(output_spec, tf.TensorSpec(shape=(8,), dtype=tf.float32))\n\n inputs = {\n 'inp1': tf.ones((8, 10, 3), dtype=tf.float32),\n 'inp2': tf.ones((8, 10, 4, 4, 2), dtype=tf.float32),\n 'inp3': tf.ones((8, 10, 3), dtype=tf.float32)\n }\n output, next_state = net(inputs)\n self.assertEqual(output.shape, tf.TensorShape([8, 10, 8]))\n self.assertEqual(\n tf.nest.map_structure(lambda t: t.shape, next_state),\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorShape([8, 8]),),),\n },))\n\n # Test passing in a state.\n output, next_state = net(inputs, next_state)\n self.assertEqual(output.shape, 
tf.TensorShape([8, 10, 8]))\n self.assertEqual(\n tf.nest.map_structure(lambda t: t.shape, next_state),\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorShape([8, 8]),),),\n },))\n\n def testNestedNest(self):\n # layer structure: {'a': {'b': .}}\n net = nest_map.NestMap(\n {'a': nest_map.NestMap(\n {'b': tf.keras.layers.Dense(8)})})\n net.create_variables({'a': {'b': tf.TensorSpec((1,), dtype=tf.float32)}})\n\n def testNestedNestWithNestedState(self):\n # layer structure: (., {'a': {'b': .}})\n net = nest_map.NestMap(\n (tf.keras.layers.Dense(7),\n {'a': nest_map.NestMap(\n {'b': tf.keras.layers.LSTM(\n 8, return_state=True, return_sequences=True)})}))\n # TODO(b/177337002): remove the forced tuple wrapping the LSTM\n # state once we make a generic KerasWrapper network and clean up\n # Sequential and NestMap to use that instead of singleton Sequential.\n out, state = net(\n (tf.ones((1, 2)), {'a': {'b': tf.ones((1, 2))}}),\n network_state=((), {'a': {'b': ((tf.ones((1, 8)), tf.ones((1, 8))),)}}))\n nest_utils.assert_matching_dtypes_and_inner_shapes(\n out,\n (\n tf.TensorSpec(dtype=tf.float32, shape=(7,)),\n {'a': {'b': tf.TensorSpec(dtype=tf.float32, shape=(8,))}}\n ),\n caller=self, tensors_name='out', specs_name='out_expected')\n nest_utils.assert_matching_dtypes_and_inner_shapes(\n state,\n (\n (),\n {'a': {'b': ((tf.TensorSpec(dtype=tf.float32, shape=(8,)),\n tf.TensorSpec(dtype=tf.float32, shape=(8,))),)}}\n ),\n caller=self, tensors_name='state', specs_name='state_expected')\n\n def testIncompatibleStructureInputs(self):\n with self.assertRaisesRegex(\n TypeError,\n r'`nested_layers` and `input_spec` do not have matching structures'):\n nest_map.NestMap(\n [tf.keras.layers.Dense(8)],\n input_spec={'ick': tf.TensorSpec(8, tf.float32)})\n\n with self.assertRaisesRegex(\n TypeError,\n r'`self.nested_layers` and `inputs` do not have matching structures'):\n net = nest_map.NestMap([tf.keras.layers.Dense(8)])\n net.create_variables({'ick': tf.TensorSpec((1,), dtype=tf.float32)})\n\n with self.assertRaisesRegex(\n TypeError,\n r'`self.nested_layers` and `inputs` do not have matching structures'):\n net = nest_map.NestMap([tf.keras.layers.Dense(8)])\n net({'ick': tf.constant([[1.0]])})\n\n with self.assertRaisesRegex(\n ValueError,\n r'`network_state` and `state_spec` do not have matching structures'):\n net = nest_map.NestMap(\n tf.keras.layers.LSTM(8, return_state=True, return_sequences=True))\n net(tf.ones((1, 2)), network_state=(tf.ones((1, 1)), ()))\n\n def testPolicySaverCompatibility(self):\n observation_spec = {\n 'a': tf.TensorSpec(4, tf.float32),\n 'b': tf.TensorSpec(3, tf.float32)\n }\n time_step_tensor_spec = ts.time_step_spec(observation_spec)\n net = nest_map.NestMap(\n {'a': tf.keras.layers.LSTM(8, return_state=True, return_sequences=True),\n 'b': tf.keras.layers.Dense(8)})\n net.create_variables(observation_spec)\n policy = MyPolicy(time_step_tensor_spec, net)\n\n sample = tensor_spec.sample_spec_nest(\n time_step_tensor_spec, outer_dims=(5,))\n\n step = policy.action(sample)\n self.assertEqual(step.action.shape.as_list(), [5, 8])\n\n train_step = common.create_variable('train_step')\n saver = policy_saver.PolicySaver(policy, train_step=train_step)\n self.initialize_v1_variables()\n\n with self.cached_session():\n saver.save(os.path.join(FLAGS.test_tmpdir, 'nest_map_model'))\n\n\nif __name__ == '__main__':\n test_utils.main()\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you 
may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.environments.bandit_tf_environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.bandits.environments import random_bandit_environment\nfrom tf_agents.specs import tensor_spec\n\ntfd = tfp.distributions\n\n\ndef get_gaussian_random_environment(\n observation_shape, action_shape, batch_size):\n \"\"\"Returns a RandomBanditEnvironment with Gaussian observation and reward.\"\"\"\n overall_shape = [batch_size] + observation_shape\n observation_distribution = tfd.Independent(\n tfd.Normal(loc=tf.zeros(overall_shape), scale=tf.ones(overall_shape)))\n reward_distribution = tfd.Normal(\n loc=tf.zeros(batch_size), scale=tf.ones(batch_size))\n action_spec = tensor_spec.TensorSpec(shape=action_shape, dtype=tf.float32)\n return random_bandit_environment.RandomBanditEnvironment(\n observation_distribution,\n reward_distribution,\n action_spec)\n\n\nclass RandomBanditEnvironmentTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n dict(overall_observation_shape=[3, 4, 5, 6],\n batch_dims=2),\n dict(overall_observation_shape=[3, 3, 3, 3],\n batch_dims=0),\n )\n def testInvalidObservationBatchShape(\n self, overall_observation_shape, batch_dims):\n observation_distribution = tfd.Independent(\n tfd.Normal(tf.zeros(overall_observation_shape),\n tf.ones(overall_observation_shape)),\n reinterpreted_batch_ndims=batch_dims)\n reward_distribution = tfd.Normal(tf.zeros(overall_observation_shape[0]),\n tf.ones(overall_observation_shape[0]))\n with self.assertRaisesRegexp(\n ValueError,\n '`observation_distribution` must have batch shape with length 1'):\n random_bandit_environment.RandomBanditEnvironment(\n observation_distribution, reward_distribution)\n\n @parameterized.parameters(\n dict(overall_reward_shape=[3, 4, 5, 6],\n batch_dims=2),\n dict(overall_reward_shape=[4, 5, 6],\n batch_dims=0),\n )\n def testInvalidRewardBatchShape(\n self, overall_reward_shape, batch_dims):\n observation_distribution = tfd.Normal(\n tf.zeros(overall_reward_shape[0]),\n tf.ones(overall_reward_shape[0]))\n reward_distribution = tfd.Independent(\n tfd.Normal(tf.zeros(overall_reward_shape),\n tf.ones(overall_reward_shape)),\n reinterpreted_batch_ndims=batch_dims)\n with self.assertRaisesRegexp(\n ValueError,\n '`reward_distribution` must have batch shape with length 1'):\n random_bandit_environment.RandomBanditEnvironment(\n observation_distribution, reward_distribution)\n\n @parameterized.parameters(\n dict(overall_reward_shape=[3, 4, 5, 6]),\n dict(overall_reward_shape=[4, 5, 6]),\n )\n def testInvalidRewardEventShape(self, overall_reward_shape):\n observation_distribution = tfd.Normal(\n tf.zeros(overall_reward_shape[0]),\n tf.ones(overall_reward_shape[0]))\n reward_distribution = tfd.Independent(\n 
tfd.Normal(tf.zeros(overall_reward_shape),\n tf.ones(overall_reward_shape)))\n with self.assertRaisesRegexp(\n ValueError, '`reward_distribution` must have event_shape ()'):\n random_bandit_environment.RandomBanditEnvironment(\n observation_distribution, reward_distribution)\n\n @parameterized.parameters(\n dict(overall_observation_shape=[4, 5, 6],\n overall_reward_shape=[3]),\n dict(overall_observation_shape=[3],\n overall_reward_shape=[1]),\n )\n def testMismatchedBatchShape(\n self, overall_observation_shape, overall_reward_shape):\n observation_distribution = tfd.Independent(\n tfd.Normal(tf.zeros(overall_observation_shape),\n tf.ones(overall_observation_shape)))\n reward_distribution = tfd.Independent(\n tfd.Normal(tf.zeros(overall_reward_shape),\n tf.ones(overall_reward_shape)))\n with self.assertRaisesRegexp(\n ValueError,\n '`reward_distribution` and `observation_distribution` must have the '\n 'same batch shape'):\n random_bandit_environment.RandomBanditEnvironment(\n observation_distribution, reward_distribution)\n\n @parameterized.named_parameters(\n dict(testcase_name='_observation_[]_action_[]_batch_1',\n observation_shape=[],\n action_shape=[],\n batch_size=1),\n dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',\n observation_shape=[3, 4, 5, 6],\n action_shape=[2, 3, 4],\n batch_size=32),\n )\n def testObservationAndRewardShapes(\n self, observation_shape, action_shape, batch_size):\n \"\"\"Exercise `reset` and `step`. Ensure correct shapes are returned.\"\"\"\n env = get_gaussian_random_environment(\n observation_shape, action_shape, batch_size)\n observation = env.reset().observation\n reward = env.step(tf.zeros(batch_size)).reward\n\n expected_observation_shape = np.array([batch_size] + observation_shape)\n expected_reward_shape = np.array([batch_size])\n\n self.assertAllEqual(\n expected_observation_shape, self.evaluate(tf.shape(observation)))\n self.assertAllEqual(\n expected_reward_shape, self.evaluate(tf.shape(reward)))\n\n @parameterized.named_parameters(\n dict(testcase_name='_observation_[]_action_[]_batch_1',\n observation_shape=[],\n action_shape=[],\n batch_size=1,\n seed=12345),\n dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',\n observation_shape=[3, 4, 5, 6],\n action_shape=[2, 3, 4],\n batch_size=32,\n seed=98765),\n )\n def testObservationAndRewardsVary(\n self, observation_shape, action_shape, batch_size, seed):\n \"\"\"Ensure that observations and rewards change in consecutive calls.\"\"\"\n tf.compat.v1.set_random_seed(seed)\n env = get_gaussian_random_environment(\n observation_shape, action_shape, batch_size)\n\n observation0 = env.reset().observation\n reward0 = env.step(tf.zeros([batch_size] + action_shape)).reward\n observation0 = self.evaluate(observation0)\n reward0 = self.evaluate(reward0)\n\n observation1 = env.reset().observation\n reward1 = env.step(tf.zeros([batch_size] + action_shape)).reward\n self.evaluate(observation1)\n self.evaluate(reward1)\n\n self.assertNotAllClose(observation0, observation1)\n self.assertNotAllClose(reward0, reward1)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.agents.qtopt.qtopt_agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.agents.ddpg import critic_network\nfrom tf_agents.agents.qtopt import qtopt_agent\nfrom tf_agents.networks import network\nfrom tf_agents.policies.samplers import cem_actions_sampler_continuous\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import test_utils as trajectories_test_utils\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, input_spec, name=None, bias=2):\n super(DummyNet, self).__init__(\n input_spec, state_spec=(), name=name)\n\n # Store custom layers that can be serialized through the Checkpointable API.\n self._dummy_layers = [\n tf.keras.layers.Dense(\n 1,\n kernel_initializer=tf.constant_initializer([2, 1]),\n bias_initializer=tf.constant_initializer([bias]))\n ]\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n for layer in self._dummy_layers:\n inputs = inputs[0]\n inputs = layer(inputs)\n return tf.reshape(inputs, [-1]), network_state\n\n\nclass QtoptAgentTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(QtoptAgentTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n self._observation_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._observation_spec)\n self._action_spec = (\n tensor_spec.BoundedTensorSpec([1], tf.float32, 0.0, 1.0))\n\n # Initiate random mean and var.\n self._num_samples = 32\n action_size = 1\n np.random.seed(1999)\n samples = np.random.rand(self._num_samples,\n action_size).astype(np.float32) # [N, a]\n self._mean = np.mean(samples, axis=0)\n self._var = np.var(samples, axis=0)\n self._sampler = cem_actions_sampler_continuous.GaussianActionsSampler(\n action_spec=self._action_spec)\n\n def testCreateAgent(self):\n q_net = critic_network.CriticNetwork(\n (self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=None,\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n actions_sampler=self._sampler)\n self.assertIsNotNone(agent.policy)\n\n def testInitializeAgent(self):\n q_net = critic_network.CriticNetwork(\n (self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=None,\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n actions_sampler=self._sampler)\n agent.initialize()\n\n def testPolicy(self):\n q_net = critic_network.CriticNetwork(\n (self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=None,\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n 
actions_sampler=self._sampler)\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n policy = agent.policy\n action_step = policy.action(time_steps)\n # Batch size 2.\n self.assertAllEqual(\n [2] + self._action_spec.shape.as_list(),\n action_step.action.shape,\n )\n self.evaluate(tf.compat.v1.initialize_all_variables())\n actions_ = self.evaluate(action_step.action)\n self.assertTrue(all(actions_ <= self._action_spec.maximum))\n self.assertTrue(all(actions_ >= self._action_spec.minimum))\n\n def testLoss(self):\n q_net = DummyNet((self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=None,\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n actions_sampler=self._sampler)\n\n agent._target_q_network_delayed = DummyNet(\n (self._observation_spec, self._action_spec), bias=1)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n\n actions = tf.constant([[0.0], [0.0]], dtype=tf.float32)\n action_steps = policy_step.PolicyStep(\n actions, info=())\n\n rewards = tf.constant([10, 20], dtype=tf.float32)\n discounts = tf.constant([0.9, 0.9], dtype=tf.float32)\n next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)\n next_time_steps = ts.transition(next_observations, rewards, discounts)\n\n experience = trajectories_test_utils.stacked_trajectory_from_transition(\n time_steps, action_steps, next_time_steps)\n\n # Using the kernel initializer [[2, 1], [1, 1]] and bias initializer\n # ([[2], [2]] for q_network/target_network, [[1], [1]] for delayed\n # target_network)\n # from DummyNet above, we can calculate the following values:\n # Q Network:\n # Q-value for first observation/action pair: 2 * 1 + 1 * 2 + 2 = 6\n # Q-value for second observation/action pair: 2 * 3 + 1 * 4 + 2 = 12\n # Target Network:\n # Q-value for first next_observation: 2 * 5 + 1 * 6 + 2 = 18\n # Q-value for second next_observation: 2 * 7 + 1 * 8 + 2 = 24\n # Delayed Target Network:\n # Q-value for first next_observation: 2 * 5 + 1 * 6 + 1 = 17\n # Q-value for second next_observation: 2 * 7 + 1 * 8 + 1 = 23\n # TD targets: 10 + 0.9 * min(17, 18) = 25.3; 20 + 0.9 * min(23, 24) = 40.7\n # TD errors: 25.3 - 6 = 19.3; 40.7 - 12 = 28.7\n # TD loss: 18.8 and 28.2 (Huber loss subtracts 0.5)\n # Overall loss: (18.8 + 28.2) / 2 = 23.5\n expected_td_loss = 23.5\n loss, loss_info = agent._loss(experience)\n\n self.evaluate(tf.compat.v1.initialize_all_variables())\n self.assertAllClose(self.evaluate(loss), expected_td_loss)\n self.assertAllClose(self.evaluate(tf.reduce_mean(loss_info.td_loss)),\n expected_td_loss)\n\n def verifyVariableAssignAndRestore(self,\n loss_fn=None):\n strategy = tf.distribute.get_strategy()\n with strategy.scope():\n # Use BehaviorCloningAgent instead of AWRAgent to test the network.\n q_net = critic_network.CriticNetwork(\n (self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=None,\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n actions_sampler=self._sampler)\n # Assign all vars to 0.\n for var in tf.nest.flatten(agent.variables):\n var.assign(tf.zeros_like(var))\n # Save checkpoint\n ckpt_dir = self.create_tempdir()\n checkpointer = common.Checkpointer(\n ckpt_dir=ckpt_dir, 
agent=agent)\n global_step = tf.constant(0)\n checkpointer.save(global_step)\n # Assign all vars to 1.\n for var in tf.nest.flatten(agent.variables):\n var.assign(tf.ones_like(var))\n # Restore to 0.\n checkpointer._checkpoint.restore(checkpointer._manager.latest_checkpoint)\n for var in tf.nest.flatten(agent.variables):\n value = var.numpy()\n if isinstance(value, np.int64):\n self.assertEqual(value, 0)\n else:\n self.assertAllEqual(\n value, np.zeros_like(value),\n msg='{} has var mean {}, expected 0.'.format(var.name, value))\n\n def verifyTrainAndRestore(self,\n loss_fn=None):\n \"\"\"Helper function for testing correct variable updating and restoring.\"\"\"\n batch_size = 2\n seq_len = 2\n observations = tensor_spec.sample_spec_nest(\n self._observation_spec, outer_dims=(batch_size, seq_len))\n actions = tensor_spec.sample_spec_nest(\n self._action_spec, outer_dims=(batch_size, seq_len))\n rewards = tf.constant([[10, 10], [20, 20]], dtype=tf.float32)\n discounts = tf.constant([[0.9, 0.9], [0.9, 0.9]], dtype=tf.float32)\n experience = trajectory.first(\n observation=observations,\n action=actions,\n policy_info=(),\n reward=rewards,\n discount=discounts)\n strategy = tf.distribute.get_strategy()\n with strategy.scope():\n q_net = critic_network.CriticNetwork(\n (self._observation_spec, self._action_spec))\n agent = qtopt_agent.QtOptAgent(\n self._time_step_spec,\n self._action_spec,\n q_network=q_net,\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n init_mean_cem=self._mean,\n init_var_cem=self._var,\n num_samples_cem=self._num_samples,\n actions_sampler=self._sampler,\n in_graph_bellman_update=True)\n loss_before_train = agent.loss(experience).loss\n # Check loss is stable.\n self.assertEqual(loss_before_train, agent.loss(experience).loss)\n # Train 1 step, verify that loss is decreased for the same input.\n agent.train(experience)\n loss_after_train = agent.loss(experience).loss\n self.assertLessEqual(loss_after_train, loss_before_train)\n # Assert loss evaluation is still stable, e.g. 
deterministic.\n self.assertLessEqual(loss_after_train, agent.loss(experience).loss)\n # Save checkpoint\n ckpt_dir = self.create_tempdir()\n checkpointer = common.Checkpointer(ckpt_dir=ckpt_dir, agent=agent)\n global_step = tf.constant(1)\n checkpointer.save(global_step)\n # Assign all vars to 0.\n for var in tf.nest.flatten(agent.variables):\n var.assign(tf.zeros_like(var))\n loss_after_zero = agent.loss(experience).loss\n self.assertEqual(loss_after_zero, agent.loss(experience).loss)\n self.assertNotEqual(loss_after_zero, loss_after_train)\n # Restore\n checkpointer._checkpoint.restore(checkpointer._manager.latest_checkpoint)\n loss_after_restore = agent.loss(experience).loss\n self.assertNotEqual(loss_after_restore, loss_after_zero)\n self.assertEqual(loss_after_restore, loss_after_train)\n\n def testAssignAndRestore(self):\n self.verifyVariableAssignAndRestore()\n self.verifyTrainAndRestore()\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Converts TensorFlow Policies into Python Policies.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Optional, Text\nfrom absl import logging\n\nimport tensorflow as tf\nfrom tf_agents.policies import py_policy\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import session_utils\n\n\nclass PyTFPolicy(py_policy.PyPolicy, session_utils.SessionUser):\n \"\"\"Exposes a Python policy as wrapper over a TF Policy.\"\"\"\n\n _time_step = ... # type: ts.TimeStep\n _policy_state = ... # type: types.NestedPlaceHolder\n _action_step = ... # type: policy_step.PolicyStep\n\n # TODO(damienv): currently, the initial policy state must be batched\n # if batch_size is given. Without losing too much generality, the initial\n # policy state could be the same for every element in the batch.\n # In that case, the initial policy state could be given with no batch\n # dimension.\n # TODO(sfishman): Remove batch_size param entirely.\n def __init__(self,\n policy: tf_policy.TFPolicy,\n batch_size: Optional[int] = None,\n seed: Optional[types.Seed] = None):\n \"\"\"Initializes a new `PyTFPolicy`.\n\n Args:\n policy: A TF Policy implementing `tf_policy.TFPolicy`.\n batch_size: (deprecated)\n seed: Seed to use if policy performs random actions (optional).\n \"\"\"\n if not isinstance(policy, tf_policy.TFPolicy):\n logging.warning('Policy should implement tf_policy.TFPolicy')\n\n if batch_size is not None:\n logging.warning('In PyTFPolicy constructor, `batch_size` is deprecated, '\n 'this parameter has no effect. 
This argument will be '\n 'removed on 2019-05-01')\n\n time_step_spec = tensor_spec.to_nest_array_spec(policy.time_step_spec)\n action_spec = tensor_spec.to_nest_array_spec(policy.action_spec)\n super(PyTFPolicy, self).__init__(\n time_step_spec, action_spec, policy_state_spec=(), info_spec=())\n\n self._tf_policy = policy\n self.session = None\n\n self._policy_state_spec = tensor_spec.to_nest_array_spec(\n self._tf_policy.policy_state_spec)\n\n self._batch_size = None\n self._batched = None\n self._seed = seed\n self._built = False\n\n def _construct(self, batch_size, graph):\n \"\"\"Construct the agent graph through placeholders.\"\"\"\n\n self._batch_size = batch_size\n self._batched = batch_size is not None\n\n outer_dims = [self._batch_size] if self._batched else [1]\n with graph.as_default():\n self._time_step = tensor_spec.to_nest_placeholder(\n self._tf_policy.time_step_spec, outer_dims=outer_dims)\n self._tf_initial_state = self._tf_policy.get_initial_state(\n batch_size=self._batch_size or 1)\n\n self._policy_state = tf.nest.map_structure(\n lambda ps: tf.compat.v1.placeholder( # pylint: disable=g-long-lambda\n ps.dtype,\n ps.shape,\n name='policy_state'),\n self._tf_initial_state)\n self._action_step = self._tf_policy.action(\n self._time_step, self._policy_state, seed=self._seed)\n\n def initialize(self,\n batch_size: Optional[int],\n graph: Optional[tf.Graph] = None):\n if self._built:\n raise RuntimeError('PyTFPolicy can only be initialized once.')\n\n if not graph:\n graph = tf.compat.v1.get_default_graph()\n\n self._construct(batch_size, graph)\n var_list = tf.nest.flatten(self._tf_policy.variables())\n common.initialize_uninitialized_variables(self.session, var_list)\n self._built = True\n\n def save(self,\n policy_dir: Optional[Text] = None,\n graph: Optional[tf.Graph] = None):\n if not self._built:\n raise RuntimeError('PyTFPolicy has not been initialized yet.')\n\n if not graph:\n graph = tf.compat.v1.get_default_graph()\n\n with graph.as_default():\n global_step = tf.compat.v1.train.get_or_create_global_step()\n policy_checkpointer = common.Checkpointer(\n ckpt_dir=policy_dir, policy=self._tf_policy, global_step=global_step)\n policy_checkpointer.initialize_or_restore(self.session)\n with self.session.as_default():\n policy_checkpointer.save(global_step)\n\n def restore(self,\n policy_dir: Text,\n graph: Optional[tf.Graph] = None,\n assert_consumed: bool = True):\n \"\"\"Restores the policy from the checkpoint.\n\n Args:\n policy_dir: Directory with the checkpoint.\n graph: A graph, inside which policy the is restored (optional).\n assert_consumed: If true, contents of the checkpoint will be checked\n for a match against graph variables.\n\n Returns:\n step: Global step associated with the restored policy checkpoint.\n\n Raises:\n RuntimeError: if the policy is not initialized.\n AssertionError: if the checkpoint contains variables which do not have\n matching names in the graph, and assert_consumed is set to True.\n\n \"\"\"\n\n if not self._built:\n raise RuntimeError(\n 'PyTFPolicy must be initialized before being restored.')\n if not graph:\n graph = tf.compat.v1.get_default_graph()\n\n with graph.as_default():\n global_step = tf.compat.v1.train.get_or_create_global_step()\n policy_checkpointer = common.Checkpointer(\n ckpt_dir=policy_dir, policy=self._tf_policy, global_step=global_step)\n status = policy_checkpointer.initialize_or_restore(self.session)\n with self.session.as_default():\n if assert_consumed:\n status.assert_consumed()\n status.run_restore_ops()\n 
return self.session.run(global_step)\n\n def _build_from_time_step(self, time_step):\n outer_shape = nest_utils.get_outer_array_shape(time_step,\n self._time_step_spec)\n if len(outer_shape) == 1:\n self.initialize(outer_shape[0])\n elif not outer_shape:\n self.initialize(None)\n else:\n raise ValueError(\n 'Cannot handle more than one outer dimension. Saw {} outer '\n 'dimensions: {}'.format(len(outer_shape), outer_shape))\n\n def _get_initial_state(self, batch_size):\n if not self._built:\n self.initialize(batch_size)\n\n if batch_size not in [self._batch_size, self._batch_size or 1]:\n raise ValueError(\n '`batch_size` argument is different from the batch size provided '\n 'previously. Expected {}, but saw {}.'.format(self._batch_size,\n batch_size))\n return self.session.run(self._tf_initial_state)\n\n def _action(self, time_step, policy_state, seed: Optional[types.Seed] = None):\n if seed is not None:\n raise ValueError('`seed` is passed to the class as an argument.')\n if not self._built:\n self._build_from_time_step(time_step)\n\n batch_size = None\n if time_step.step_type.shape:\n batch_size = time_step.step_type.shape[0]\n if self._batch_size != batch_size:\n raise ValueError(\n 'The batch size of time_step is different from the batch size '\n 'provided previously. Expected {}, but saw {}.'.format(\n self._batch_size, batch_size))\n\n if not self._batched:\n # Since policy_state is given in a batched form from the policy and we\n # simply have to send it back we do not need to worry about it. Only\n # update time_step.\n time_step = nest_utils.batch_nested_array(time_step)\n\n nest_utils.assert_same_structure(self._time_step, time_step)\n feed_dict = {self._time_step: time_step}\n if policy_state is not None:\n # Flatten policy_state to handle specs that are not hashable due to lists.\n for state_ph, state in zip(\n tf.nest.flatten(self._policy_state), tf.nest.flatten(policy_state)):\n feed_dict[state_ph] = state\n\n action_step = self.session.run(self._action_step, feed_dict)\n action, state, info = action_step\n\n if not self._batched:\n action, info = nest_utils.unbatch_nested_array([action, info])\n\n return policy_step.PolicyStep(action, state, info)\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Policy for reward prediction and boltzmann exploration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Optional, Text, Tuple, Sequence\n\nimport gin\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.bandits.networks import heteroscedastic_q_network\nfrom tf_agents.bandits.policies import constraints as constr\nfrom tf_agents.bandits.specs import utils as bandit_spec_utils\nfrom tf_agents.distributions import shifted_categorical\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.policies import utils as policy_utilities\nfrom 
tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.typing import types\n\n\[email protected]\nclass BoltzmannRewardPredictionPolicy(tf_policy.TFPolicy):\n \"\"\"Class to build Reward Prediction Policies with Boltzmann exploration.\"\"\"\n\n def __init__(self,\n time_step_spec: types.TimeStep,\n action_spec: types.NestedTensorSpec,\n reward_network: types.Network,\n temperature: types.FloatOrReturningFloat = 1.0,\n boltzmann_gumbel_exploration_constant: Optional[\n types.Float] = None,\n observation_and_action_constraint_splitter: Optional[\n types.Splitter] = None,\n accepts_per_arm_features: bool = False,\n constraints: Tuple[constr.NeuralConstraint, ...] = (),\n emit_policy_info: Tuple[Text, ...] = (),\n num_samples_list: Sequence[tf.Variable] = (),\n name: Optional[Text] = None):\n \"\"\"Builds a BoltzmannRewardPredictionPolicy given a reward network.\n\n This policy takes a tf_agents.Network predicting rewards and chooses an\n action with weighted probabilities (i.e., using a softmax over the network\n estimates of value for each action).\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n reward_network: An instance of a `tf_agents.network.Network`,\n callable via `network(observation, step_type) -> (output, final_state)`.\n temperature: float or callable that returns a float. The temperature used\n in the Boltzmann exploration.\n boltzmann_gumbel_exploration_constant: optional positive float. When\n provided, the policy implements Neural Bandit with Boltzmann-Gumbel\n exploration from the paper:\n N. Cesa-Bianchi et al., \"Boltzmann Exploration Done Right\", NIPS 2017.\n observation_and_action_constraint_splitter: A function used for masking\n valid/invalid actions with each state of the environment. The function\n takes in a full observation and returns a tuple consisting of 1) the\n part of the observation intended as input to the network and 2) the\n mask. The mask should be a 0-1 `Tensor` of shape\n `[batch_size, num_actions]`. This function should also work with a\n `TensorSpec` as input, and should output `TensorSpec` objects for the\n observation and mask.\n accepts_per_arm_features: (bool) Whether the policy accepts per-arm\n features.\n constraints: iterable of constraints objects that are instances of\n `tf_agents.bandits.agents.NeuralConstraint`.\n emit_policy_info: (tuple of strings) what side information we want to get\n as part of the policy info. Allowed values can be found in\n `policy_utilities.PolicyInfo`.\n num_samples_list: list or tuple of tf.Variable's. Used only in\n Boltzmann-Gumbel exploration. Otherwise, empty.\n name: The name of this policy. All variables in this module will fall\n under that name. 
Defaults to the class name.\n\n Raises:\n NotImplementedError: If `action_spec` contains more than one\n `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.\n \"\"\"\n policy_utilities.check_no_mask_with_arm_features(\n accepts_per_arm_features, observation_and_action_constraint_splitter)\n flat_action_spec = tf.nest.flatten(action_spec)\n if len(flat_action_spec) > 1:\n raise NotImplementedError(\n 'action_spec can only contain a single BoundedTensorSpec.')\n\n self._temperature = temperature\n action_spec = flat_action_spec[0]\n if (not tensor_spec.is_bounded(action_spec) or\n not tensor_spec.is_discrete(action_spec) or\n action_spec.shape.rank > 1 or\n action_spec.shape.num_elements() != 1):\n raise NotImplementedError(\n 'action_spec must be a BoundedTensorSpec of type int32 and shape (). '\n 'Found {}.'.format(action_spec))\n self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1\n self._action_offset = action_spec.minimum\n reward_network.create_variables()\n self._reward_network = reward_network\n self._constraints = constraints\n\n self._boltzmann_gumbel_exploration_constant = (\n boltzmann_gumbel_exploration_constant)\n self._num_samples_list = num_samples_list\n if self._boltzmann_gumbel_exploration_constant is not None:\n if self._boltzmann_gumbel_exploration_constant <= 0.0:\n raise ValueError(\n 'The Boltzmann-Gumbel exploration constant is expected to be ',\n 'positive. Found: ', self._boltzmann_gumbel_exploration_constant)\n if self._action_offset > 0:\n raise NotImplementedError('Action offset is not supported when ',\n 'Boltzmann-Gumbel exploration is enabled.')\n if accepts_per_arm_features:\n raise NotImplementedError(\n 'Boltzmann-Gumbel exploration is not supported ',\n 'for arm features case.')\n if len(self._num_samples_list) != self._expected_num_actions:\n raise ValueError(\n 'Size of num_samples_list: ', len(self._num_samples_list),\n ' does not match the expected number of actions:',\n self._expected_num_actions)\n\n self._emit_policy_info = emit_policy_info\n predicted_rewards_mean = ()\n if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:\n predicted_rewards_mean = tensor_spec.TensorSpec(\n [self._expected_num_actions])\n bandit_policy_type = ()\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:\n bandit_policy_type = (\n policy_utilities.create_bandit_policy_type_tensor_spec(shape=[1]))\n if accepts_per_arm_features:\n # The features for the chosen arm is saved to policy_info.\n chosen_arm_features_info = (\n policy_utilities.create_chosen_arm_features_info_spec(\n time_step_spec.observation))\n info_spec = policy_utilities.PerArmPolicyInfo(\n predicted_rewards_mean=predicted_rewards_mean,\n bandit_policy_type=bandit_policy_type,\n chosen_arm_features=chosen_arm_features_info)\n else:\n info_spec = policy_utilities.PolicyInfo(\n predicted_rewards_mean=predicted_rewards_mean,\n bandit_policy_type=bandit_policy_type)\n\n self._accepts_per_arm_features = accepts_per_arm_features\n\n super(BoltzmannRewardPredictionPolicy, self).__init__(\n time_step_spec, action_spec,\n policy_state_spec=reward_network.state_spec,\n clip=False,\n info_spec=info_spec,\n emit_log_probability='log_probability' in emit_policy_info,\n observation_and_action_constraint_splitter=(\n observation_and_action_constraint_splitter),\n name=name)\n\n @property\n def accepts_per_arm_features(self):\n return self._accepts_per_arm_features\n\n def _variables(self):\n policy_variables = self._reward_network.variables\n for c 
in self._constraints:\n policy_variables.append(c.variables)\n return policy_variables\n\n def _get_temperature_value(self):\n if callable(self._temperature):\n return self._temperature()\n return self._temperature\n\n def _distribution(self, time_step, policy_state):\n observation = time_step.observation\n if self.observation_and_action_constraint_splitter is not None:\n observation, _ = self.observation_and_action_constraint_splitter(\n observation)\n\n predictions, policy_state = self._reward_network(\n observation, time_step.step_type, policy_state)\n batch_size = tf.shape(predictions)[0]\n\n if isinstance(self._reward_network,\n heteroscedastic_q_network.HeteroscedasticQNetwork):\n predicted_reward_values = predictions.q_value_logits\n else:\n predicted_reward_values = predictions\n\n predicted_reward_values.shape.with_rank_at_least(2)\n predicted_reward_values.shape.with_rank_at_most(3)\n if predicted_reward_values.shape[\n -1] is not None and predicted_reward_values.shape[\n -1] != self._expected_num_actions:\n raise ValueError(\n 'The number of actions ({}) does not match the reward_network output'\n ' size ({}).'.format(self._expected_num_actions,\n predicted_reward_values.shape[1]))\n\n mask = constr.construct_mask_from_multiple_sources(\n time_step.observation, self._observation_and_action_constraint_splitter,\n self._constraints, self._expected_num_actions)\n\n if self._boltzmann_gumbel_exploration_constant is not None:\n logits = predicted_reward_values\n\n # Apply masking if needed. Overwrite the logits for invalid actions to\n # logits.dtype.min.\n if mask is not None:\n almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)\n logits = tf.compat.v2.where(\n tf.cast(mask, tf.bool), logits, almost_neg_inf)\n\n gumbel_dist = tfp.distributions.Gumbel(loc=0., scale=1.)\n gumbel_samples = gumbel_dist.sample(tf.shape(logits))\n num_samples_list_float = tf.stack(\n [tf.cast(x.read_value(), tf.float32) for x in self._num_samples_list],\n axis=-1)\n exploration_weights = tf.math.divide_no_nan(\n self._boltzmann_gumbel_exploration_constant,\n tf.sqrt(num_samples_list_float))\n final_logits = logits + exploration_weights * gumbel_samples\n actions = tf.cast(\n tf.math.argmax(final_logits, axis=1), self._action_spec.dtype)\n # Log probability is not available in closed form. We treat this as a\n # deterministic policy at the moment.\n log_probability = tf.zeros([batch_size], tf.float32)\n else:\n # Apply the temperature scaling, needed for Boltzmann exploration.\n logits = predicted_reward_values / self._get_temperature_value()\n\n # Apply masking if needed. 
Overwrite the logits for invalid actions to\n # logits.dtype.min.\n if mask is not None:\n almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)\n logits = tf.compat.v2.where(\n tf.cast(mask, tf.bool), logits, almost_neg_inf)\n\n if self._action_offset != 0:\n distribution = shifted_categorical.ShiftedCategorical(\n logits=logits,\n dtype=self._action_spec.dtype,\n shift=self._action_offset)\n else:\n distribution = tfp.distributions.Categorical(\n logits=logits,\n dtype=self._action_spec.dtype)\n\n actions = distribution.sample()\n log_probability = distribution.log_prob(actions)\n\n bandit_policy_values = tf.fill([batch_size, 1],\n policy_utilities.BanditPolicyType.BOLTZMANN)\n\n if self._accepts_per_arm_features:\n # Saving the features for the chosen action to the policy_info.\n def gather_observation(obs):\n return tf.gather(params=obs, indices=actions, batch_dims=1)\n\n chosen_arm_features = tf.nest.map_structure(\n gather_observation,\n observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])\n policy_info = policy_utilities.PerArmPolicyInfo(\n log_probability=log_probability if\n policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info\n else (),\n predicted_rewards_mean=(\n predicted_reward_values if policy_utilities.InfoFields\n .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),\n bandit_policy_type=(bandit_policy_values\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE\n in self._emit_policy_info else ()),\n chosen_arm_features=chosen_arm_features)\n else:\n policy_info = policy_utilities.PolicyInfo(\n log_probability=log_probability if\n policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info\n else (),\n predicted_rewards_mean=(\n predicted_reward_values if policy_utilities.InfoFields\n .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),\n bandit_policy_type=(bandit_policy_values\n if policy_utilities.InfoFields.BANDIT_POLICY_TYPE\n in self._emit_policy_info else ()))\n\n return policy_step.PolicyStep(\n tfp.distributions.Deterministic(loc=actions), policy_state, policy_info)\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for tf_agents.replay_buffers.table.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents import specs\nfrom tf_agents.replay_buffers import table\n\nfrom tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import # TF internal\n\n\nclass TableTest(tf.test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadWriteSingle(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'action'), [\n specs.TensorSpec([5], tf.float32, 'camera'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n ]\n replay_table = table.Table(spec, capacity=3)\n variables = 
replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],\n [v.name for v in variables])\n\n expected_values = [\n 1 * np.ones(spec[0].shape.as_list()),\n [2 * np.ones(spec[1][0].shape.as_list()),\n 3 * np.ones(spec[1][1].shape.as_list())]\n ]\n tensors = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),\n expected_values)\n\n write_op = replay_table.write(0, tensors)\n read_op = replay_table.read(0)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadWriteBatch(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'action'), [\n specs.TensorSpec([5], tf.float32, 'camera'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n ]\n replay_table = table.Table(spec, capacity=4)\n\n batch_size = 2\n expected_values = [\n 1 * np.ones([batch_size] + spec[0].shape.as_list()),\n [2 * np.ones([batch_size] + spec[1][0].shape.as_list()),\n 3 * np.ones([batch_size] + spec[1][1].shape.as_list())]\n ]\n tensors = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),\n expected_values)\n\n write_op = replay_table.write(list(range(batch_size)), tensors)\n read_op = replay_table.read(list(range(batch_size)))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadPartialSlots(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'action'), [\n specs.TensorSpec([5], tf.float32, 'camera'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n ]\n replay_table = table.Table(spec, capacity=4)\n\n batch_size = 2\n action = 1 * np.ones([batch_size] + spec[0].shape.as_list())\n camera = 2 * np.ones([batch_size] + spec[1][0].shape.as_list())\n lidar = 3 * np.ones([batch_size] + spec[1][1].shape.as_list())\n\n values = [action, [camera, lidar]]\n tensors = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32), values)\n\n write_op = replay_table.write(list(range(batch_size)), tensors)\n read_op = replay_table.read(\n list(range(batch_size)), slots=['lidar', ['action']])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n expected_values = [lidar, [action]]\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testWritePartialSlots(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'action'), [\n specs.TensorSpec([5], tf.float32, 'camera'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n ]\n replay_table = table.Table(spec, capacity=4)\n\n batch_size = 2\n\n action1 = 1 * np.ones([batch_size] + spec[0].shape.as_list())\n camera1 = 2 * np.ones([batch_size] + spec[1][0].shape.as_list())\n lidar1 = 3 * np.ones([batch_size] + spec[1][1].shape.as_list())\n write_op1 = replay_table.write(\n list(range(batch_size)), [action1, [camera1, lidar1]])\n\n lidar2 = 10 * np.ones([batch_size] + spec[1][1].shape.as_list())\n action2 = 20 * np.ones([batch_size] + spec[0].shape.as_list())\n write_op2 = replay_table.write(\n list(range(batch_size)), [lidar2, [action2]], ['lidar', ['action']])\n read_op 
= replay_table.read(list(range(batch_size)))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op1)\n self.evaluate(write_op2)\n read_value_ = self.evaluate(read_op)\n expected_values = [action2, [camera1, lidar2]]\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadWriteDict(self):\n spec = {\n 'action': specs.TensorSpec([3], tf.float32, 'action'),\n 'camera': specs.TensorSpec([5], tf.float32, 'camera'),\n 'lidar': specs.TensorSpec([3, 2], tf.float32, 'lidar')\n }\n replay_table = table.Table(spec, capacity=3)\n\n variables = replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],\n [v.name for v in variables])\n\n expected_values = {\n 'action': 1 * np.ones(spec['action'].shape.as_list()),\n 'camera': 2 * np.ones(spec['camera'].shape.as_list()),\n 'lidar': 3 * np.ones(spec['lidar'].shape.as_list())\n }\n tensors = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),\n expected_values)\n\n write_op = replay_table.write(0, tensors)\n read_op = replay_table.read(0)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadWriteNamedTuple(self):\n # pylint: disable=invalid-name\n Observation = collections.namedtuple('Observation',\n ['action', 'camera', 'lidar'])\n # pylint: enable=invalid-name\n spec = Observation(\n action=specs.TensorSpec([3], tf.float32, 'action'),\n camera=specs.TensorSpec([5], tf.float32, 'camera'),\n lidar=specs.TensorSpec([3, 2], tf.float32, 'lidar')\n )\n replay_table = table.Table(spec, capacity=3)\n\n variables = replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],\n [v.name for v in variables])\n\n expected_values = Observation(\n action=1 * np.ones(spec.action.shape.as_list()),\n camera=2 * np.ones(spec.camera.shape.as_list()),\n lidar=3 * np.ones(spec.lidar.shape.as_list())\n )\n tensors = tf.nest.map_structure(\n lambda x: tf.convert_to_tensor(value=x, dtype=tf.float32),\n expected_values)\n\n write_op = replay_table.write(0, tensors)\n read_op = replay_table.read(0)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n tf.nest.map_structure(self.assertAllClose, read_value_, expected_values)\n\n @test_util.run_in_graph_and_eager_modes()\n def testEmptySpecNames(self):\n spec = [\n specs.TensorSpec([3], tf.float32),\n specs.TensorSpec([5], tf.float32, ''),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n replay_table = table.Table(spec, capacity=3)\n\n variables = replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/slot:0', 'Table/slot_1:0', 'Table/lidar:0'],\n [v.name for v in variables])\n\n expected_slots = ['slot', 'slot_1', 'lidar']\n self.assertAllEqual(replay_table.slots, expected_slots)\n tensors = replay_table.read(0, expected_slots)\n tf.nest.map_structure(lambda x, y: self.assertEqual(x.shape, y.shape), spec,\n tensors)\n\n @test_util.run_in_graph_and_eager_modes()\n def testDuplicateSpecNames(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'lidar'),\n specs.TensorSpec([5], tf.float32, 'lidar'),\n 
specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n replay_table = table.Table(spec, capacity=3)\n\n variables = replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/lidar:0', 'Table/lidar_1:0', 'Table/lidar_2:0'],\n [v.name for v in variables])\n\n expected_slots = ['lidar', 'lidar_1', 'lidar_2']\n self.assertAllEqual(replay_table.slots, expected_slots)\n tensors = replay_table.read(0, expected_slots)\n tf.nest.map_structure(lambda x, y: self.assertEqual(x.shape, y.shape), spec,\n tensors)\n\n @test_util.run_in_graph_and_eager_modes()\n def testReadWriteString(self):\n spec = [\n specs.TensorSpec([3], tf.float32, 'action'), [\n specs.TensorSpec([], tf.string, 'camera'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n ]\n replay_table = table.Table(spec, capacity=3)\n variables = replay_table.variables()\n self.assertEqual(3, len(variables))\n self.assertAllEqual(['Table/action:0', 'Table/camera:0', 'Table/lidar:0'],\n [v.name for v in variables])\n\n expected_values = [\n 1 * np.ones(spec[0].shape.as_list()),\n [b'foo',\n 3 * np.ones(spec[1][1].shape.as_list())]\n ]\n tensors = tf.nest.map_structure(\n lambda x, dtype: tf.convert_to_tensor(value=x, dtype=dtype),\n expected_values, [tf.float32, [tf.string, tf.float32]])\n\n write_op = replay_table.write(0, tensors)\n read_op = replay_table.read(0)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(write_op)\n read_value_ = self.evaluate(read_op)\n self.assertAllClose(read_value_[0], expected_values[0])\n self.assertEqual(read_value_[1][0], expected_values[1][0])\n self.assertAllClose(read_value_[1][1], expected_values[1][1])\n\n @test_util.run_in_graph_and_eager_modes()\n def testSaveRestore(self):\n spec = [\n specs.TensorSpec([3], tf.float32),\n specs.TensorSpec([5], tf.float32, 'lidar'),\n specs.TensorSpec([3, 2], tf.float32, 'lidar')\n ]\n replay_table = table.Table(spec, capacity=3)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n directory = self.get_temp_dir()\n prefix = os.path.join(directory, 'table')\n root = tf.train.Checkpoint(table=replay_table)\n save_path = root.save(prefix)\n root.restore(save_path).assert_consumed().run_restore_ops()\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for Python metrics.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nfrom typing import Any, Optional, Sequence, Text, Union\n\nfrom absl import logging\nimport numpy as np\nimport six\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.metrics import tf_metric\nfrom tf_agents.trajectories import trajectory as traj\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\n\n\nPyMetricType = types.ForwardRef('PyMetric') # pylint: disable=invalid-name\nMetricType = 
Union[tf_metric.TFStepMetric, PyMetricType]\n\n\ndef run_summaries(metrics: Sequence[PyMetricType],\n session: Optional[tf.compat.v1.Session] = None):\n \"\"\"Execute summary ops for py_metrics.\n\n Args:\n metrics: A list of py_metric.Base objects.\n session: A TensorFlow session-like object. If it is not provided, it will\n use the current TensorFlow session context manager.\n\n Raises:\n RuntimeError: If .tf_summaries() was not previously called on any of the\n `metrics`.\n AttributeError: If session is not provided and there is no default session\n provided by a context manager.\n \"\"\"\n if session is None:\n default_session = tf.compat.v1.get_default_session()\n if default_session is None:\n raise AttributeError(\n 'No TensorFlow session-like object was provided, and none '\n 'could be retrieved using \\'tf.get_default_session()\\'.')\n session = default_session\n\n for metric in metrics:\n if metric.summary_op is None:\n raise RuntimeError('metric.tf_summaries() must be called on py_metric '\n '{} before attempting to run '\n 'summaries.'.format(metric.name))\n summary_ops = [metric.summary_op for metric in metrics]\n feed_dict = dict(\n (metric.summary_placeholder, metric.result()) for metric in metrics)\n session.run(summary_ops, feed_dict=feed_dict)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass PyMetric(tf.Module):\n \"\"\"Defines the interface for metrics.\"\"\"\n\n def __init__(self, name: Text, prefix: Text = 'Metrics'):\n \"\"\"Creates a metric.\"\"\"\n super(PyMetric, self).__init__(name)\n self._prefix = prefix\n self._summary_placeholder = None\n self._summary_op = None\n\n @property\n def prefix(self) -> Text:\n \"\"\"Prefix for the metric.\"\"\"\n return self._prefix\n\n @abc.abstractmethod\n def reset(self):\n \"\"\"Resets internal stat gathering variables used to compute the metric.\"\"\"\n\n @abc.abstractmethod\n def result(self) -> Any:\n \"\"\"Evaluates the current value of the metric.\"\"\"\n\n def log(self):\n tag = common.join_scope(self.prefix, self.name)\n logging.info('%s', '{0} = {1}'.format(tag, self.result()))\n\n def tf_summaries(self,\n train_step: types.Int = None,\n step_metrics: Sequence[MetricType] = ()) -> tf.Operation:\n \"\"\"Build TF summary op and placeholder for this metric.\n\n To execute the op, call py_metric.run_summaries.\n\n Args:\n train_step: Step counter for training iterations. 
If None, no metric is\n generated against the global step.\n step_metrics: Step values to plot as X axis in addition to global_step.\n\n Returns:\n The summary op.\n\n Raises:\n RuntimeError: If this method has already been called (it can only be\n called once).\n ValueError: If any item in step_metrics is not of type PyMetric or\n tf_metric.TFStepMetric.\n \"\"\"\n if self.summary_op is not None:\n raise RuntimeError('metric.tf_summaries() can only be called once.')\n\n tag = common.join_scope(self.prefix, self.name)\n summaries = []\n summaries.append(tf.compat.v2.summary.scalar(\n name=tag, data=self.summary_placeholder, step=train_step))\n prefix = self.prefix\n if prefix:\n prefix += '_'\n for step_metric in step_metrics:\n # Skip plotting the metrics against itself.\n if self.name == step_metric.name:\n continue\n step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, self.name)\n if isinstance(step_metric, PyMetric):\n step_tensor = step_metric.summary_placeholder\n elif isinstance(step_metric, tf_metric.TFStepMetric):\n step_tensor = step_metric.result()\n else:\n raise ValueError('step_metric is not PyMetric or TFStepMetric: '\n '{}'.format(step_metric))\n summaries.append(tf.compat.v2.summary.scalar(\n name=step_tag,\n data=self.summary_placeholder,\n step=step_tensor))\n\n self._summary_op = tf.group(*summaries)\n return self._summary_op\n\n @property\n def summary_placeholder(self) -> tf.compat.v1.placeholder:\n \"\"\"TF placeholder to be used for the result of this metric.\"\"\"\n if self._summary_placeholder is None:\n result = self.result()\n if not isinstance(result, (np.ndarray, np.generic)):\n result = np.array(result)\n dtype = tf.as_dtype(result.dtype)\n shape = result.shape\n self._summary_placeholder = tf.compat.v1.placeholder(\n dtype, shape=shape, name='{}_ph'.format(self.name))\n return self._summary_placeholder\n\n @property\n def summary_op(self) -> tf.Operation:\n \"\"\"TF summary op for this metric.\"\"\"\n return self._summary_op\n\n @staticmethod\n def aggregate(metrics: Sequence[PyMetricType]) -> types.Float:\n \"\"\"Aggregates a list of metrics.\n\n The default behaviour is to return the average of the metrics.\n\n Args:\n metrics: a list of metrics, of the same class.\n Returns:\n The result of aggregating this metric.\n \"\"\"\n return np.mean([metric.result() for metric in metrics])\n\n def __call__(self, *args):\n \"\"\"Method to update the metric contents.\n\n To change the behavior of this function, override the call method.\n\n Different subclasses might use this differently. 
For instance, the\n PyStepMetric takes in a trajectory, while the CounterMetric takes no\n parameters.\n\n Args:\n *args: See call method of subclass for specific arguments.\n \"\"\"\n self.call(*args)\n\n\nclass PyStepMetric(PyMetric):\n \"\"\"Defines the interface for metrics that operate on trajectories.\"\"\"\n\n @abc.abstractmethod\n def call(self, trajectory: traj.Trajectory):\n \"\"\"Processes a trajectory to update the metric.\n\n Args:\n trajectory: A trajectory.Trajectory.\n \"\"\"\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Project inputs to a tanh-squashed MultivariateNormalDiag distribution.\n\nThis network reproduces Soft Actor-Critic refererence implementation in:\nhttps://github.com/rail-berkeley/softlearning/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Callable, Optional, Text\n\nimport gin\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.distributions import utils as distribution_utils\nfrom tf_agents.networks import network\nfrom tf_agents.networks import utils as network_utils\nfrom tf_agents.specs import distribution_spec\nfrom tf_agents.specs import tensor_spec\n\nfrom tf_agents.typing import types\n\n\[email protected]\nclass TanhNormalProjectionNetwork(network.DistributionNetwork):\n \"\"\"Generates a tanh-squashed MultivariateNormalDiag distribution.\n\n Note: Due to the nature of the `tanh` function, values near the spec bounds\n cannot be returned.\n \"\"\"\n\n def __init__(self,\n sample_spec: types.TensorSpec,\n activation_fn: Optional[Callable[[types.Tensor],\n types.Tensor]] = None,\n std_transform: Optional[Callable[[types.Tensor],\n types.Tensor]] = tf.exp,\n name: Text = 'TanhNormalProjectionNetwork'):\n \"\"\"Creates an instance of TanhNormalProjectionNetwork.\n\n Args:\n sample_spec: A `tensor_spec.BoundedTensorSpec` detailing the shape and\n dtypes of samples pulled from the output distribution.\n activation_fn: Activation function to use in dense layer.\n std_transform: Transformation function to apply to the stddevs.\n name: A string representing name of the network.\n \"\"\"\n if len(tf.nest.flatten(sample_spec)) != 1:\n raise ValueError('Tanh Normal Projection network only supports single'\n ' spec samples.')\n output_spec = self._output_distribution_spec(sample_spec, name)\n super(TanhNormalProjectionNetwork, self).__init__(\n # We don't need these, but base class requires them.\n input_tensor_spec=None,\n state_spec=(),\n output_spec=output_spec,\n name=name)\n\n self._sample_spec = sample_spec\n self._std_transform = std_transform\n\n self._projection_layer = tf.keras.layers.Dense(\n sample_spec.shape.num_elements() * 2,\n activation=activation_fn,\n name='projection_layer')\n\n def _output_distribution_spec(self, sample_spec, network_name):\n input_param_shapes = {\n 'loc': sample_spec.shape,\n 'scale_diag': sample_spec.shape\n }\n 
input_param_spec = { # pylint: disable=g-complex-comprehension\n name: tensor_spec.TensorSpec(\n shape=shape,\n dtype=sample_spec.dtype,\n name=network_name + '_' + name)\n for name, shape in input_param_shapes.items()\n }\n\n def distribution_builder(*args, **kwargs):\n distribution = tfp.distributions.MultivariateNormalDiag(*args, **kwargs)\n return distribution_utils.scale_distribution_to_spec(\n distribution, sample_spec)\n\n return distribution_spec.DistributionSpec(\n distribution_builder, input_param_spec, sample_spec=sample_spec)\n\n def call(self,\n inputs: types.NestedTensor,\n outer_rank: int,\n training: bool = False,\n mask: Optional[types.NestedTensor] = None) -> types.NestedTensor:\n if inputs.dtype != self._sample_spec.dtype: # pytype: disable=attribute-error\n raise ValueError('Inputs to TanhNormalProjectionNetwork must match the '\n 'sample_spec.dtype.')\n\n if mask is not None:\n raise NotImplementedError(\n 'TanhNormalProjectionNetwork does not yet implement action masking; '\n 'got mask={}'.format(mask))\n\n # outer_rank is needed because the projection is not done on the raw\n # observations so getting the outer rank is hard as there is no spec to\n # compare to.\n batch_squash = network_utils.BatchSquash(outer_rank)\n inputs = batch_squash.flatten(inputs)\n\n means_and_stds = self._projection_layer(inputs, training=training)\n means, stds = tf.split(means_and_stds, num_or_size_splits=2, axis=-1)\n means = tf.reshape(means, [-1] + self._sample_spec.shape.as_list())\n means = tf.cast(means, self._sample_spec.dtype)\n\n if self._std_transform is not None:\n stds = self._std_transform(stds)\n stds = tf.cast(stds, self._sample_spec.dtype)\n\n means = batch_squash.unflatten(means)\n stds = batch_squash.unflatten(stds)\n\n return self.output_spec.build_distribution(loc=means, scale_diag=stds), () # pytype: disable=bad-return-type\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for episodic_replay_buffer using driver.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.drivers import dynamic_episode_driver\nfrom tf_agents.drivers import test_utils as driver_test_utils\nfrom tf_agents.environments import batched_py_environment\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.replay_buffers import episodic_replay_buffer\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import test_utils\n\n\nclass EpisodicReplayBufferDriverTest(test_utils.TestCase):\n\n # Creates a test EpisodicReplayBuffer.\n def _make_replay_buffer(self, tf_env):\n \"\"\"Default replay buffer factory.\"\"\"\n\n time_step_spec = tf_env.time_step_spec()\n action_spec = tf_env.action_spec()\n 
action_step_spec = policy_step.PolicyStep(\n action_spec, (), tensor_spec.TensorSpec((), tf.int32))\n trajectory_spec = trajectory.from_transition(time_step_spec,\n action_step_spec,\n time_step_spec)\n return episodic_replay_buffer.EpisodicReplayBuffer(\n trajectory_spec, end_episode_fn=lambda _: False)\n\n def testMultiStepEpisodicReplayBuffer(self):\n num_episodes = 5\n num_driver_episodes = 5\n\n # Create mock environment.\n py_env = batched_py_environment.BatchedPyEnvironment([\n driver_test_utils.PyEnvironmentMock(final_state=i+1)\n for i in range(num_episodes)\n ])\n env = tf_py_environment.TFPyEnvironment(py_env)\n\n # Creat mock policy.\n policy = driver_test_utils.TFPolicyMock(\n env.time_step_spec(), env.action_spec(), batch_size=num_episodes)\n\n # Create replay buffer and driver.\n replay_buffer = self._make_replay_buffer(env)\n stateful_buffer = episodic_replay_buffer.StatefulEpisodicReplayBuffer(\n replay_buffer, num_episodes)\n driver = dynamic_episode_driver.DynamicEpisodeDriver(\n env, policy, num_episodes=num_driver_episodes,\n observers=[stateful_buffer.add_batch])\n\n run_driver = driver.run()\n\n end_episodes = replay_buffer._maybe_end_batch_episodes(\n stateful_buffer.episode_ids, end_episode=True)\n\n completed_episodes = replay_buffer._completed_episodes()\n\n self.evaluate([\n tf.compat.v1.local_variables_initializer(),\n tf.compat.v1.global_variables_initializer()\n ])\n\n self.evaluate(run_driver)\n\n self.evaluate(end_episodes)\n completed_episodes = self.evaluate(completed_episodes)\n eps = [replay_buffer._get_episode(ep) for ep in completed_episodes]\n eps = self.evaluate(eps)\n\n episodes_length = [tf.nest.flatten(ep)[0].shape[0] for ep in eps]\n\n # Compare with expected output.\n self.assertAllEqual(completed_episodes, [3, 4, 5, 6, 7])\n self.assertAllEqual(episodes_length, [4, 4, 2, 1, 1])\n\n first = ts.StepType.FIRST\n mid = ts.StepType.MID\n last = ts.StepType.LAST\n\n step_types = [ep.step_type for ep in eps]\n observations = [ep.observation for ep in eps]\n rewards = [ep.reward for ep in eps]\n actions = [ep.action for ep in eps]\n\n self.assertAllClose([[first, mid, mid, last], [first, mid, mid, mid],\n [first, last], [first], [first]], step_types)\n\n self.assertAllClose([\n [0, 1, 3, 4],\n [0, 1, 3, 4],\n [0, 1],\n [0],\n [0],\n ], observations)\n\n self.assertAllClose([\n [1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2],\n [1],\n [1],\n ], actions)\n\n self.assertAllClose([\n [1, 1, 1, 0],\n [1, 1, 1, 1],\n [1, 0],\n [1],\n [1],\n ], rewards)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.experimental.examples.cql_sac.kumar20.cql_sac_train_eval.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.examples.cql_sac.kumar20 import 
cql_sac_train_eval\n\nFLAGS = flags.FLAGS\nTEST_DATA = 'third_party/py/tf_agents/examples/cql_sac/kumar20/dataset/test_data/antmaze-medium-play-v0_0.tfrecord'\nENV_NAME = 'antmaze-medium-play-v0'\n\n\nclass CqlSacTrainEval(tf.test.TestCase):\n\n def testBasic(self):\n root_dir = self.get_temp_dir()\n cql_sac_train_eval.train_eval(\n root_dir,\n dataset_path=TEST_DATA,\n env_name=ENV_NAME,\n num_gradient_updates=2,\n batch_size=4,\n eval_interval=2,\n eval_episodes=1,\n )\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_v2_behavior()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.keras.layers.Dense", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.keras.layers.LSTM", "tensorflow.compat.v2.keras.layers.Add", "tensorflow.compat.v2.nest.flatten", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.keras.layers.Conv2D" ], [ "tensorflow.zeros", "tensorflow.shape", "tensorflow.ones", "tensorflow.test.main", "tensorflow.compat.v1.set_random_seed", "numpy.array" ], [ "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.ones_like", "numpy.var", "tensorflow.test.main", "tensorflow.compat.v1.initialize_all_variables", "tensorflow.zeros_like", "tensorflow.constant_initializer", "tensorflow.keras.optimizers.Adam", "numpy.mean", "numpy.random.rand", "numpy.zeros_like", "tensorflow.compat.v1.enable_resource_variables", "tensorflow.distribute.get_strategy", "tensorflow.nest.flatten" ], [ "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.placeholder", "tensorflow.nest.flatten" ], [ "tensorflow.math.argmax", "tensorflow.fill", "tensorflow.constant", "tensorflow.zeros", "tensorflow.shape", "tensorflow.cast", "tensorflow.gather", "tensorflow.nest.flatten", "tensorflow.sqrt", "tensorflow.nest.map_structure" ], [ "tensorflow.convert_to_tensor", "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.train.Checkpoint", "tensorflow.test.main", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.nest.map_structure" ], [ "tensorflow.group", "tensorflow.as_dtype", "tensorflow.compat.v1.get_default_session", "tensorflow.compat.v2.summary.scalar", "numpy.array" ], [ "tensorflow.split", "tensorflow.cast", "tensorflow.nest.flatten" ], [ "tensorflow.nest.flatten", "tensorflow.compat.v1.local_variables_initializer", "tensorflow.test.main", "tensorflow.compat.v1.global_variables_initializer" ], [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ozen/pytorch-lightning
[ "3b0b402d30fa19e0fef7d150c30ff4bb14a64230", "3b0b402d30fa19e0fef7d150c30ff4bb14a64230", "3b0b402d30fa19e0fef7d150c30ff4bb14a64230" ]
[ "pytorch_lightning/accelerators/ddp_spawn_backend.py", "pytorch_lightning/accelerators/ddp_backend.py", "pytorch_lightning/accelerators/base_backend.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nimport re\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.distributed as torch_distrib\nimport torch.distributed as dist\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.accelerators.base_backend import Accelerator\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load\nfrom pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn\nfrom pytorch_lightning.utilities.seed import seed_everything\nfrom pytorch_lightning.distributed.dist import LightningDistributed\nfrom pytorch_lightning.utilities.distributed import find_free_network_port\n\n\ntry:\n from hydra.core.hydra_config import HydraConfig\n from hydra.utils import get_original_cwd, to_absolute_path\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\n\nclass DDPSpawnBackend(Accelerator):\n\n def __init__(self, trainer, nprocs, cluster_environment=None):\n super().__init__(trainer, cluster_environment)\n self.mp_queue = None\n self.nprocs = nprocs\n self.dist = LightningDistributed()\n\n def setup(self, model):\n os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', str(find_free_network_port()))\n\n # pass in a state q\n smp = mp.get_context('spawn')\n self.mp_queue = smp.SimpleQueue()\n\n self.trainer.model = model\n\n def train(self):\n model = self.trainer.model\n\n # train in children process\n mp.spawn(self.ddp_train, nprocs=self.nprocs, args=(self.mp_queue, model,))\n\n # restore main state with best weights\n best_path = self.mp_queue.get()\n results = self.mp_queue.get()\n last_path = self.mp_queue.get()\n\n # recover the weights of the processes trained in the children\n self.__recover_child_process_weights(model, best_path, last_path)\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n\n Returns:\n\n \"\"\"\n seed = os.environ.get(\"PL_GLOBAL_SEED\")\n if seed is not None:\n seed_everything(int(seed))\n\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.set_world_ranks(process_idx)\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n self.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n 
self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero and not torch.distributed.is_initialized():\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # move the model to the correct device\n self.model_to_device(model, process_idx, is_master)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.setup_optimizers(model)\n\n # set model properties before going into wrapper\n self.trainer.model_connector.copy_trainer_model_properties(model)\n\n # 16-bit\n model = self.trainer.precision_connector.connect(model)\n\n # device ids change depending on the DDP setup\n device_ids = self.get_device_ids()\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # set up training routine\n self.trainer.train_loop.setup_training(model)\n\n # train or test\n results = self.train_or_test()\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n def set_world_ranks(self, process_idx):\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n def model_to_device(self, model, process_idx, is_master):\n gpu_idx = process_idx\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n def get_device_ids(self):\n device_ids = [self.trainer.root_gpu]\n return device_ids\n\n def training_step(self, args):\n if self.trainer.amp_backend == AMPType.NATIVE:\n with torch.cuda.amp.autocast():\n output = self.trainer.model(*args)\n else:\n output = self.trainer.model(*args)\n return output\n\n def validation_step(self, args):\n output = self.training_step(args)\n return output\n\n def test_step(self, args):\n output = self.training_step(args)\n return output\n\n def barrier(self, name: str = None):\n if torch_distrib.is_initialized():\n torch_distrib.barrier()\n\n def early_stopping_should_stop(self, pl_module):\n stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device)\n dist.all_reduce(stop, op=dist.reduce_op.SUM)\n dist.barrier()\n should_stop = stop == self.trainer.world_size\n return should_stop\n\n def broadcast(self, obj, src=0):\n return self.dist.broadcast(obj)\n\n def __recover_child_process_weights(self, model, best_path, last_path):\n # transfer back the best path to the trainer\n if self.trainer.checkpoint_callback:\n self.trainer.checkpoint_callback.best_model_path = best_path\n # todo, pass also best score\n\n # load last weights\n if last_path is not None and not self.trainer.testing:\n ckpt = pl_load(last_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(ckpt)\n\n self.trainer.model = model\n\n def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):\n best_model_path = None\n if self.trainer.checkpoint_callback is not None:\n best_model_path = self.trainer.checkpoint_callback.best_model_path\n\n if self.trainer.global_rank == 0 and mp_queue 
is not None:\n rank_zero_warn('cleaning up ddp environment...')\n # todo, pass complete checkpoint as state dictionary\n mp_queue.put(best_model_path)\n mp_queue.put(results)\n\n # save the last weights\n last_path = None\n if not self.trainer.testing and best_model_path is not None and len(best_model_path) > 0:\n last_path = re.sub('.ckpt', '.tmp_end.ckpt', best_model_path)\n atomic_save(model.state_dict(), last_path)\n mp_queue.put(last_path)\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nimport torch\nimport torch.distributed as torch_distrib\nimport subprocess\nimport sys\nfrom os.path import abspath\nfrom time import sleep\nfrom typing import Optional\nimport numpy as np\n\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.utilities.distributed import find_free_network_port\nfrom pytorch_lightning.accelerators.base_backend import Accelerator\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.seed import seed_everything\nfrom pytorch_lightning.distributed.dist import LightningDistributed\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\n\nclass DDPBackend(Accelerator):\n\n def __init__(self, trainer, cluster_environment=None):\n super().__init__(trainer, cluster_environment)\n self.task_idx = None\n self._has_spawned_children = False\n self.interactive_ddp_procs = []\n self.dist = LightningDistributed()\n\n def setup(self, model):\n # first track model\n self.trainer.model = model\n\n # start the other scripts\n if os.environ.get('PL_IN_DDP_SUBPROCESS', '0') != '1':\n self._call_children_scripts()\n\n # set the task idx\n self.task_idx = int(os.environ['PL_DDP_PID'])\n\n def _call_children_scripts(self):\n assert self.trainer.global_rank == 0\n self._check_can_spawn_children()\n self._has_spawned_children = True\n\n os.environ['MASTER_ADDR'] = os.environ.get('MASTER_ADDR', '127.0.0.1')\n os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', str(find_free_network_port()))\n\n # allow the user to pass the node rank\n node_rank = '0'\n node_rank = os.environ.get('NODE_RANK', node_rank)\n node_rank = os.environ.get('GROUP_RANK', node_rank)\n os.environ['NODE_RANK'] = node_rank\n os.environ['LOCAL_RANK'] = '0'\n\n # when user is using hydra find the absolute path\n path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception as e:\n full_path = abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n\n # the visible devices tell us how 
many GPUs we want to use.\n # when the trainer script was called the device has already been scoped by the time\n # code reaches this point. so, to call the scripts, we need to leave cuda visible devices alone\n # but forward the GPUs selected via environment variables\n if self.trainer.data_parallel_device_ids is None:\n raise MisconfigurationException('you selected (distribute_backend = ddp) but did not set Trainer(gpus=?)')\n\n os.environ['PL_TRAINER_GPUS'] = ','.join([str(i) for i in self.trainer.data_parallel_device_ids])\n os.environ['PL_IN_DDP_SUBPROCESS'] = '1'\n\n if self.trainer.logger is not None:\n os.environ['PL_EXP_VERSION'] = str(self.trainer.logger.version)\n\n gpu_ids = os.environ.get('CUDA_VISIBLE_DEVICES', '')\n if len(gpu_ids) == 1:\n gpu_ids = f'{gpu_ids},'\n\n num_gpus = max(1, len(gpu_ids.split(',')))\n\n os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'\n\n self.interactive_ddp_procs = []\n for local_rank in range(1, self.trainer.num_processes):\n env_copy = os.environ.copy()\n env_copy['LOCAL_RANK'] = f'{local_rank}'\n env_copy['PL_DDP_PID'] = str(self.trainer.data_parallel_device_ids[local_rank])\n env_copy['PL_GLOBAL_SEED'] = os.environ.get('PL_GLOBAL_SEED')\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n os.environ['PL_DDP_PID'] = str(0)\n\n def train(self):\n model = self.trainer.model\n\n results = self.ddp_train(process_idx=self.task_idx, model=model)\n if 'WORLD_SIZE' in os.environ:\n del os.environ['WORLD_SIZE']\n return results\n\n def training_step(self, args):\n if self.trainer.amp_backend == AMPType.NATIVE:\n with torch.cuda.amp.autocast():\n output = self.trainer.model(*args)\n else:\n output = self.trainer.model(*args)\n return output\n\n def validation_step(self, args):\n output = self.training_step(args)\n return output\n\n def test_step(self, args):\n output = self.training_step(args)\n return output\n\n def barrier(self, name: str = None):\n if torch_distrib.is_initialized():\n torch_distrib.barrier()\n\n def _check_can_spawn_children(self):\n if self._has_spawned_children:\n raise RuntimeError(\n \"You tried to run `.fit` or `.test` multiple times in the same script.\"\n \" This is not supported in DDP mode, switch to `distributed_backend='ddp_spawn'` instead.\"\n )\n\n def set_world_ranks(self, process_idx):\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n def model_to_device(self, model, process_idx):\n self.trainer.root_gpu = process_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n def get_device_ids(self):\n device_ids = [self.trainer.root_gpu]\n return device_ids\n\n def on_train_end(self):\n pass\n\n def early_stopping_should_stop(self, pl_module):\n stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device)\n torch_distrib.all_reduce(stop, op=torch_distrib.reduce_op.SUM)\n torch_distrib.barrier()\n should_stop = stop == self.trainer.world_size\n return should_stop\n\n def broadcast(self, 
obj, src=0):\n return self.dist.broadcast(obj)\n\n def ddp_train(self, process_idx, model):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n\n Returns:\n\n \"\"\"\n seed = os.environ.get(\"PL_GLOBAL_SEED\")\n if seed is not None:\n seed_everything(int(seed))\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.set_world_ranks(process_idx)\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n self.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero and not torch.distributed.is_initialized():\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # move the model to the correct device\n self.model_to_device(model, process_idx)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.setup_optimizers(model)\n\n # set model properties before going into wrapper\n self.trainer.model_connector.copy_trainer_model_properties(model)\n\n # 16-bit\n model = self.trainer.precision_connector.connect(model)\n\n # device ids change depending on the DDP setup\n device_ids = self.get_device_ids()\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # set up training routine\n self.barrier('ddp_setup')\n self.trainer.train_loop.setup_training(model)\n\n # train or test\n results = self.train_or_test()\n\n # clean up memory\n torch.cuda.empty_cache()\n\n return results\n", "import os\nimport math\nfrom enum import Enum\nfrom typing import Any\n\nimport torch\n\nfrom pytorch_lightning.utilities import AMPType, rank_zero_warn\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nimport torch.distributed as torch_distrib\nfrom pytorch_lightning import _logger as log\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nEPSILON = 1e-6\nEPSILON_FP16 = 1e-5\n\n\nclass Accelerator(object):\n\n def __init__(self, trainer, cluster_environment=None):\n self.trainer = trainer\n self.cluster_environment = cluster_environment\n self.dist = AttributeDict(rank=0, device=None)\n\n def setup(self, model):\n pass\n\n def teardown(self):\n pass\n\n def barrier(self, name: str = None):\n pass\n\n def broadcast(self, obj, src=0):\n return obj\n\n def train_or_test(self):\n if self.trainer.testing:\n results = self.trainer.run_test()\n else:\n results = self.trainer.train()\n return results\n\n def batch_to_device(self, batch: Any, device: torch.device):\n model = self.trainer.get_model()\n if model is not None:\n return 
model.transfer_batch_to_device(batch, device)\n return move_data_to_device(batch, device)\n\n def training_step_end(self, output):\n return output\n\n def test_step_end(self, output):\n return output\n\n def validation_step_end(self, output):\n return output\n\n def process_dataloader(self, dataloader):\n return dataloader\n\n def backward(self, closure_loss, optimizer, opt_idx):\n model_ref = self.trainer.get_model()\n\n # scale loss for 16 bit\n if self.trainer.precision == 16:\n closure_loss = model_ref.amp_scale_loss(\n closure_loss,\n optimizer,\n opt_idx,\n amp_backend=self.trainer.amp_backend\n )\n\n # enter amp context\n if self.trainer.amp_backend == AMPType.APEX:\n self.trainer.dev_debugger.track_event('AMP', str(AMPType.APEX))\n context = closure_loss\n closure_loss = closure_loss.__enter__()\n\n # do backward pass\n model_ref.backward(self, closure_loss, optimizer, opt_idx)\n\n # exit amp context\n if self.trainer.precision == 16 and self.trainer.amp_backend == AMPType.APEX:\n a, b, c = None, None, None\n error = context.__exit__(a, b, c)\n if error:\n rank_zero_warn(a, b, c)\n raise Exception('apex unscale error')\n\n # once backward has been applied, release graph\n closure_loss = closure_loss.detach()\n return closure_loss\n\n def optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure):\n model_ref = self.trainer.get_model()\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli')\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n lambda_closure,\n using_native_amp=native_amp,\n using_lbfgs=is_lbfgs\n )\n\n # scale when native amp\n if native_amp:\n self.trainer.scaler.update()\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n model_ref = self.trainer.get_model()\n model_ref.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def clip_gradients(self, optimizer):\n\n if self.trainer.amp_backend == AMPType.NATIVE:\n self.trainer.scaler.unscale_(optimizer)\n\n # apply clip gradients\n # TODO: separate TPU case from here\n self._clip_gradients(optimizer)\n\n def _clip_gradients(self, optimizer):\n # this code is a modification of torch.nn.utils.clip_grad_norm_\n # with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md\n if self.trainer.gradient_clip_val <= 0:\n return\n\n model = self.trainer.get_model()\n if self.trainer.amp_backend == AMPType.APEX:\n parameters = amp.master_params(optimizer)\n else:\n parameters = model.parameters()\n\n max_norm = float(self.trainer.gradient_clip_val)\n norm_type = float(2.0)\n\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n if norm_type == math.inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n device = parameters[0].device\n out = torch.empty(len(parameters), device=device)\n for i, p in enumerate(parameters):\n torch.norm(p.grad.data.to(device), norm_type, out=out[i])\n total_norm = torch.norm(out, norm_type)\n\n eps = EPSILON_FP16 if self.trainer.precision == 16 else EPSILON\n clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps)\n clip_coef = 
torch.min(clip_coef, torch.ones_like(clip_coef))\n for p in parameters:\n p.grad.data.mul_(clip_coef.to(p.grad.data.device))\n\n def on_train_epoch_end(self):\n pass\n\n def on_train_end(self):\n pass\n\n def early_stopping_should_stop(self, pl_module):\n return self.trainer.should_stop\n\n def setup_optimizers(self, model):\n if self.trainer.testing is True:\n return\n\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n def init_ddp_connection(\n self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True\n ) -> None:\n if is_slurm_managing_tasks:\n self.trainer.slurm_connector.connect_ddp(global_rank, world_size)\n else:\n self.connect_torchelastic(global_rank, world_size)\n\n def connect_torchelastic(\n self, global_rank: int, world_size: int\n ) -> None:\n \"\"\"\n Override to define your custom way of setting up a distributed environment.\n\n Lightning's implementation uses env:// init by default and sets the first node as root\n for SLURM managed cluster.\n\n Args:\n global_rank: The global process idx.\n world_size: Number of GPUs being use across all nodes. (num_nodes * num_gpus).\n \"\"\"\n\n if \"MASTER_ADDR\" not in os.environ:\n rank_zero_warn(\n \"MASTER_ADDR environment variable is not defined. Set as localhost\"\n )\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n log.debug(f\"MASTER_ADDR: {os.environ['MASTER_ADDR']}\")\n\n if \"MASTER_PORT\" not in os.environ:\n rank_zero_warn(\n \"MASTER_PORT environment variable is not defined. Set as 12910\"\n )\n os.environ[\"MASTER_PORT\"] = \"12910\"\n log.debug(f\"MASTER_PORT: {os.environ['MASTER_PORT']}\")\n\n if \"WORLD_SIZE\" in os.environ and int(os.environ[\"WORLD_SIZE\"]) != world_size:\n rank_zero_warn(\n f\"WORLD_SIZE environment variable ({os.environ['WORLD_SIZE']}) \"\n f\"is not equal to the computed world size ({world_size}). Ignored.\"\n )\n\n torch_backend = \"nccl\" if self.trainer.on_gpu else \"gloo\"\n\n if not torch.distributed.is_initialized():\n log.info(\n f\"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}\"\n )\n torch_distrib.init_process_group(\n torch_backend, rank=global_rank, world_size=world_size\n )\n\n\n# TODO: allow user to compare with string even internaly we shall use these Enum to prevent typos...\nclass BackendType(Enum):\n DP = 'dp'\n DDP = 'ddp'\n DDP2 = 'ddp2'\n DDP_SPAWN = 'ddp_spawn'\n # decuple distrib and device\n DDP_CPU = 'ddp_cpu'\n HOROVOD = 'horovod'\n # this is rather device\n TPU = 'tpu'\n" ]
[ [ "torch.cuda.set_device", "torch.multiprocessing.spawn", "torch.distributed.is_initialized", "torch.cuda.empty_cache", "torch.distributed.barrier", "torch.cuda.amp.autocast", "torch.multiprocessing.get_context", "torch.distributed.all_reduce" ], [ "torch.cuda.set_device", "torch.distributed.is_initialized", "torch.cuda.empty_cache", "torch.distributed.barrier", "torch.cuda.amp.autocast", "numpy.random.uniform", "torch.distributed.all_reduce" ], [ "torch.norm", "torch.distributed.init_process_group", "torch.distributed.is_initialized", "torch.tensor", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xta0/Python-Playground
[ "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc", "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc", "513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc" ]
[ "dl/pytorch/rnn/char-lstm.py", "dl/pytorch/torchscript/intro.py", "trade/hello_world/dataframe.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n# open text file and read in data as `text`\nwith open('data/anna.txt', 'r') as f:\n text = f.read()\n\n# print(text[:100])\n\n# encode the text and map each character to an integer and vice versa\n\n# we create two dictionaries:\n# 1. int2char, which maps integers to characters\n# 2. char2int, which maps characters to unique integers\n# text = text[:100]\nchars = tuple(set(text)) #(1', 'v', 'H', '.', 'i', 'E', 'a', 'r', 'C', 'p',...)\nint2char = dict(enumerate(chars))\nchar2int = {ch: ii for ii, ch in int2char.items()}\n\n# encode the text\nencoded = np.array([char2int[ch] for ch in text])\nprint(encoded[:100])\n\ndef one_hot_encode(arr, n_labels):\n # Initialize the the encoded array\n # arr is a multi-dim array\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n # Fill the appropriate elements with ones\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n # Finally reshape it to get back to the original array\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n return one_hot\n\n# check that the function works as expected\ntest_seq = np.array([[3, 5, 1]])\none_hot = np.zeros((np.multiply(*test_seq.shape), 8), dtype=np.float32)\n\n# one_hot = one_hot_encode(test_seq, 8)\n\nprint(one_hot)\n\ndef get_batches(arr, batch_size, seq_length):\n '''Create a generator that returns batches of size\n batch_size x seq_length from arr.\n \n Arguments\n ---------\n arr: Array you want to make batches from\n batch_size: Batch size, the number of sequences per batch\n seq_length: Number of encoded chars in a sequence\n '''\n total = batch_size * seq_length\n n_batches = len(arr) // total\n arr = arr[:n_batches * total]\n arr = arr.reshape(batch_size, -1)\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:,n:n+seq_length]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y\n\nbatches = get_batches(encoded, 8, 50)\nx, y = next(batches)\n# printing out the first 10 items in a sequence\nprint('x\\n', x[:10, :10])\nprint('\\ny\\n', y[:10, :10])\n\n# check if GPU is available\ntrain_on_gpu = torch.cuda.is_available()\nif(train_on_gpu):\n print('Training on GPU!')\nelse: \n print('No GPU available, training on CPU; consider making n_epochs very small.')\n \nclass CharRNN(nn.Module):\n \n def __init__(self, tokens, n_hidden=256, n_layers=2,\n drop_prob=0.5, lr=0.001):\n super().__init__()\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n # creating character dictionaries\n self.chars = tokens\n self.int2char = dict(enumerate(self.chars))\n self.char2int = {ch: ii for ii, ch in self.int2char.items()}\n \n ## TODO: define the layers of the model\n self.lstm = nn.LSTM(len(tokens), n_hidden, n_layers, dropout=drop_prob, batch_first=True)\n self.dropout = nn.Dropout(drop_prob)\n self.fc = nn.Linear (n_hidden, len(tokens))\n \n def forward(self, x, hidden):\n ''' Forward pass through the network. \n These inputs are x, and the hidden/cell state `hidden`. 
'''\n \n ## TODO: Get the outputs and the new hidden state from the lstm\n x, hidden = self.lstm(x,hidden)\n x = self.dropout(x)\n x = x.contiguous().view(-1, n_hidden)\n x = self.fc(x)\n \n # return the final output and the hidden state\n return x, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n \n if (train_on_gpu):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden\n\ndef train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):\n ''' Training a network \n \n Arguments\n ---------\n \n net: CharRNN network\n data: text data to train the network\n epochs: Number of epochs to train\n batch_size: Number of mini-sequences per mini-batch, aka batch size\n seq_length: Number of character steps per mini-batch\n lr: learning rate\n clip: gradient clipping\n val_frac: Fraction of data to hold out for validation\n print_every: Number of steps for printing training and validation loss\n \n '''\n net.train()\n \n opt = torch.optim.Adam(net.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n \n # create training and validation data\n val_idx = int(len(data)*(1-val_frac))\n data, val_data = data[:val_idx], data[val_idx:]\n \n if(train_on_gpu):\n net.cuda()\n \n counter = 0\n n_chars = len(net.chars)\n for e in range(epochs):\n # initialize hidden state\n h = net.init_hidden(batch_size)\n \n for x, y in get_batches(data, batch_size, seq_length):\n counter += 1\n \n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n \n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n \n # get the output from the model\n output, h = net(inputs, h)\n \n # calculate the loss and perform backprop\n loss = criterion(output, targets.view(batch_size*seq_length))\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n opt.step()\n \n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = net.init_hidden(batch_size)\n val_losses = []\n net.eval()\n for x, y in get_batches(val_data, batch_size, seq_length):\n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n \n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n \n inputs, targets = x, y\n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n output, val_h = net(inputs, val_h)\n val_loss = criterion(output, targets.view(batch_size*seq_length))\n \n val_losses.append(val_loss.item())\n \n net.train() # reset to train mode after iterationg through validation data\n \n print(\"Epoch: 
{}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.4f}...\".format(loss.item()),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)))\n \nn_hidden=512\nn_layers=2\n\nnet = CharRNN(chars, n_hidden, n_layers)\nprint(net)\n\nbatch_size = 128\nseq_length = 100\nn_epochs = 20 # start smaller if you are just testing initial behavior\n\n# train the model\ntrain(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)\n\n# change the name, for saving multiple files\nmodel_name = 'rnn_20_epoch.net'\n\ncheckpoint = {'n_hidden': net.n_hidden,\n 'n_layers': net.n_layers,\n 'state_dict': net.state_dict(),\n 'tokens': net.chars}\n\nwith open(model_name, 'wb') as f:\n torch.save(checkpoint, f)\n \n## Making Predictions\n\ndef predict(net, char, h=None, top_k=None):\n ''' Given a character, predict the next character.\n Returns the predicted character and the hidden state.\n '''\n \n # tensor inputs\n x = np.array([[net.char2int[char]]])\n x = one_hot_encode(x, len(net.chars))\n inputs = torch.from_numpy(x)\n \n if(train_on_gpu):\n inputs = inputs.cuda()\n \n # detach hidden state from history\n h = tuple([each.data for each in h])\n # get the output of the model\n out, h = net(inputs, h)\n\n # get the character probabilities\n p = F.softmax(out, dim=1).data\n if(train_on_gpu):\n p = p.cpu() # move to cpu\n \n # get top characters\n if top_k is None:\n top_ch = np.arange(len(net.chars))\n else:\n p, top_ch = p.topk(top_k)\n top_ch = top_ch.numpy().squeeze()\n \n # select the likely next character with some element of randomness\n p = p.numpy().squeeze()\n char = np.random.choice(top_ch, p=p/p.sum())\n \n # return the encoded value of the predicted char and the hidden state\n return net.int2char[char], h\n \ndef sample(net, size, prime='The', top_k=None):\n #prime is the arg that we want to start our model with\n \n if(train_on_gpu):\n net.cuda()\n else:\n net.cpu()\n \n net.eval() # eval mode\n \n # First off, run through the prime characters\n chars = [ch for ch in prime]\n h = net.init_hidden(1)\n for ch in prime:\n char, h = predict(net, ch, h, top_k=top_k)\n\n chars.append(char)\n \n # Now pass in the previous character and get a new one\n for ii in range(size):\n char, h = predict(net, chars[-1], h, top_k=top_k)\n chars.append(char)\n\n return ''.join(chars)\n\nprint(sample(net, 1000, prime='Anna', top_k=5))\n\n## Loading a checkpoint\nwith open('rnn_20_epoch.net', 'rb') as f:\n checkpoint = torch.load(f)\n \nloaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])\nloaded.load_state_dict(checkpoint['state_dict'])\n\n# Sample using a loaded model\nprint(sample(loaded, 2000, top_k=5, prime=\"And Levin said\"))", "import torch\n\nclass MyDecisionGate(torch.nn.Module):\n def forward(self, x):\n if x.sum() > 0:\n return x\n else:\n return -x\n\nclass MyCell(torch.nn.Module):\n def __init__(self, dg):\n super(MyCell, self).__init__()\n self.dg = dg\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.dg(self.linear(x)) + h)\n return new_h, new_h\n \nmy_cell = MyCell(MyDecisionGate())\nx, h = torch.rand(3, 4), torch.rand(3, 4)\ntraced_cell = torch.jit.trace(my_cell, (x, h))\nprint(traced_cell)\ntraced_cell(x, h)\n# print(traced_cell.graph)\nprint(traced_cell.code)\nscripted_cell = torch.jit.script(my_cell)\nprint(scripted_cell.code)\n\nclass MyRNNLoop(torch.nn.Module):\n def __init__(self):\n super(MyRNNLoop, self).__init__()\n self.cell = 
torch.jit.trace(my_cell, (x, h))\n\n def forward(self, xs):\n h, y = torch.zeros(3, 4), torch.zeros(3, 4)\n for i in range(xs.size(0)):\n y, h = self.cell(xs[i], h)\n return y, h\n\nrnn_loop = torch.jit.script(MyRNNLoop())\nprint(rnn_loop.code)", "import pandas as pd\nimport matplotlib.pyplot as plt\n\ndef test_run():\n start_date = '2017-03-10'\n end_date = '2018-03-30'\n dates = pd.date_range(start_date, end_date)\n # <class 'pandas.core.indexes.datetimes.DatetimeIndex'>\n # 2010-01-22 00:00:00\n #empty dataframe\n #index = date\n df1 = pd.DataFrame(index=dates)\n #指定主键\n dfSPY = pd.read_csv(\"./data/SPY.csv\",\n index_col=\"Date\",\n parse_dates=True,\n usecols=['Date','Adj Close'],\n na_values=['nan'])\n #Rename Column name\n dfSPY = dfSPY.rename(columns={'Adj Close':'SPY'})\n \n #Inner Join SP&500\n df1 = df1.join(dfSPY,how=\"inner\");\n\n #Read symbols in ./data\n symbols = ['GOOG','FB','AAPL'];\n for name in symbols:\n df_tmp = pd.read_csv(f\"./data/{name}.csv\",\n index_col=\"Date\",\n parse_dates=True,\n usecols=['Date','Adj Close'],\n na_values=['nan']);\n\n df_tmp = df_tmp.rename(columns={'Adj Close':name})\n df1 = df1.join(df_tmp) #how=\"left\";\n \n #normalized data\n df1 = df1 / df1.ix[0,:]\n\n # ax = df1.plot(title='Stock Prices', fontsize=6)\n # ax.set_xlabel(\"Date\")\n # ax.set_ylabel(\"Price\") \n # plt.show()\n\n ## slice\n # slice by row range(dates) using Datafrome.ix[] \n print(df1.ix['2018-03-01':'2018-03-06']) #first 6 days\n print(type(df1.values))\n # row slice\n # print(df1['GOOG'])\n # print(df1[['GOOG','FB']])\n # print(df1.ix['2018-03-01':'2018-03-06',['SPY','GOOG']])\n\nif __name__ == '__main__':\n test_run()" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.nn.functional.softmax", "numpy.multiply", "torch.load", "numpy.arange", "torch.from_numpy", "numpy.zeros_like", "numpy.mean", "torch.cuda.is_available", "numpy.array", "torch.save" ], [ "torch.jit.script", "torch.jit.trace", "torch.zeros", "torch.nn.Linear", "torch.rand" ], [ "pandas.read_csv", "pandas.DataFrame", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
diana-hep/shredtypes
[ "bb7c17eea849f8934c449c3fa260af54b3532736" ]
[ "oamap/schema.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2017, DIANA-HEP\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport bisect\nimport codecs\nimport copy\nimport fnmatch\nimport json\nimport numbers\nimport re\nimport sys\nfrom types import ModuleType\n\nimport numpy\n\nimport oamap.generator\nimport oamap.inference\nimport oamap.backend.packing\nimport oamap.extension.common\nimport oamap.proxy\nimport oamap.util\nfrom oamap.util import OrderedDict\n\nif sys.version_info[0] > 2:\n basestring = str\n unicode = str\n\n# Common extensions\nfrom oamap.extension.common import ByteString\nfrom oamap.extension.common import UTF8String\n\n# The \"PLURTP\" type system: Primitives, Lists, Unions, Records, Tuples, and Pointers\n\nclass Schema(object):\n _identifier = re.compile(\"[a-zA-Z][a-zA-Z_0-9]*\") # forbid starting with underscore in field names\n _baddelimiter = re.compile(\"[a-zA-Z_0-9]\") # could be confused with field names or integers\n\n def __init__(self, *args, **kwds):\n raise TypeError(\"Kind cannot be instantiated directly\")\n\n @property\n def nullable(self):\n return self._nullable\n\n @nullable.setter\n def nullable(self, value):\n if value is not True and value is not False:\n raise TypeError(\"nullable must be True or False, not {0}\".format(repr(value)))\n self._nullable = value\n\n @property\n def mask(self):\n return self._mask\n\n @mask.setter\n def mask(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"mask must be None or an array name (string), not {0}\".format(repr(value)))\n self._mask = value\n\n @property\n def namespace(self):\n return self._namespace\n\n @namespace.setter\n def namespace(self, value):\n if not isinstance(value, basestring):\n raise TypeError(\"namespace must be a string, not {0}\".format(repr(value)))\n self._namespace = value\n\n @property\n def packing(self):\n return self._packing\n\n @packing.setter\n def packing(self, value):\n if not (value is None or isinstance(value, oamap.backend.packing.PackedSource)):\n raise TypeError(\"packing must be None or 
a PackedSource, not {0}\".format(repr(value)))\n self._packing = value\n\n def _packingcopy(self, source=None):\n if self._packing is None:\n return source\n else:\n return self._packing.anchor(source)\n\n def _packingtojson(self):\n if self._packing is None:\n return None\n else:\n return self._packing.tojson()\n\n @staticmethod\n def _packingfromjson(packing):\n if packing is None:\n return None\n else:\n return oamap.backend.packing.PackedSource.fromjson(packing)\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n if value is None:\n self._name = value\n return\n if isinstance(value, basestring):\n match = self._identifier.match(value)\n if match is not None and len(match.group(0)) == len(value):\n self._name = value\n return\n raise TypeError(\"name must be None or a string matching /{0}/, not {1}\".format(self._identifier.pattern, repr(value)))\n\n @property\n def doc(self):\n return self._doc\n\n @doc.setter\n def doc(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"doc must be None or a string, not {0}\".format(repr(value)))\n self._doc = value\n\n @property\n def metadata(self):\n return self._metadata\n\n @metadata.setter\n def metadata(self, value):\n self._metadata = value\n\n def _labels(self):\n labels = []\n self._collectlabels(set(), labels)\n return labels\n \n def _label(self, labels):\n for index, label in enumerate(labels):\n if label is self:\n return \"#{0}\".format(index)\n return None\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def show(self, stream=sys.stdout):\n out = self.__repr__(indent=\"\")\n if stream is None:\n return out\n else:\n stream.write(out)\n stream.write(\"\\n\")\n\n @property\n def hasarraynames(self):\n return self._hasarraynames(set())\n\n def tojsonfile(self, file, explicit=False, *args, **kwds):\n json.dump(self.tojson(explicit=explicit), file, *args, **kwds)\n\n def tojsonstring(self, explicit=False, *args, **kwds):\n return json.dumps(self.tojson(explicit=explicit), *args, **kwds)\n\n def tojson(self, explicit=False):\n return self._tojson(explicit, self._labels(), set())\n\n @staticmethod\n def fromjsonfile(file, *args, **kwds):\n return Schema.fromjson(json.load(file, *args, **kwds))\n\n @staticmethod\n def fromjsonstring(data, *args, **kwds):\n return Schema.fromjson(json.loads(data, *args, **kwds))\n\n @staticmethod\n def fromjson(data):\n if isinstance(data, (basestring, dict)):\n labels = {}\n out = Schema._fromjson(data, labels)\n if not isinstance(out, Schema):\n raise TypeError(\"unresolved label: {0}\".format(repr(out)))\n out._finalizefromjson(labels)\n return out\n else:\n raise TypeError(\"JSON for a Schema must be a string or a dict, not {0}\".format(repr(data)))\n\n @staticmethod\n def _fromjson(data, labels):\n if isinstance(data, basestring) and data.startswith(\"#\"):\n return data\n\n elif isinstance(data, basestring):\n return Primitive._fromjson(data, labels)\n\n elif isinstance(data, dict):\n tpe = data.get(\"type\", \"primitive\")\n if tpe == \"primitive\":\n return Primitive._fromjson(data, labels)\n elif tpe == \"list\":\n return List._fromjson(data, labels)\n elif tpe == \"union\":\n return Union._fromjson(data, labels)\n elif tpe == \"record\":\n return Record._fromjson(data, labels)\n elif tpe == \"tuple\":\n return Tuple._fromjson(data, labels)\n elif tpe == \"pointer\":\n return Pointer._fromjson(data, labels)\n else:\n raise TypeError(\"unrecognized type argument for Schema from JSON: 
{0}\".format(repr(tpe)))\n\n else:\n raise TypeError(\"unrecognized type for Schema from JSON: {0}\".format(repr(data)))\n\n def renamespace(self, nullto=None, **to):\n if nullto is not None:\n to[\"\"] = nullto\n\n def replacement(node):\n node.namespace = to.get(node.namespace, node.namespace)\n return node\n\n return self.replace(replacement)\n\n def replace(self, fcn, *args, **kwds):\n return self._replace(fcn, args, kwds, {})\n\n def deepcopy(self, **replacements):\n return self.replace(lambda x: x, **replacements)\n\n def path(self, path, parents=False, allowtop=True):\n out = None\n for nodes in self._path((), path, (), allowtop, set()):\n if out is None:\n if parents:\n out = nodes\n else:\n out = nodes[0]\n else:\n raise ValueError(\"path {0} matches more than one field in schema\".format(repr(path)))\n\n if out is None:\n raise ValueError(\"path {0} does not match any fields in the schema\".format(repr(path)))\n else:\n return out\n\n def paths(self, *paths, **options):\n parents = options.pop(\"parents\", False)\n allowtop = options.pop(\"allowtop\", True)\n if len(options) > 0:\n raise TypeError(\"unrecognized options: {0}\".format(\", \".join(options)))\n for path in paths:\n for nodes in self._path((), path, (), allowtop, set()):\n if parents:\n yield nodes\n else:\n yield nodes[0]\n\n def _path(self, loc, path, parents, allowtop, memo):\n if allowtop and fnmatch.fnmatchcase(\"/\".join(loc), path):\n yield (self,) + parents\n\n def nodes(self, parents=False, bottomup=True):\n if parents:\n for x in self._nodes((), bottomup, set()):\n yield x\n else:\n for x in self._nodes((), bottomup, set()):\n yield x[0]\n\n def project(self, path):\n return self._keep((), [path], True, {})\n\n def keep(self, *paths):\n return self._keep((), paths, False, {})\n\n def drop(self, *paths):\n return self._drop((), paths, {})\n\n def contains(self, schema):\n return self._contains(schema, set())\n\n def _normalize_extension(self, extension):\n if isinstance(extension, ModuleType):\n recurse = False\n extension = extension.__dict__\n else:\n recurse = True\n\n if isinstance(extension, dict):\n extension = [extension[n] for n in sorted(extension)]\n\n try:\n iter(extension)\n except TypeError:\n raise TypeError(\"extension must be a module containing ExtendedGenerator classes or a dict or list (recursively) containing ExtendedGenerator classes\")\n else:\n out = []\n for x in extension:\n if isinstance(x, type) and issubclass(x, oamap.generator.ExtendedGenerator):\n out.append(x)\n elif recurse:\n out.extend(self._normalize_extension(x))\n return out\n\n def fromdata(self, value, pointer_fromequal=False):\n import oamap.fill\n return self(oamap.fill.fromdata(value, generator=self, pointer_fromequal=pointer_fromequal))\n\n def fromiterdata(self, values, limit=lambda entries, arrayitems, arraybytes: False, pointer_fromequal=False):\n import oamap.fill\n return self(oamap.fill.fromiterdata(values, generator=self, limit=limit, pointer_fromequal=pointer_fromequal))\n\n def __call__(self, arrays, prefix=\"object\", delimiter=\"-\", extension=oamap.extension.common, packing=None):\n return self.generator(prefix=prefix, delimiter=delimiter, extension=self._normalize_extension(extension), packing=packing)(arrays)\n\n def generator(self, prefix=\"object\", delimiter=\"-\", extension=oamap.extension.common, packing=None):\n if self._baddelimiter.match(delimiter) is not None:\n raise ValueError(\"delimiters must not contain /{0}/\".format(self._baddelimiter.pattern))\n cacheidx = [0]\n memo = OrderedDict()\n 
extension = self._normalize_extension(extension)\n if packing is not None:\n packing = packing.copy()\n return self._finalizegenerator(self._generator(prefix, delimiter, cacheidx, memo, set(), extension, packing), cacheidx, memo, extension, packing)\n\n def _get_name(self, prefix, delimiter):\n if self._name is not None:\n return prefix + delimiter + \"N\" + self._name\n else:\n return prefix\n\n def _get_mask(self, prefix, delimiter):\n if self._mask is None:\n return self._get_name(prefix, delimiter) + delimiter + \"M\"\n else:\n return self._mask\n\n def _finalizegenerator(self, out, cacheidx, memo, extension, packing):\n allgenerators = list(memo.values())\n for generator in memo.values():\n if isinstance(generator, oamap.generator.PointerGenerator):\n # only assign pointer targets after all other types have been resolved\n target, prefix, delimiter = generator.target\n if id(target) in memo:\n # the target points elsewhere in the type tree: link to that\n generator._internal = True\n if generator.schema.positions is None:\n generator.positions = generator.positions + delimiter + memo[id(target)].derivedname\n generator.target = memo[id(target)]\n generator.schema.target = generator.target.schema\n else:\n # the target is not in the type tree: resolve it now\n memo2 = OrderedDict() # new memo, but same cacheidx\n generator._internal = False\n generator.target = target._finalizegenerator(target._generator(generator.schema._get_external(prefix, delimiter), delimiter, cacheidx, memo2, set(), extension, packing), cacheidx, memo2, extension, packing)\n generator.schema.target = generator.target.schema\n for generator2 in memo2.values():\n allgenerators.append(generator2)\n\n for generator in allgenerators:\n generator._cachelen = cacheidx[0]\n\n return out\n\n def case(self, obj):\n return obj in self\n\n def cast(self, obj):\n if obj in self:\n return obj\n else:\n raise TypeError(\"object is not a member of {0}\".format(self))\n\n################################################################ Primitives can be any Numpy type\n\nclass Primitive(Schema):\n def __init__(self, dtype, nullable=False, data=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.dtype = dtype\n self.nullable = nullable\n self.data = data\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def dtype(self):\n return self._dtype\n\n @dtype.setter\n def dtype(self, value):\n if not isinstance(value, numpy.dtype):\n value = numpy.dtype(value)\n if value.hasobject:\n raise TypeError(\"dtypes containing objects are not allowed\")\n if value.names is not None:\n for n in value.names:\n if self._identifier.match(n) is None:\n raise TypeError(\"dtype names must be identifier strings; the name {0} is not an identifier (/{1}/)\".format(repr(n), self._identifier.pattern))\n raise NotImplementedError(\"record-array dtypes are not supported yet\")\n if value.subdtype is not None:\n raise NotImplementedError(\"multidimensional dtypes are not supported yet\")\n self._dtype = value\n\n _byteorder_transform = {\"!\": True, \">\": True, \"<\": False, \"|\": False, \"=\": numpy.dtype(\">f8\").isnative}\n\n @staticmethod\n def _dtype2str(dtype, delimiter):\n if dtype.names is not None:\n return delimiter.join(Primitive._dtype2str(dtype[n], delimiter) + delimiter + n for n in dtype.names)\n if dtype.subdtype is not None:\n subdtype, dims = dtype.subdtype\n else:\n subdtype, dims = dtype, ()\n return 
\"D\" + \"\".join(repr(x) + delimiter for x in dims) + (subdtype.kind.upper() if Primitive._byteorder_transform[subdtype.byteorder] else subdtype.kind) + repr(subdtype.itemsize)\n\n @staticmethod\n def _str2dtype(string, delimiter):\n out = []\n for _, dims, _, kind, itemsize, name in re.findall(\"(D(([1-9][0-9]*{0})*)([a-zA-Z])([1-9][0-9]*)({0}[a-zA-Z][a-zA-Z_0-9]*)?)\".format(delimiter), string):\n if dims == \"\":\n dims = ()\n else:\n dims = tuple(int(x) for x in dims[:-len(delimiter)].split(delimiter))\n itemsize = itemsize\n name = name[len(delimiter):]\n if ord(\"A\") <= ord(kind) <= ord(\"Z\"):\n byteorder = \">\"\n else:\n byteorder = \"<\"\n if kind == \"S\":\n descr = (kind + itemsize, dims)\n else:\n descr = (byteorder + kind.lower() + itemsize, dims)\n if name == \"\":\n out.append(descr)\n else:\n out.append((name,) + descr)\n if len(out) == 1:\n return numpy.dtype(out[0])\n else:\n return numpy.dtype(out)\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"data must be None or an array name (string), not {0}\".format(repr(value)))\n self._data = value\n\n def _hasarraynames(self, memo):\n return self._data is not None and (not self._nullable or self._mask is not None)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = [repr(self._dtype)]\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._data is not None:\n args.append(\"data\" + eq + repr(self._data))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if label is None:\n return \"Primitive(\" + \", \".join(args) + \")\"\n else:\n return label + \": Primitive(\" + \", \".join(args) + \")\"\n\n else:\n return label\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n else:\n labels.append(self)\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n if not explicit and self._nullable is False and self._data is None and self._mask is None and self._namespace == \"\" and self._packing is None and self._name is None and self._doc is None and self._metadata is None:\n return str(self._dtype)\n else:\n out = {\"type\": \"primitive\", \"dtype\": self._dtype2str(self._dtype, \"-\")}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._data is not None:\n out[\"data\"] = self._data\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n 
out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if isinstance(data, basestring):\n return Primitive(data)\n else:\n if \"dtype\" not in data:\n raise TypeError(\"Primitive Schema from JSON is missing argument 'dtype'\")\n out = Primitive(Primitive._str2dtype(data[\"dtype\"], \"-\"), nullable=data.get(\"nullable\", False), data=data.get(\"data\", None), mask=data.get(\"mask\", None), namespace=data.get(\"namespace\", \"\"), packing=Schema._packingfromjson(data.get(\"packing\", None)), name=data.get(\"name\", None), doc=data.get(\"doc\", None), metadata=oamap.util.json2python(data.get(\"metadata\", None)))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n pass\n\n def copy(self, **replacements):\n if \"dtype\" not in replacements:\n replacements[\"dtype\"] = self._dtype\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"data\" not in replacements:\n replacements[\"data\"] = self._data\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Primitive(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Primitive(self._dtype, nullable=self._nullable, data=self._data, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _nodes(self, loc, bottomup, memo):\n yield (self,) + loc\n\n def _keep(self, loc, paths, project, memo):\n return self.deepcopy()\n\n def _drop(self, loc, paths, memo):\n return self.deepcopy()\n\n def _contains(self, schema, memo):\n return self == schema\n\n def __hash__(self):\n return hash((Primitive, self._dtype, self._nullable, self._data, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n return isinstance(other, Primitive) and self._dtype == other._dtype and self._nullable == other._nullable and self._data == other._data and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata\n\n def __contains__(self, value, memo=None):\n if value is None:\n return self.nullable\n\n def recurse(value, dims):\n if dims == ():\n if issubclass(self.dtype.type, (numpy.bool_, numpy.bool)):\n return value is True or value is False\n\n elif issubclass(self.dtype.type, numpy.integer):\n iinfo = numpy.iinfo(self.dtype.type)\n return isinstance(value, (numbers.Integral, numpy.integer)) and iinfo.min <= value <= iinfo.max\n\n elif issubclass(self.dtype.type, numpy.floating):\n return isinstance(value, (numbers.Real, numpy.floating))\n\n elif issubclass(self.dtype.type, numpy.complex):\n return isinstance(value, (numbers.Complex, numpy.complex))\n\n else:\n raise 
TypeError(\"unexpected dtype: {0}\".format(self.dtype))\n\n else:\n try:\n iter(value)\n len(value)\n except TypeError:\n return False\n else:\n return len(value) == dims[0] and all(recurse(x, dims[1:]) for x in value)\n\n if self._dtype.subdtype is None:\n return recurse(value, ())\n else:\n subdtype, dims = self._dtype.subdtype\n return recurse(value, dims)\n\n def _get_data(self, prefix, delimiter):\n if self._data is None:\n return self._get_name(prefix, delimiter) + delimiter + self._dtype2str(self._dtype, delimiter)\n else:\n return self._data\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedPrimitiveGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.PrimitiveGenerator\n\n args.append(self._get_data(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._dtype)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Lists may have arbitrary length\n\nclass List(Schema):\n def __init__(self, content, nullable=False, starts=None, stops=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.content = content\n self.nullable = nullable\n self.starts = starts\n self.stops = stops\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def content(self):\n return self._content\n\n @content.setter\n def content(self, value):\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"content must be a Schema, not {0}\".format(repr(value)))\n self._content = value\n\n @property\n def starts(self):\n return self._starts\n\n @starts.setter\n def starts(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"starts must be None or an array name (string), not {0}\".format(repr(value)))\n self._starts = value\n\n @property\n def stops(self):\n return self._stops\n\n @stops.setter\n def stops(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"stops must be None or an array name (string), not {0}\".format(repr(value)))\n self._stops = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._starts is not None and self._stops is not None and (not self._nullable or self._mask is not None) and self._content._hasarraynames(memo)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(self._content.__repr__(labels, shown, indent))\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._starts is not None:\n 
args.append(\"starts\" + eq + repr(self._starts))\n if self._stops is not None:\n args.append(\"stops\" + eq + repr(self._stops))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"content\" + eq + self._content.__repr__(labels, shown, indent + \" \").lstrip() + \"\\n\" + indent)\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"List(\" + argstr + \")\"\n else:\n return label + \": List(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"list\", \"content\": self._content._tojson(explicit, labels, shown)}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._starts is not None:\n out[\"starts\"] = self._starts\n if explicit or self._stops is not None:\n out[\"stops\"] = self._stops\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"content\" not in data:\n raise TypeError(\"List Schema from JSON is missing argument 'content'\")\n out = List.__new__(List)\n out._content = Schema._fromjson(data[\"content\"], labels)\n out.nullable = data.get(\"nullable\", False)\n out.starts = data.get(\"starts\", None)\n out.stops = data.get(\"stops\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n if isinstance(self._content, basestring):\n if self._content not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._content)))\n self._content = labels[self._content]\n else:\n self._content._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n self._content._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"content\" not in replacements:\n replacements[\"content\"] = self._content\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"starts\" not in replacements:\n 
replacements[\"starts\"] = self._starts\n if \"stops\" not in replacements:\n replacements[\"stops\"] = self._stops\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return List(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(List(self._content._replace(fcn, args, kwds, memo), nullable=self._nullable, starts=self._starts, stops=self._stops, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for nodes in self._content._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for x in self._content._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for x in self._content._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n content = self.content._keep(loc, paths, project, memo)\n if content is None:\n return None\n else:\n return self.copy(content=content)\n\n def _drop(self, loc, paths, memo):\n content = self.content._drop(loc, paths, memo)\n if content is None:\n return None\n else:\n return self.copy(content=content)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return self._content._contains(schema, memo)\n\n def __hash__(self):\n return hash((List, self._content, self._nullable, self._starts, self._stops, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, List) and self._nullable == other._nullable and self._starts == other._starts and self._stops == other._stops and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return self.content.__eq__(other.content, memo)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n try:\n iter(value)\n except TypeError:\n return False\n else:\n for x in value:\n memo2 = dict(memo) if len(memo) > 0 else memo\n if not self.content.__contains__(x, memo2):\n return False\n return True\n\n def _get_starts(self, prefix, delimiter):\n if self._starts is None:\n return self._get_name(prefix, delimiter) + delimiter + \"B\"\n else:\n return self._starts\n\n def _get_stops(self, prefix, delimiter):\n if self._stops is None:\n return self._get_name(prefix, delimiter) + delimiter + \"E\"\n else:\n return self._stops\n\n def _get_content(self, prefix, delimiter):\n return self._get_name(prefix, delimiter) + delimiter + \"L\"\n\n def __call__(self, arrays, prefix=\"object\", 
delimiter=\"-\", extension=oamap.extension.common, packing=None, numentries=None):\n generator = self.generator(prefix=prefix, delimiter=delimiter, extension=self._normalize_extension(extension), packing=packing)\n import oamap.generator\n if isinstance(generator, oamap.generator.ListGenerator):\n return generator(arrays, numentries=numentries)\n else:\n return generator(arrays)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedListGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.ListGenerator\n\n args.append(self._get_starts(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._get_stops(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n contentgen = self._content._generator(self._get_content(prefix, delimiter), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing)\n args.append(contentgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(content=contentgen.schema, packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Unions may be one of several types\n\nclass Union(Schema):\n def __init__(self, possibilities, nullable=False, tags=None, offsets=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.possibilities = possibilities\n self.nullable = nullable\n self.tags = tags\n self.offsets = offsets\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def possibilities(self):\n return tuple(self._possibilities)\n\n @possibilities.setter\n def possibilities(self, value):\n self._extend(value, [])\n\n @property\n def tags(self):\n return self._tags\n\n @tags.setter\n def tags(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"tags must be None or an array name (string), not {0}\".format(repr(value)))\n self._tags = value\n\n @property\n def offsets(self):\n return self._offsets\n\n @offsets.setter\n def offsets(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"offsets must be None or an array name (string), not {0}\".format(repr(value)))\n self._offsets = value\n\n def _extend(self, possibilities, start):\n trial = []\n try:\n for i, x in enumerate(possibilities):\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"possibilities must be an iterable of Schemas; item at {0} is {1}\".format(i, repr(x))\n trial.append(x)\n except TypeError:\n raise TypeError(\"possibilities must be an iterable of Schemas, not {0}\".format(repr(possibilities)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._possibilities = start + trial\n\n def append(self, possibility):\n if isinstance(possibility, basestring):\n possibility = Primitive(possibility)\n if not isinstance(possibility, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(possibility)))\n 
self._possibilities.append(possibility)\n\n def insert(self, index, possibility):\n if isinstance(possibility, basestring):\n possibility = Primitive(possibility)\n if not isinstance(possibility, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(possibility)))\n self._possibilities.insert(index, possibility)\n\n def extend(self, possibilities):\n self._extend(possibilities, self._possibilities)\n\n def __getitem__(self, index):\n return self._possibilities[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, (numbers.Integral, numpy.integer)):\n raise TypeError(\"possibility index must be an integer, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(value)))\n self._possibilities[index] = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._tags is not None and self._offsets is not None and (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._possibilities)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"[\" + \", \".join(x.__repr__(labels, shown, indent) for x in self._possibilities) + \"]\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._tags is not None:\n args.append(\"tags\" + eq + repr(self._tags))\n if self._offsets is not None:\n args.append(\"offsets\" + eq + repr(self._offsets))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"possibilities\" + eq + \"[\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(x.__repr__(labels, shown, indent + \" \").lstrip() for x in self._possibilities) + \"\\n\" + indent + \" ]\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Union(\" + argstr + \")\"\n else:\n return label + \": Union(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"union\", \"possibilities\": [x._tojson(explicit, labels, shown) for x in self._possibilities]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._tags is not None:\n out[\"tags\"] = self._tags\n if explicit or self._offsets is not None:\n out[\"offsets\"] = self._offsets\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n 
out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"possibilities\" not in data:\n raise TypeError(\"Union Schema from JSON is missing argument 'possibilities'\")\n if not isinstance(data[\"possibilities\"], list):\n raise TypeError(\"argument 'possibilities' for Union Schema from JSON should be a list, not {0}\".format(repr(data[\"possibilities\"])))\n out = Union.__new__(Union)\n out.possibilities = [Schema._fromjson(x, labels) for x in data[\"possibilities\"]]\n out.nullable = data.get(\"nullable\", False)\n out.tags = data.get(\"tags\", None)\n out.offsets = data.get(\"offsets\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for i in range(len(self._possibilities)):\n if isinstance(self._possibilities[i], basestring):\n if self._possibilities[i] not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._possibilities[i])))\n self._possibilities[i] = labels[self._possibilities[i]]\n else:\n self._possibilities[i]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for possibility in self._possibilities:\n possibility._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"possibilities\" not in replacements:\n replacements[\"possibilities\"] = self._possibilities\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"tags\" not in replacements:\n replacements[\"tags\"] = self._tags\n if \"offsets\" not in replacements:\n replacements[\"offsets\"] = self._offsets\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Union(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Union([x._replace(fcn, args, kwds, memo) for x in self._possibilities], nullable=self._nullable, tags=self._tags, offsets=self._offsets, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for possibility in self._possibilities:\n for nodes in possibility._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n 
if bottomup:\n for possibility in self._possibilities:\n for x in possibility._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for possibility in self._possibilities:\n for x in possibility._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n possibilities = []\n for x in self._possibilities:\n p = self._keep(loc, paths, project, memo)\n if p is None:\n return None\n else:\n possibilities.append(p)\n return self.copy(possibilities)\n\n def _drop(self, loc, paths, memo):\n possibilities = []\n for x in self._possibilities:\n p = self._drop(loc, paths, memo)\n if p is None:\n return None\n else:\n possibilities.append(p)\n return self.copy(possibilities)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._possibilities)\n\n def __hash__(self):\n return hash((Union, self._possibilities, self._nullable, self._tags, self._offsets, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Union) and len(self._possibilities) == len(other._possibilities) and self._nullable == other._nullable and self._tags == other._tags and self._offsets == other._offsets and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(x.__eq__(y, memo) for x, y in zip(self.possibilities, other.possibilities))\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self._nullable or any(x._nullable for x in self._possibilities)\n return any(x.__contains__(value, memo) for x in self.possibilities)\n\n def _get_tags(self, prefix, delimiter):\n if self._tags is None:\n return self._get_name(prefix, delimiter) + delimiter + \"T\"\n else:\n return self._tags\n\n def _get_offsets(self, prefix, delimiter):\n if self._offsets is None:\n return self._get_name(prefix, delimiter) + delimiter + \"O\"\n else:\n return self._offsets\n\n def _get_possibility(self, prefix, delimiter, i):\n return self._get_name(prefix, delimiter) + delimiter + \"U\" + repr(i)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedUnionGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.UnionGenerator\n\n args.append(self._get_tags(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._get_offsets(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n possibilitiesgen = [x._generator(self._get_possibility(prefix, delimiter, i), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing) for i, x in enumerate(self._possibilities)]\n args.append(possibilitiesgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(possibilities=[x.schema for x in possibilitiesgen], 
packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Records contain fields of known types\n\nclass Record(Schema):\n def __init__(self, fields, nullable=False, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.fields = fields\n self.nullable = nullable\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def fields(self):\n return dict(self._fields)\n\n @fields.setter\n def fields(self, value):\n self._extend(value, [])\n\n def keys(self):\n return self._fields.keys()\n\n def values(self):\n return self._fields.values()\n\n def items(self):\n return self._fields.items()\n \n def _extend(self, fields, start):\n trial = []\n try:\n for n, x in fields.items():\n assert isinstance(n, basestring), \"fields must be a dict from identifier strings to Schemas; the key {0} is not a string\".format(repr(n))\n matches = self._identifier.match(n)\n assert matches is not None and len(matches.group(0)) == len(n), \"fields must be a dict from identifier strings to Schemas; the key {0} is not an identifier (/{1}/)\".format(repr(n), self._identifier.pattern)\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"fields must be a dict from identifier strings to Schemas; the value at key {0} is {1}\".format(repr(n), repr(x))\n trial.append((n, x))\n except AttributeError:\n raise TypeError(\"fields must be a dict from strings to Schemas; {0} is not a dict\".format(repr(fields)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._fields = OrderedDict(start + trial)\n\n def __getitem__(self, index):\n return self._fields[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, basestring):\n raise TypeError(\"field keys must be strings, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"field values must be Schemas, not {0}\".format(repr(value)))\n self._fields[index] = value\n\n def __delitem__(self, index):\n del self._fields[index]\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._fields.values())\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"{\" + \", \".join(\"{0}: {1}\".format(repr(n), x.__repr__(labels, shown, indent)) for n, x in self._fields.items()) + \"}\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + 
repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"fields\" + eq + \"{\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(\"{0}: {1}\".format(repr(n), x.__repr__(labels, shown, indent + \" \").lstrip()) for n, x in self._fields.items()) + \"\\n\" + indent + \" }\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Record(\" + argstr + \")\"\n else:\n return label + \": Record(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"record\", \"fields\": [[n, x._tojson(explicit, labels, shown)] for n, x in self._fields.items()]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"fields\" not in data:\n raise TypeError(\"Record Schema from JSON is missing argument 'fields'\")\n out = Record.__new__(Record)\n if isinstance(data[\"fields\"], list) and all(len(x) == 2 and isinstance(x[0], basestring) for x in data[\"fields\"]):\n out._fields = OrderedDict((n, Schema._fromjson(x, labels)) for n, x in data[\"fields\"])\n elif isinstance(data[\"fields\"], dict) and all(isinstance(x, basestring) for x in data[\"fields\"]):\n out._fields = OrderedDict((n, Schema._fromjson(data[\"fields\"][n], labels)) for n in sorted(data[\"fields\"]))\n else:\n raise TypeError(\"argument 'fields' for Record Schema from JSON should be a list or dict of key-value pairs (in which the keys are strings), not {0}\".format(repr(data[\"fields\"])))\n out.nullable = data.get(\"nullable\", False)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for n in list(self._fields.keys()):\n if isinstance(self._fields[n], basestring):\n if self._fields[n] not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._fields[n])))\n self._fields[n] = labels[self._fields[n]]\n else:\n self._fields[n]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for field in self._fields.values():\n field._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"fields\" not in replacements:\n replacements[\"fields\"] = self._fields\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if 
\"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Record(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Record(OrderedDict((n, x._replace(fcn, args, kwds, memo)) for n, x in self._fields.items()), nullable=self._nullable, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for n, x in self._fields.items():\n for nodes in x._path(loc + (n,), path, (self,) + parents, True, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for field in self._fields.values():\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for field in self._fields.values():\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n fields = OrderedDict()\n for n, x in self._fields.items():\n if any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n fields[n] = x\n elif any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), \"/\".join(p.split(\"/\")[:len(loc) + 1])) for p in paths):\n f = x._keep(loc + (n,), paths, project, memo)\n if f is not None:\n fields[n] = f\n if len(fields) == 0:\n return None\n elif project and len(fields) == 1:\n out, = fields.values()\n return out\n else:\n return self.copy(fields=fields)\n\n def _drop(self, loc, paths, memo):\n fields = OrderedDict()\n for n, x in self._fields.items():\n if not any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n f = x._drop(loc + (n,), paths, memo)\n if f is not None:\n fields[n] = f\n if len(fields) == 0:\n return None\n else:\n return self.copy(fields=fields)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._fields.values())\n\n def __hash__(self):\n return hash((Record, tuple(self._fields.items()), self._nullable, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Record) and set(self._fields) == set(other._fields) and self._nullable == other._nullable and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(self._fields[n].__eq__(other._fields[n], memo) for n in self._fields)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n if isinstance(value, dict):\n return all(n in value and x.__contains__(value[n], memo) for n, x in self._fields.items())\n elif isinstance(value, tuple) and hasattr(value, \"_fields\"):\n return all(n in value._fields and 
x.__contains__(getattr(value, n), memo) for n, x in self._fields.items())\n elif isinstance(value, (list, tuple)):\n return False\n else:\n return all(hasattr(value, n) and x.__contains__(getattr(value, n), memo) for n, x in self._fields.items())\n\n def _get_field(self, prefix, delimiter, n):\n return self._get_name(prefix, delimiter) + delimiter + \"F\" + n\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if len(self._fields) == 0:\n raise TypeError(\"Record has no fields\")\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedRecordGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.RecordGenerator\n\n fieldsgen = OrderedDict([(n, self._fields[n]._generator(self._get_field(prefix, delimiter, n), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing)) for n in sorted(self._fields)])\n args.append(fieldsgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(fields=OrderedDict((n, x.schema) for n, x in fieldsgen.items()), packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Tuples are like records but with an order instead of field names\n\nclass Tuple(Schema):\n def __init__(self, types, nullable=False, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.types = types\n self.nullable = nullable\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def types(self):\n return tuple(self._types)\n\n @types.setter\n def types(self, value):\n self._extend(value, [])\n\n def _extend(self, types, start):\n trial = []\n try:\n for i, x in enumerate(types):\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"types must be an iterable of Schemas; item at {0} is {1}\".format(i, repr(x))\n trial.append(x)\n except TypeError:\n raise TypeError(\"types must be an iterable of Schemas, not {0}\".format(repr(types)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._types = start + trial\n\n def append(self, item):\n if isinstance(item, basestring):\n item = Primitive(item)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(item)))\n self._types.append(item)\n\n def insert(self, index, item):\n if isinstance(item, basestring):\n item = Primitive(item)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(item)))\n self._types.insert(index, item)\n\n def extend(self, types):\n self._extend(types, self._types)\n\n def __getitem__(self, index):\n return self._types[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, (numbers.Integral, numpy.integer)):\n raise TypeError(\"types index must be an integer, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(value)))\n self._types[index] = value\n\n def _hasarraynames(self, 
memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._types)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"[\" + \", \".join(x.__repr__(labels, shown) for x in self._types) + \"]\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"types\" + eq + \"[\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(x.__repr__(labels, shown, indent + \" \").lstrip() for x in self._types) + \"\\n\" + indent + \" ]\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Tuple(\" + argstr + \")\"\n else:\n return label + \": Tuple(\" + argstr + \")\"\n\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"tuple\", \"types\": [x._tojson(explicit, labels, shown) for x in self._types]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"types\" not in data:\n raise TypeError(\"Tuple Schema from JSON is missing argument 'types'\")\n if not isinstance(data[\"types\"], list):\n raise TypeError(\"argument 'types' for Tuple Schema from JSON should be a list, not {0}\".format(repr(data[\"types\"])))\n out = Tuple.__new__(Tuple)\n out._types = [Schema._fromjson(x, labels) for x in data[\"types\"]]\n out.nullable = data.get(\"nullable\", False)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for i in range(len(self._types)):\n if isinstance(self._types[i], basestring):\n if self._types[i] not in labels:\n raise 
TypeError(\"unresolved label: {0}\".format(repr(self._types[i])))\n self._types[i] = labels[self._types[i]]\n else:\n self._types[i]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for item in self._types:\n item._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"types\" not in replacements:\n replacements[\"types\"] = self._types\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Tuple(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Tuple([x._replace(fcn, args, kwds, memo) for x in self._types], nullable=self._nullable, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for i, x in enumerate(self._types):\n for nodes in x._path(loc + (str(i),), path, (self,) + parents, True, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for field in self._types:\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for field in self._types:\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n types = []\n for i, x in enumerate(self._types):\n n = str(i)\n if any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n types.append(x)\n elif any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), \"/\".join(p.split(\"/\")[:len(loc) + 1])) for p in paths):\n f = x._keep(loc + (n,), paths, project, memo)\n if f is not None:\n types.append(f)\n if len(types) == 0:\n return None\n elif project and len(fields) == 1:\n out, = fields.values()\n return out\n else:\n return self.copy(types=types)\n\n def _drop(self, loc, paths, memo):\n types = []\n for i, x in enumerate(self._types):\n n = str(i)\n if not any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n f = x._drop(loc + (n,), paths, memo)\n if f is not None:\n types.append(f)\n if len(types) == 0:\n return None\n else:\n return self.copy(types=types)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._types)\n\n def __hash__(self):\n return hash((Tuple, self._types, self._nullable, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Tuple) and len(self._types) == len(other._types) and self._nullable == other._nullable and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and 
self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(x.__eq__(y, memo) for x, y in zip(self._types, other._types))\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n if isinstance(value, tuple) and len(value) == len(self._types):\n return all(x.__contains__(v, memo) for v, x in zip(value, self._types))\n else:\n return False\n\n def _get_field(self, prefix, delimiter, i):\n return self._get_name(prefix, delimiter) + delimiter + \"F\" + repr(i)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if len(self._types) == 0:\n raise TypeError(\"Tuple has no types\")\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedTupleGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.TupleGenerator\n\n typesgen = [x._generator(self._get_field(prefix, delimiter, i), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing) for i, x in enumerate(self._types)]\n args.append(typesgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(types=[x.schema for x in typesgen], packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Pointers redirect to the contents of other types\n\nclass Pointer(Schema):\n def __init__(self, target, nullable=False, positions=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.target = target\n self.nullable = nullable\n self.positions = positions\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def target(self):\n return self._target\n\n @target.setter\n def target(self, value):\n if isinstance(value, basestring):\n value = Primitive(value)\n if not (value is None or isinstance(value, Schema)):\n raise TypeError(\"target must be None or a Schema, not {0}\".format(repr(value)))\n if value is self:\n raise TypeError(\"Pointer may not point directly at itself (it would never resolve to a value)\")\n self._target = value\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"positions must be None or an array name (string), not {0}\".format(repr(value)))\n self._positions = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._positions is not None and (not self._nullable or self._mask is not None) and self._target._hasarraynames(memo)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n if self._target is None:\n args.append(repr(None))\n else:\n 
args.append(self._target.__repr__(labels, shown, indent))\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._positions is not None:\n args.append(\"positions\" + eq + repr(self._positions))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n if self._target is None:\n args.append(\"target\" + eq + repr(None) + \"\\n\" + indent)\n else:\n args.append(\"target\" + eq + self._target.__repr__(labels, shown, indent + \" \").lstrip() + \"\\n\" + indent)\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n \n if label is None:\n return \"Pointer(\" + argstr + \")\"\n else:\n return label + \": Pointer(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n if self._target is None:\n raise TypeError(\"pointer target is still None; must be resolved before it can be stored\")\n out = {\"type\": \"pointer\", \"target\": self._target._tojson(explicit, labels, shown)}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._positions is not None:\n out[\"positions\"] = self._positions\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"target\" not in data:\n raise TypeError(\"Pointer Schema from JSON is missing argument 'target'\")\n out = Pointer.__new__(Pointer)\n out._target = Schema._fromjson(data[\"target\"], labels)\n out.nullable = data.get(\"nullable\", False)\n out.positions = data.get(\"positions\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n if isinstance(self._target, basestring):\n if self._target not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._target)))\n self._target = labels[self._target]\n else:\n self._target._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n if self._target is not None:\n self._target._collectlabels(collection, labels)\n else:\n 
labels.append(self)\n\n def copy(self, **replacements):\n if \"target\" not in replacements:\n replacements[\"target\"] = self._target\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"positions\" not in replacements:\n replacements[\"positions\"] = self._positions\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Pointer(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n if id(self) in memo:\n return fcn(memo[id(self)], *args, **kwds)\n memo[id(self)] = Pointer(None, nullable=self._nullable, positions=self._positions, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata))\n memo[id(self)]._target = self._target._replace(fcn, args, kwds, memo)\n return fcn(memo[id(self)], *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n if id(self) not in memo:\n memo.add(id(self))\n for nodes in self._target._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if id(self) not in memo:\n memo.add(id(self))\n if bottomup:\n for x in self._target._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for x in self._target._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n if id(self) in memo:\n return memo[id(self)]\n memo[id(self)] = self.copy(target=None)\n target = self._target._keep(loc, paths, project, memo)\n if target is None:\n return None\n else:\n memo[id(self)]._target = target\n return memo[id(self)]\n\n def _drop(self, loc, paths, memo):\n if id(self) in memo:\n return memo[id(self)]\n memo[id(self)] = self.copy(target=None)\n target = self._target._drop(loc, paths, memo)\n if target is None:\n return None\n else:\n memo[id(self)]._target = target\n return memo[id(self)]\n\n def _contains(self, schema, memo):\n if id(self) in memo:\n return False\n memo.add(id(self))\n if self == schema:\n return True\n else:\n return self._target._contains(schema, memo)\n\n def __hash__(self):\n return hash((Pointer, self._target, self._nullable, self._positions, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Pointer) and self._nullable == other._nullable and self._positions == other._positions and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return self.target.__eq__(other.target, memo)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if id(value) in memo:\n return memo[id(value)] == id(self)\n memo[id(value)] = 
id(self)\n if value is None:\n return self._nullable\n return self.target.__contains__(value, memo)\n\n def _get_positions(self, prefix, delimiter):\n if self._positions is None:\n return self._get_name(prefix, delimiter) + delimiter + \"P\"\n else:\n return self._positions\n\n def _get_external(self, prefix, delimiter):\n return self._get_name(prefix, delimiter) + delimiter + \"X\"\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if self._target is None:\n raise TypeError(\"when creating a Pointer type from a Pointer schema, target must be set to a value other than None\")\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedPointerGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.PointerGenerator\n\n args.append(self._get_positions(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append((self._target, prefix, delimiter)) # placeholder! see _finalizegenerator!\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n" ]
[ [ "numpy.iinfo", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
domoritz/solas
[ "23878fed9efbf14781791dafec26705c6762cfd1" ]
[ "tests/test_maintainence.py" ]
[ "# Copyright 2019-2020 The Solas Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .context import solas\nimport pytest\nimport pandas as pd\nfrom solas.vis.Vis import Vis\n\n\ndef test_metadata_subsequent_display(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n\ndef test_metadata_subsequent_vis(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n vis = Vis([\"Acceleration\", \"Horsepower\"], df)\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n\ndef test_metadata_inplace_operation(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df.dropna(inplace=True)\n assert df._metadata_fresh == False, \"Failed to expire metadata after in-place Pandas operation\"\n\n\ndef test_metadata_new_df_operation(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df[[\"MilesPerGal\", \"Acceleration\"]]\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df2 = df[[\"MilesPerGal\", \"Acceleration\"]]\n assert not hasattr(df2, \"_metadata_fresh\")\n\n\ndef test_metadata_column_group_reset_df(global_var):\n df = pd.read_csv(\"solas/data/car.csv\")\n assert not hasattr(df, \"_metadata_fresh\")\n df[\"Year\"] = pd.to_datetime(df[\"Year\"], format=\"%Y\")\n assert hasattr(df, \"_metadata_fresh\")\n result = df.groupby(\"Cylinders\").mean()\n assert not hasattr(result, \"_metadata_fresh\")\n # Note that this should trigger two compute metadata (one for df, and one for an intermediate df.reset_index used to feed inside created Vis)\n result._ipython_display_()\n assert result._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n colgroup_recs = result.recommendation[\"Column Groups\"]\n assert len(colgroup_recs) == 5\n for rec in colgroup_recs:\n assert rec.mark == \"bar\", \"Column Group not displaying bar charts\"\n\n\ndef test_recs_inplace_operation(global_var):\n df = pytest.college_df\n df._ipython_display_()\n assert df._recs_fresh == True, \"Failed to maintain recommendation after display df\"\n assert len(df.recommendation[\"Occurrence\"]) == 6\n df.drop(columns=[\"Name\"], inplace=True)\n assert \"Name\" not in df.columns, \"Failed to perform `drop` operation in-place\"\n assert df._recs_fresh == False, \"Failed to maintain recommendation after in-place Pandas operation\"\n df._ipython_display_()\n assert len(df.recommendation[\"Occurrence\"]) == 5\n assert df._recs_fresh == True, \"Failed to maintain recommendation after display df\"\n\n\ndef test_intent_cleared_after_vis_data():\n df = pd.read_csv(\n 
\"https://github.com/lux/solas-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n df[\"Month\"] = pd.to_datetime(df[\"Month\"], format=\"%m\")\n df[\"Year\"] = pd.to_datetime(df[\"Year\"], format=\"%Y\")\n df.intent = [\n solas.Clause(\"Year\"),\n solas.Clause(\"PctForeclosured\"),\n solas.Clause(\"City=Crofton\"),\n ]\n df._ipython_display_()\n\n vis = df.recommendation[\"Similarity\"][0]\n visdata = vis.data\n visdata.data_type[\"PctForeclosured\"] = \"quantitative\"\n # otherwise because of the small size of the dataframe, the cardinality of PctForeclosured is less than 20\n # and thereby this attribute will be considered as nominal\n visdata._ipython_display_()\n all_column_vis = visdata.current_vis[0]\n assert all_column_vis.get_attr_by_channel(\"x\")[0].attribute == \"Year\"\n assert all_column_vis.get_attr_by_channel(\"y\")[0].attribute == \"PctForeclosured\"\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
yetyetanotherusername/zarr-python
[ "e3cdd1a50e1047304be2c91a017da6362f8df533", "e3cdd1a50e1047304be2c91a017da6362f8df533" ]
[ "zarr/core.py", "zarr/tests/test_core.py" ]
[ "import binascii\nimport hashlib\nimport itertools\nimport math\nimport operator\nimport re\nfrom functools import reduce\n\nimport numpy as np\nfrom numcodecs.compat import ensure_bytes, ensure_ndarray\n\nfrom zarr.attrs import Attributes\nfrom zarr.codecs import AsType, get_codec\nfrom zarr.errors import ArrayNotFoundError, ReadOnlyError\nfrom zarr.indexing import (BasicIndexer, CoordinateIndexer, MaskIndexer,\n OIndex, OrthogonalIndexer, VIndex, check_fields,\n check_no_multi_fields, ensure_tuple,\n err_too_many_indices, is_contiguous_selection,\n is_scalar, pop_fields)\nfrom zarr.meta import decode_array_metadata, encode_array_metadata\nfrom zarr.storage import array_meta_key, attrs_key, getsize, listdir\nfrom zarr.util import (InfoReporter, check_array_shape, human_readable_size,\n is_total_slice, nolock, normalize_chunks,\n normalize_resize_args, normalize_shape,\n normalize_storage_path)\n\n\n# noinspection PyUnresolvedReferences\nclass Array(object):\n \"\"\"Instantiate an array from an initialized store.\n\n Parameters\n ----------\n store : MutableMapping\n Array store, already initialized.\n path : string, optional\n Storage path.\n read_only : bool, optional\n True if array should be protected against modification.\n chunk_store : MutableMapping, optional\n Separate storage for chunks. If not provided, `store` will be used\n for storage of both chunks and metadata.\n synchronizer : object, optional\n Array synchronizer.\n cache_metadata : bool, optional\n If True (default), array configuration metadata will be cached for the\n lifetime of the object. If False, array metadata will be reloaded\n prior to all data access and modification operations (may incur\n overhead depending on storage and data access pattern).\n cache_attrs : bool, optional\n If True (default), user attributes will be cached for attribute read\n operations. 
If False, user attributes are reloaded from the store prior\n to all attribute read operations.\n\n Attributes\n ----------\n store\n path\n name\n read_only\n chunk_store\n shape\n chunks\n dtype\n compression\n compression_opts\n fill_value\n order\n synchronizer\n filters\n attrs\n size\n itemsize\n nbytes\n nbytes_stored\n cdata_shape\n nchunks\n nchunks_initialized\n is_view\n info\n vindex\n oindex\n\n Methods\n -------\n __getitem__\n __setitem__\n get_basic_selection\n set_basic_selection\n get_orthogonal_selection\n set_orthogonal_selection\n get_mask_selection\n set_mask_selection\n get_coordinate_selection\n set_coordinate_selection\n digest\n hexdigest\n resize\n append\n view\n astype\n\n \"\"\"\n\n def __init__(self, store, path=None, read_only=False, chunk_store=None,\n synchronizer=None, cache_metadata=True, cache_attrs=True):\n # N.B., expect at this point store is fully initialized with all\n # configuration metadata fully specified and normalized\n\n self._store = store\n self._chunk_store = chunk_store\n self._path = normalize_storage_path(path)\n if self._path:\n self._key_prefix = self._path + '/'\n else:\n self._key_prefix = ''\n self._read_only = bool(read_only)\n self._synchronizer = synchronizer\n self._cache_metadata = cache_metadata\n self._is_view = False\n\n # initialize metadata\n self._load_metadata()\n\n # initialize attributes\n akey = self._key_prefix + attrs_key\n self._attrs = Attributes(store, key=akey, read_only=read_only,\n synchronizer=synchronizer, cache=cache_attrs)\n\n # initialize info reporter\n self._info_reporter = InfoReporter(self)\n\n # initialize indexing helpers\n self._oindex = OIndex(self)\n self._vindex = VIndex(self)\n\n def _load_metadata(self):\n \"\"\"(Re)load metadata from store.\"\"\"\n if self._synchronizer is None:\n self._load_metadata_nosync()\n else:\n mkey = self._key_prefix + array_meta_key\n with self._synchronizer[mkey]:\n self._load_metadata_nosync()\n\n def _load_metadata_nosync(self):\n try:\n mkey = self._key_prefix + array_meta_key\n meta_bytes = self._store[mkey]\n except KeyError:\n raise ArrayNotFoundError(self._path)\n else:\n\n # decode and store metadata as instance members\n meta = decode_array_metadata(meta_bytes)\n self._meta = meta\n self._shape = meta['shape']\n self._chunks = meta['chunks']\n self._dtype = meta['dtype']\n self._fill_value = meta['fill_value']\n self._order = meta['order']\n\n # setup compressor\n config = meta['compressor']\n if config is None:\n self._compressor = None\n else:\n self._compressor = get_codec(config)\n\n # setup filters\n filters = meta['filters']\n if filters:\n filters = [get_codec(config) for config in filters]\n self._filters = filters\n\n def _refresh_metadata(self):\n if not self._cache_metadata:\n self._load_metadata()\n\n def _refresh_metadata_nosync(self):\n if not self._cache_metadata and not self._is_view:\n self._load_metadata_nosync()\n\n def _flush_metadata_nosync(self):\n if self._is_view:\n raise PermissionError('operation not permitted for views')\n\n if self._compressor:\n compressor_config = self._compressor.get_config()\n else:\n compressor_config = None\n if self._filters:\n filters_config = [f.get_config() for f in self._filters]\n else:\n filters_config = None\n meta = dict(shape=self._shape, chunks=self._chunks, dtype=self._dtype,\n compressor=compressor_config, fill_value=self._fill_value,\n order=self._order, filters=filters_config)\n mkey = self._key_prefix + array_meta_key\n self._store[mkey] = encode_array_metadata(meta)\n\n @property\n def 
store(self):\n \"\"\"A MutableMapping providing the underlying storage for the array.\"\"\"\n return self._store\n\n @property\n def path(self):\n \"\"\"Storage path.\"\"\"\n return self._path\n\n @property\n def name(self):\n \"\"\"Array name following h5py convention.\"\"\"\n if self.path:\n # follow h5py convention: add leading slash\n name = self.path\n if name[0] != '/':\n name = '/' + name\n return name\n return None\n\n @property\n def basename(self):\n \"\"\"Final component of name.\"\"\"\n if self.name is not None:\n return self.name.split('/')[-1]\n return None\n\n @property\n def read_only(self):\n \"\"\"A boolean, True if modification operations are not permitted.\"\"\"\n return self._read_only\n\n @read_only.setter\n def read_only(self, value):\n self._read_only = bool(value)\n\n @property\n def chunk_store(self):\n \"\"\"A MutableMapping providing the underlying storage for array chunks.\"\"\"\n if self._chunk_store is None:\n return self._store\n else:\n return self._chunk_store\n\n @property\n def shape(self):\n \"\"\"A tuple of integers describing the length of each dimension of\n the array.\"\"\"\n # N.B., shape may change if array is resized, hence need to refresh\n # metadata\n self._refresh_metadata()\n return self._shape\n\n @shape.setter\n def shape(self, value):\n self.resize(value)\n\n @property\n def chunks(self):\n \"\"\"A tuple of integers describing the length of each dimension of a\n chunk of the array.\"\"\"\n return self._chunks\n\n @property\n def dtype(self):\n \"\"\"The NumPy data type.\"\"\"\n return self._dtype\n\n @property\n def compressor(self):\n \"\"\"Primary compression codec.\"\"\"\n return self._compressor\n\n @property\n def fill_value(self):\n \"\"\"A value used for uninitialized portions of the array.\"\"\"\n return self._fill_value\n\n @property\n def order(self):\n \"\"\"A string indicating the order in which bytes are arranged within\n chunks of the array.\"\"\"\n return self._order\n\n @property\n def filters(self):\n \"\"\"One or more codecs used to transform data prior to compression.\"\"\"\n return self._filters\n\n @property\n def synchronizer(self):\n \"\"\"Object used to synchronize write access to the array.\"\"\"\n return self._synchronizer\n\n @property\n def attrs(self):\n \"\"\"A MutableMapping containing user-defined attributes. Note that\n attribute values must be JSON serializable.\"\"\"\n return self._attrs\n\n @property\n def ndim(self):\n \"\"\"Number of dimensions.\"\"\"\n return len(self.shape)\n\n @property\n def _size(self):\n return reduce(operator.mul, self._shape, 1)\n\n @property\n def size(self):\n \"\"\"The total number of elements in the array.\"\"\"\n # N.B., this property depends on shape, and shape may change if array\n # is resized, hence need to refresh metadata\n self._refresh_metadata()\n return self._size\n\n @property\n def itemsize(self):\n \"\"\"The size in bytes of each item in the array.\"\"\"\n return self.dtype.itemsize\n\n @property\n def _nbytes(self):\n return self._size * self.itemsize\n\n @property\n def nbytes(self):\n \"\"\"The total number of bytes that would be required to store the\n array without compression.\"\"\"\n # N.B., this property depends on shape, and shape may change if array\n # is resized, hence need to refresh metadata\n self._refresh_metadata()\n return self._nbytes\n\n @property\n def nbytes_stored(self):\n \"\"\"The total number of stored bytes of data for the array. 
This\n includes storage required for configuration metadata and user\n attributes.\"\"\"\n m = getsize(self._store, self._path)\n if self._chunk_store is None:\n return m\n else:\n n = getsize(self._chunk_store, self._path)\n if m < 0 or n < 0:\n return -1\n else:\n return m + n\n\n @property\n def _cdata_shape(self):\n if self._shape == ():\n return 1,\n else:\n return tuple(math.ceil(s / c)\n for s, c in zip(self._shape, self._chunks))\n\n @property\n def cdata_shape(self):\n \"\"\"A tuple of integers describing the number of chunks along each\n dimension of the array.\"\"\"\n self._refresh_metadata()\n return self._cdata_shape\n\n @property\n def _nchunks(self):\n return reduce(operator.mul, self._cdata_shape, 1)\n\n @property\n def nchunks(self):\n \"\"\"Total number of chunks.\"\"\"\n self._refresh_metadata()\n return self._nchunks\n\n @property\n def nchunks_initialized(self):\n \"\"\"The number of chunks that have been initialized with some data.\"\"\"\n\n # key pattern for chunk keys\n prog = re.compile(r'\\.'.join([r'\\d+'] * min(1, self.ndim)))\n\n # count chunk keys\n return sum(1 for k in listdir(self.chunk_store, self._path) if prog.match(k))\n\n # backwards compability\n initialized = nchunks_initialized\n\n @property\n def is_view(self):\n \"\"\"A boolean, True if this array is a view on another array.\"\"\"\n return self._is_view\n\n @property\n def oindex(self):\n \"\"\"Shortcut for orthogonal (outer) indexing, see :func:`get_orthogonal_selection` and\n :func:`set_orthogonal_selection` for documentation and examples.\"\"\"\n return self._oindex\n\n @property\n def vindex(self):\n \"\"\"Shortcut for vectorized (inner) indexing, see :func:`get_coordinate_selection`,\n :func:`set_coordinate_selection`, :func:`get_mask_selection` and\n :func:`set_mask_selection` for documentation and examples.\"\"\"\n return self._vindex\n\n def __eq__(self, other):\n return (\n isinstance(other, Array) and\n self.store == other.store and\n self.read_only == other.read_only and\n self.path == other.path and\n not self._is_view\n # N.B., no need to compare other properties, should be covered by\n # store comparison\n )\n\n def __array__(self, *args):\n a = self[...]\n if args:\n a = a.astype(args[0])\n return a\n\n def __iter__(self):\n if len(self.shape) == 0:\n # Same error as numpy\n raise TypeError(\"iteration over a 0-d array\")\n # Avoid repeatedly decompressing chunks by iterating over the chunks\n # in the first dimension.\n chunk_size = self.chunks[0]\n for j in range(self.shape[0]):\n if j % chunk_size == 0:\n chunk = self[j: j + chunk_size]\n yield chunk[j % chunk_size]\n\n def __len__(self):\n if self.shape:\n return self.shape[0]\n else:\n # 0-dimensional array, same error message as numpy\n raise TypeError('len() of unsized object')\n\n def __getitem__(self, selection):\n \"\"\"Retrieve data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice objects specifying the\n requested item or region for each dimension of the array.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested region.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100))\n\n Retrieve a single item::\n\n >>> z[5]\n 5\n\n Retrieve a region via slicing::\n\n >>> z[:5]\n array([0, 1, 2, 3, 4])\n >>> z[-5:]\n array([95, 96, 97, 98, 99])\n >>> z[5:10]\n array([5, 6, 7, 8, 9])\n >>> z[5:10:2]\n array([5, 7, 9])\n >>> 
z[::2]\n array([ 0, 2, 4, ..., 94, 96, 98])\n\n Load the entire array into memory::\n\n >>> z[...]\n array([ 0, 1, 2, ..., 97, 98, 99])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve an item::\n\n >>> z[2, 2]\n 22\n\n Retrieve a region via slicing::\n\n >>> z[1:3, 1:3]\n array([[11, 12],\n [21, 22]])\n >>> z[1:3, :]\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])\n >>> z[:, 1:3]\n array([[ 1, 2],\n [11, 12],\n [21, 22],\n [31, 32],\n [41, 42],\n [51, 52],\n [61, 62],\n [71, 72],\n [81, 82],\n [91, 92]])\n >>> z[0:5:2, 0:5:2]\n array([[ 0, 2, 4],\n [20, 22, 24],\n [40, 42, 44]])\n >>> z[::2, ::2]\n array([[ 0, 2, 4, 6, 8],\n [20, 22, 24, 26, 28],\n [40, 42, 44, 46, 48],\n [60, 62, 64, 66, 68],\n [80, 82, 84, 86, 88]])\n\n Load the entire array into memory::\n\n >>> z[...]\n array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],\n [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],\n [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])\n\n For arrays with a structured dtype, specific fields can be retrieved, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z['foo']\n array([b'aaa', b'bbb', b'ccc'],\n dtype='|S3')\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently the implementation for __getitem__ is provided by\n :func:`get_basic_selection`. For advanced (\"fancy\") indexing, see the methods\n listed under See Also.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __setitem__\n\n \"\"\"\n\n fields, selection = pop_fields(selection)\n return self.get_basic_selection(selection, fields=fields)\n\n def get_basic_selection(self, selection=Ellipsis, out=None, fields=None):\n \"\"\"Retrieve data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n A tuple specifying the requested item or region for each dimension of the\n array. 
May be any combination of int and/or slice for multidimensional arrays.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested region.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100))\n\n Retrieve a single item::\n\n >>> z.get_basic_selection(5)\n 5\n\n Retrieve a region via slicing::\n\n >>> z.get_basic_selection(slice(5))\n array([0, 1, 2, 3, 4])\n >>> z.get_basic_selection(slice(-5, None))\n array([95, 96, 97, 98, 99])\n >>> z.get_basic_selection(slice(5, 10))\n array([5, 6, 7, 8, 9])\n >>> z.get_basic_selection(slice(5, 10, 2))\n array([5, 7, 9])\n >>> z.get_basic_selection(slice(None, None, 2))\n array([ 0, 2, 4, ..., 94, 96, 98])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve an item::\n\n >>> z.get_basic_selection((2, 2))\n 22\n\n Retrieve a region via slicing::\n\n >>> z.get_basic_selection((slice(1, 3), slice(1, 3)))\n array([[11, 12],\n [21, 22]])\n >>> z.get_basic_selection((slice(1, 3), slice(None)))\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])\n >>> z.get_basic_selection((slice(None), slice(1, 3)))\n array([[ 1, 2],\n [11, 12],\n [21, 22],\n [31, 32],\n [41, 42],\n [51, 52],\n [61, 62],\n [71, 72],\n [81, 82],\n [91, 92]])\n >>> z.get_basic_selection((slice(0, 5, 2), slice(0, 5, 2)))\n array([[ 0, 2, 4],\n [20, 22, 24],\n [40, 42, 44]])\n >>> z.get_basic_selection((slice(None, None, 2), slice(None, None, 2)))\n array([[ 0, 2, 4, 6, 8],\n [20, 22, 24, 26, 28],\n [40, 42, 44, 46, 48],\n [60, 62, 64, 66, 68],\n [80, 82, 84, 86, 88]])\n\n For arrays with a structured dtype, specific fields can be retrieved, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z.get_basic_selection(slice(2), fields='foo')\n array([b'aaa', b'bbb'],\n dtype='|S3')\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently this method provides the implementation for accessing data via the\n square bracket notation (__getitem__). 
See :func:`__getitem__` for examples\n using the alternative notation.\n\n See Also\n --------\n set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # handle zero-dimensional arrays\n if self._shape == ():\n return self._get_basic_selection_zd(selection=selection, out=out,\n fields=fields)\n else:\n return self._get_basic_selection_nd(selection=selection, out=out,\n fields=fields)\n\n def _get_basic_selection_zd(self, selection, out=None, fields=None):\n # special case basic selection for zero-dimensional array\n\n # check selection is valid\n selection = ensure_tuple(selection)\n if selection not in ((), (Ellipsis,)):\n err_too_many_indices(selection, ())\n\n try:\n # obtain encoded data for chunk\n ckey = self._chunk_key((0,))\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n chunk = np.zeros((), dtype=self._dtype)\n if self._fill_value is not None:\n chunk.fill(self._fill_value)\n\n else:\n chunk = self._decode_chunk(cdata)\n\n # handle fields\n if fields:\n chunk = chunk[fields]\n\n # handle selection of the scalar value via empty tuple\n if out is None:\n out = chunk[selection]\n else:\n out[selection] = chunk[selection]\n\n return out\n\n def _get_basic_selection_nd(self, selection, out=None, fields=None):\n # implementation of basic selection for array with at least one dimension\n\n # setup indexer\n indexer = BasicIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def get_orthogonal_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve data by making a selection for each dimension of the array. For\n example, if an array has 2 dimensions, allows selecting specific rows and/or\n columns. The selection for each dimension can be either an integer (indexing a\n single item), a slice, an array of integers, or a Boolean array where True\n values indicate a selection.\n\n Parameters\n ----------\n selection : tuple\n A selection for each dimension of the array. 
May be any combination of int,\n slice, integer array or Boolean array.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve rows and columns via any combination of int, slice, integer array and/or\n Boolean array::\n\n >>> z.get_orthogonal_selection(([1, 4], slice(None)))\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])\n >>> z.get_orthogonal_selection((slice(None), [1, 4]))\n array([[ 1, 4],\n [11, 14],\n [21, 24],\n [31, 34],\n [41, 44],\n [51, 54],\n [61, 64],\n [71, 74],\n [81, 84],\n [91, 94]])\n >>> z.get_orthogonal_selection(([1, 4], [1, 4]))\n array([[11, 14],\n [41, 44]])\n >>> sel = np.zeros(z.shape[0], dtype=bool)\n >>> sel[1] = True\n >>> sel[4] = True\n >>> z.get_orthogonal_selection((sel, sel))\n array([[11, 14],\n [41, 44]])\n\n For convenience, the orthogonal selection functionality is also available via the\n `oindex` property, e.g.::\n\n >>> z.oindex[[1, 4], :]\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])\n >>> z.oindex[:, [1, 4]]\n array([[ 1, 4],\n [11, 14],\n [21, 24],\n [31, 34],\n [41, 44],\n [51, 54],\n [61, 64],\n [71, 74],\n [81, 84],\n [91, 94]])\n >>> z.oindex[[1, 4], [1, 4]]\n array([[11, 14],\n [41, 44]])\n >>> sel = np.zeros(z.shape[0], dtype=bool)\n >>> sel[1] = True\n >>> sel[4] = True\n >>> z.oindex[sel, sel]\n array([[11, 14],\n [41, 44]])\n\n Notes\n -----\n Orthogonal indexing is also known as outer indexing.\n\n Slices with step > 1 are supported, but slices with negative step are not.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = OrthogonalIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def get_coordinate_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve a selection of individual items, by providing the indices\n (coordinates) for each selected item.\n\n Parameters\n ----------\n selection : tuple\n An integer (coordinate) array for each dimension of the array.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve items by specifying their coordinates::\n\n >>> z.get_coordinate_selection(([1, 4], [1, 4]))\n array([11, 44])\n\n For convenience, the coordinate selection functionality is also available via the\n `vindex` property, e.g.::\n\n >>> z.vindex[[1, 4], 
[1, 4]]\n array([11, 44])\n\n Notes\n -----\n Coordinate indexing is also known as point selection, and is a form of vectorized\n or inner indexing.\n\n Slices are not supported. Coordinate arrays must be provided for all dimensions\n of the array.\n\n Coordinate arrays may be multidimensional, in which case the output array will\n also be multidimensional. Coordinate arrays are broadcast against each other\n before being applied. The shape of the output will be the same as the shape of\n each coordinate array after broadcasting.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, set_coordinate_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = CoordinateIndexer(selection, self)\n\n # handle output - need to flatten\n if out is not None:\n out = out.reshape(-1)\n\n out = self._get_selection(indexer=indexer, out=out, fields=fields)\n\n # restore shape\n out = out.reshape(indexer.sel_shape)\n\n return out\n\n def get_mask_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve a selection of individual items, by providing a Boolean array of the\n same shape as the array against which the selection is being made, where True\n values indicate a selected item.\n\n Parameters\n ----------\n selection : ndarray, bool\n A Boolean array of the same shape as the array against which the selection is\n being made.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve items by specifying a maks::\n\n >>> sel = np.zeros_like(z, dtype=bool)\n >>> sel[1, 1] = True\n >>> sel[4, 4] = True\n >>> z.get_mask_selection(sel)\n array([11, 44])\n\n For convenience, the mask selection functionality is also available via the\n `vindex` property, e.g.::\n\n >>> z.vindex[sel]\n array([11, 44])\n\n Notes\n -----\n Mask indexing is a form of vectorized or inner indexing, and is equivalent to\n coordinate indexing. Internally the mask array is converted to coordinate\n arrays by calling `np.nonzero`.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n set_coordinate_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = MaskIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def _get_selection(self, indexer, out=None, fields=None):\n\n # We iterate over all chunks which overlap the selection and thus contain data\n # that needs to be extracted. 
Each chunk is processed in turn, extracting the\n # necessary data and storing into the correct location in the output array.\n\n # N.B., it is an important optimisation that we only visit chunks which overlap\n # the selection. This minimises the number of iterations in the main for loop.\n\n # check fields are sensible\n out_dtype = check_fields(fields, self._dtype)\n\n # determine output shape\n out_shape = indexer.shape\n\n # setup output array\n if out is None:\n out = np.empty(out_shape, dtype=out_dtype, order=self._order)\n else:\n check_array_shape('out', out, out_shape)\n\n # iterate over chunks\n for chunk_coords, chunk_selection, out_selection in indexer:\n\n # load chunk selection into output array\n self._chunk_getitem(chunk_coords, chunk_selection, out, out_selection,\n drop_axes=indexer.drop_axes, fields=fields)\n\n if out.shape:\n return out\n else:\n return out[()]\n\n def __setitem__(self, selection, value):\n \"\"\"Modify data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice specifying the requested\n region for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> z = zarr.zeros(100, dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z[...] = 42\n >>> z[...]\n array([42, 42, 42, ..., 42, 42, 42])\n\n Set a portion of the array::\n\n >>> z[:10] = np.arange(10)\n >>> z[-10:] = np.arange(10)[::-1]\n >>> z[...]\n array([ 0, 1, 2, ..., 2, 1, 0])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z[...] = 42\n\n Set a portion of the array::\n\n >>> z[0, :] = np.arange(z.shape[1])\n >>> z[:, 0] = np.arange(z.shape[0])\n >>> z[...]\n array([[ 0, 1, 2, 3, 4],\n [ 1, 42, 42, 42, 42],\n [ 2, 42, 42, 42, 42],\n [ 3, 42, 42, 42, 42],\n [ 4, 42, 42, 42, 42]])\n\n For arrays with a structured dtype, specific fields can be modified, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z['foo'] = b'zzz'\n >>> z[...]\n array([(b'zzz', 1, 4.2), (b'zzz', 2, 8.4), (b'zzz', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', '<i4'), ('baz', '<f8')])\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently the implementation for __setitem__ is provided by\n :func:`set_basic_selection`, which means that only integers and slices are\n supported within the selection. 
For advanced (\"fancy\") indexing, see the\n methods listed under See Also.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__\n\n \"\"\"\n\n fields, selection = pop_fields(selection)\n self.set_basic_selection(selection, value, fields=fields)\n\n def set_basic_selection(self, selection, value, fields=None):\n \"\"\"Modify data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice specifying the requested\n region for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros(100, dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z.set_basic_selection(..., 42)\n >>> z[...]\n array([42, 42, 42, ..., 42, 42, 42])\n\n Set a portion of the array::\n\n >>> z.set_basic_selection(slice(10), np.arange(10))\n >>> z.set_basic_selection(slice(-10, None), np.arange(10)[::-1])\n >>> z[...]\n array([ 0, 1, 2, ..., 2, 1, 0])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z.set_basic_selection(..., 42)\n\n Set a portion of the array::\n\n >>> z.set_basic_selection((0, slice(None)), np.arange(z.shape[1]))\n >>> z.set_basic_selection((slice(None), 0), np.arange(z.shape[0]))\n >>> z[...]\n array([[ 0, 1, 2, 3, 4],\n [ 1, 42, 42, 42, 42],\n [ 2, 42, 42, 42, 42],\n [ 3, 42, 42, 42, 42],\n [ 4, 42, 42, 42, 42]])\n\n For arrays with a structured dtype, the `fields` parameter can be used to set\n data for a specific field, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z.set_basic_selection(slice(0, 2), b'zzz', fields='foo')\n >>> z[:]\n array([(b'zzz', 1, 4.2), (b'zzz', 2, 8.4), (b'ccc', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', '<i4'), ('baz', '<f8')])\n\n Notes\n -----\n This method provides the underlying implementation for modifying data via square\n bracket notation, see :func:`__setitem__` for equivalent examples using the\n alternative notation.\n\n See Also\n --------\n get_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # handle zero-dimensional arrays\n if self._shape == ():\n return self._set_basic_selection_zd(selection, value, fields=fields)\n else:\n return self._set_basic_selection_nd(selection, value, fields=fields)\n\n def set_orthogonal_selection(self, selection, value, fields=None):\n \"\"\"Modify data via a selection for each dimension of the array.\n\n Parameters\n ----------\n selection : tuple\n A selection for each dimension of the array. 
May be any combination of int,\n slice, integer array or Boolean array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of rows::\n\n >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1]])\n\n Set data for a selection of columns::\n\n >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 2, 1, 1, 2],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 2, 1, 1, 2]])\n\n Set data for a selection of rows and columns::\n\n >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 3, 1, 1, 3],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 3, 1, 1, 3]])\n\n For convenience, this functionality is also available via the `oindex` property.\n E.g.::\n\n >>> z.oindex[[1, 4], [1, 4]] = 4\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 4, 1, 1, 4],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 4, 1, 1, 4]])\n\n Notes\n -----\n Orthogonal indexing is also known as outer indexing.\n\n Slices with step > 1 are supported, but slices with negative step are not.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = OrthogonalIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def set_coordinate_selection(self, selection, value, fields=None):\n \"\"\"Modify a selection of individual items, by providing the indices (coordinates)\n for each item to be modified.\n\n Parameters\n ----------\n selection : tuple\n An integer (coordinate) array for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of items::\n\n >>> z.set_coordinate_selection(([1, 4], [1, 4]), 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1]])\n\n For convenience, this functionality is also available via the `vindex` property.\n E.g.::\n\n >>> z.vindex[[1, 4], [1, 4]] = 2\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 2]])\n\n Notes\n -----\n Coordinate indexing is also known as point selection, and is a form of vectorized\n or inner indexing.\n\n Slices are not supported. 
Coordinate arrays must be provided for all dimensions\n of the array.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = CoordinateIndexer(selection, self)\n\n # handle value - need to flatten\n if not is_scalar(value, self._dtype):\n value = np.asanyarray(value)\n if hasattr(value, 'shape') and len(value.shape) > 1:\n value = value.reshape(-1)\n\n self._set_selection(indexer, value, fields=fields)\n\n def set_mask_selection(self, selection, value, fields=None):\n \"\"\"Modify a selection of individual items, by providing a Boolean array of the\n same shape as the array against which the selection is being made, where True\n values indicate a selected item.\n\n Parameters\n ----------\n selection : ndarray, bool\n A Boolean array of the same shape as the array against which the selection is\n being made.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of items::\n\n >>> sel = np.zeros_like(z, dtype=bool)\n >>> sel[1, 1] = True\n >>> sel[4, 4] = True\n >>> z.set_mask_selection(sel, 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1]])\n\n For convenience, this functionality is also available via the `vindex` property.\n E.g.::\n\n >>> z.vindex[sel] = 2\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 2]])\n\n Notes\n -----\n Mask indexing is a form of vectorized or inner indexing, and is equivalent to\n coordinate indexing. 
Internally the mask array is converted to coordinate\n arrays by calling `np.nonzero`.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n set_coordinate_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = MaskIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def _set_basic_selection_zd(self, selection, value, fields=None):\n # special case __setitem__ for zero-dimensional array\n\n # check selection is valid\n selection = ensure_tuple(selection)\n if selection not in ((), (Ellipsis,)):\n err_too_many_indices(selection, self._shape)\n\n # check fields\n check_fields(fields, self._dtype)\n fields = check_no_multi_fields(fields)\n\n # obtain key for chunk\n ckey = self._chunk_key((0,))\n\n # setup chunk\n try:\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n chunk = np.zeros((), dtype=self._dtype)\n if self._fill_value is not None:\n chunk.fill(self._fill_value)\n\n else:\n # decode chunk\n chunk = self._decode_chunk(cdata).copy()\n\n # set value\n if fields:\n chunk[fields][selection] = value\n else:\n chunk[selection] = value\n\n # encode and store\n cdata = self._encode_chunk(chunk)\n self.chunk_store[ckey] = cdata\n\n def _set_basic_selection_nd(self, selection, value, fields=None):\n # implementation of __setitem__ for array with at least one dimension\n\n # setup indexer\n indexer = BasicIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def _set_selection(self, indexer, value, fields=None):\n\n # We iterate over all chunks which overlap the selection and thus contain data\n # that needs to be replaced. Each chunk is processed in turn, extracting the\n # necessary data from the value array and storing into the chunk array.\n\n # N.B., it is an important optimisation that we only visit chunks which overlap\n # the selection. 
This minimises the number of iterations in the main for loop.\n\n # check fields are sensible\n check_fields(fields, self._dtype)\n fields = check_no_multi_fields(fields)\n\n # determine indices of chunks overlapping the selection\n sel_shape = indexer.shape\n\n # check value shape\n if sel_shape == ():\n # setting a single item\n pass\n elif is_scalar(value, self._dtype):\n # setting a scalar value\n pass\n else:\n if not hasattr(value, 'shape'):\n value = np.asanyarray(value)\n check_array_shape('value', value, sel_shape)\n\n # iterate over chunks in range\n for chunk_coords, chunk_selection, out_selection in indexer:\n\n # extract data to store\n if sel_shape == ():\n chunk_value = value\n elif is_scalar(value, self._dtype):\n chunk_value = value\n else:\n chunk_value = value[out_selection]\n # handle missing singleton dimensions\n if indexer.drop_axes:\n item = [slice(None)] * self.ndim\n for a in indexer.drop_axes:\n item[a] = np.newaxis\n item = tuple(item)\n chunk_value = chunk_value[item]\n\n # put data\n self._chunk_setitem(chunk_coords, chunk_selection, chunk_value, fields=fields)\n\n def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,\n drop_axes=None, fields=None):\n \"\"\"Obtain part or whole of a chunk.\n\n Parameters\n ----------\n chunk_coords : tuple of ints\n Indices of the chunk.\n chunk_selection : selection\n Location of region within the chunk to extract.\n out : ndarray\n Array to store result in.\n out_selection : selection\n Location of region within output array to store results in.\n drop_axes : tuple of ints\n Axes to squeeze out of the chunk.\n fields\n TODO\n\n \"\"\"\n\n assert len(chunk_coords) == len(self._cdata_shape)\n\n out_is_ndarray = True\n try:\n out = ensure_ndarray(out)\n except TypeError:\n out_is_ndarray = False\n\n # obtain key for chunk\n ckey = self._chunk_key(chunk_coords)\n\n try:\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n if self._fill_value is not None:\n if fields:\n fill_value = self._fill_value[fields]\n else:\n fill_value = self._fill_value\n out[out_selection] = fill_value\n\n else:\n\n if (out_is_ndarray and\n not fields and\n is_contiguous_selection(out_selection) and\n is_total_slice(chunk_selection, self._chunks) and\n not self._filters and\n self._dtype != object):\n\n dest = out[out_selection]\n write_direct = (\n dest.flags.writeable and (\n (self._order == 'C' and dest.flags.c_contiguous) or\n (self._order == 'F' and dest.flags.f_contiguous)\n )\n )\n\n if write_direct:\n\n # optimization: we want the whole chunk, and the destination is\n # contiguous, so we can decompress directly from the chunk\n # into the destination array\n\n if self._compressor:\n self._compressor.decode(cdata, dest)\n else:\n chunk = ensure_ndarray(cdata).view(self._dtype)\n chunk = chunk.reshape(self._chunks, order=self._order)\n np.copyto(dest, chunk)\n return\n\n # decode chunk\n chunk = self._decode_chunk(cdata)\n\n # select data from chunk\n if fields:\n chunk = chunk[fields]\n tmp = chunk[chunk_selection]\n if drop_axes:\n tmp = np.squeeze(tmp, axis=drop_axes)\n\n # store selected data in output\n out[out_selection] = tmp\n\n def _chunk_setitem(self, chunk_coords, chunk_selection, value, fields=None):\n \"\"\"Replace part or whole of a chunk.\n\n Parameters\n ----------\n chunk_coords : tuple of ints\n Indices of the chunk.\n chunk_selection : tuple of slices\n Location of region within the chunk.\n value : scalar or ndarray\n Value to set.\n\n 
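fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.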
\"\"\"\n\n if self._synchronizer is None:\n # no synchronization\n lock = nolock\n else:\n # synchronize on the chunk\n ckey = self._chunk_key(chunk_coords)\n lock = self._synchronizer[ckey]\n\n with lock:\n self._chunk_setitem_nosync(chunk_coords, chunk_selection, value,\n fields=fields)\n\n def _chunk_setitem_nosync(self, chunk_coords, chunk_selection, value, fields=None):\n\n # obtain key for chunk storage\n ckey = self._chunk_key(chunk_coords)\n\n if is_total_slice(chunk_selection, self._chunks) and not fields:\n # totally replace chunk\n\n # optimization: we are completely replacing the chunk, so no need\n # to access the existing chunk data\n\n if is_scalar(value, self._dtype):\n\n # setup array filled with value\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n chunk.fill(value)\n\n else:\n\n # ensure array is contiguous\n chunk = value.astype(self._dtype, order=self._order, copy=False)\n\n else:\n # partially replace the contents of this chunk\n\n try:\n\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n\n # chunk not initialized\n if self._fill_value is not None:\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n chunk.fill(self._fill_value)\n elif self._dtype == object:\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n else:\n # N.B., use zeros here so any region beyond the array has consistent\n # and compressible data\n chunk = np.zeros(self._chunks, dtype=self._dtype, order=self._order)\n\n else:\n\n # decode chunk\n chunk = self._decode_chunk(cdata)\n if not chunk.flags.writeable:\n chunk = chunk.copy(order='K')\n\n # modify\n if fields:\n # N.B., currently multi-field assignment is not supported in numpy, so\n # this only works for a single field\n chunk[fields][chunk_selection] = value\n else:\n chunk[chunk_selection] = value\n\n # encode chunk\n cdata = self._encode_chunk(chunk)\n\n # store\n self.chunk_store[ckey] = cdata\n\n def _chunk_key(self, chunk_coords):\n return self._key_prefix + '.'.join(map(str, chunk_coords))\n\n def _decode_chunk(self, cdata):\n\n # decompress\n if self._compressor:\n chunk = self._compressor.decode(cdata)\n else:\n chunk = cdata\n\n # apply filters\n if self._filters:\n for f in reversed(self._filters):\n chunk = f.decode(chunk)\n\n # view as numpy array with correct dtype\n chunk = ensure_ndarray(chunk)\n # special case object dtype, because incorrect handling can lead to\n # segfaults and other bad things happening\n if self._dtype != object:\n chunk = chunk.view(self._dtype)\n elif chunk.dtype != object:\n # If we end up here, someone must have hacked around with the filters.\n # We cannot deal with object arrays unless there is an object\n # codec in the filter chain, i.e., a filter that converts from object\n # array to something else during encoding, and converts back to object\n # array during decoding.\n raise RuntimeError('cannot read object array without object codec')\n\n # ensure correct chunk shape\n chunk = chunk.reshape(-1, order='A')\n chunk = chunk.reshape(self._chunks, order=self._order)\n\n return chunk\n\n def _encode_chunk(self, chunk):\n\n # apply filters\n if self._filters:\n for f in self._filters:\n chunk = f.encode(chunk)\n\n # check object encoding\n if ensure_ndarray(chunk).dtype == object:\n raise RuntimeError('cannot write object array without object codec')\n\n # compress\n if self._compressor:\n cdata = self._compressor.encode(chunk)\n else:\n cdata = chunk\n\n # ensure in-memory data is immutable and 
easy to compare\n if isinstance(self.chunk_store, dict):\n cdata = ensure_bytes(cdata)\n\n return cdata\n\n def __repr__(self):\n t = type(self)\n r = '<{}.{}'.format(t.__module__, t.__name__)\n if self.name:\n r += ' %r' % self.name\n r += ' %s' % str(self.shape)\n r += ' %s' % self.dtype\n if self._read_only:\n r += ' read-only'\n r += '>'\n return r\n\n @property\n def info(self):\n \"\"\"Report some diagnostic information about the array.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.zeros(1000000, chunks=100000, dtype='i4')\n >>> z.info\n Type : zarr.core.Array\n Data type : int32\n Shape : (1000000,)\n Chunk shape : (100000,)\n Order : C\n Read-only : False\n Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n Store type : builtins.dict\n No. bytes : 4000000 (3.8M)\n No. bytes stored : ...\n Storage ratio : ...\n Chunks initialized : 0/10\n\n \"\"\"\n return self._info_reporter\n\n def info_items(self):\n return self._synchronized_op(self._info_items_nosync)\n\n def _info_items_nosync(self):\n\n def typestr(o):\n return '{}.{}'.format(type(o).__module__, type(o).__name__)\n\n def bytestr(n):\n if n > 2**10:\n return '{} ({})'.format(n, human_readable_size(n))\n else:\n return str(n)\n\n items = []\n\n # basic info\n if self.name is not None:\n items += [('Name', self.name)]\n items += [\n ('Type', typestr(self)),\n ('Data type', '%s' % self.dtype),\n ('Shape', str(self.shape)),\n ('Chunk shape', str(self.chunks)),\n ('Order', self.order),\n ('Read-only', str(self.read_only)),\n ]\n\n # filters\n if self.filters:\n for i, f in enumerate(self.filters):\n items += [('Filter [%s]' % i, repr(f))]\n\n # compressor\n items += [('Compressor', repr(self.compressor))]\n\n # synchronizer\n if self._synchronizer is not None:\n items += [('Synchronizer type', typestr(self._synchronizer))]\n\n # storage info\n items += [('Store type', typestr(self._store))]\n if self._chunk_store is not None:\n items += [('Chunk store type', typestr(self._chunk_store))]\n items += [('No. bytes', bytestr(self.nbytes))]\n if self.nbytes_stored > 0:\n items += [\n ('No. bytes stored', bytestr(self.nbytes_stored)),\n ('Storage ratio', '%.1f' % (self.nbytes / self.nbytes_stored)),\n ]\n items += [\n ('Chunks initialized', '{}/{}'.format(self.nchunks_initialized, self.nchunks))\n ]\n\n return items\n\n def digest(self, hashname=\"sha1\"):\n \"\"\"\n Compute a checksum for the data. Default uses sha1 for speed.\n\n Examples\n --------\n >>> import binascii\n >>> import zarr\n >>> z = zarr.empty(shape=(10000, 10000), chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'041f90bc7a571452af4f850a8ca2c6cddfa8a1ac'\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'7162d416d26a68063b66ed1f30e0a866e4abed60'\n >>> z = zarr.zeros(shape=(10000, 10000), dtype=\"u1\", chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'cb387af37410ae5a3222e893cf3373e4e4f22816'\n \"\"\"\n\n h = hashlib.new(hashname)\n\n for i in itertools.product(*[range(s) for s in self.cdata_shape]):\n h.update(self.chunk_store.get(self._chunk_key(i), b\"\"))\n\n h.update(self.store.get(self._key_prefix + array_meta_key, b\"\"))\n\n h.update(self.store.get(self.attrs.key, b\"\"))\n\n checksum = h.digest()\n\n return checksum\n\n def hexdigest(self, hashname=\"sha1\"):\n \"\"\"\n Compute a checksum for the data. 
Default uses sha1 for speed.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.empty(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.hexdigest()\n '041f90bc7a571452af4f850a8ca2c6cddfa8a1ac'\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.hexdigest()\n '7162d416d26a68063b66ed1f30e0a866e4abed60'\n >>> z = zarr.zeros(shape=(10000, 10000), dtype=\"u1\", chunks=(1000, 1000))\n >>> z.hexdigest()\n 'cb387af37410ae5a3222e893cf3373e4e4f22816'\n \"\"\"\n\n checksum = binascii.hexlify(self.digest(hashname=hashname))\n\n # This is a bytes object on Python 3 and we want a str.\n if type(checksum) is not str:\n checksum = checksum.decode('utf8')\n\n return checksum\n\n def __getstate__(self):\n return (self._store, self._path, self._read_only, self._chunk_store,\n self._synchronizer, self._cache_metadata, self._attrs.cache)\n\n def __setstate__(self, state):\n self.__init__(*state)\n\n def _synchronized_op(self, f, *args, **kwargs):\n\n if self._synchronizer is None:\n # no synchronization\n lock = nolock\n\n else:\n # synchronize on the array\n mkey = self._key_prefix + array_meta_key\n lock = self._synchronizer[mkey]\n\n with lock:\n self._refresh_metadata_nosync()\n result = f(*args, **kwargs)\n\n return result\n\n def _write_op(self, f, *args, **kwargs):\n\n # guard condition\n if self._read_only:\n raise ReadOnlyError()\n\n return self._synchronized_op(f, *args, **kwargs)\n\n def resize(self, *args):\n \"\"\"Change the shape of the array by growing or shrinking one or more\n dimensions.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.shape\n (10000, 10000)\n >>> z.resize(20000, 10000)\n >>> z.shape\n (20000, 10000)\n >>> z.resize(30000, 1000)\n >>> z.shape\n (30000, 1000)\n\n Notes\n -----\n When resizing an array, the data are not rearranged in any way.\n\n If one or more dimensions are shrunk, any chunks falling outside the\n new array shape will be deleted from the underlying store.\n\n \"\"\"\n\n return self._write_op(self._resize_nosync, *args)\n\n def _resize_nosync(self, *args):\n\n # normalize new shape argument\n old_shape = self._shape\n new_shape = normalize_resize_args(old_shape, *args)\n old_cdata_shape = self._cdata_shape\n\n # update metadata\n self._shape = new_shape\n self._flush_metadata_nosync()\n\n # determine the new number and arrangement of chunks\n chunks = self._chunks\n new_cdata_shape = tuple(math.ceil(s / c)\n for s, c in zip(new_shape, chunks))\n\n # remove any chunks not within range\n chunk_store = self.chunk_store\n for cidx in itertools.product(*[range(n) for n in old_cdata_shape]):\n if all(i < c for i, c in zip(cidx, new_cdata_shape)):\n pass # keep the chunk\n else:\n key = self._chunk_key(cidx)\n try:\n del chunk_store[key]\n except KeyError:\n # chunk not initialized\n pass\n\n def append(self, data, axis=0):\n \"\"\"Append `data` to `axis`.\n\n Parameters\n ----------\n data : array_like\n Data to be appended.\n axis : int\n Axis along which to append.\n\n Returns\n -------\n new_shape : tuple\n\n Notes\n -----\n The size of all dimensions other than `axis` must match between this\n array and `data`.\n\n Examples\n --------\n >>> import numpy as np\n >>> import zarr\n >>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)\n >>> z = zarr.array(a, chunks=(1000, 100))\n >>> z.shape\n (10000, 1000)\n >>> z.append(a)\n (20000, 1000)\n >>> z.append(np.vstack([a, a]), axis=1)\n (20000, 2000)\n >>> z.shape\n (20000, 2000)\n\n \"\"\"\n return 
self._write_op(self._append_nosync, data, axis=axis)\n\n def _append_nosync(self, data, axis=0):\n\n # ensure data is array-like\n if not hasattr(data, 'shape'):\n data = np.asanyarray(data)\n\n # ensure shapes are compatible for non-append dimensions\n self_shape_preserved = tuple(s for i, s in enumerate(self._shape)\n if i != axis)\n data_shape_preserved = tuple(s for i, s in enumerate(data.shape)\n if i != axis)\n if self_shape_preserved != data_shape_preserved:\n raise ValueError('shape of data to append is not compatible with the array; '\n 'all dimensions must match except for the dimension being '\n 'appended')\n\n # remember old shape\n old_shape = self._shape\n\n # determine new shape\n new_shape = tuple(\n self._shape[i] if i != axis else self._shape[i] + data.shape[i]\n for i in range(len(self._shape))\n )\n\n # resize\n self._resize_nosync(new_shape)\n\n # store data\n # noinspection PyTypeChecker\n append_selection = tuple(\n slice(None) if i != axis else slice(old_shape[i], new_shape[i])\n for i in range(len(self._shape))\n )\n self[append_selection] = data\n\n return new_shape\n\n def view(self, shape=None, chunks=None, dtype=None,\n fill_value=None, filters=None, read_only=None,\n synchronizer=None):\n \"\"\"Return an array sharing the same data.\n\n Parameters\n ----------\n shape : int or tuple of ints\n Array shape.\n chunks : int or tuple of ints, optional\n Chunk shape.\n dtype : string or dtype, optional\n NumPy dtype.\n fill_value : object\n Default value to use for uninitialized portions of the array.\n filters : sequence, optional\n Sequence of filters to use to encode chunk data prior to\n compression.\n read_only : bool, optional\n True if array should be protected against modification.\n synchronizer : object, optional\n Array synchronizer.\n\n Notes\n -----\n WARNING: This is an experimental feature and should be used with care.\n There are plenty of ways to generate errors and/or cause data\n corruption.\n\n Examples\n --------\n\n Bypass filters:\n\n >>> import zarr\n >>> import numpy as np\n >>> np.random.seed(42)\n >>> labels = ['female', 'male']\n >>> data = np.random.choice(labels, size=10000)\n >>> filters = [zarr.Categorize(labels=labels,\n ... dtype=data.dtype,\n ... 
astype='u1')]\n >>> a = zarr.array(data, chunks=1000, filters=filters)\n >>> a[:]\n array(['female', 'male', 'female', ..., 'male', 'male', 'female'],\n dtype='<U6')\n >>> v = a.view(dtype='u1', filters=[])\n >>> v.is_view\n True\n >>> v[:]\n array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)\n\n Views can be used to modify data:\n\n >>> x = v[:]\n >>> x.sort()\n >>> v[:] = x\n >>> v[:]\n array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)\n >>> a[:]\n array(['female', 'female', 'female', ..., 'male', 'male', 'male'],\n dtype='<U6')\n\n View as a different dtype with the same item size:\n\n >>> data = np.random.randint(0, 2, size=10000, dtype='u1')\n >>> a = zarr.array(data, chunks=1000)\n >>> a[:]\n array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)\n >>> v = a.view(dtype=bool)\n >>> v[:]\n array([False, False, True, ..., True, False, False])\n >>> np.all(a[:].view(dtype=bool) == v[:])\n True\n\n An array can be viewed with a dtype with a different item size, however\n some care is needed to adjust the shape and chunk shape so that chunk\n data is interpreted correctly:\n\n >>> data = np.arange(10000, dtype='u2')\n >>> a = zarr.array(data, chunks=1000)\n >>> a[:10]\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)\n >>> v = a.view(dtype='u1', shape=20000, chunks=2000)\n >>> v[:10]\n array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)\n >>> np.all(a[:].view('u1') == v[:])\n True\n\n Change fill value for uninitialized chunks:\n\n >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')\n >>> a[:]\n array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)\n >>> v = a.view(fill_value=42)\n >>> v[:]\n array([42, 42, 42, ..., 42, 42, 42], dtype=int8)\n\n Note that resizing or appending to views is not permitted:\n\n >>> a = zarr.empty(10000)\n >>> v = a.view()\n >>> try:\n ... v.resize(20000)\n ... except PermissionError as e:\n ... print(e)\n operation not permitted for views\n\n \"\"\"\n\n store = self._store\n chunk_store = self._chunk_store\n path = self._path\n if read_only is None:\n read_only = self._read_only\n if synchronizer is None:\n synchronizer = self._synchronizer\n a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only,\n synchronizer=synchronizer, cache_metadata=True)\n a._is_view = True\n\n # allow override of some properties\n if dtype is None:\n dtype = self._dtype\n else:\n dtype = np.dtype(dtype)\n a._dtype = dtype\n if shape is None:\n shape = self._shape\n else:\n shape = normalize_shape(shape)\n a._shape = shape\n if chunks is not None:\n chunks = normalize_chunks(chunks, shape, dtype.itemsize)\n a._chunks = chunks\n if fill_value is not None:\n a._fill_value = fill_value\n if filters is not None:\n a._filters = filters\n\n return a\n\n def astype(self, dtype):\n \"\"\"Returns a view that does on the fly type conversion of the underlying data.\n\n Parameters\n ----------\n dtype : string or dtype\n NumPy dtype.\n\n Notes\n -----\n This method returns a new Array object which is a view on the same\n underlying chunk data. Modifying any data via the view is currently\n not permitted and will result in an error. 
This is an experimental\n feature and its behavior is subject to change in the future.\n\n See Also\n --------\n Array.view\n\n Examples\n --------\n\n >>> import zarr\n >>> import numpy as np\n >>> data = np.arange(100, dtype=np.uint8)\n >>> a = zarr.array(data, chunks=10)\n >>> a[:]\n array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,\n 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,\n 96, 97, 98, 99], dtype=uint8)\n >>> v = a.astype(np.float32)\n >>> v.is_view\n True\n >>> v[:]\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],\n dtype=float32)\n \"\"\"\n\n dtype = np.dtype(dtype)\n\n filters = []\n if self._filters:\n filters.extend(self._filters)\n filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))\n\n return self.view(filters=filters, dtype=dtype, read_only=True)\n", "import atexit\nimport os\nimport pickle\nimport shutil\nimport unittest\nfrom itertools import zip_longest\nfrom tempfile import mkdtemp, mktemp\n\nimport numpy as np\nimport pytest\nfrom numcodecs import (BZ2, JSON, LZ4, Blosc, Categorize, Delta,\n FixedScaleOffset, GZip, MsgPack, Pickle, VLenArray,\n VLenBytes, VLenUTF8, Zlib)\nfrom numcodecs.compat import ensure_bytes, ensure_ndarray\nfrom numcodecs.tests.common import greetings\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\n\nfrom zarr.core import Array\nfrom zarr.meta import json_loads\nfrom zarr.n5 import N5Store, n5_keywords\nfrom zarr.storage import (ABSStore, DBMStore, DirectoryStore, LMDBStore,\n LRUStoreCache, NestedDirectoryStore, SQLiteStore,\n atexit_rmglob, atexit_rmtree, init_array, init_group)\nfrom zarr.util import buffer_size\nfrom zarr.tests.util import skip_test_env_var\n\n\n# noinspection PyMethodMayBeStatic\nclass TestArray(unittest.TestCase):\n\n def test_array_init(self):\n\n # normal initialization\n store = dict()\n init_array(store, shape=100, chunks=10, dtype='<f8')\n a = Array(store)\n assert isinstance(a, Array)\n assert (100,) == a.shape\n assert (10,) == a.chunks\n assert '' == a.path\n assert a.name is None\n assert a.basename is None\n assert store is a.store\n assert \"8fecb7a17ea1493d9c1430d04437b4f5b0b34985\" == a.hexdigest()\n\n # initialize at path\n store = dict()\n init_array(store, shape=100, chunks=10, path='foo/bar', dtype='<f8')\n a = Array(store, path='foo/bar')\n assert isinstance(a, Array)\n assert (100,) == a.shape\n assert (10,) == a.chunks\n assert 'foo/bar' == a.path\n assert '/foo/bar' == a.name\n assert 'bar' == a.basename\n assert store is a.store\n assert \"8fecb7a17ea1493d9c1430d04437b4f5b0b34985\" == a.hexdigest()\n\n # store not initialized\n store = dict()\n with pytest.raises(ValueError):\n Array(store)\n\n # group is in the way\n store = dict()\n init_group(store, path='baz')\n with pytest.raises(ValueError):\n 
Array(store, path='baz')\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n kwargs.setdefault('compressor', Zlib(level=1))\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_store_has_text_keys(self):\n # Initialize array\n np.random.seed(42)\n z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[])\n z[:] = np.random.random(z.shape)\n\n expected_type = str\n\n for k in z.chunk_store.keys():\n if not isinstance(k, expected_type): # pragma: no cover\n pytest.fail(\"Non-text key: %s\" % repr(k))\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_store_has_binary_values(self):\n # Initialize array\n np.random.seed(42)\n z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[])\n z[:] = np.random.random(z.shape)\n\n for v in z.chunk_store.values():\n try:\n ensure_ndarray(v)\n except TypeError: # pragma: no cover\n pytest.fail(\"Non-bytes-like value: %s\" % repr(v))\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_store_has_bytes_values(self):\n # Test that many stores do hold bytes values.\n # Though this is not a strict requirement.\n # Should be disabled by any stores that fail this as needed.\n\n # Initialize array\n np.random.seed(42)\n z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[])\n z[:] = np.random.random(z.shape)\n\n # Check in-memory array only contains `bytes`\n assert all([isinstance(v, bytes) for v in z.chunk_store.values()])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_nbytes_stored(self):\n\n # dict as store\n z = self.create_array(shape=1000, chunks=100)\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n z[:] = 42\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n\n # mess with store\n try:\n z.store[z._key_prefix + 'foo'] = list(range(10))\n assert -1 == z.nbytes_stored\n except TypeError:\n pass\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # noinspection PyStatementEffect\n def test_array_1d(self):\n a = np.arange(1050)\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype)\n\n # check properties\n assert len(a) == len(z)\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert (100,) == z.chunks\n assert a.nbytes == z.nbytes\n assert 11 == z.nchunks\n assert 0 == z.nchunks_initialized\n assert (11,) == z.cdata_shape\n\n # check empty\n b = z[:]\n assert isinstance(b, np.ndarray)\n assert a.shape == b.shape\n assert a.dtype == b.dtype\n\n # check attributes\n z.attrs['foo'] = 'bar'\n assert 'bar' == z.attrs['foo']\n\n # set data\n z[:] = a\n\n # check properties\n assert a.nbytes == z.nbytes\n assert 11 == z.nchunks\n assert 11 == z.nchunks_initialized\n\n # check slicing\n assert_array_equal(a, np.array(z))\n assert_array_equal(a, z[:])\n assert_array_equal(a, z[...])\n # noinspection PyTypeChecker\n assert_array_equal(a, z[slice(None)])\n assert_array_equal(a[:10], z[:10])\n assert_array_equal(a[10:20], z[10:20])\n assert_array_equal(a[-10:], z[-10:])\n assert_array_equal(a[:10, ...], z[:10, ...])\n assert_array_equal(a[10:20, ...], z[10:20, ...])\n assert_array_equal(a[-10:, ...], z[-10:, ...])\n assert_array_equal(a[..., :10], z[..., :10])\n 
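# additional illustrative checks (assumption: basic __getitem__ supports slices with\n # a positive step, mirroring NumPy; negative steps are not exercised here)\n assert_array_equal(a[::2], z[::2])\n assert_array_equal(a[10:100:3], z[10:100:3])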
assert_array_equal(a[..., 10:20], z[..., 10:20])\n assert_array_equal(a[..., -10:], z[..., -10:])\n # ...across chunk boundaries...\n assert_array_equal(a[:110], z[:110])\n assert_array_equal(a[190:310], z[190:310])\n assert_array_equal(a[-110:], z[-110:])\n # single item\n assert a[0] == z[0]\n assert a[-1] == z[-1]\n # unusual integer items\n assert a[42] == z[np.int64(42)]\n assert a[42] == z[np.int32(42)]\n assert a[42] == z[np.uint64(42)]\n assert a[42] == z[np.uint32(42)]\n # too many indices\n with pytest.raises(IndexError):\n z[:, :]\n with pytest.raises(IndexError):\n z[0, :]\n with pytest.raises(IndexError):\n z[:, 0]\n with pytest.raises(IndexError):\n z[0, 0]\n # only single ellipsis allowed\n with pytest.raises(IndexError):\n z[..., ...]\n\n # check partial assignment\n b = np.arange(1e5, 2e5)\n z[190:310] = b[190:310]\n assert_array_equal(a[:190], z[:190])\n assert_array_equal(b[190:310], z[190:310])\n assert_array_equal(a[310:], z[310:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_1d_fill_value(self):\n for fill_value in -1, 0, 1, 10:\n\n a = np.arange(1050)\n f = np.empty_like(a)\n f.fill(fill_value)\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype,\n fill_value=fill_value)\n z[190:310] = a[190:310]\n\n assert_array_equal(f[:190], z[:190])\n assert_array_equal(a[190:310], z[190:310])\n assert_array_equal(f[310:], z[310:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_1d_set_scalar(self):\n # test setting the contents of an array with a scalar value\n\n # setup\n a = np.zeros(100)\n z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype)\n z[:] = a\n assert_array_equal(a, z[:])\n\n for value in -1, 0, 1, 10:\n a[15:35] = value\n z[15:35] = value\n assert_array_equal(a, z[:])\n a[:] = value\n z[:] = value\n assert_array_equal(a, z[:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_1d_selections(self):\n # light test here, full tests in test_indexing\n\n # setup\n a = np.arange(1050)\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype)\n z[:] = a\n\n # get\n assert_array_equal(a[50:150], z.get_orthogonal_selection(slice(50, 150)))\n assert_array_equal(a[50:150], z.oindex[50: 150])\n ix = [99, 100, 101]\n bix = np.zeros_like(a, dtype=bool)\n bix[ix] = True\n assert_array_equal(a[ix], z.get_orthogonal_selection(ix))\n assert_array_equal(a[ix], z.oindex[ix])\n assert_array_equal(a[ix], z.get_coordinate_selection(ix))\n assert_array_equal(a[ix], z.vindex[ix])\n assert_array_equal(a[bix], z.get_mask_selection(bix))\n assert_array_equal(a[bix], z.oindex[bix])\n assert_array_equal(a[bix], z.vindex[bix])\n\n # set\n z.set_orthogonal_selection(slice(50, 150), 1)\n assert_array_equal(1, z[50:150])\n z.oindex[50:150] = 2\n assert_array_equal(2, z[50:150])\n z.set_orthogonal_selection(ix, 3)\n assert_array_equal(3, z.get_coordinate_selection(ix))\n z.oindex[ix] = 4\n assert_array_equal(4, z.oindex[ix])\n z.set_coordinate_selection(ix, 5)\n assert_array_equal(5, z.get_coordinate_selection(ix))\n z.vindex[ix] = 6\n assert_array_equal(6, z.vindex[ix])\n z.set_mask_selection(bix, 7)\n assert_array_equal(7, z.get_mask_selection(bix))\n z.vindex[bix] = 8\n assert_array_equal(8, z.vindex[bix])\n z.oindex[bix] = 9\n assert_array_equal(9, z.oindex[bix])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # noinspection PyStatementEffect\n def test_array_2d(self):\n a = np.arange(10000).reshape((1000, 10))\n z = self.create_array(shape=a.shape, chunks=(100, 2), dtype=a.dtype)\n\n # 
check properties\n assert len(a) == len(z)\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert (100, 2) == z.chunks\n assert 0 == z.nchunks_initialized\n assert (10, 5) == z.cdata_shape\n\n # set data\n z[:] = a\n\n # check properties\n assert a.nbytes == z.nbytes\n assert 50 == z.nchunks_initialized\n\n # check array-like\n assert_array_equal(a, np.array(z))\n\n # check slicing\n\n # total slice\n assert_array_equal(a, z[:])\n assert_array_equal(a, z[...])\n # noinspection PyTypeChecker\n assert_array_equal(a, z[slice(None)])\n\n # slice first dimension\n assert_array_equal(a[:10], z[:10])\n assert_array_equal(a[10:20], z[10:20])\n assert_array_equal(a[-10:], z[-10:])\n assert_array_equal(a[:10, :], z[:10, :])\n assert_array_equal(a[10:20, :], z[10:20, :])\n assert_array_equal(a[-10:, :], z[-10:, :])\n assert_array_equal(a[:10, ...], z[:10, ...])\n assert_array_equal(a[10:20, ...], z[10:20, ...])\n assert_array_equal(a[-10:, ...], z[-10:, ...])\n assert_array_equal(a[:10, :, ...], z[:10, :, ...])\n assert_array_equal(a[10:20, :, ...], z[10:20, :, ...])\n assert_array_equal(a[-10:, :, ...], z[-10:, :, ...])\n\n # slice second dimension\n assert_array_equal(a[:, :2], z[:, :2])\n assert_array_equal(a[:, 2:4], z[:, 2:4])\n assert_array_equal(a[:, -2:], z[:, -2:])\n assert_array_equal(a[..., :2], z[..., :2])\n assert_array_equal(a[..., 2:4], z[..., 2:4])\n assert_array_equal(a[..., -2:], z[..., -2:])\n assert_array_equal(a[:, ..., :2], z[:, ..., :2])\n assert_array_equal(a[:, ..., 2:4], z[:, ..., 2:4])\n assert_array_equal(a[:, ..., -2:], z[:, ..., -2:])\n\n # slice both dimensions\n assert_array_equal(a[:10, :2], z[:10, :2])\n assert_array_equal(a[10:20, 2:4], z[10:20, 2:4])\n assert_array_equal(a[-10:, -2:], z[-10:, -2:])\n\n # slicing across chunk boundaries\n assert_array_equal(a[:110], z[:110])\n assert_array_equal(a[190:310], z[190:310])\n assert_array_equal(a[-110:], z[-110:])\n assert_array_equal(a[:110, :], z[:110, :])\n assert_array_equal(a[190:310, :], z[190:310, :])\n assert_array_equal(a[-110:, :], z[-110:, :])\n assert_array_equal(a[:, :3], z[:, :3])\n assert_array_equal(a[:, 3:7], z[:, 3:7])\n assert_array_equal(a[:, -3:], z[:, -3:])\n assert_array_equal(a[:110, :3], z[:110, :3])\n assert_array_equal(a[190:310, 3:7], z[190:310, 3:7])\n assert_array_equal(a[-110:, -3:], z[-110:, -3:])\n\n # single row/col/item\n assert_array_equal(a[0], z[0])\n assert_array_equal(a[-1], z[-1])\n assert_array_equal(a[:, 0], z[:, 0])\n assert_array_equal(a[:, -1], z[:, -1])\n assert a[0, 0] == z[0, 0]\n assert a[-1, -1] == z[-1, -1]\n\n # too many indices\n with pytest.raises(IndexError):\n z[:, :, :]\n with pytest.raises(IndexError):\n z[0, :, :]\n with pytest.raises(IndexError):\n z[:, 0, :]\n with pytest.raises(IndexError):\n z[:, :, 0]\n with pytest.raises(IndexError):\n z[0, 0, 0]\n # only single ellipsis allowed\n with pytest.raises(IndexError):\n z[..., ...]\n\n # check partial assignment\n b = np.arange(10000, 20000).reshape((1000, 10))\n z[190:310, 3:7] = b[190:310, 3:7]\n assert_array_equal(a[:190], z[:190])\n assert_array_equal(a[:, :3], z[:, :3])\n assert_array_equal(b[190:310, 3:7], z[190:310, 3:7])\n assert_array_equal(a[310:], z[310:])\n assert_array_equal(a[:, 7:], z[:, 7:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_2d_edge_case(self):\n # this fails with filters - chunks extend beyond edge of array, messes with delta\n # filter if no fill value?\n shape = 1000, 10\n chunks = 300, 30\n dtype = 'i8'\n z = 
self.create_array(shape=shape, dtype=dtype, chunks=chunks)\n z[:] = 0\n expect = np.zeros(shape, dtype=dtype)\n actual = z[:]\n assert_array_equal(expect, actual)\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_2d_partial(self):\n z = self.create_array(shape=(1000, 10), chunks=(100, 2), dtype='i4',\n fill_value=0)\n\n # check partial assignment, single row\n c = np.arange(z.shape[1])\n z[0, :] = c\n with pytest.raises(ValueError):\n # N.B., NumPy allows this, but we'll be strict for now\n z[2:3] = c\n with pytest.raises(ValueError):\n # N.B., NumPy allows this, but we'll be strict for now\n z[-1:] = c\n z[2:3] = c[None, :]\n z[-1:] = c[None, :]\n assert_array_equal(c, z[0, :])\n assert_array_equal(c, z[2, :])\n assert_array_equal(c, z[-1, :])\n\n # check partial assignment, single column\n d = np.arange(z.shape[0])\n z[:, 0] = d\n with pytest.raises(ValueError):\n z[:, 2:3] = d\n with pytest.raises(ValueError):\n z[:, -1:] = d\n z[:, 2:3] = d[:, None]\n z[:, -1:] = d[:, None]\n assert_array_equal(d, z[:, 0])\n assert_array_equal(d, z[:, 2])\n assert_array_equal(d, z[:, -1])\n\n # check single item assignment\n z[0, 0] = -1\n z[2, 2] = -1\n z[-1, -1] = -1\n assert -1 == z[0, 0]\n assert -1 == z[2, 2]\n assert -1 == z[-1, -1]\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_order(self):\n\n # 1D\n a = np.arange(1050)\n for order in 'C', 'F':\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype,\n order=order)\n assert order == z.order\n if order == 'F':\n assert z[:].flags.f_contiguous\n else:\n assert z[:].flags.c_contiguous\n z[:] = a\n assert_array_equal(a, z[:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # 2D\n a = np.arange(10000).reshape((100, 100))\n for order in 'C', 'F':\n z = self.create_array(shape=a.shape, chunks=(10, 10),\n dtype=a.dtype, order=order)\n assert order == z.order\n if order == 'F':\n assert z[:].flags.f_contiguous\n else:\n assert z[:].flags.c_contiguous\n z[:] = a\n actual = z[:]\n assert_array_equal(a, actual)\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_setitem_data_not_shared(self):\n # check that data don't end up being shared with another array\n # https://github.com/alimanfoo/zarr/issues/79\n z = self.create_array(shape=20, chunks=10, dtype='i4')\n a = np.arange(20, dtype='i4')\n z[:] = a\n assert_array_equal(z[:], np.arange(20, dtype='i4'))\n a[:] = 0\n assert_array_equal(z[:], np.arange(20, dtype='i4'))\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert '063b02ff8d9d3bab6da932ad5828b506ef0a6578' == z.hexdigest()\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert 'f97b84dc9ffac807415f750100108764e837bb82' == z.hexdigest()\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert 'c7190ad2bea1e9d2e73eaa2d3ca9187be1ead261' == z.hexdigest()\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '14470724dca6c1837edddedc490571b6a7f270bc' == z.hexdigest()\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # Check basic 1-D array with attributes\n z = 
self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '2a1046dd99b914459b3e86be9dde05027a07d209' == z.hexdigest()\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_resize_1d(self):\n\n z = self.create_array(shape=105, chunks=10, dtype='i4',\n fill_value=0)\n a = np.arange(105, dtype='i4')\n z[:] = a\n assert (105,) == z.shape\n assert (105,) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10,) == z.chunks\n assert_array_equal(a, z[:])\n\n z.resize(205)\n assert (205,) == z.shape\n assert (205,) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10,) == z.chunks\n assert_array_equal(a, z[:105])\n assert_array_equal(np.zeros(100, dtype='i4'), z[105:])\n\n z.resize(55)\n assert (55,) == z.shape\n assert (55,) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10,) == z.chunks\n assert_array_equal(a[:55], z[:])\n\n # via shape setter\n z.shape = (105,)\n assert (105,) == z.shape\n assert (105,) == z[:].shape\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_resize_2d(self):\n\n z = self.create_array(shape=(105, 105), chunks=(10, 10), dtype='i4',\n fill_value=0)\n a = np.arange(105*105, dtype='i4').reshape((105, 105))\n z[:] = a\n assert (105, 105) == z.shape\n assert (105, 105) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10, 10) == z.chunks\n assert_array_equal(a, z[:])\n\n z.resize((205, 205))\n assert (205, 205) == z.shape\n assert (205, 205) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10, 10) == z.chunks\n assert_array_equal(a, z[:105, :105])\n assert_array_equal(np.zeros((100, 205), dtype='i4'), z[105:, :])\n assert_array_equal(np.zeros((205, 100), dtype='i4'), z[:, 105:])\n\n z.resize((55, 55))\n assert (55, 55) == z.shape\n assert (55, 55) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10, 10) == z.chunks\n assert_array_equal(a[:55, :55], z[:])\n\n z.resize((55, 1))\n assert (55, 1) == z.shape\n assert (55, 1) == z[:].shape\n assert np.dtype('i4') == z.dtype\n assert np.dtype('i4') == z[:].dtype\n assert (10, 10) == z.chunks\n assert_array_equal(a[:55, :1], z[:])\n\n # via shape setter\n z.shape = (105, 105)\n assert (105, 105) == z.shape\n assert (105, 105) == z[:].shape\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_append_1d(self):\n\n a = np.arange(105)\n z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype)\n z[:] = a\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert (10,) == z.chunks\n assert_array_equal(a, z[:])\n\n b = np.arange(105, 205)\n e = np.append(a, b)\n z.append(b)\n assert e.shape == z.shape\n assert e.dtype == z.dtype\n assert (10,) == z.chunks\n assert_array_equal(e, z[:])\n\n # check append handles array-like\n c = [1, 2, 3]\n f = np.append(e, c)\n z.append(c)\n assert f.shape == z.shape\n assert f.dtype == z.dtype\n assert (10,) == z.chunks\n assert_array_equal(f, z[:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_append_2d(self):\n\n a = np.arange(105*105, dtype='i4').reshape((105, 105))\n z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype)\n z[:] = a\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert (10, 10) == z.chunks\n actual = z[:]\n assert_array_equal(a, actual)\n\n b = np.arange(105*105, 2*105*105, 
dtype='i4').reshape((105, 105))\n e = np.append(a, b, axis=0)\n z.append(b)\n assert e.shape == z.shape\n assert e.dtype == z.dtype\n assert (10, 10) == z.chunks\n actual = z[:]\n assert_array_equal(e, actual)\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_append_2d_axis(self):\n\n a = np.arange(105*105, dtype='i4').reshape((105, 105))\n z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype)\n z[:] = a\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert (10, 10) == z.chunks\n assert_array_equal(a, z[:])\n\n b = np.arange(105*105, 2*105*105, dtype='i4').reshape((105, 105))\n e = np.append(a, b, axis=1)\n z.append(b, axis=1)\n assert e.shape == z.shape\n assert e.dtype == z.dtype\n assert (10, 10) == z.chunks\n assert_array_equal(e, z[:])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_append_bad_shape(self):\n a = np.arange(100)\n z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype)\n z[:] = a\n b = a.reshape(10, 10)\n with pytest.raises(ValueError):\n z.append(b)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_read_only(self):\n\n z = self.create_array(shape=1000, chunks=100)\n assert not z.read_only\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=1000, chunks=100, read_only=True)\n assert z.read_only\n with pytest.raises(PermissionError):\n z[:] = 42\n with pytest.raises(PermissionError):\n z.resize(2000)\n with pytest.raises(PermissionError):\n z.append(np.arange(1000))\n with pytest.raises(PermissionError):\n z.set_basic_selection(Ellipsis, 42)\n with pytest.raises(PermissionError):\n z.set_orthogonal_selection([0, 1, 2], 42)\n with pytest.raises(PermissionError):\n z.oindex[[0, 1, 2]] = 42\n with pytest.raises(PermissionError):\n z.set_coordinate_selection([0, 1, 2], 42)\n with pytest.raises(PermissionError):\n z.vindex[[0, 1, 2]] = 42\n with pytest.raises(PermissionError):\n z.set_mask_selection(np.ones(z.shape, dtype=bool), 42)\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_pickle(self):\n\n # setup array\n z = self.create_array(shape=1000, chunks=100, dtype=int, cache_metadata=False,\n cache_attrs=False)\n shape = z.shape\n chunks = z.chunks\n dtype = z.dtype\n compressor_config = None\n if z.compressor:\n compressor_config = z.compressor.get_config()\n fill_value = z.fill_value\n cache_metadata = z._cache_metadata\n attrs_cache = z.attrs.cache\n a = np.random.randint(0, 1000, 1000)\n z[:] = a\n\n # round trip through pickle\n dump = pickle.dumps(z)\n # some stores cannot be opened twice at the same time, need to close\n # store before can round-trip through pickle\n if hasattr(z.store, 'close'):\n z.store.close()\n z2 = pickle.loads(dump)\n\n # verify\n assert shape == z2.shape\n assert chunks == z2.chunks\n assert dtype == z2.dtype\n if z2.compressor:\n assert compressor_config == z2.compressor.get_config()\n assert fill_value == z2.fill_value\n assert cache_metadata == z2._cache_metadata\n assert attrs_cache == z2.attrs.cache\n assert_array_equal(a, z2[:])\n\n if hasattr(z2.store, 'close'):\n z2.store.close()\n\n def test_np_ufuncs(self):\n z = self.create_array(shape=(100, 100), chunks=(10, 10))\n a = np.arange(10000).reshape(100, 100)\n z[:] = a\n\n assert np.sum(a) == np.sum(z)\n assert_array_equal(np.sum(a, axis=0), np.sum(z, axis=0))\n assert np.mean(a) == np.mean(z)\n assert_array_equal(np.mean(a, axis=1), np.mean(z, axis=1))\n condition = np.random.randint(0, 2, size=100, dtype=bool)\n assert_array_equal(np.compress(condition, a, 
axis=0),\n np.compress(condition, z, axis=0))\n indices = np.random.choice(100, size=50, replace=True)\n assert_array_equal(np.take(a, indices, axis=1),\n np.take(z, indices, axis=1))\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # use zarr array as indices or condition\n zc = self.create_array(shape=condition.shape, dtype=condition.dtype,\n chunks=10, filters=None)\n zc[:] = condition\n assert_array_equal(np.compress(condition, a, axis=0),\n np.compress(zc, a, axis=0))\n if hasattr(zc.store, 'close'):\n zc.store.close()\n\n zi = self.create_array(shape=indices.shape, dtype=indices.dtype,\n chunks=10, filters=None)\n zi[:] = indices\n # this triggers __array__() call with dtype argument\n assert_array_equal(np.take(a, indices, axis=1),\n np.take(a, zi, axis=1))\n if hasattr(zi.store, 'close'):\n zi.store.close()\n\n # noinspection PyStatementEffect\n def test_0len_dim_1d(self):\n # Test behaviour for 1D array with zero-length dimension.\n\n z = self.create_array(shape=0, fill_value=0)\n a = np.zeros(0)\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert a.size == z.size\n assert 0 == z.nchunks\n\n # cannot make a good decision when auto-chunking if a dimension has zero length,\n # fall back to 1 for now\n assert (1,) == z.chunks\n\n # check __getitem__\n assert isinstance(z[:], np.ndarray)\n assert_array_equal(a, np.array(z))\n assert_array_equal(a, z[:])\n assert_array_equal(a, z[...])\n assert_array_equal(a[0:0], z[0:0])\n with pytest.raises(IndexError):\n z[0]\n\n # check __setitem__\n # these should succeed but do nothing\n z[:] = 42\n z[...] = 42\n # this should error\n with pytest.raises(IndexError):\n z[0] = 42\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # noinspection PyStatementEffect\n def test_0len_dim_2d(self):\n # Test behavioud for 2D array with a zero-length dimension.\n\n z = self.create_array(shape=(10, 0), fill_value=0)\n a = np.zeros((10, 0))\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n assert a.size == z.size\n assert 0 == z.nchunks\n\n # cannot make a good decision when auto-chunking if a dimension has zero length,\n # fall back to 1 for now\n assert (10, 1) == z.chunks\n\n # check __getitem__\n assert isinstance(z[:], np.ndarray)\n assert_array_equal(a, np.array(z))\n assert_array_equal(a, z[:])\n assert_array_equal(a, z[...])\n assert_array_equal(a[0], z[0])\n assert_array_equal(a[0, 0:0], z[0, 0:0])\n assert_array_equal(a[0, :], z[0, :])\n assert_array_equal(a[0, 0:0], z[0, 0:0])\n with pytest.raises(IndexError):\n z[:, 0]\n\n # check __setitem__\n # these should succeed but do nothing\n z[:] = 42\n z[...] 
= 42\n z[0, :] = 42\n # this should error\n with pytest.raises(IndexError):\n z[:, 0] = 42\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # noinspection PyStatementEffect\n def test_array_0d(self):\n # test behaviour for array with 0 dimensions\n\n # setup\n a = np.zeros(())\n z = self.create_array(shape=(), dtype=a.dtype, fill_value=0)\n\n # check properties\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.size == z.size\n assert a.dtype == z.dtype\n assert a.nbytes == z.nbytes\n with pytest.raises(TypeError):\n len(z)\n assert () == z.chunks\n assert 1 == z.nchunks\n assert (1,) == z.cdata_shape\n # compressor always None - no point in compressing a single value\n assert z.compressor is None\n\n # check __getitem__\n b = z[...]\n assert isinstance(b, np.ndarray)\n assert a.shape == b.shape\n assert a.dtype == b.dtype\n assert_array_equal(a, np.array(z))\n assert_array_equal(a, z[...])\n assert a[()] == z[()]\n with pytest.raises(IndexError):\n z[0]\n with pytest.raises(IndexError):\n z[:]\n\n # check __setitem__\n z[...] = 42\n assert 42 == z[()]\n z[()] = 43\n assert 43 == z[()]\n with pytest.raises(IndexError):\n z[0] = 42\n with pytest.raises(IndexError):\n z[:] = 42\n with pytest.raises(ValueError):\n z[...] = np.array([1, 2, 3])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_nchunks_initialized(self):\n\n z = self.create_array(shape=100, chunks=10)\n assert 0 == z.nchunks_initialized\n # manually put something into the store to confuse matters\n z.store['foo'] = b'bar'\n assert 0 == z.nchunks_initialized\n z[:] = 42\n assert 10 == z.nchunks_initialized\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_array_dtype_shape(self):\n\n dt = \"(2, 2)f4\"\n # setup some data\n d = np.array([((0, 1),\n (1, 2)),\n ((1, 2),\n (2, 3)),\n ((2, 3),\n (3, 4))],\n dtype=dt)\n\n for a in (d, d[:0]):\n for fill_value in None, 0:\n z = self.create_array(shape=a.shape[:-2], chunks=2, dtype=dt, fill_value=fill_value)\n assert len(a) == len(z)\n if fill_value is not None:\n assert fill_value == z.fill_value\n z[...] = a\n assert_array_equal(a, z[...])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def check_structured_array(self, d, fill_values):\n for a in (d, d[:0]):\n for fill_value in fill_values:\n z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value)\n assert len(a) == len(z)\n assert a.shape == z.shape\n assert a.dtype == z.dtype\n\n # check use of fill value before array is initialised with data\n if fill_value is not None:\n if fill_value == b'':\n # numpy 1.14 compatibility\n np_fill_value = np.array(fill_value, dtype=a.dtype.str).view(a.dtype)[()]\n else:\n np_fill_value = np.array(fill_value, dtype=a.dtype)[()]\n assert np_fill_value == z.fill_value\n if len(a):\n assert np_fill_value == z[0]\n assert np_fill_value == z[-1]\n empty = np.empty_like(a)\n empty[:] = np_fill_value\n assert empty[0] == z[0]\n assert_array_equal(empty[0:2], z[0:2])\n assert_array_equal(empty, z[...])\n for f in a.dtype.names:\n assert_array_equal(empty[f], z[f])\n\n # store data in array\n z[...] 
= a\n\n # check stored data\n if len(a):\n assert a[0] == z[0]\n assert a[-1] == z[-1]\n assert_array_equal(a[0:2], z[0:2])\n assert_array_equal(a, z[...])\n for f in a.dtype.names:\n assert_array_equal(a[f], z[f])\n\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_structured_array(self):\n d = np.array([(b'aaa', 1, 4.2),\n (b'bbb', 2, 8.4),\n (b'ccc', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n fill_values = None, b'', (b'zzz', 42, 16.8)\n self.check_structured_array(d, fill_values)\n\n def test_structured_array_subshapes(self):\n d = np.array([(0, ((0, 1, 2), (1, 2, 3)), b'aaa'),\n (1, ((1, 2, 3), (2, 3, 4)), b'bbb'),\n (2, ((2, 3, 4), (3, 4, 5)), b'ccc')],\n dtype=[('foo', 'i8'), ('bar', '(2, 3)f4'), ('baz', 'S3')])\n fill_values = None, b'', (0, ((0, 0, 0), (1, 1, 1)), b'zzz')\n self.check_structured_array(d, fill_values)\n\n def test_structured_array_nested(self):\n d = np.array([(0, (0, ((0, 1), (1, 2), (2, 3)), 0), b'aaa'),\n (1, (1, ((1, 2), (2, 3), (3, 4)), 1), b'bbb'),\n (2, (2, ((2, 3), (3, 4), (4, 5)), 2), b'ccc')],\n dtype=[('foo', 'i8'), ('bar', [('foo', 'i4'), ('bar', '(3, 2)f4'),\n ('baz', 'u1')]), ('baz', 'S3')])\n fill_values = None, b'', (0, (0, ((0, 0), (1, 1), (2, 2)), 0), b'zzz')\n self.check_structured_array(d, fill_values)\n\n def test_dtypes(self):\n\n # integers\n for dtype in 'u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8':\n z = self.create_array(shape=10, chunks=3, dtype=dtype)\n assert z.dtype == np.dtype(dtype)\n a = np.arange(z.shape[0], dtype=dtype)\n z[:] = a\n assert_array_equal(a, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # floats\n for dtype in 'f2', 'f4', 'f8':\n z = self.create_array(shape=10, chunks=3, dtype=dtype)\n assert z.dtype == np.dtype(dtype)\n a = np.linspace(0, 1, z.shape[0], dtype=dtype)\n z[:] = a\n assert_array_almost_equal(a, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # complex\n for dtype in 'c8', 'c16':\n z = self.create_array(shape=10, chunks=3, dtype=dtype)\n assert z.dtype == np.dtype(dtype)\n a = np.linspace(0, 1, z.shape[0], dtype=dtype)\n a -= 1j * a\n z[:] = a\n assert_array_almost_equal(a, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # datetime, timedelta\n for base_type in 'Mm':\n for resolution in 'D', 'us', 'ns':\n dtype = '{}8[{}]'.format(base_type, resolution)\n z = self.create_array(shape=100, dtype=dtype, fill_value=0)\n assert z.dtype == np.dtype(dtype)\n a = np.random.randint(np.iinfo('i8').min, np.iinfo('i8').max,\n size=z.shape[0],\n dtype='i8').view(dtype)\n z[:] = a\n assert_array_equal(a, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # check that datetime generic units are not allowed\n with pytest.raises(ValueError):\n self.create_array(shape=100, dtype='M8')\n with pytest.raises(ValueError):\n self.create_array(shape=100, dtype='m8')\n\n def test_object_arrays(self):\n\n # an object_codec is required for object arrays\n with pytest.raises(ValueError):\n self.create_array(shape=10, chunks=3, dtype=object)\n\n # an object_codec is required for object arrays, but allow to be provided via\n # filters to maintain API backwards compatibility\n with pytest.warns(FutureWarning):\n z = self.create_array(shape=10, chunks=3, dtype=object, filters=[MsgPack()])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # create an object array using msgpack\n z = self.create_array(shape=10, chunks=3, dtype=object, object_codec=MsgPack())\n z[0] = 'foo'\n assert z[0] == 'foo'\n z[1] = b'bar'\n assert z[1] == b'bar'\n z[2] = 1\n 
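# ints, lists and dicts should round-trip unchanged through the MsgPack\n # object_codec, as the following asserts verify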
assert z[2] == 1\n z[3] = [2, 4, 6, 'baz']\n assert z[3] == [2, 4, 6, 'baz']\n z[4] = {'a': 'b', 'c': 'd'}\n assert z[4] == {'a': 'b', 'c': 'd'}\n a = z[:]\n assert a.dtype == object\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # create an object array using pickle\n z = self.create_array(shape=10, chunks=3, dtype=object, object_codec=Pickle())\n z[0] = 'foo'\n assert z[0] == 'foo'\n z[1] = b'bar'\n assert z[1] == b'bar'\n z[2] = 1\n assert z[2] == 1\n z[3] = [2, 4, 6, 'baz']\n assert z[3] == [2, 4, 6, 'baz']\n z[4] = {'a': 'b', 'c': 'd'}\n assert z[4] == {'a': 'b', 'c': 'd'}\n a = z[:]\n assert a.dtype == object\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # create an object array using JSON\n z = self.create_array(shape=10, chunks=3, dtype=object, object_codec=JSON())\n z[0] = 'foo'\n assert z[0] == 'foo'\n # z[1] = b'bar'\n # assert z[1] == b'bar' # not supported for JSON\n z[2] = 1\n assert z[2] == 1\n z[3] = [2, 4, 6, 'baz']\n assert z[3] == [2, 4, 6, 'baz']\n z[4] = {'a': 'b', 'c': 'd'}\n assert z[4] == {'a': 'b', 'c': 'd'}\n a = z[:]\n assert a.dtype == object\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_object_arrays_vlen_text(self):\n\n data = np.array(greetings * 1000, dtype=object)\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenUTF8())\n z[0] = 'foo'\n assert z[0] == 'foo'\n z[1] = 'bar'\n assert z[1] == 'bar'\n z[2] = 'baz'\n assert z[2] == 'baz'\n z[:] = data\n a = z[:]\n assert a.dtype == object\n assert_array_equal(data, a)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # convenience API\n z = self.create_array(shape=data.shape, dtype=str)\n assert z.dtype == object\n assert isinstance(z.filters[0], VLenUTF8)\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=MsgPack())\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=JSON())\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=Pickle())\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=data.shape, dtype=object,\n object_codec=Categorize(greetings, dtype=object))\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_object_arrays_vlen_bytes(self):\n\n greetings_bytes = [g.encode('utf8') for g in greetings]\n data = np.array(greetings_bytes * 1000, dtype=object)\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenBytes())\n z[0] = b'foo'\n assert z[0] == b'foo'\n z[1] = b'bar'\n assert z[1] == b'bar'\n z[2] = b'baz'\n assert z[2] == b'baz'\n z[:] = data\n a = z[:]\n assert a.dtype == object\n assert_array_equal(data, a)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # convenience API\n z = self.create_array(shape=data.shape, dtype=bytes)\n assert z.dtype == object\n assert isinstance(z.filters[0], VLenBytes)\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n z = self.create_array(shape=data.shape, dtype=object, object_codec=Pickle())\n z[:] = data\n assert_array_equal(data, z[:])\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_object_arrays_vlen_array(self):\n\n data = 
np.array([np.array([1, 3, 7]),\n np.array([5]),\n np.array([2, 8, 12])] * 1000, dtype=object)\n\n def compare_arrays(expected, actual, item_dtype):\n assert isinstance(actual, np.ndarray)\n assert actual.dtype == object\n assert actual.shape == expected.shape\n for ev, av in zip(expected.flat, actual.flat):\n assert isinstance(av, np.ndarray)\n assert_array_equal(ev, av)\n assert av.dtype == item_dtype\n\n codecs = VLenArray(int), VLenArray('<u4')\n for codec in codecs:\n z = self.create_array(shape=data.shape, dtype=object, object_codec=codec)\n z[0] = np.array([4, 7])\n assert_array_equal(np.array([4, 7]), z[0])\n z[:] = data\n a = z[:]\n assert a.dtype == object\n compare_arrays(data, a, codec.dtype)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # convenience API\n for item_type in 'int', '<u4':\n z = self.create_array(shape=data.shape, dtype='array:{}'.format(item_type))\n assert z.dtype == object\n assert isinstance(z.filters[0], VLenArray)\n assert z.filters[0].dtype == np.dtype(item_type)\n z[:] = data\n compare_arrays(data, z[:], np.dtype(item_type))\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_object_arrays_danger(self):\n\n # do something dangerous - manually force an object array with no object codec\n z = self.create_array(shape=5, chunks=2, dtype=object, fill_value=0,\n object_codec=MsgPack())\n z._filters = None # wipe filters\n with pytest.raises(RuntimeError):\n z[0] = 'foo'\n with pytest.raises(RuntimeError):\n z[:] = 42\n if hasattr(z.store, 'close'):\n z.store.close()\n\n # do something else dangerous\n data = greetings * 10\n for compressor in Zlib(1), Blosc():\n z = self.create_array(shape=len(data), chunks=30, dtype=object,\n object_codec=Categorize(greetings,\n dtype=object),\n compressor=compressor)\n z[:] = data\n v = z.view(filters=[])\n with pytest.raises(RuntimeError):\n # noinspection PyStatementEffect\n v[:]\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_object_codec_warnings(self):\n\n with pytest.warns(UserWarning):\n # provide object_codec, but not object dtype\n z = self.create_array(shape=10, chunks=5, dtype='i4', object_codec=JSON())\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_zero_d_iter(self):\n a = np.array(1, dtype=int)\n z = self.create_array(shape=a.shape, dtype=int)\n z[...] 
= a\n with pytest.raises(TypeError):\n # noinspection PyStatementEffect\n list(a)\n with pytest.raises(TypeError):\n # noinspection PyStatementEffect\n list(z)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_iter(self):\n params = (\n ((1,), (1,)),\n ((2,), (1,)),\n ((1,), (2,)),\n ((3,), (3,)),\n ((1000,), (100,)),\n ((100,), (1000,)),\n ((1, 100), (1, 1)),\n ((1, 0), (1, 1)),\n ((0, 1), (1, 1)),\n ((0, 1), (2, 1)),\n ((100, 1), (3, 1)),\n ((100, 100), (10, 10)),\n ((10, 10, 10), (3, 3, 3)),\n )\n for shape, chunks in params:\n z = self.create_array(shape=shape, chunks=chunks, dtype=int)\n a = np.arange(np.product(shape)).reshape(shape)\n z[:] = a\n for expect, actual in zip_longest(a, z):\n assert_array_equal(expect, actual)\n if hasattr(z.store, 'close'):\n z.store.close()\n\n def test_compressors(self):\n compressors = [\n None, BZ2(), Blosc(), LZ4(), Zlib(), GZip()\n ]\n if LZMA:\n compressors.append(LZMA())\n for compressor in compressors:\n a = self.create_array(shape=1000, chunks=100, compressor=compressor)\n a[0:100] = 1\n assert np.all(a[0:100] == 1)\n a[:] = 1\n assert np.all(a[:] == 1)\n if hasattr(a.store, 'close'):\n a.store.close()\n\n def test_endian(self):\n dtype = np.dtype('float32')\n a1 = self.create_array(shape=1000, chunks=100, dtype=dtype.newbyteorder('<'))\n a1[:] = 1\n x1 = a1[:]\n a2 = self.create_array(shape=1000, chunks=100, dtype=dtype.newbyteorder('>'))\n a2[:] = 1\n x2 = a2[:]\n assert_array_equal(x1, x2)\n if hasattr(a1.store, 'close'):\n a1.store.close()\n if hasattr(a2.store, 'close'):\n a2.store.close()\n\n def test_attributes(self):\n a = self.create_array(shape=10, chunks=10, dtype='i8')\n a.attrs['foo'] = 'bar'\n assert a.attrs.key in a.store\n attrs = json_loads(a.store[a.attrs.key])\n assert 'foo' in attrs and attrs['foo'] == 'bar'\n\n a.attrs['bar'] = 'foo'\n assert a.attrs.key in a.store\n attrs = json_loads(a.store[a.attrs.key])\n assert 'foo' in attrs and attrs['foo'] == 'bar'\n assert 'bar' in attrs and attrs['bar'] == 'foo'\n if hasattr(a.store, 'close'):\n a.store.close()\n\n\nclass TestArrayWithPath(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = dict()\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, path='foo/bar', **kwargs)\n return Array(store, path='foo/bar', read_only=read_only,\n cache_metadata=cache_metadata, cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert 'f710da18d45d38d4aaf2afd7fb822fdd73d02957' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '1437428e69754b1e1a38bd7fc9e43669577620db' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '6c530b6b9d73e108cc5ee7b6be3d552cc994bdbe' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '4c0a76fb1222498e09dcd92f7f9221d6cea8b40e' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '05b0663ffe1785f38d3a459dec17e57a18f254af' == z.hexdigest()\n\n def test_nbytes_stored(self):\n\n # dict as store\n z = self.create_array(shape=1000, chunks=100)\n expect_nbytes_stored = 
sum(buffer_size(v)\n for k, v in z.store.items()\n if k.startswith('foo/bar/'))\n assert expect_nbytes_stored == z.nbytes_stored\n z[:] = 42\n expect_nbytes_stored = sum(buffer_size(v)\n for k, v in z.store.items()\n if k.startswith('foo/bar/'))\n assert expect_nbytes_stored == z.nbytes_stored\n\n # mess with store\n z.store[z._key_prefix + 'foo'] = list(range(10))\n assert -1 == z.nbytes_stored\n\n\nclass TestArrayWithChunkStore(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = dict()\n # separate chunk store\n chunk_store = dict()\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, chunk_store=chunk_store, **kwargs)\n return Array(store, read_only=read_only, chunk_store=chunk_store,\n cache_metadata=cache_metadata, cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert 'f710da18d45d38d4aaf2afd7fb822fdd73d02957' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '1437428e69754b1e1a38bd7fc9e43669577620db' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '6c530b6b9d73e108cc5ee7b6be3d552cc994bdbe' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '4c0a76fb1222498e09dcd92f7f9221d6cea8b40e' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '05b0663ffe1785f38d3a459dec17e57a18f254af' == z.hexdigest()\n\n def test_nbytes_stored(self):\n\n z = self.create_array(shape=1000, chunks=100)\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n expect_nbytes_stored += sum(buffer_size(v)\n for v in z.chunk_store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n z[:] = 42\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n expect_nbytes_stored += sum(buffer_size(v)\n for v in z.chunk_store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n\n # mess with store\n z.chunk_store[z._key_prefix + 'foo'] = list(range(10))\n assert -1 == z.nbytes_stored\n\n\nclass TestArrayWithDirectoryStore(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n path = mkdtemp()\n atexit.register(shutil.rmtree, path)\n store = DirectoryStore(path)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_nbytes_stored(self):\n\n # dict as store\n z = self.create_array(shape=1000, chunks=100)\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n z[:] = 42\n expect_nbytes_stored = sum(buffer_size(v) for v in z.store.values())\n assert expect_nbytes_stored == z.nbytes_stored\n\n\n@skip_test_env_var(\"ZARR_TEST_ABS\")\nclass TestArrayWithABSStore(TestArray):\n\n @staticmethod\n def absstore():\n asb = pytest.importorskip(\"azure.storage.blob\")\n blob_client = asb.BlockBlobService(is_emulated=True)\n blob_client.delete_container('test')\n 
blob_client.create_container('test')\n store = ABSStore(container='test', account_name='foo', account_key='bar',\n blob_service_kwargs={'is_emulated': True})\n store.rmdir()\n return store\n\n def create_array(self, read_only=False, **kwargs):\n store = self.absstore()\n kwargs.setdefault('compressor', Zlib(1))\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n\nclass TestArrayWithNestedDirectoryStore(TestArrayWithDirectoryStore):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n path = mkdtemp()\n atexit.register(shutil.rmtree, path)\n store = NestedDirectoryStore(path)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n\nclass TestArrayWithN5Store(TestArrayWithDirectoryStore):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n path = mkdtemp()\n atexit.register(shutil.rmtree, path)\n store = N5Store(path)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_array_0d(self):\n # test behaviour for array with 0 dimensions\n\n # setup\n a = np.zeros(())\n z = self.create_array(shape=(), dtype=a.dtype, fill_value=0)\n\n # check properties\n assert a.ndim == z.ndim\n assert a.shape == z.shape\n assert a.size == z.size\n assert a.dtype == z.dtype\n assert a.nbytes == z.nbytes\n with pytest.raises(TypeError):\n len(z)\n assert () == z.chunks\n assert 1 == z.nchunks\n assert (1,) == z.cdata_shape\n # compressor always None - no point in compressing a single value\n assert z.compressor.compressor_config is None\n\n # check __getitem__\n b = z[...]\n assert isinstance(b, np.ndarray)\n assert a.shape == b.shape\n assert a.dtype == b.dtype\n assert_array_equal(a, np.array(z))\n assert_array_equal(a, z[...])\n assert a[()] == z[()]\n with pytest.raises(IndexError):\n z[0]\n with pytest.raises(IndexError):\n z[:]\n\n # check __setitem__\n z[...] = 42\n assert 42 == z[()]\n z[()] = 43\n assert 43 == z[()]\n with pytest.raises(IndexError):\n z[0] = 42\n with pytest.raises(IndexError):\n z[:] = 42\n with pytest.raises(ValueError):\n z[...] 
= np.array([1, 2, 3])\n\n def test_array_1d_fill_value(self):\n nvalues = 1050\n dtype = np.int32\n for fill_value in 0, None:\n a = np.arange(nvalues, dtype=dtype)\n f = np.empty_like(a)\n f.fill(fill_value or 0)\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype,\n fill_value=fill_value)\n z[190:310] = a[190:310]\n\n assert_array_equal(f[:190], z[:190])\n assert_array_equal(a[190:310], z[190:310])\n assert_array_equal(f[310:], z[310:])\n\n with pytest.raises(ValueError):\n z = self.create_array(shape=(nvalues,), chunks=100, dtype=dtype,\n fill_value=1)\n\n def test_array_order(self):\n\n # N5 only supports 'C' at the moment\n with pytest.raises(ValueError):\n self.create_array(shape=(10, 11), chunks=(10, 11), dtype='i8',\n order='F')\n\n # 1D\n a = np.arange(1050)\n z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype,\n order='C')\n assert z.order == 'C'\n assert z[:].flags.c_contiguous\n z[:] = a\n assert_array_equal(a, z[:])\n\n # 2D\n a = np.arange(10000).reshape((100, 100))\n z = self.create_array(shape=a.shape, chunks=(10, 10),\n dtype=a.dtype, order='C')\n\n assert z.order == 'C'\n assert z[:].flags.c_contiguous\n z[:] = a\n actual = z[:]\n assert_array_equal(a, actual)\n\n def test_structured_array(self):\n d = np.array([(b'aaa', 1, 4.2),\n (b'bbb', 2, 8.4),\n (b'ccc', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n fill_values = None, b'', (b'zzz', 42, 16.8)\n with pytest.raises(TypeError):\n self.check_structured_array(d, fill_values)\n\n def test_structured_array_subshapes(self):\n d = np.array([(0, ((0, 1, 2), (1, 2, 3)), b'aaa'),\n (1, ((1, 2, 3), (2, 3, 4)), b'bbb'),\n (2, ((2, 3, 4), (3, 4, 5)), b'ccc')],\n dtype=[('foo', 'i8'), ('bar', '(2, 3)f4'), ('baz', 'S3')])\n fill_values = None, b'', (0, ((0, 0, 0), (1, 1, 1)), b'zzz')\n with pytest.raises(TypeError):\n self.check_structured_array(d, fill_values)\n\n def test_structured_array_nested(self):\n d = np.array([(0, (0, ((0, 1), (1, 2), (2, 3)), 0), b'aaa'),\n (1, (1, ((1, 2), (2, 3), (3, 4)), 1), b'bbb'),\n (2, (2, ((2, 3), (3, 4), (4, 5)), 2), b'ccc')],\n dtype=[('foo', 'i8'), ('bar', [('foo', 'i4'), ('bar', '(3, 2)f4'),\n ('baz', 'u1')]), ('baz', 'S3')])\n fill_values = None, b'', (0, (0, ((0, 0), (1, 1), (2, 2)), 0), b'zzz')\n with pytest.raises(TypeError):\n self.check_structured_array(d, fill_values)\n\n def test_dtypes(self):\n\n # integers\n for dtype in 'u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8':\n z = self.create_array(shape=10, chunks=3, dtype=dtype)\n assert z.dtype == np.dtype(dtype)\n a = np.arange(z.shape[0], dtype=dtype)\n z[:] = a\n assert_array_equal(a, z[:])\n\n # floats\n for dtype in 'f2', 'f4', 'f8':\n z = self.create_array(shape=10, chunks=3, dtype=dtype)\n assert z.dtype == np.dtype(dtype)\n a = np.linspace(0, 1, z.shape[0], dtype=dtype)\n z[:] = a\n assert_array_almost_equal(a, z[:])\n\n # check that datetime generic units are not allowed\n with pytest.raises(ValueError):\n self.create_array(shape=100, dtype='M8')\n with pytest.raises(ValueError):\n self.create_array(shape=100, dtype='m8')\n\n def test_object_arrays(self):\n\n # an object_codec is required for object arrays\n with pytest.raises(ValueError):\n self.create_array(shape=10, chunks=3, dtype=object)\n\n # an object_codec is required for object arrays, but allow to be provided via\n # filters to maintain API backwards compatibility\n with pytest.raises(ValueError):\n with pytest.warns(FutureWarning):\n self.create_array(shape=10, chunks=3, dtype=object, filters=[MsgPack()])\n\n # create an 
object array using an object codec\n with pytest.raises(ValueError):\n self.create_array(shape=10, chunks=3, dtype=object, object_codec=MsgPack())\n\n def test_object_arrays_vlen_text(self):\n\n data = np.array(greetings * 1000, dtype=object)\n\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype=object, object_codec=VLenUTF8())\n\n # convenience API\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype=str)\n\n def test_object_arrays_vlen_bytes(self):\n\n greetings_bytes = [g.encode('utf8') for g in greetings]\n data = np.array(greetings_bytes * 1000, dtype=object)\n\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype=object, object_codec=VLenBytes())\n\n # convenience API\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype=bytes)\n\n def test_object_arrays_vlen_array(self):\n\n data = np.array([np.array([1, 3, 7]),\n np.array([5]),\n np.array([2, 8, 12])] * 1000, dtype=object)\n\n codecs = VLenArray(int), VLenArray('<u4')\n for codec in codecs:\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype=object, object_codec=codec)\n\n # convenience API\n for item_type in 'int', '<u4':\n with pytest.raises(ValueError):\n self.create_array(shape=data.shape, dtype='array:{}'.format(item_type))\n\n def test_object_arrays_danger(self):\n # Cannot hacking out object codec as N5 doesn't allow object codecs\n pass\n\n def test_attrs_n5_keywords(self):\n z = self.create_array(shape=(1050,), chunks=100, dtype='i4')\n for k in n5_keywords:\n with pytest.raises(ValueError):\n z.attrs[k] = \"\"\n\n def test_compressors(self):\n compressors = [\n None, BZ2(), Zlib(), GZip()\n ]\n if LZMA:\n compressors.append(LZMA())\n compressors.append(LZMA(preset=1))\n compressors.append(LZMA(preset=6))\n for compressor in compressors:\n a1 = self.create_array(shape=1000, chunks=100, compressor=compressor)\n a1[0:100] = 1\n assert np.all(a1[0:100] == 1)\n a1[:] = 1\n assert np.all(a1[:] == 1)\n\n compressors_warn = [\n Blosc()\n ]\n if LZMA:\n compressors_warn.append(LZMA(2)) # Try lzma.FORMAT_ALONE, which N5 doesn't support.\n for compressor in compressors_warn:\n with pytest.warns(RuntimeWarning):\n a2 = self.create_array(shape=1000, chunks=100, compressor=compressor)\n a2[0:100] = 1\n assert np.all(a2[0:100] == 1)\n a2[:] = 1\n assert np.all(a2[:] == 1)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert 'c6b83adfad999fbd865057531d749d87cf138f58' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert 'a3d6d187536ecc3a9dd6897df55d258e2f52f9c5' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert 'ec2e008525ae09616dbc1d2408cbdb42532005c8' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert 'b63f031031dcd5248785616edcb2d6fe68203c28' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '0cfc673215a8292a87f3c505e2402ce75243c601' == z.hexdigest()\n\n\nclass TestArrayWithDBMStore(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n path = mktemp(suffix='.anydbm')\n atexit.register(atexit_rmglob, path + '*')\n store = 
DBMStore(path, flag='n')\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_attrs=cache_attrs,\n cache_metadata=cache_metadata)\n\n def test_nbytes_stored(self):\n pass # not implemented\n\n\nclass TestArrayWithDBMStoreBerkeleyDB(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n bsddb3 = pytest.importorskip(\"bsddb3\")\n path = mktemp(suffix='.dbm')\n atexit.register(os.remove, path)\n store = DBMStore(path, flag='n', open=bsddb3.btopen)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_nbytes_stored(self):\n pass # not implemented\n\n\nclass TestArrayWithLMDBStore(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n pytest.importorskip(\"lmdb\")\n path = mktemp(suffix='.lmdb')\n atexit.register(atexit_rmtree, path)\n store = LMDBStore(path, buffers=True)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_store_has_bytes_values(self):\n pass # returns values as memoryviews/buffers instead of bytes\n\n def test_nbytes_stored(self):\n pass # not implemented\n\n\nclass TestArrayWithLMDBStoreNoBuffers(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n pytest.importorskip(\"lmdb\")\n path = mktemp(suffix='.lmdb')\n atexit.register(atexit_rmtree, path)\n store = LMDBStore(path, buffers=False)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_nbytes_stored(self):\n pass # not implemented\n\n\nclass TestArrayWithSQLiteStore(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n pytest.importorskip(\"sqlite3\")\n path = mktemp(suffix='.db')\n atexit.register(atexit_rmtree, path)\n store = SQLiteStore(path)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n kwargs.setdefault('compressor', Zlib(1))\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_nbytes_stored(self):\n pass # not implemented\n\n\nclass TestArrayWithNoCompressor(TestArray):\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n kwargs.setdefault('compressor', None)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert 'd3da3d485de4a5fcc6d91f9dfc6a7cba9720c561' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n 
assert '443b8dee512e42946cb63ff01d28e9bee8105a5f' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert 'b75eb90f68aa8ee1e29f2c542e851d3945066c54' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '42b6ae0d50ec361628736ab7e68fe5fefca22136' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert 'a0535f31c130f5e5ac66ba0713d1c1ceaebd089b' == z.hexdigest()\n\n\nclass TestArrayWithBZ2Compressor(TestArray):\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n compressor = BZ2(level=1)\n kwargs.setdefault('compressor', compressor)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert '33141032439fb1df5e24ad9891a7d845b6c668c8' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '44d719da065c88a412d609a5500ff41e07b331d6' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '37c7c46e5730bba37da5e518c9d75f0d774c5098' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '1e1bcaac63e4ef3c4a68f11672537131c627f168' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '86d7b9bf22dccbeaa22f340f38be506b55e76ff2' == z.hexdigest()\n\n\nclass TestArrayWithBloscCompressor(TestArray):\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n compressor = Blosc(cname='zstd', clevel=1, shuffle=1)\n kwargs.setdefault('compressor', compressor)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert '7ff2ae8511eac915fad311647c168ccfe943e788' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '962705c861863495e9ccb7be7735907aa15e85b5' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '74ed339cfe84d544ac023d085ea0cd6a63f56c4b' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert '90e30bdab745a9641cd0eb605356f531bc8ec1c3' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '95d40c391f167db8b1290e3c39d9bf741edacdf6' == z.hexdigest()\n\n\ntry:\n from numcodecs import LZMA\nexcept ImportError: # pragma: no cover\n LZMA = 
None\n\n\[email protected](LZMA is None, 'LZMA codec not available')\nclass TestArrayWithLZMACompressor(TestArray):\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n compressor = LZMA(preset=1)\n kwargs.setdefault('compressor', compressor)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert '93ecaa530a1162a9d48a3c1dcee4586ccfc59bae' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '04a9755a0cd638683531b7816c7fa4fbb6f577f2' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '9de97b5c49b38e68583ed701d7e8f4c94b6a8406' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert 'cde499f3dc945b4e97197ff8e3cf8188a1262c35' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert 'e2cf3afbf66ad0e28a2b6b68b1b07817c69aaee2' == z.hexdigest()\n\n\nclass TestArrayWithFilters(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = dict()\n dtype = kwargs.get('dtype', None)\n filters = [\n Delta(dtype=dtype),\n FixedScaleOffset(dtype=dtype, scale=1, offset=0),\n ]\n kwargs.setdefault('filters', filters)\n compressor = Zlib(1)\n kwargs.setdefault('compressor', compressor)\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_attrs=cache_attrs,\n cache_metadata=cache_metadata)\n\n def test_hexdigest(self):\n # Check basic 1-D array\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n assert 'b80367c5599d47110d42bd8886240c2f46620dba' == z.hexdigest()\n\n # Check basic 1-D array with different type\n z = self.create_array(shape=(1050,), chunks=100, dtype='<f4')\n assert '95a7b2471225e73199c9716d21e8d3dd6e5f6f2a' == z.hexdigest()\n\n # Check basic 2-D array\n z = self.create_array(shape=(20, 35,), chunks=10, dtype='<i4')\n assert '7300f1eb130cff5891630038fd99c28ef23d3a01' == z.hexdigest()\n\n # Check basic 1-D array with some data\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z[200:400] = np.arange(200, 400, dtype='i4')\n assert 'c649ad229bc5720258b934ea958570c2f354c2eb' == z.hexdigest()\n\n # Check basic 1-D array with attributes\n z = self.create_array(shape=(1050,), chunks=100, dtype='<i4')\n z.attrs['foo'] = 'bar'\n assert '62fc9236d78af18a5ec26c12eea1d33bce52501e' == z.hexdigest()\n\n def test_astype_no_filters(self):\n shape = (100,)\n dtype = np.dtype(np.int8)\n astype = np.dtype(np.float32)\n\n store = dict()\n init_array(store, shape=shape, chunks=10, dtype=dtype)\n\n data = np.arange(np.prod(shape), dtype=dtype).reshape(shape)\n\n z1 = Array(store)\n z1[...] 
= data\n z2 = z1.astype(astype)\n\n expected = data.astype(astype)\n assert_array_equal(expected, z2)\n assert z2.read_only\n\n def test_astype(self):\n shape = (100,)\n chunks = (10,)\n\n dtype = np.dtype(np.int8)\n astype = np.dtype(np.float32)\n\n data = np.arange(np.prod(shape), dtype=dtype).reshape(shape)\n\n z1 = self.create_array(shape=shape, chunks=chunks, dtype=dtype)\n z1[...] = data\n z2 = z1.astype(astype)\n\n expected = data.astype(astype)\n assert_array_equal(expected, z2)\n\n def test_array_dtype_shape(self):\n # skip this one, cannot do delta on unstructured array\n pass\n\n def test_structured_array(self):\n # skip this one, cannot do delta on structured array\n pass\n\n def test_structured_array_subshapes(self):\n # skip this one, cannot do delta on structured array\n pass\n\n def test_structured_array_nested(self):\n # skip this one, cannot do delta on structured array\n pass\n\n def test_dtypes(self):\n # skip this one, delta messes up floats\n pass\n\n def test_object_arrays(self):\n # skip this one, cannot use delta with objects\n pass\n\n def test_object_arrays_vlen_text(self):\n # skip this one, cannot use delta with objects\n pass\n\n def test_object_arrays_vlen_bytes(self):\n # skip this one, cannot use delta with objects\n pass\n\n def test_object_arrays_vlen_array(self):\n # skip this one, cannot use delta with objects\n pass\n\n def test_object_arrays_danger(self):\n # skip this one, cannot use delta with objects\n pass\n\n\n# custom store, does not support getsize()\nclass CustomMapping(object):\n\n def __init__(self):\n self.inner = dict()\n\n def keys(self):\n return self.inner.keys()\n\n def values(self):\n return self.inner.values()\n\n def get(self, item, default=None):\n try:\n return self.inner[item]\n except KeyError:\n return default\n\n def __getitem__(self, item):\n return self.inner[item]\n\n def __setitem__(self, item, value):\n self.inner[item] = ensure_bytes(value)\n\n def __delitem__(self, key):\n del self.inner[key]\n\n def __contains__(self, item):\n return item in self.inner\n\n\nclass TestArrayWithCustomMapping(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = CustomMapping()\n kwargs.setdefault('compressor', Zlib(1))\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_nbytes_stored(self):\n z = self.create_array(shape=1000, chunks=100)\n assert -1 == z.nbytes_stored\n z[:] = 42\n assert -1 == z.nbytes_stored\n\n\nclass TestArrayNoCache(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = dict()\n kwargs.setdefault('compressor', Zlib(level=1))\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_cache_metadata(self):\n a1 = self.create_array(shape=100, chunks=10, dtype='i1', cache_metadata=False)\n a2 = Array(a1.store, cache_metadata=True)\n assert a1.shape == a2.shape\n assert a1.size == a2.size\n assert a1.nbytes == a2.nbytes\n assert a1.nchunks == a2.nchunks\n\n # a1 is not caching so *will* see updates made via other objects\n a2.resize(200)\n assert (200,) == a2.shape\n assert 200 == a2.size\n assert 200 == a2.nbytes\n assert 20 == a2.nchunks\n assert a1.shape == a2.shape\n assert 
a1.size == a2.size\n assert a1.nbytes == a2.nbytes\n assert a1.nchunks == a2.nchunks\n\n a2.append(np.zeros(100))\n assert (300,) == a2.shape\n assert 300 == a2.size\n assert 300 == a2.nbytes\n assert 30 == a2.nchunks\n assert a1.shape == a2.shape\n assert a1.size == a2.size\n assert a1.nbytes == a2.nbytes\n assert a1.nchunks == a2.nchunks\n\n # a2 is caching so *will not* see updates made via other objects\n a1.resize(400)\n assert (400,) == a1.shape\n assert 400 == a1.size\n assert 400 == a1.nbytes\n assert 40 == a1.nchunks\n assert (300,) == a2.shape\n assert 300 == a2.size\n assert 300 == a2.nbytes\n assert 30 == a2.nchunks\n\n def test_cache_attrs(self):\n a1 = self.create_array(shape=100, chunks=10, dtype='i1', cache_attrs=False)\n a2 = Array(a1.store, cache_attrs=True)\n assert a1.attrs.asdict() == a2.attrs.asdict()\n\n # a1 is not caching so *will* see updates made via other objects\n a2.attrs['foo'] = 'xxx'\n a2.attrs['bar'] = 42\n assert a1.attrs.asdict() == a2.attrs.asdict()\n\n # a2 is caching so *will not* see updates made via other objects\n a1.attrs['foo'] = 'yyy'\n assert 'yyy' == a1.attrs['foo']\n assert 'xxx' == a2.attrs['foo']\n\n def test_object_arrays_danger(self):\n # skip this one as it only works if metadata are cached\n pass\n\n\nclass TestArrayWithStoreCache(TestArray):\n\n @staticmethod\n def create_array(read_only=False, **kwargs):\n store = LRUStoreCache(dict(), max_size=None)\n kwargs.setdefault('compressor', Zlib(level=1))\n cache_metadata = kwargs.pop('cache_metadata', True)\n cache_attrs = kwargs.pop('cache_attrs', True)\n init_array(store, **kwargs)\n return Array(store, read_only=read_only, cache_metadata=cache_metadata,\n cache_attrs=cache_attrs)\n\n def test_store_has_bytes_values(self):\n # skip as the cache has no control over how the store provides values\n pass\n" ]
[ [ "numpy.squeeze", "numpy.dtype", "numpy.asanyarray", "numpy.copyto", "numpy.zeros", "numpy.empty" ], [ "numpy.product", "numpy.take", "numpy.linspace", "numpy.dtype", "numpy.all", "numpy.zeros_like", "numpy.mean", "numpy.iinfo", "numpy.random.randint", "numpy.uint32", "numpy.arange", "numpy.empty_like", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "numpy.random.choice", "numpy.int64", "numpy.append", "numpy.array", "numpy.sum", "numpy.random.random", "numpy.random.seed", "numpy.int32", "numpy.compress", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.uint64", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
san99tiago/ML_BASICS
[ "ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f", "ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f" ]
[ "00_LIBRARIES/00_NUMPY/03_numpy_manipulations.py", "00_LIBRARIES/01_PANDAS/03_pandas_missing_data.py" ]
[ "# NUMPY MANIPULATIONS OF ARRAYS\n# Santiago Garcia Arango\n# -------------------------------------------------------------------------\nimport numpy as np\n\nmy_array = np.arange(1, 11) # [1,2,..,8,9,10]\nprint(\"my_array=\\n\", my_array, \"\\n\")\n\n\n# -----------------CHECKING CONDITIONS IN ARRAY ITEMS----------------------\n# FIRST WAY...\n# This is how we show boolean result of a desired condition of an array\nboolean_array = my_array > 5\nprint(\"my_array > 5 --> \", boolean_array, \"\\n\")\n# We can take advantage of the boolean_array, by calling the main array...\n# ...\"evaluated\" in the True statements of the boolean_array.\n# This will give us only the original array where the conditions are True\nprint(\"my_array[boolean_array] = \", my_array[boolean_array], \"\\n\")\n\n# SECOND WAY...\n# This previous two step process is usually done in one step!!!\n# Remark: This is the most common way to to this!!!\nprint(\"my_array[my_array>5] = \", my_array[my_array > 5], \"\\n\")\n\n\n# -----------------------CREATE MATRICES EASIER----------------------------\n# Example: create this matrix:\n# 1 1 1 1 1\n# 1 0 0 0 1\n# 1 0 9 0 1\n# 1 0 0 0 1\n# 1 1 1 1 1\n\ncool_matrix = np.ones((5, 5))\ncool_matrix[1:4, 1:4] = 0\ncool_matrix[2, 2] = 9\nprint(\"cool_matrix:\\n\", cool_matrix, \"\\n\")\n", "# HOW TO HANDLE MISSING DATA IN PANDAS\n# Santiago Garcia Arango, June 2020\n\nimport numpy as np\nimport pandas as pd\n\n# Create a dictionary with missing values (to show missing data examples)\ndictionary = {\n \"A\": [1, 2, np.nan, 5],\n \"B\": [5, np.nan, np.nan, 2],\n \"C\": [1, 2, 3, 4],\n \"D\": [3, np.nan, 3, 5]\n }\n\n# Create data_frame from the dictionary already created\ndata_frame = pd.DataFrame(dictionary)\nprint(\"data_frame=\\n\", data_frame, \"\\n\")\n\n# ----------------METHOD 1(delete rows/columns with NAN)----------------------\n# 1) Delete rows that have NAN values from the data_frame.\nprint(\"data_frame.dropna() [delete NAN rows]=\")\nprint(data_frame.dropna(), \"\\n\")\n\n# 2) Delete columns that have NAN values from the data_frame.\nprint(\"data_frame.dropna(axis=1) [delete NAN rows]=\")\nprint(data_frame.dropna(axis=1), \"\\n\")\n\n\n# ---------------------------METHOD 2(fill the NAN)--------------------------\n# Get the average(or mean) value of each data_frame column\naverage_1 = data_frame.mean()\nprint(\"average_1 (COLUMNS average)=\\n\", average_1, \"\\n\")\n\n# We can strategically fill the missing values (NAN) with the average value\nprint(\"data_frame.fillna(value=average_1)=\")\nprint(data_frame.fillna(value=average_1), \"\\n\")\n" ]
[ [ "numpy.arange", "numpy.ones" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
onyalcin/PyMO
[ "1d49620096b7f81b6db0cd4ed427cd7496bd5f99" ]
[ "pymo/viz_tools.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport IPython\nimport os\n\ndef save_fig(fig_id, tight_layout=True):\n if tight_layout:\n plt.tight_layout()\n plt.savefig(fig_id + '.png', format='png', dpi=300)\n \n \ndef draw_stickfigure(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n if joints is None:\n joints_to_draw = mocap_track.skeleton.keys()\n else:\n joints_to_draw = joints\n \n if data is None:\n df = mocap_track.values\n else:\n df = data\n \n for joint in joints_to_draw:\n ax.scatter(x=df['%s_Xposition'%joint][frame], \n y=df['%s_Yposition'%joint][frame], \n alpha=0.6, c='b', marker='o')\n\n parent_x = df['%s_Xposition'%joint][frame]\n parent_y = df['%s_Yposition'%joint][frame]\n \n children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]\n \n for c in children_to_draw:\n child_x = df['%s_Xposition'%c][frame]\n child_y = df['%s_Yposition'%c][frame]\n ax.plot([parent_x, child_x], [parent_y, child_y], 'k-', lw=2)\n \n if draw_names:\n ax.annotate(joint, \n (df['%s_Xposition'%joint][frame] + 0.1, \n df['%s_Yposition'%joint][frame] + 0.1))\n\n return ax\n\ndef draw_stickfigure3d(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):\n from mpl_toolkits.mplot3d import Axes3D\n \n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d') \n \n if joints is None:\n joints_to_draw = mocap_track.skeleton.keys()\n else:\n joints_to_draw = joints\n \n if data is None:\n df = mocap_track.values\n else:\n df = data\n \n for joint in joints_to_draw:\n parent_x = df['%s_Xposition'%joint][frame]\n parent_y = df['%s_Zposition'%joint][frame]\n parent_z = df['%s_Yposition'%joint][frame]\n # ^ In mocaps, Y is the up-right axis \n\n ax.scatter(xs=parent_x, \n ys=parent_y, \n zs=parent_z, \n alpha=0.6, c='b', marker='o')\n\n \n children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]\n \n for c in children_to_draw:\n child_x = df['%s_Xposition'%c][frame]\n child_y = df['%s_Zposition'%c][frame]\n child_z = df['%s_Yposition'%c][frame]\n # ^ In mocaps, Y is the up-right axis\n\n ax.plot([parent_x, child_x], [parent_y, child_y], [parent_z, child_z], 'k-', lw=2, c='black')\n \n if draw_names:\n ax.text(x=parent_x + 0.1, \n y=parent_y + 0.1,\n z=parent_z + 0.1,\n s=joint,\n color='rgba(0,0,0,0.9)')\n\n return ax\n\n\ndef sketch_move(mocap_track, data=None, ax=None, figsize=(16,8)):\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n if data is None:\n data = mocap_track.values\n\n for frame in range(0, data.shape[0], 4):\n# draw_stickfigure(mocap_track, f, data=data, ax=ax)\n \n for joint in mocap_track.skeleton.keys():\n children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]\n \n parent_x = data['%s_Xposition'%joint][frame]\n parent_y = data['%s_Yposition'%joint][frame]\n \n frame_alpha = frame/data.shape[0]\n \n for c in children_to_draw:\n child_x = data['%s_Xposition'%c][frame]\n child_y = data['%s_Yposition'%c][frame]\n \n ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)\n\n\n\ndef viz_cnn_filter(feature_to_viz, mocap_track, data, gap=25):\n fig = plt.figure(figsize=(16,4))\n ax = plt.subplot2grid((1,8),(0,0))\n ax.imshow(feature_to_viz.T, aspect='auto', interpolation='nearest')\n \n ax = 
plt.subplot2grid((1,8),(0,1), colspan=7)\n for frame in range(feature_to_viz.shape[0]):\n frame_alpha = 0.2#frame/data.shape[0] * 2 + 0.2\n\n for joint_i, joint in enumerate(mocap_track.skeleton.keys()):\n children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]\n\n parent_x = data['%s_Xposition'%joint][frame] + frame * gap\n parent_y = data['%s_Yposition'%joint][frame] \n\n ax.scatter(x=parent_x, \n y=parent_y, \n alpha=0.6,\n cmap='RdBu',\n c=feature_to_viz[frame][joint_i] * 10000,\n marker='o',\n s = abs(feature_to_viz[frame][joint_i] * 10000))\n plt.axis('off')\n for c in children_to_draw:\n child_x = data['%s_Xposition'%c][frame] + frame * gap\n child_y = data['%s_Yposition'%c][frame] \n\n ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)\n\n \ndef print_skel(X):\n stack = [X.root_name]\n tab=0\n while stack:\n joint = stack.pop()\n tab = len(stack)\n print('%s- %s (%s)'%('| '*tab, joint, X.skeleton[joint]['parent']))\n for c in X.skeleton[joint]['children']:\n stack.append(c)\n\n\ndef nb_play_mocap_fromurl(mocap, mf, frame_time=1/30, scale=1, base_url='http://titan:8385'):\n if mf == 'bvh':\n bw = BVHWriter()\n with open('test.bvh', 'w') as ofile:\n bw.write(mocap, ofile)\n \n filepath = '../notebooks/test.bvh'\n elif mf == 'pos':\n c = list(mocap.values.columns)\n\n for cc in c:\n if 'rotation' in cc:\n c.remove(cc)\n mocap.values.to_csv('test.csv', index=False, columns=c)\n \n filepath = '../notebooks/test.csv'\n else:\n return\n \n url = '%s/mocapplayer/player.html?data_url=%s&scale=%f&cz=200&order=xzyi&frame_time=%f'%(base_url, filepath, scale, frame_time)\n iframe = '<iframe src=' + url + ' width=\"100%\" height=500></iframe>'\n link = '<a href=%s target=\"_blank\">New Window</a>'%url\n return IPython.display.HTML(iframe+link)\n\ndef nb_play_mocap(mocap, mf, meta=None, frame_time=1/30, scale=1, camera_z=500, base_url=None):\n data_template = 'var dataBuffer = `$$DATA$$`;'\n data_template += 'var metadata = $$META$$;'\n data_template += 'start(dataBuffer, metadata, $$CZ$$, $$SCALE$$, $$FRAMETIME$$);'\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n\n if base_url is None:\n base_url = os.path.join(dir_path, 'mocapplayer/playBuffer.html')\n \n # print(dir_path)\n\n if mf == 'bvh':\n pass\n elif mf == 'pos':\n cols = list(mocap.values.columns)\n for c in cols:\n if 'rotation' in c:\n cols.remove(c)\n \n data_csv = mocap.values.to_csv(index=False, columns=cols)\n\n if meta is not None:\n lines = [','.join(item) for item in meta.astype('str')]\n meta_csv = '[' + ','.join('[%s]'%l for l in lines) +']' \n else:\n meta_csv = '[]'\n \n data_assigned = data_template.replace('$$DATA$$', data_csv)\n data_assigned = data_assigned.replace('$$META$$', meta_csv)\n data_assigned = data_assigned.replace('$$CZ$$', str(camera_z))\n data_assigned = data_assigned.replace('$$SCALE$$', str(scale))\n data_assigned = data_assigned.replace('$$FRAMETIME$$', str(frame_time))\n\n else:\n return\n \n \n\n with open(os.path.join(dir_path, 'mocapplayer/data.js'), 'w') as oFile:\n oFile.write(data_assigned)\n\n url = '%s?&cz=200&order=xzyi&frame_time=%f&scale=%f'%(base_url, frame_time, scale)\n iframe = '<iframe frameborder=\"0\" src=' + url + ' width=\"100%\" height=500></iframe>'\n link = '<a href=%s target=\"_blank\">New Window</a>'%url\n return IPython.display.HTML(iframe+link)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SunLoveSheep/Sem-LSD
[ "8c085217c372588fbb9ca37c5aef32d66270560f" ]
[ "src/_analyze_semantic_line.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n# /workspace/tangyang.sy/pytorch_CV/pytorch_CenterNet/src\n\ntry:\n from utils_ctdet.lineNMS import do_nms_line, do_acl_line, do_acl_line_v1\nexcept ImportError:\n from utils.lineNMS import do_nms_line, do_acl_line, do_acl_line_v1\n\nfrom collections import OrderedDict\nimport numpy as np\nimport os\nimport xml.etree.ElementTree as ET\n\nLABEL_MAP_KAIST = [\n 'building', 'ground_building', 'wall', 'ground_wall',\n 'grass', 'fence', 'ground_fence', 'pole', 'curb', 'sign', 'tree', 'window', 'door', 'bridge'\n]\n\nLABEL2SKIP = [\n 'sign', 'window', 'tree',\n]\n\nnms_func = {\n 'acl': do_acl_line,\n 'acl_v1': do_acl_line_v1,\n 'iou': do_nms_line,\n}\n\nMETRIC = 'acl'\nMETRIC_EVAL = 'acl'\nIF_CORR_FN = True\nMETRIC_THRESH = 0.9\nDEFAULT_DATASET = 'OBJLINE_KAIST'\n\n# mAP metrics:\nIF_mAP = True\nmAP_score = 0.5\nLV_IOU = [0.5, 0.75, 0.9] if not IF_mAP else [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]\n# LV_IOU = [0.5, 0.75, 0.9] if not IF_mAP else [0.5, 0.9]\nmAP_SUB = ['building', 'pole', 'curb']\n\nLV_ACL = LV_IOU\nLV_SCORE = [0.25, 0.5, 0.6, 0.7, 0.8, 0.9]\nROC_score = 0.25\nif mAP_score not in LV_SCORE:\n LV_SCORE.append(mAP_score)\n\nroot = '/workspace/tangyang.sy/pytorch_CV/test_imgs/KAIST_5seqs_20200214/'\n\nMODEL = 'best'\n# EXP_ID = 'kaist_5seqs_ResNet18_GradMagLoss_NegOnly_20200224_ep{}_vis0.25'.format(MODEL)\nEXP_ID = 'kaist_5seqs_ResNet18_20200224_ep{}_vis0.25'.format(MODEL)\n\nSRC_GT = root + 'Annotations/'\n\nDATASET_MAP = {\n 'KAIST': LABEL_MAP_KAIST,\n}\n\nDATASET = 'KAIST'\nLABEL_MAP = DATASET_MAP[DATASET]\nLABEL_MAP.append('lineseg') # For LSD and AFM results.\n\nSRC_PRED = root + 'Preds_{}/'.format(EXP_ID)\nPTH_EXT = '' if not IF_CORR_FN else '_corrFN'\nRECORD_PTH = root + 'summary{}/'.format(PTH_EXT)\nif not os.path.exists(RECORD_PTH):\n os.makedirs(RECORD_PTH)\nif len(LABEL2SKIP) == 0:\n RECORD_TXT = RECORD_PTH + 'res_id-{}_metric-{}-{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH)\n RECORD_perCate_TXT = RECORD_PTH + 'res_perCate_id-{}_metric-{}-{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH)\nelse:\n RECORD_TXT = RECORD_PTH + 'res_id-{}_metric-{}-{}-skip{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH, len(LABEL2SKIP))\n RECORD_perCate_TXT = RECORD_PTH + 'res_perCate_id-{}_metric-{}-{}-skip{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH,\n len(LABEL2SKIP))\n\nlst_gt = [x for x in sorted(os.listdir(SRC_GT)) if '.xml' in x]\nlst_pred = [x for x in sorted(os.listdir(SRC_PRED)) if '.xml' in x]\n\nPI = 3.14159265359\n\n\nclass LineSeg:\n x_left = 0 # x_left\n y_left = 0 # y_left\n x_right = 0 # x_right\n y_right = 0 # y_right\n cate = 'null'\n score = 0.0\n direction = None\n angle = 0.0 # angle against positive x-axis\n length = 0.0\n\n def __init__(self, x_left_in, y_left_in, x_right_in, y_right_in,\n cate_in, score_in, direct_in):\n self.x_left = x_left_in\n self.y_left = y_left_in\n self.x_right = x_right_in\n self.y_right = y_right_in\n self.cate = cate_in\n self.score = score_in\n self.direction = direct_in\n self.angle = self._cal_ang()\n\n def _cal_ang(self):\n ang_res = 0.\n return ang_res\n\n\ndef _fast_reject(line1, line2):\n # Fast reject:\n if line1.x_right < line2.x_left or line1.x_left > line2.x_right:\n return True\n line1_ymin = min(line1.y_left, line1.y_right)\n line1_ymax = max(line1.y_left, line1.y_right)\n line2_ymin = min(line2.y_left, line2.y_right)\n line2_ymax = max(line2.y_left, line2.y_right)\n if line1_ymin > 
line2_ymax or line1_ymax < line2_ymin:\n return True\n return False\n\n\ndef _cal_iou_line(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n\n # if line1.direction != line2.direction:\n # return 0.\n\n line1_xmin = min(line1.x_left, line1.x_right)\n line1_ymin = min(line1.y_left, line1.y_right)\n line1_xmax = max(line1.x_left, line1.x_right)\n line1_ymax = max(line1.y_left, line1.y_right)\n\n line2_xmin = min(line2.x_left, line2.x_right)\n line2_ymin = min(line2.y_left, line2.y_right)\n line2_xmax = max(line2.x_left, line2.x_right)\n line2_ymax = max(line2.y_left, line2.y_right)\n\n inter_xmin = max(line1_xmin, line2_xmin)\n inter_ymin = max(line1_ymin, line2_ymin)\n inter_xmax = min(line1_xmax, line2_xmax)\n inter_ymax = min(line1_ymax, line2_ymax)\n inter_x = max(0, inter_xmax - inter_xmin)\n inter_y = max(0, inter_ymax - inter_ymin)\n area_inter = inter_x * inter_y\n\n union_xmin = min(line1_xmin, line2_xmin)\n union_ymin = min(line1_ymin, line2_ymin)\n union_xmax = max(line1_xmax, line2_xmax)\n union_ymax = max(line1_ymax, line2_ymax)\n\n union_x = 1. if union_xmax == union_xmin else union_xmax - union_xmin\n union_y = 1. if union_ymax == union_ymin else union_ymax - union_ymin\n area_union = union_x * union_y\n\n iou = area_inter / area_union\n\n return iou\n\n\n# line1 should be ground truth, if available\ndef _cal_acl_line(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n # if line1.direction != line2.direction:\n # return 0.\n\n sum1_x = line1.x_left + line1.x_right\n sum1_y = line1.y_left + line1.y_right\n c1_x = sum1_x / 2\n c1_y = sum1_y / 2\n l1_wr = np.sqrt(sum1_x * sum1_x + sum1_y * sum1_y)\n l1_x = line1.x_right - line1.x_left\n l1_y = line1.y_right - line1.y_left\n l1 = np.sqrt(l1_x * l1_x + l1_y * l1_y)\n alpha1 = (line1.y_right - line1.y_left) / (line1.x_right - line1.x_left) if \\\n line1.x_right != line1.x_left else (line1.y_right - line1.y_left) / 1.0\n alpha1 = np.abs(np.arctan(alpha1) * 180 / PI)\n\n sum2_x = line2.x_left + line2.x_right\n sum2_y = line2.y_left + line2.y_right\n c2_x = sum2_x / 2\n c2_y = sum2_y / 2\n l2_wr = np.sqrt(sum2_x * sum2_x + sum2_y * sum2_y)\n l2_x = line2.x_right - line2.x_left\n l2_y = line2.y_right - line2.y_left\n l2 = np.sqrt(l2_x * l2_x + l2_y * l2_y)\n alpha2 = (line2.y_right - line2.y_left) / (line2.x_right - line2.x_left) if \\\n line2.x_right != line2.x_left else (line2.y_right - line2.y_left) / 1.0\n alpha2 = np.abs(np.arctan(alpha2) * 180 / PI)\n\n sim_a = max(0, 1 - (np.abs(alpha1 - alpha2) * 0.0111111111111))\n sim_c = max(0, 1 - (np.sqrt((c2_x - c1_x) * (c2_x - c1_x) + (c2_y - c1_y) * (c2_y - c1_y)))\n / (l1 * 0.5))\n sim_l = max(0, 1 - np.abs(l1 - l2) / l1)\n sim_l_wr = max(0, 1 - np.abs(l1_wr - l2_wr) / l1_wr)\n #print(\"sim l: {:.3f} | sim l wr: {:.3f} | line1: {},{},{},{} | line2: {},{},{},{}\".format(\n # sim_l, sim_l_wr,\n # line1.x_left, line1.y_left, line1.x_right, line1.y_right,\n # line2.x_left, line2.y_left, line1.x_right, line2.y_right\n #))\n acl = sim_a * sim_c * sim_l\n\n return acl\n\n\ndef _gaussian_radius(height, width, min_overlap=0.7):\n # a3 = 4 * min_overlap\n # b3 = -2 * min_overlap * (height + width)\n # c3 = (min_overlap - 1) * width * height\n # sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n # r3 = (b3 + sq3) / 2\n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n return r1\n\n\ndef _cal_acl_line_v1(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n\n # 
Vectorize to calculate cosine angle\n v_x1 = line1.x_right - line1.x_left\n v_y1 = line1.y_right - line1.y_left\n v_x2 = line2.x_right - line2.x_left\n v_y2 = line2.y_right - line2.y_left\n l1 = np.sqrt(v_x1 * v_x1 + v_y1 * v_y1)\n l2 = np.sqrt(v_x2 * v_x2 + v_y2 * v_y2)\n cos_a = (v_x1 * v_x2 + v_y1 * v_y2) / (l1 * l2)\n\n # Gaussian distribution to get score of center point\n radius = _gaussian_radius(v_y1, v_x1)\n sigma = (2 * radius - 1)/6\n sum1_x = line1.x_left + line1.x_right\n sum1_y = line1.y_left + line1.y_right\n c1_x = sum1_x / 2\n c1_y = sum1_y / 2\n sum2_x = line2.x_left + line2.x_right\n sum2_y = line2.y_left + line2.y_right\n c2_x = sum2_x / 2\n c2_y = sum2_y / 2\n d_x = c2_x - c1_x\n d_y = c2_y - c1_y\n c_score = np.exp(-(d_x * d_x + d_y * d_y) / (2 * sigma * sigma))\n\n sim_a = cos_a\n sim_c = c_score\n sim_l = max(0, 1 - np.abs(l1 - l2) / l1)\n\n return sim_a * sim_c * sim_l\n\n\nmetric_func = {\n 'acl': _cal_acl_line,\n 'acl_v1': _cal_acl_line_v1,\n 'iou': _cal_iou_line,\n}\n\n\ndef reverse_direct(lines):\n lines_out = lines.copy()\n for line in lines_out:\n line.direction = 1 - line.direction\n y_tmp = line.y_left\n line.y_left = line.y_right\n line.y_right = y_tmp\n return lines_out\n\n\ndef get_lines_from_xml(xml_file, if_gt=False):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n lst_lines = list()\n for obj in root.findall('object'):\n objline = obj.find('bndbox')\n\n name = obj.find('name').text\n if name is None:\n continue\n name = name.lower()\n score = obj.find('score').text if not if_gt else 1.0\n try:\n direct_str = obj.find('direction').text\n direction = 1.0 if direct_str == 'lt2rb' else 0.0\n except AttributeError:\n direction = 2.0\n\n # The followings are actually already converted to x_left, y_left, x_right, and y_right\n xmin = int(float(objline.find('xmin').text))\n ymin = int(float(objline.find('ymin').text))\n xmax = int(float(objline.find('xmax').text))\n ymax = int(float(objline.find('ymax').text))\n if direction == 1.0:\n x_left = min(xmin, xmax)\n y_left = min(ymin, ymax)\n x_right = max(xmin, xmax)\n y_right = max(ymin, ymax)\n elif direction == 0.0:\n x_left = min(xmin, xmax)\n y_left = max(ymin, ymax)\n x_right = max(xmin, xmax)\n y_right = min(ymin, ymax)\n else: # direction == 2.0, bbox\n x_left = xmin\n y_left = ymin\n x_right = xmax\n y_right = ymax\n\n # if not if_gt:\n # direction = obj.find('direction').text\n # else:\n # direct_str = 'lb2rt' if x_left < x_right and y_left < y_right else 'lt2rb'\n # direction = 1.0 if direct_str == 'lt2rb' else 0.0\n\n # line = [xmin, ymin, xmax, ymax, name, score, direction]\n line = LineSeg(\n x_left_in=int(x_left),\n y_left_in=int(y_left),\n x_right_in=int(x_right),\n y_right_in=int(y_right),\n cate_in=name,\n score_in=float(score),\n direct_in=float(direction)\n )\n lst_lines.append(line)\n\n return lst_lines\n\n\n# given ground truth objects and predicted objects.\n# given 3M/5M mask and levels of IoU and confidence to test\n# return resultant number of precision / recall objects\n# TP (True Positive): predict the box with correct category\n# TN (True Negative): predict the box with wrong category\n# FP (False Positive): predict a box when there is no box\n# FN (False Negative): predict no box when there is a box\n# Output dictionary format:\n# IoU_thres_1 -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, TN\n# IoU_thres_2 -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, 
TN\n# ...\n# IoU_thres_M -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, TN\ndef cal_metric_res(\n lines_gt_in,\n lines_pred_in,\n metric='acl'\n):\n # initiate output dict\n dict_res = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res.keys():\n dict_res[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res[key_iou].keys():\n dict_res[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # initiate output dict with Cate level:\n dict_res_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_res_perCate.keys():\n dict_res_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res_perCate[key_cate].keys():\n dict_res_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res_perCate[key_cate][key_iou].keys():\n dict_res_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # ------ loop through ground truth bboxes to find FN (missed bboxes) ------\n for line in lines_gt_in:\n # Each gt should be counted as TP or FN, this is to record if it has been recorded\n if_recorded = 0\n\n # Reivse some typo in prediction cates\n line.cate = 'ground_building' if line.cate == 'ground building' else line.cate\n\n if line.cate in LABEL2SKIP or line.cate not in LABEL_MAP:\n # print(\"Found invalid line type: {}, skip.\".format(line.cate))\n continue\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n score_at_max_iou = 0.\n for line_pred in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line_pred.cate not in LABEL_MAP and line_pred.cate != 'lineseg')\\\n or (line_pred.cate in LABEL2SKIP):\n continue\n\n if line.cate in LABEL_MAP_OBJ_KAIST and line_pred.cate in LABEL_MAP_OBJ_KAIST:\n iou = _cal_iou_line(line, line_pred)\n else:\n iou = metric_func[metric](line, line_pred)\n # print(iou)\n # iou = _cal_acl_line(line, line_pred) if metric == 'acl' else _cal_iou_line(line, line_pred)\n # iou_test = _cal_acl_line_v1(line, line_pred)\n # print(iou_test)\n if iou > max_iou:\n max_iou = iou\n score_at_max_iou = line_pred.score\n if line.cate == line_pred.cate:\n if_cate_right = True\n else:\n if_cate_right = False if line_pred.cate != 'lineseg' else True\n\n # arrange output according to levels of IoU and levels of score to check:\n for lv_iou in LV_IOU:\n # if IoU larger than current IoU threshold:\n if max_iou > lv_iou:\n for lv_score in LV_SCORE:\n # if score larger than current score threshold:\n if score_at_max_iou > lv_score:\n if if_cate_right:\n dict_res[lv_iou][lv_score]['TP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['TP'] += 1\n else:\n # FP increase by 1, since this pred has no gt (diff cate)\n dict_res[lv_iou][lv_score]['FP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n\n # FN should also be increased by 1, since this gt has no pred (diff cate)\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n for lv_score in LV_SCORE:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n # 
-----------------------------------------------------------------------------\n\n # ------ loop through predicted bboxes to find FP (extra bboxes) ------\n for line in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line.cate not in LABEL_MAP and line.cate != 'lineseg')\\\n or (line.cate in LABEL2SKIP):\n continue\n\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n for line_gt in lines_gt_in:\n if line_gt.cate in LABEL2SKIP:\n continue\n iou = metric_func[metric](line_gt, line)\n # iou = _cal_acl_line(line_gt, line) if metric == 'acl' else _cal_iou_line(line_gt, line)\n if iou > max_iou:\n max_iou = iou\n if line.cate == line_gt.cate:\n if_cate_right = True\n else:\n if_cate_right = False\n\n # arrange output according to levels of IoU and levels of score to check:\n score_at_max_iou = line.score\n for lv_iou in LV_IOU:\n # if IoU smaller than current IoU threshold:\n if max_iou < lv_iou:\n for lv_score in LV_SCORE:\n if score_at_max_iou > lv_score:\n dict_res[lv_iou][lv_score]['FP'] += 1\n if line.cate != 'lineseg':\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n # ---------------------------------------------------------------------\n\n return dict_res, dict_res_perCate\n\n\ndef cal_metric_res_AngLen(\n lines_gt_in,\n lines_pred_in,\n metric='acl'\n):\n # initiate output dict\n dict_res = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res.keys():\n dict_res[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res[key_iou].keys():\n dict_res[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # initiate output dict with Cate level:\n dict_res_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_res_perCate.keys():\n dict_res_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res_perCate[key_cate].keys():\n dict_res_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res_perCate[key_cate][key_iou].keys():\n dict_res_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # ------ loop through ground truth bboxes to find FN (missed bboxes) ------\n for line in lines_gt_in:\n # Each gt should be counted as TP or FN, this is to record if it has been recorded\n if_recorded = 0\n\n # Reivse some typo in prediction cates\n line.cate = 'ground_building' if line.cate == 'ground building' else line.cate\n\n if line.cate in LABEL2SKIP or line.cate not in LABEL_MAP:\n print(\"Found invalid line type: {}, skip.\".format(line.cate))\n continue\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n score_at_max_iou = 0.\n for line_pred in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line_pred.cate not in LABEL_MAP and line_pred.cate != 'lineseg')\\\n or (line_pred.cate in LABEL2SKIP):\n continue\n\n if line.cate in LABEL_MAP_OBJ_KAIST and line_pred.cate in LABEL_MAP_OBJ_KAIST:\n iou = _cal_iou_line(line, line_pred)\n else:\n iou = metric_func[metric](line, line_pred)\n # print(iou)\n # iou = _cal_acl_line(line, line_pred) if metric == 'acl' else _cal_iou_line(line, line_pred)\n # iou_test = _cal_acl_line_v1(line, line_pred)\n # print(iou_test)\n if iou > max_iou:\n max_iou = iou\n score_at_max_iou = line_pred.score\n if 
line.cate == line_pred.cate:\n if_cate_right = True\n else:\n if_cate_right = False if line_pred.cate != 'lineseg' else True\n\n # arrange output according to levels of IoU and levels of score to check:\n for lv_iou in LV_IOU:\n # if IoU larger than current IoU threshold:\n if max_iou > lv_iou:\n for lv_score in LV_SCORE:\n # if score larger than current score threshold:\n if score_at_max_iou > lv_score:\n if if_cate_right:\n dict_res[lv_iou][lv_score]['TP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['TP'] += 1\n else:\n # FP increase by 1, since this pred has no gt (diff cate)\n dict_res[lv_iou][lv_score]['FP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n\n # FN should also be increased by 1, since this gt has no pred (diff cate)\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n for lv_score in LV_SCORE:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n # -----------------------------------------------------------------------------\n\n # ------ loop through predicted bboxes to find FP (extra bboxes) ------\n for line in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line.cate not in LABEL_MAP and line.cate != 'lineseg')\\\n or (line.cate in LABEL2SKIP):\n continue\n\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n for line_gt in lines_gt_in:\n if line_gt.cate in LABEL2SKIP:\n continue\n iou = metric_func[metric](line_gt, line)\n # iou = _cal_acl_line(line_gt, line) if metric == 'acl' else _cal_iou_line(line_gt, line)\n if iou > max_iou:\n max_iou = iou\n if line.cate == line_gt.cate:\n if_cate_right = True\n else:\n if_cate_right = False\n\n # arrange output according to levels of IoU and levels of score to check:\n score_at_max_iou = line.score\n for lv_iou in LV_IOU:\n # if IoU smaller than current IoU threshold:\n if max_iou < lv_iou:\n for lv_score in LV_SCORE:\n if score_at_max_iou > lv_score:\n dict_res[lv_iou][lv_score]['FP'] += 1\n if line.cate != 'lineseg':\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n # ---------------------------------------------------------------------\n\n return dict_res, dict_res_perCate\n\n\ndef cal_mAP(dict_res_perCate_in):\n mAP = 0.\n mAP3 = 0.\n cnt_nonzero = 0\n for cate, sub_dict in dict_res_perCate_in.items():\n print(\"cate: \", cate)\n cate_AP = 0.\n cate_AP3 = 0.\n if_has_gt = False # to check if there exist gt of this category\n for lv_iou in LV_IOU:\n TP = dict_res_perCate_in[cate][lv_iou][mAP_score]['TP']\n FP = dict_res_perCate_in[cate][lv_iou][mAP_score]['FP']\n FN = dict_res_perCate_in[cate][lv_iou][mAP_score]['FN']\n AP = TP / (TP + FP) if TP + FP > 0 else 0\n cate_AP += AP\n if cate in mAP_SUB:\n cate_AP3 += AP\n if_has_gt = True if TP + FN > 0 else False\n\n print(\"lv IoU: {} | TP: {} | FP: {} | FN : {} | AP: {} |\".format(\n lv_iou, TP, FP, FN, AP\n ))\n\n cate_mAP = cate_AP/len(LV_IOU)\n cate_mAP3 = cate_AP3/len(LV_IOU)\n print(\"Category {} | AP {}\".format(cate, cate_mAP))\n mAP += cate_mAP\n mAP3 += cate_mAP3\n if if_has_gt:\n cnt_nonzero += 1\n # print(\"... 
category {} has gt...\".format(cate))\n\n print(\"# of None zero AP categories: \", cnt_nonzero)\n mAP = mAP / cnt_nonzero\n mAP3 = mAP3 / len(mAP_SUB)\n print(\"mAP @ score {} : \".format(mAP_score), mAP)\n print(\"mAP{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAP3)\n return mAP, mAP3\n\n\ndef cal_mAP_mAR_F1(dict_res_perCate_in):\n mAP = 0.\n mAP3 = 0.\n mAR = 0.\n mAR3 = 0.\n mF1 = 0.\n mF13 = 0.\n cnt_nonzero = 0\n for cate, sub_dict in dict_res_perCate_in.items():\n print(\"cate: \", cate)\n cate_AP = 0.\n cate_AP3 = 0.\n cate_AR = 0.\n cate_AR3 = 0.\n cate_F1 = 0.\n cate_F13 = 0.\n if_has_gt = False # to check if there exist gt of this category\n for lv_iou in LV_IOU:\n TP = dict_res_perCate_in[cate][lv_iou][mAP_score]['TP']\n FP = dict_res_perCate_in[cate][lv_iou][mAP_score]['FP']\n FN = dict_res_perCate_in[cate][lv_iou][mAP_score]['FN']\n AP = TP / (TP + FP) if TP + FP > 0 else 0\n AR = TP / (TP + FN) if TP + FN > 0 else 0\n F1 = AP * AR / (AP + AR) if AP > 0 and AR > 0 else 0\n cate_AP += AP\n cate_AR += AR\n cate_F1 += F1\n if cate in mAP_SUB:\n cate_AP3 += AP\n cate_AR3 += AR\n cate_F13 += F1\n if_has_gt = True if TP + FN > 0 else False\n\n print(\"lv IoU: {} | TP: {} | FP: {} | FN : {} | AP: {} |\".format(\n lv_iou, TP, FP, FN, AP\n ))\n\n cate_mAP = cate_AP / len(LV_IOU)\n cate_mAP3 = cate_AP3 / len(LV_IOU)\n cate_mAR = cate_AR / len(LV_IOU)\n cate_mAR3 = cate_AR3 / len(LV_IOU)\n cate_F1 = cate_F1 / len(LV_IOU)\n cate_F13 = cate_F13 / len(LV_IOU)\n print(\"Category {} | AP {} | AR {} | F1 {}\".format(cate, cate_mAP, cate_mAR, cate_F1))\n mAP += cate_mAP\n mAP3 += cate_mAP3\n mAR += cate_mAR\n mAR3 += cate_mAR3\n mF1 += cate_F1\n mF13 += cate_F13\n if if_has_gt:\n cnt_nonzero += 1\n # print(\"... category {} has gt...\".format(cate))\n\n print(\"# of None zero AP categories: \", cnt_nonzero)\n mAP = mAP / cnt_nonzero\n mAP3 = mAP3 / len(mAP_SUB)\n mAR = mAR / cnt_nonzero\n mAR3 = mAR3 / len(mAP_SUB)\n mF1 = mF1 / cnt_nonzero\n mF13 = mF13 / len(mAP_SUB)\n print(\"mAP @ score {} : \".format(mAP_score), mAP)\n print(\"mAP{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAP3)\n print(\"mAR @ score {} : \".format(mAP_score), mAR)\n print(\"mAR{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAR3)\n print(\"F1 @ score {} : \".format(mAP_score), mF1)\n print(\"F1{} @ score {} : \".format(len(mAP_SUB), mAP_score), mF13)\n return mAP, mAP3\n\n\ndef performLineNMS(lineSegs_in):\n lst_line = [None] * len(lineSegs_in)\n for l, lineseg in enumerate(lineSegs_in):\n line = [lineseg.x_left, lineseg.y_left, lineseg.x_right, lineseg.y_right,\n lineseg.cate, lineseg.score, lineseg.direction]\n lst_line[l] = line\n dict_line_out = nms_func[METRIC](lst_line, thres_in=METRIC_THRESH)\n # dict_line_out = do_acl_line(lst_line, thres_in=METRIC_THRESH) if METRIC == 'acl' else \\\n # do_nms_line(lst_line, thres_in=METRIC_THRESH)\n\n lineSegs_out = []\n for cate, lines in dict_line_out.items():\n if lines is None:\n continue\n for line in lines:\n lineSeg_res = LineSeg(\n x_left_in=int(line[0]),\n y_left_in=int(line[1]),\n x_right_in=int(line[2]),\n y_right_in=int(line[3]),\n cate_in=cate,\n score_in=float(line[4]),\n direct_in=float(line[5])\n )\n lineSegs_out.append(lineSeg_res)\n\n return lineSegs_out\n\n\ndef eval():\n # initiate total result dict\n dict_total = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_total.keys():\n dict_total[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_total[key_iou].keys():\n 
dict_total[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n # initiate total result dict with Cate level:\n dict_total_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_total_perCate.keys():\n dict_total_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_total_perCate[key_cate].keys():\n dict_total_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_total_perCate[key_cate][key_iou].keys():\n dict_total_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n cnt_file = 0\n cnt_gt = 0\n for file in lst_gt:\n if 'TZKJ' in SRC_PRED:\n lines_pred = get_lines_from_xml(SRC_PRED + file)\n else:\n if file not in lst_pred:\n tmpfile = file.replace('val_semantic_line', '')\n if tmpfile not in lst_pred:\n print(\"{} not found in prediction! Empty prediction...\".format(file))\n lines_pred = list()\n else:\n lines_pred = get_lines_from_xml(SRC_PRED + tmpfile)\n else:\n lines_pred = get_lines_from_xml(SRC_PRED + file)\n\n lines_pred = performLineNMS(lines_pred) # Line ACL\n lines_gt = get_lines_from_xml(SRC_GT + file, if_gt=True)\n cnt_gt += len(lines_gt)\n\n # metric_res, metric_res_perCate = cal_metric_res(lines_gt, lines_pred, metric=METRIC)\n metric_res, metric_res_perCate = cal_metric_res(lines_gt, lines_pred, metric=METRIC_EVAL)\n\n # update total results:\n for key_iou in dict_total.keys():\n for key_score in dict_total[key_iou].keys():\n dict_total[key_iou][key_score]['TP'] += metric_res[key_iou][key_score]['TP']\n dict_total[key_iou][key_score]['TN'] += metric_res[key_iou][key_score]['TN']\n dict_total[key_iou][key_score]['FP'] += metric_res[key_iou][key_score]['FP']\n dict_total[key_iou][key_score]['FN'] += metric_res[key_iou][key_score]['FN']\n\n for key_cate in LABEL_MAP:\n if key_cate in LABEL2SKIP:\n continue\n for key_iou in dict_total.keys():\n for key_score in dict_total[key_iou].keys():\n dict_total_perCate[key_cate][key_iou][key_score]['TP'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['TP']\n dict_total_perCate[key_cate][key_iou][key_score]['TN'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['TN']\n dict_total_perCate[key_cate][key_iou][key_score]['FP'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['FP']\n dict_total_perCate[key_cate][key_iou][key_score]['FN'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['FN']\n\n cnt_file += 1\n if cnt_file % 100 == 0:\n print(\"Checked {} images out of total {}...\".format(cnt_file, len(lst_pred)))\n\n # Calculate mAP:\n print(\"Calculating mAP...\")\n # mAP, mAP3 = cal_mAP(dict_res_perCate_in=dict_total_perCate)\n mAP, mAP3 = cal_mAP_mAR_F1(dict_res_perCate_in=dict_total_perCate)\n\n print(\"DONE, printing summary...\")\n\n def cal_recall_precision(tp, tn, fp, fn):\n try:\n rec = float(tp) / float(tp + fn)\n except ZeroDivisionError:\n rec = float(tp) / (float(tp + fn) + 1e-6)\n try:\n prec = float(tp) / float(tp + fp)\n except:\n prec = float(tp) / (float(tp + fp) + 1e-6)\n return rec, prec\n\n print(\"For ROC curve:\")\n txt4roc = \"\"\n for key_iou in dict_total.keys():\n TP = dict_total[key_iou][ROC_score]['TP']\n TN = dict_total[key_iou][ROC_score]['TN']\n FP = dict_total[key_iou][ROC_score]['FP']\n FN = dict_total[key_iou][ROC_score]['FN']\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n txt4roc += \"{}|{}|\".format(recall, precision)\n print(txt4roc)\n\n print(\"In total {} test 
images:\".format(len(lst_gt)))\n for key_iou in dict_total.keys():\n print(\"IoU: {}\".format(key_iou))\n for key_score in dict_total[key_iou].keys():\n print(\"\\tConf: {}\".format(key_score))\n\n TP = dict_total[key_iou][key_score]['TP']\n TN = dict_total[key_iou][key_score]['TN']\n FP = dict_total[key_iou][key_score]['FP']\n FN = dict_total[key_iou][key_score]['FN']\n num_obj = TP + TN + FN\n print(\"\\tTotal gt objects: {}\".format(num_obj))\n print(\"\\tTP: {}\\t TN: {}\\t FP: {}\\t FN:{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n print(\"\\tRecall: {:.4f}\\t Precision: {:.4f}\".format(recall, precision))\n\n with open(RECORD_TXT, 'w+') as res:\n res.write(\"mAP@score{}: {} | mAP{}@score{}: {}\\n\".format(mAP_score, mAP,\n len(mAP_SUB), mAP_score, mAP3))\n res.write(\"IoU\\tConfidence\\tTP\\tTN\\tFP\\tFN\\tRecall\\tPrecision\\n\")\n for key_iou in dict_total.keys():\n res.write(\"{}\".format(key_iou))\n pre = '\\t'\n for key_score in dict_total[key_iou].keys():\n res.write(\"{}{}\".format(pre, key_score))\n pre = '\\t'\n\n TP = dict_total[key_iou][key_score]['TP']\n TN = dict_total[key_iou][key_score]['TN']\n FP = dict_total[key_iou][key_score]['FP']\n FN = dict_total[key_iou][key_score]['FN']\n res.write(\"\\t{}\\t{}\\t{}\\t{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n res.write(\"\\t{:.4f}\\t{:.4f}\\n\".format(recall, precision))\n\n with open(RECORD_perCate_TXT, 'w+') as res:\n res.write(\"Category\\tIoU\\tConfidence\\tTP\\tTN\\tFP\\tFN\\tRecall\\tPrecision\\n\")\n for key_cate in dict_total_perCate.keys():\n res.write(\"{}\".format(key_cate))\n for key_iou in dict_total_perCate[key_cate].keys():\n pre = '\\t\\t'\n res.write(\"{}{}\".format(pre, key_iou))\n pre = '\\t'\n for key_score in dict_total_perCate[key_cate][key_iou].keys():\n res.write(\"{}{}\".format(pre, key_score))\n pre = '\\t\\t\\t'\n\n TP = dict_total_perCate[key_cate][key_iou][key_score]['TP']\n TN = dict_total_perCate[key_cate][key_iou][key_score]['TN']\n FP = dict_total_perCate[key_cate][key_iou][key_score]['FP']\n FN = dict_total_perCate[key_cate][key_iou][key_score]['FN']\n res.write(\"\\t{}\\t{}\\t{}\\t{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n res.write(\"\\t{:.4f}\\t{:.4f}\\n\".format(recall, precision))\n\n\nif __name__ == '__main__':\n eval()\n" ]
[ [ "numpy.abs", "numpy.exp", "numpy.sqrt", "numpy.arctan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jidebingfeng/segmatch
[ "c662324d23b9e049fbb49b52cda7895d1a4d2798" ]
[ "segmatch/python/display_matched_segments.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\n\n## LOAD DATA ##\n###############\nprint(\"Loading segments.\")\nfrom import_export import load_segments\nsegments, ids = load_segments(folder=\"/tmp/segment_matcher/\")\nfrom import_export import load_matches\nmatches = load_matches(folder=\"/tmp/segment_matcher/\")\n\nvisualize=True\nprint(\"q<Enter> to quit\")\nfor match in matches:\n if visualize:\n segment1 = segments[match[0]]\n segment2 = segments[match[1]]\n\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n X = segment1[:,0]\n Y = segment1[:,1]\n Z = segment1[:,2]\n fig = plt.figure(1)\n plt.cla()\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.scatter(X, Y, Z)\n plt.pause(0.05)\n\n X = segment2[:,0]\n Y = segment2[:,1]\n Z = segment2[:,2]\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.scatter(X, Y, Z)\n plt.pause(0.05)\n\n ## Get User Input ##\n ####################\n keys = raw_input(\">\")\n if keys == 'q':\n visualize = False\n\nplt.close(\"all\")\n" ]
[ [ "matplotlib.pyplot.cla", "matplotlib.pyplot.pause", "matplotlib.pyplot.close", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TingquanGao/Paddle
[ "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "f249a5f05f0f5832279244d88c8cb4eaaad1fbd4", "9b1015d90b4d498ab58df7cff2c3ed27863ce970", "f249a5f05f0f5832279244d88c8cb4eaaad1fbd4", "9b1015d90b4d498ab58df7cff2c3ed27863ce970", "f249a5f05f0f5832279244d88c8cb4eaaad1fbd4" ]
[ "python/paddle/dataset/movielens.py", "python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py", "python/paddle/fluid/contrib/slim/quantization/quantization_pass.py", "python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py", "python/paddle/fluid/tests/unittests/test_optimizer.py", "python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py" ]
[ "# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nMovielens 1-M dataset.\n\nMovielens 1-M dataset contains 1 million ratings from 6000 users on 4000\nmovies, which was collected by GroupLens Research. This module will download\nMovielens 1-M dataset from\nhttp://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training\nset and test set into paddle reader creators.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport zipfile\nimport paddle.dataset.common\nimport paddle.utils.deprecated as deprecated\nimport re\nimport random\nimport functools\nimport six\nimport paddle.compat as cpt\n\n__all__ = [\n 'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',\n 'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info'\n]\n\nage_table = [1, 18, 25, 35, 45, 50, 56]\n\n#URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'\nURL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'\nMD5 = 'c4d9eecfca2ab87c1945afe126590906'\n\n\nclass MovieInfo(object):\n \"\"\"\n Movie id, title and categories information are stored in MovieInfo.\n \"\"\"\n\n def __init__(self, index, categories, title):\n self.index = int(index)\n self.categories = categories\n self.title = title\n\n def value(self):\n \"\"\"\n Get information from a movie.\n \"\"\"\n return [\n self.index, [CATEGORIES_DICT[c] for c in self.categories],\n [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]\n ]\n\n def __str__(self):\n return \"<MovieInfo id(%d), title(%s), categories(%s)>\" % (\n self.index, self.title, self.categories)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass UserInfo(object):\n \"\"\"\n User id, gender, age, and job information are stored in UserInfo.\n \"\"\"\n\n def __init__(self, index, gender, age, job_id):\n self.index = int(index)\n self.is_male = gender == 'M'\n self.age = age_table.index(int(age))\n self.job_id = int(job_id)\n\n def value(self):\n \"\"\"\n Get information from a user.\n \"\"\"\n return [self.index, 0 if self.is_male else 1, self.age, self.job_id]\n\n def __str__(self):\n return \"<UserInfo id(%d), gender(%s), age(%d), job(%d)>\" % (\n self.index, \"M\"\n if self.is_male else \"F\", age_table[self.age], self.job_id)\n\n def __repr__(self):\n return str(self)\n\n\nMOVIE_INFO = None\nMOVIE_TITLE_DICT = None\nCATEGORIES_DICT = None\nUSER_INFO = None\n\n\ndef __initialize_meta_info__():\n fn = paddle.dataset.common.download(URL, \"movielens\", MD5)\n global MOVIE_INFO\n if MOVIE_INFO is None:\n pattern = re.compile(r'^(.*)\\((\\d+)\\)$')\n with zipfile.ZipFile(file=fn) as package:\n for info in package.infolist():\n assert isinstance(info, zipfile.ZipInfo)\n MOVIE_INFO = dict()\n title_word_set = set()\n categories_set = set()\n with package.open('ml-1m/movies.dat') as movie_file:\n for i, line in enumerate(movie_file):\n line = cpt.to_text(line, encoding='latin')\n movie_id, title, categories = line.strip().split('::')\n categories = 
categories.split('|')\n for c in categories:\n categories_set.add(c)\n title = pattern.match(title).group(1)\n MOVIE_INFO[int(movie_id)] = MovieInfo(\n index=movie_id, categories=categories, title=title)\n for w in title.split():\n title_word_set.add(w.lower())\n\n global MOVIE_TITLE_DICT\n MOVIE_TITLE_DICT = dict()\n for i, w in enumerate(title_word_set):\n MOVIE_TITLE_DICT[w] = i\n\n global CATEGORIES_DICT\n CATEGORIES_DICT = dict()\n for i, c in enumerate(categories_set):\n CATEGORIES_DICT[c] = i\n\n global USER_INFO\n USER_INFO = dict()\n with package.open('ml-1m/users.dat') as user_file:\n for line in user_file:\n line = cpt.to_text(line, encoding='latin')\n uid, gender, age, job, _ = line.strip().split(\"::\")\n USER_INFO[int(uid)] = UserInfo(\n index=uid, gender=gender, age=age, job_id=job)\n return fn\n\n\ndef __reader__(rand_seed=0, test_ratio=0.1, is_test=False):\n fn = __initialize_meta_info__()\n np.random.seed(rand_seed)\n with zipfile.ZipFile(file=fn) as package:\n with package.open('ml-1m/ratings.dat') as rating:\n for line in rating:\n line = cpt.to_text(line, encoding='latin')\n if (np.random.random() < test_ratio) == is_test:\n uid, mov_id, rating, _ = line.strip().split(\"::\")\n uid = int(uid)\n mov_id = int(mov_id)\n rating = float(rating) * 2 - 5.0\n\n mov = MOVIE_INFO[mov_id]\n usr = USER_INFO[uid]\n yield usr.value() + mov.value() + [[rating]]\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef __reader_creator__(**kwargs):\n return lambda: __reader__(**kwargs)\n\n\ntrain = functools.partial(__reader_creator__, is_test=False)\ntest = functools.partial(__reader_creator__, is_test=True)\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef get_movie_title_dict():\n \"\"\"\n Get movie title dictionary.\n \"\"\"\n __initialize_meta_info__()\n return MOVIE_TITLE_DICT\n\n\ndef __max_index_info__(a, b):\n if a.index > b.index:\n return a\n else:\n return b\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_movie_id():\n \"\"\"\n Get the maximum value of movie id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_index_info__, list(MOVIE_INFO.values())).index\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_user_id():\n \"\"\"\n Get the maximum value of user id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_index_info__, list(USER_INFO.values())).index\n\n\ndef __max_job_id_impl__(a, b):\n if a.job_id > b.job_id:\n return a\n else:\n return b\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_job_id():\n \"\"\"\n Get the maximum value of job id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_job_id_impl__,\n list(USER_INFO.values())).job_id\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef movie_categories():\n \"\"\"\n Get movie categories dictionary.\n \"\"\"\n __initialize_meta_info__()\n return 
CATEGORIES_DICT\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef user_info():\n \"\"\"\n Get user info dictionary.\n \"\"\"\n __initialize_meta_info__()\n return USER_INFO\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef movie_info():\n \"\"\"\n Get movie info dictionary.\n \"\"\"\n __initialize_meta_info__()\n return MOVIE_INFO\n\n\ndef unittest():\n for train_count, _ in enumerate(train()()):\n pass\n for test_count, _ in enumerate(test()()):\n pass\n\n print(train_count, test_count)\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef fetch():\n paddle.dataset.common.download(URL, \"movielens\", MD5)\n\n\nif __name__ == '__main__':\n unittest()\n", "# copyright (c) 2018 paddlepaddle authors. all rights reserved.\n#\n# licensed under the apache license, version 2.0 (the \"license\");\n# you may not use this file except in compliance with the license.\n# you may obtain a copy of the license at\n#\n# http://www.apache.org/licenses/license-2.0\n#\n# unless required by applicable law or agreed to in writing, software\n# distributed under the license is distributed on an \"as is\" basis,\n# without warranties or conditions of any kind, either express or implied.\n# see the license for the specific language governing permissions and\n# limitations under the license.\n\nimport os\nimport unittest\nimport random\nimport numpy as np\nimport paddle.fluid as fluid\nimport six\nimport paddle\nfrom paddle.fluid.framework import IrGraph\nfrom paddle.fluid.contrib.slim.quantization import QuantizationTransformPass\nfrom paddle.fluid.contrib.slim.quantization import QuantizationFreezePass\nfrom paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass\nfrom paddle.fluid.contrib.slim.quantization import TransformForMobilePass\nfrom paddle.fluid.contrib.slim.quantization import AddQuantDequantPass\nfrom paddle.fluid import core\n\npaddle.enable_static()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nos.environ[\"CPU_NUM\"] = \"1\"\n\n\ndef linear_fc(num):\n data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')\n label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n hidden = data\n for _ in six.moves.xrange(num):\n hidden = fluid.layers.fc(hidden, size=128, act='relu')\n loss = fluid.layers.cross_entropy(input=hidden, label=label)\n loss = fluid.layers.mean(loss)\n return loss\n\n\ndef residual_block(num, quant_skip_pattern=None):\n def conv_bn_layer(input,\n ch_out,\n filter_size,\n stride,\n padding,\n act='relu',\n bias_attr=False):\n tmp = fluid.layers.conv2d(\n input=input,\n filter_size=filter_size,\n num_filters=ch_out,\n stride=stride,\n padding=padding,\n act=None,\n bias_attr=bias_attr)\n return fluid.layers.batch_norm(input=tmp, act=act)\n\n data = fluid.layers.data(\n name='image',\n shape=[1, 1, 32, 32],\n dtype='float32',\n append_batch_size=False)\n label = fluid.layers.data(\n name='label', shape=[1, 1], dtype='int64', append_batch_size=False)\n hidden = data\n for _ in six.moves.xrange(num):\n conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)\n short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)\n hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')\n matmul_weight = 
fluid.layers.create_parameter(\n shape=[1, 16, 32, 32], dtype='float32')\n hidden = fluid.layers.matmul(hidden, matmul_weight, True, True)\n if quant_skip_pattern:\n with fluid.name_scope(quant_skip_pattern):\n pool = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='avg', pool_stride=2)\n else:\n pool = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='avg', pool_stride=2)\n fc = fluid.layers.fc(input=pool, size=10)\n loss = fluid.layers.cross_entropy(input=fc, label=label)\n loss = fluid.layers.mean(loss)\n return loss\n\n\ndef conv_net(img, label, quant_skip_pattern):\n conv_pool_1 = fluid.nets.simple_img_conv_pool(\n input=img,\n filter_size=5,\n num_filters=20,\n pool_size=2,\n pool_stride=2,\n pool_type='max',\n act=\"relu\")\n conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)\n conv_pool_2 = fluid.nets.simple_img_conv_pool(\n input=conv_pool_1,\n filter_size=5,\n num_filters=50,\n pool_size=2,\n pool_stride=2,\n pool_type='avg',\n act=\"relu\")\n hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')\n with fluid.name_scope(quant_skip_pattern):\n prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')\n loss = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_loss = fluid.layers.mean(loss)\n return avg_loss\n\n\nclass TestQuantizationTransformPass(unittest.TestCase):\n def setUp(self):\n self.quantizable_op_and_inputs = {\n 'conv2d': ['Input', 'Filter'],\n 'depthwise_conv2d': ['Input', 'Filter'],\n 'mul': ['X', 'Y']\n }\n self.quantizable_grad_op_inputs = {\n 'conv2d_grad': ['Input', 'Filter'],\n 'depthwise_conv2d_grad': ['Input', 'Filter'],\n 'mul_grad': ['X', 'Y']\n }\n\n def check_program(self, program):\n quantized_ops = set()\n for block in program.blocks:\n for op in block.ops:\n # check forward\n if op.type in self.quantizable_op_and_inputs:\n for arg_name in op.input_arg_names:\n self.assertTrue(\n arg_name.endswith('.quantized.dequantized'))\n quantized_ops.add(arg_name)\n\n for op in block.ops:\n # check backward\n if op.type in self.quantizable_grad_op_inputs:\n for pname in self.quantizable_grad_op_inputs[op.type]:\n arg_name = op.input(pname)[0]\n self.assertTrue(\n arg_name.endswith('.quantized.dequantized'))\n self.assertTrue(arg_name in quantized_ops)\n\n def linear_fc_quant(self,\n activation_quant_type,\n weight_quantize_type,\n for_ci=True):\n main = fluid.Program()\n startup = fluid.Program()\n with fluid.program_guard(main, startup):\n loss = linear_fc(3)\n opt = fluid.optimizer.Adam(learning_rate=0.001)\n opt.minimize(loss)\n place = fluid.CPUPlace()\n graph = IrGraph(core.Graph(main.desc), for_test=False)\n transform_pass = QuantizationTransformPass(\n scope=fluid.global_scope(),\n place=place,\n activation_quantize_type=activation_quant_type,\n weight_quantize_type=weight_quantize_type)\n transform_pass.apply(graph)\n if not for_ci:\n marked_nodes = set()\n for op in graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n graph.draw('.', 'quantize_fc_' + activation_quant_type,\n marked_nodes)\n program = graph.to_program()\n self.check_program(program)\n val_graph = IrGraph(core.Graph(program.desc), for_test=False)\n if not for_ci:\n val_marked_nodes = set()\n for op in val_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n val_marked_nodes.add(op)\n val_graph.draw('.', 'val_fc_' + activation_quant_type,\n val_marked_nodes)\n\n def test_linear_fc_quant_abs_max(self):\n self.linear_fc_quant('abs_max', 'abs_max', for_ci=True)\n\n def 
test_linear_fc_quant_range_abs_max(self):\n self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True)\n\n def test_linear_fc_quant_moving_average_abs_max(self):\n self.linear_fc_quant(\n 'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True)\n\n def residual_block_quant(self,\n activation_quant_type,\n weight_quantize_type,\n quantizable_op_type,\n for_ci=True):\n main = fluid.Program()\n startup = fluid.Program()\n with fluid.program_guard(main, startup):\n loss = residual_block(2)\n opt = fluid.optimizer.Adam(learning_rate=0.001)\n opt.minimize(loss)\n place = fluid.CPUPlace()\n graph = IrGraph(core.Graph(main.desc), for_test=False)\n transform_pass = QuantizationTransformPass(\n scope=fluid.global_scope(),\n place=place,\n activation_quantize_type=activation_quant_type,\n weight_quantize_type=weight_quantize_type,\n quantizable_op_type=quantizable_op_type)\n transform_pass.apply(graph)\n if not for_ci:\n marked_nodes = set()\n for op in graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n graph.draw('.', 'quantize_residual_' + activation_quant_type,\n marked_nodes)\n program = graph.to_program()\n self.check_program(program)\n val_graph = IrGraph(core.Graph(program.desc), for_test=False)\n if not for_ci:\n val_marked_nodes = set()\n for op in val_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n val_marked_nodes.add(op)\n val_graph.draw('.', 'val_residual_' + activation_quant_type,\n val_marked_nodes)\n\n def test_residual_block_abs_max(self):\n quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']\n self.residual_block_quant(\n 'abs_max', 'abs_max', quantizable_op_type, for_ci=True)\n\n def test_residual_block_range_abs_max(self):\n quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']\n self.residual_block_quant(\n 'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True)\n\n def test_residual_block_moving_average_abs_max(self):\n quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']\n self.residual_block_quant(\n 'moving_average_abs_max',\n 'channel_wise_abs_max',\n quantizable_op_type,\n for_ci=True)\n\n\nclass TestQuantizationFreezePass(unittest.TestCase):\n def freeze_graph(self,\n use_cuda,\n seed,\n activation_quant_type,\n bias_correction=False,\n weight_quant_type='abs_max',\n for_ci=True,\n quant_skip_pattern='skip_quant'):\n def build_program(main, startup, is_test):\n main.random_seed = seed\n startup.random_seed = seed\n with fluid.unique_name.guard():\n with fluid.program_guard(main, startup):\n img = fluid.layers.data(\n name='image', shape=[1, 28, 28], dtype='float32')\n label = fluid.layers.data(\n name='label', shape=[1], dtype='int64')\n loss = conv_net(img, label, quant_skip_pattern)\n if not is_test:\n opt = fluid.optimizer.Adam(learning_rate=0.001)\n opt.minimize(loss)\n return [img, label], loss\n\n random.seed(0)\n np.random.seed(0)\n\n main = fluid.Program()\n startup = fluid.Program()\n test_program = fluid.Program()\n feeds, loss = build_program(main, startup, False)\n build_program(test_program, startup, True)\n test_program = test_program.clone(for_test=True)\n main_graph = IrGraph(core.Graph(main.desc), for_test=False)\n test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n scope = fluid.Scope()\n with fluid.scope_guard(scope):\n exe.run(startup)\n transform_pass = QuantizationTransformPass(\n scope=scope,\n place=place,\n 
activation_quantize_type=activation_quant_type,\n weight_quantize_type=weight_quant_type,\n skip_pattern=quant_skip_pattern)\n transform_pass.apply(main_graph)\n transform_pass.apply(test_graph)\n dev_name = '_gpu_' if use_cuda else '_cpu_'\n if not for_ci:\n marked_nodes = set()\n for op in main_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n main_graph.draw('.', 'main' + dev_name + activation_quant_type + '_'\n + weight_quant_type, marked_nodes)\n marked_nodes = set()\n for op in test_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n test_graph.draw('.', 'test' + dev_name + activation_quant_type + '_'\n + weight_quant_type, marked_nodes)\n\n build_strategy = fluid.BuildStrategy()\n build_strategy.memory_optimize = False\n build_strategy.enable_inplace = False\n build_strategy.fuse_all_reduce_ops = False\n binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(\n loss_name=loss.name, build_strategy=build_strategy)\n quantized_test_program = test_graph.to_program()\n iters = 5\n batch_size = 8\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.mnist.train(), buf_size=500),\n batch_size=batch_size)\n test_reader = paddle.batch(\n paddle.dataset.mnist.test(), batch_size=batch_size)\n feeder = fluid.DataFeeder(feed_list=feeds, place=place)\n with fluid.scope_guard(scope):\n for _ in range(iters):\n data = next(train_reader())\n loss_v = exe.run(binary,\n feed=feeder.feed(data),\n fetch_list=[loss])\n if not for_ci:\n print('{}: {}'.format('loss' + dev_name +\n activation_quant_type + '_' +\n weight_quant_type, loss_v))\n\n test_data = next(test_reader())\n with fluid.program_guard(quantized_test_program):\n w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',\n quantized_test_program)\n # Testing\n with fluid.scope_guard(scope):\n test_loss1, w_quant = exe.run(program=quantized_test_program,\n feed=feeder.feed(test_data),\n fetch_list=[loss, w_var])\n\n # Freeze graph for inference, but the weight of fc/conv is still float type.\n freeze_pass = QuantizationFreezePass(\n scope=scope, place=place, bias_correction=bias_correction, \\\n weight_quantize_type=weight_quant_type)\n freeze_pass.apply(test_graph)\n if not for_ci:\n marked_nodes = set()\n for op in test_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n test_graph.draw('.', 'test_freeze' + dev_name +\n activation_quant_type + '_' + weight_quant_type,\n marked_nodes)\n\n server_program = test_graph.to_program()\n with fluid.scope_guard(scope):\n test_loss2, = exe.run(program=server_program,\n feed=feeder.feed(test_data),\n fetch_list=[loss])\n self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)\n if not for_ci:\n print(\n '{}: {}'.format('test_loss1' + dev_name + activation_quant_type\n + '_' + weight_quant_type, test_loss1))\n print(\n '{}: {}'.format('test_loss2' + dev_name + activation_quant_type\n + '_' + weight_quant_type, test_loss2))\n w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor())\n # Maybe failed, this is due to the calculation precision\n # self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))\n if not for_ci:\n print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type\n + '_' + weight_quant_type, np.sum(w_freeze)))\n print('{}: {}'.format('w_quant' + dev_name + activation_quant_type +\n '_' + weight_quant_type, np.sum(w_quant)))\n\n # Convert parameter to 8-bit.\n convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)\n 
convert_int8_pass.apply(test_graph)\n if not for_ci:\n marked_nodes = set()\n for op in test_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n test_graph.draw('.', 'test_int8' + dev_name + activation_quant_type\n + '_' + weight_quant_type, marked_nodes)\n server_program_int8 = test_graph.to_program()\n # Save the 8-bit parameter and model file.\n with fluid.scope_guard(scope):\n fluid.io.save_inference_model(\n 'server_int8' + dev_name + activation_quant_type + '_' +\n weight_quant_type, ['image', 'label'], [loss], exe,\n server_program_int8)\n # Test whether the 8-bit parameter and model file can be loaded successfully.\n [infer, feed, fetch] = fluid.io.load_inference_model(\n 'server_int8' + dev_name + activation_quant_type + '_' +\n weight_quant_type, exe)\n # Check the loaded 8-bit weight.\n w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor())\n self.assertEqual(w_8bit.dtype, np.int8)\n self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))\n if not for_ci:\n print('{}: {}'.format('w_8bit' + dev_name + activation_quant_type +\n '_' + weight_quant_type, np.sum(w_8bit)))\n print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type\n + '_' + weight_quant_type, np.sum(w_freeze)))\n\n mobile_pass = TransformForMobilePass()\n mobile_pass.apply(test_graph)\n if not for_ci:\n marked_nodes = set()\n for op in test_graph.all_op_nodes():\n if op.name().find('quantize') > -1:\n marked_nodes.add(op)\n test_graph.draw('.', 'test_mobile' + dev_name +\n activation_quant_type + '_' + weight_quant_type,\n marked_nodes)\n\n mobile_program = test_graph.to_program()\n with fluid.scope_guard(scope):\n fluid.io.save_inference_model(\n 'mobile_int8' + dev_name + activation_quant_type + '_' +\n weight_quant_type, ['image', 'label'], [loss], exe,\n mobile_program)\n\n def test_freeze_graph_cuda_dynamic(self):\n if fluid.core.is_compiled_with_cuda():\n with fluid.unique_name.guard():\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n with fluid.unique_name.guard():\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n\n def test_freeze_graph_cpu_dynamic(self):\n with fluid.unique_name.guard():\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n\n def test_freeze_graph_cuda_static(self):\n if fluid.core.is_compiled_with_cuda():\n with fluid.unique_name.guard():\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='range_abs_max',\n bias_correction=True,\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='range_abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='moving_average_abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='range_abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='moving_average_abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n self.freeze_graph(\n True,\n seed=1,\n activation_quant_type='moving_average_abs_max',\n bias_correction=True,\n weight_quant_type='channel_wise_abs_max',\n 
for_ci=True)\n\n def test_freeze_graph_cpu_static(self):\n with fluid.unique_name.guard():\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='range_abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='moving_average_abs_max',\n weight_quant_type='abs_max',\n for_ci=True)\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='range_abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n self.freeze_graph(\n False,\n seed=2,\n activation_quant_type='moving_average_abs_max',\n weight_quant_type='channel_wise_abs_max',\n for_ci=True)\n\n\ndef quant_dequant_residual_block(num, quant_skip_pattern=None):\n def conv_bn_layer(input,\n ch_out,\n filter_size,\n stride,\n padding,\n act='relu',\n bias_attr=False):\n tmp = fluid.layers.conv2d(\n input=input,\n filter_size=filter_size,\n num_filters=ch_out,\n stride=stride,\n padding=padding,\n act=None,\n bias_attr=bias_attr)\n return fluid.layers.batch_norm(input=tmp, act=act)\n\n data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')\n data2 = fluid.layers.data(\n name='matmul_input', shape=[16, 32, 32], dtype='float32')\n label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n hidden = data1\n for _ in six.moves.xrange(num):\n conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)\n short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)\n hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')\n hidden = fluid.layers.matmul(hidden, data2, True, True)\n if isinstance(quant_skip_pattern, str):\n with fluid.name_scope(quant_skip_pattern):\n pool1 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='avg', pool_stride=2)\n pool2 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='max', pool_stride=2)\n pool_add = fluid.layers.elementwise_add(\n x=pool1, y=pool2, act='relu')\n elif isinstance(quant_skip_pattern, list):\n assert len(\n quant_skip_pattern\n ) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.'\n with fluid.name_scope(quant_skip_pattern[0]):\n pool1 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='avg', pool_stride=2)\n pool2 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='max', pool_stride=2)\n with fluid.name_scope(quant_skip_pattern[1]):\n pool_add = fluid.layers.elementwise_add(\n x=pool1, y=pool2, act='relu')\n else:\n pool1 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='avg', pool_stride=2)\n pool2 = fluid.layers.pool2d(\n input=hidden, pool_size=2, pool_type='max', pool_stride=2)\n pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')\n fc = fluid.layers.fc(input=pool_add, size=10)\n loss = fluid.layers.cross_entropy(input=fc, label=label)\n loss = fluid.layers.mean(loss)\n return loss\n\n\nclass TestAddQuantDequantPass(unittest.TestCase):\n def setUp(self):\n self._target_ops = {'elementwise_add', 'pool2d'}\n self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'}\n\n def check_graph(self, graph, skip_pattern=None):\n ops = graph.all_op_nodes()\n for op_node in ops:\n if op_node.name() in self._target_ops:\n user_skipped = False\n if isinstance(skip_pattern, list):\n user_skipped = op_node.op().has_attr(\"op_namescope\") and \\\n any(pattern in op_node.op().attr(\"op_namescope\") for pattern in skip_pattern)\n elif isinstance(skip_pattern, str):\n user_skipped = op_node.op().has_attr(\"op_namescope\") and \\\n 
op_node.op().attr(\"op_namescope\").find(skip_pattern) != -1\n\n if user_skipped:\n continue\n\n in_nodes_all_not_persistable = True\n for input_name in op_node.input_arg_names():\n in_node = graph._find_node_by_name(op_node.inputs,\n input_name)\n in_nodes_all_not_persistable = (\n in_nodes_all_not_persistable and\n not in_node.persistable())\n if not in_nodes_all_not_persistable:\n continue\n input_names = op_node.input_arg_names()\n for input_name in input_names:\n self.assertTrue(input_name.endswith('.quant_dequant'))\n\n def residual_block_quant(self,\n quantizable_op_type,\n skip_pattern=None,\n for_ci=True):\n main = fluid.Program()\n startup = fluid.Program()\n with fluid.program_guard(main, startup):\n loss = quant_dequant_residual_block(2, skip_pattern)\n opt = fluid.optimizer.Adam(learning_rate=0.001)\n opt.minimize(loss)\n place = fluid.CPUPlace()\n graph = IrGraph(core.Graph(main.desc), for_test=False)\n add_quant_dequant_pass = AddQuantDequantPass(\n scope=fluid.global_scope(),\n place=place,\n skip_pattern=skip_pattern,\n quantizable_op_type=quantizable_op_type)\n add_quant_dequant_pass.apply(graph)\n if not for_ci:\n marked_nodes = set()\n for op in graph.all_op_nodes():\n if op.name().find('quant') > -1:\n marked_nodes.add(op)\n graph.draw('.', 'add_quant_dequant_graph', marked_nodes)\n self.check_graph(graph, skip_pattern)\n program = graph.to_program()\n val_graph = IrGraph(core.Graph(program.desc), for_test=False)\n if not for_ci:\n val_marked_nodes = set()\n for op in val_graph.all_op_nodes():\n if op.name().find('quant') > -1:\n val_marked_nodes.add(op)\n val_graph.draw('.', 'val_add_quant_dequant_graph', val_marked_nodes)\n\n def test_residual_block(self):\n quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']\n self.residual_block_quant(\n quantizable_op_type, skip_pattern=None, for_ci=True)\n\n def test_residual_block_skip_pattern(self):\n quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']\n self.residual_block_quant(\n quantizable_op_type, skip_pattern='skip_quant', for_ci=True)\n\n def test_residual_block_skip_pattern(self):\n quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']\n self.residual_block_quant(\n quantizable_op_type,\n skip_pattern=['skip_quant1', 'skip_quant2'],\n for_ci=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\nfrom ..... import compat as cpt\nfrom .... import core\nfrom ....framework import IrGraph\nfrom ....framework import IrNode\nfrom ....framework import Operator\nfrom .... 
import unique_name\n\nfrom ....framework import Program, program_guard, default_startup_program\nfrom ....data import data\nfrom ....layers import mean\nfrom ....executor import scope_guard\nfrom ....framework import _get_paddle_place\n\n__all__ = [\n 'QuantizationTransformPass', 'QuantizationFreezePass', 'ConvertToInt8Pass',\n 'TransformForMobilePass', 'OutScaleForTrainingPass',\n 'OutScaleForInferencePass', 'AddQuantDequantPass'\n]\n\n_fake_quant_op_list = [\n 'fake_quantize_abs_max', 'fake_quantize_range_abs_max',\n 'fake_quantize_moving_average_abs_max', 'fake_channel_wise_quantize_abs_max'\n]\n\n_fake_dequant_op_list = [\n 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs'\n]\n\n_fake_quant_dequant_op_list = [\n 'fake_quantize_dequantize_moving_average_abs_max'\n]\n\n_out_scale_op_list = [\n \"conv2d\",\n \"depthwise_conv2d\",\n \"mul\",\n \"matmul\",\n \"relu\",\n \"leaky_relu\",\n \"relu6\",\n \"sigmoid\",\n \"tanh\",\n \"prelu\",\n \"swish\",\n \"softmax\",\n \"batch_norm\",\n \"elementwise_add\",\n \"pool2d\",\n \"reshape2\",\n \"transpose2\",\n \"concat\",\n \"elementwise_mul\",\n \"scale\",\n \"hard_swish\",\n \"hard_sigmoid\",\n \"conv2d_transpose\",\n \"gru\",\n \"bilinear_interp\",\n \"nearest_interp\",\n \"trilinear_interp\",\n \"flatten\",\n \"flatten2\",\n \"transpose\",\n \"pad2d\",\n \"reshape\",\n]\n\n# list op real input and output names, to avoid processing input such as AxisTensor.\n_op_real_in_out_name = {\n \"conv2d\": [[\"Input\", \"Filter\"], [\"Output\"]],\n \"depthwise_conv2d\": [[\"Input\", \"Filter\"], [\"Output\"]],\n \"conv2d_transpose\": [[\"Input\", \"Filter\"], [\"Output\"]],\n \"mul\": [[\"X\", \"Y\"], [\"Out\"]],\n \"matmul\": [[\"X\", \"Y\"], [\"Out\"]],\n \"pool2d\": [[\"X\"], [\"Out\"]],\n \"elementwise_add\": [[\"X\", \"Y\"], [\"Out\"]],\n \"concat\": [[\"X\"], [\"Out\"]],\n \"softmax\": [[\"X\"], [\"Out\"]],\n \"argmax\": [[\"X\"], [\"Out\"]],\n \"transpose\": [[\"X\"], [\"Out\"]],\n \"equal\": [[\"X\", \"Y\"], [\"Out\"]],\n \"gather\": [[\"X\"], [\"Out\"]],\n \"greater_equal\": [[\"X\", \"Y\"], [\"Out\"]],\n \"greater_than\": [[\"X\", \"Y\"], [\"Out\"]],\n \"less_equal\": [[\"X\", \"Y\"], [\"Out\"]],\n \"less_than\": [[\"X\", \"Y\"], [\"Out\"]],\n \"mean\": [[\"X\"], [\"Out\"]],\n \"not_equal\": [[\"X\", \"Y\"], [\"Out\"]],\n \"reshape\": [[\"X\"], [\"Out\"]],\n \"reshape2\": [[\"X\"], [\"Out\"]],\n \"transpose2\": [[\"X\"], [\"Out\"]],\n \"bilinear_interp\": [[\"X\"], [\"Out\"]],\n \"nearest_interp\": [[\"X\"], [\"Out\"]],\n \"trilinear_interp\": [[\"X\"], [\"Out\"]],\n \"slice\": [[\"Input\"], [\"Out\"]],\n \"squeeze\": [[\"X\"], [\"Out\"]],\n \"elementwise_sub\": [[\"X\", \"Y\"], [\"Out\"]],\n \"relu\": [[\"X\"], [\"Out\"]],\n \"relu6\": [[\"X\"], [\"Out\"]],\n \"leaky_relu\": [[\"X\"], [\"Out\"]],\n \"prelu\": [[\"X\"], [\"Out\"]],\n \"tanh\": [[\"X\"], [\"Out\"]],\n \"swish\": [[\"X\"], [\"Out\"]],\n \"dropout\": [[\"X\"], [\"Out\"]],\n \"batch_norm\": [[\"X\"], [\"Y\"]],\n \"sigmoid\": [[\"X\"], [\"Out\"]],\n \"elementwise_mul\": [[\"X\", \"Y\"], [\"Out\"]],\n \"scale\": [[\"X\"], [\"Out\"]],\n \"hard_swish\": [[\"X\"], [\"Out\"]],\n \"hard_sigmoid\": [[\"X\"], [\"Out\"]],\n \"gru\": [[\"Input\", \"Weight\"], [\"Hidden\"]],\n \"lstm\": [[\"Input\", \"Weight\"], [\"Hidden\"]],\n \"pad2d\": [[\"X\"], [\"Out\"]],\n \"flatten\": [[\"X\"], [\"Out\"]],\n \"flatten2\": [[\"X\"], [\"Out\"]],\n}\n\n_conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose']\n\n_channelwise_quant_axis1_ops = ['conv2d_transpose', 'mul']\n\n\ndef 
_get_op_input_var_names(op):\n \"\"\" \"\"\"\n assert isinstance(op, (IrNode, Operator)), \\\n \"The input op should be IrNode or Operator.\"\n var_names = []\n op_name = op.name() if isinstance(op, IrNode) \\\n else op.type\n name_list = _op_real_in_out_name[op_name][0]\n for name in name_list:\n var_name = op.input(name)\n if isinstance(var_name, list):\n var_names.extend(var_name)\n else:\n var_names.append(var_name)\n return var_names\n\n\ndef _get_input_name_index(op, input_var_name):\n \"\"\"Get the input name and index of the var_name in the op\"\"\"\n assert isinstance(op, (IrNode, Operator)), \\\n \"The input op should be IrNode or Operator.\"\n op_name = op.name() if isinstance(op, IrNode) \\\n else op.type\n res = None\n for argname in _op_real_in_out_name[op_name][0]:\n var_names = op.input(argname)\n for index, name in enumerate(var_names):\n if name == input_var_name:\n res = (argname, index)\n return res\n\n\ndef _get_op_output_var_names(op):\n \"\"\" \"\"\"\n assert isinstance(op, (IrNode, Operator)), \\\n \"The input op should be IrNode or Operator.\"\n var_names = []\n op_name = op.name() if isinstance(op, IrNode) \\\n else op.type\n name_list = _op_real_in_out_name[op_name][1]\n for name in name_list:\n var_name = op.output(name)\n if isinstance(var_name, list):\n var_names.extend(var_name)\n else:\n var_names.append(var_name)\n return var_names\n\n\ndef _get_output_name_index(op, output_var_name):\n \"\"\"Get the output name and index of the var_name in the op\"\"\"\n assert isinstance(op, (IrNode, Operator)), \\\n \"The input op should be IrNode or Operator.\"\n op_name = op.name() if isinstance(op, IrNode) \\\n else op.type\n name_list = _op_real_in_out_name[op_name][1]\n res = None\n for name in name_list:\n var_name = op.output(name)\n for index, val in enumerate(var_name):\n if val == output_var_name:\n res = (name, index)\n return res\n\n\ndef _init_var_node(var_node, value, scope, place):\n assert isinstance(value,\n np.ndarray), 'The type of value should be numpy array.'\n assert scope is not None, \\\n 'The scope cannot be set None.'\n assert place is not None, \\\n 'The place cannot be set None.'\n tensor = scope.var(var_node.name()).get_tensor()\n tensor.set(value, place)\n\n\ndef _is_input_all_not_persistable(graph, op_node):\n '''\n Analyse the real inputs of the op node are all not persistable.\n '''\n is_input_all_not_persistable = True\n for var_name in _get_op_input_var_names(op_node):\n in_node = graph._find_node_by_name(op_node.inputs, var_name)\n is_input_all_not_persistable = (is_input_all_not_persistable and \\\n (not in_node.persistable()))\n return is_input_all_not_persistable\n\n\ndef _check_grandchild_op_node(op_node, grandchild_op_name):\n '''\n Check whether the fake_quant node has a grandchild op node named\n grandchild_op_name.\n '''\n for out1_var_node in op_node.outputs:\n for out1_op_node in out1_var_node.outputs:\n for out2_var_node in out1_op_node.outputs:\n for out2_op_node in out2_var_node.outputs:\n if out2_op_node.name() == grandchild_op_name:\n return True\n return False\n\n\nclass QuantizationTransformPass(object):\n \"\"\"\n Quantize the ops that have weights. 
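# Illustrative sketch, not Paddle code: how a real-input/output name table like
# _op_real_in_out_name above is consumed by helpers such as
# _get_op_input_var_names. The ToyOp class and the sample table entries are
# hypothetical stand-ins for IrNode/Operator, used only to show the lookup idea.
_toy_real_in_out_name = {
    "conv2d": [["Input", "Filter"], ["Output"]],
    "pool2d": [["X"], ["Out"]],
}

class ToyOp(object):
    def __init__(self, op_type, inputs, outputs):
        self.type = op_type
        self._inputs = inputs    # e.g. {"Input": ["image"], "Filter": ["conv_w"]}
        self._outputs = outputs  # e.g. {"Output": ["conv_out"]}

    def input(self, name):
        return self._inputs[name]

    def output(self, name):
        return self._outputs[name]

def toy_get_op_input_var_names(op):
    # Only argument names listed in the table count as "real" inputs, so helper
    # tensors such as AxisTensor would not be collected.
    var_names = []
    for name in _toy_real_in_out_name[op.type][0]:
        var_names.extend(op.input(name))
    return var_names

conv = ToyOp("conv2d",
             {"Input": ["image"], "Filter": ["conv_w"]},
             {"Output": ["conv_out"]})
assert toy_get_op_input_var_names(conv) == ["image", "conv_w"]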
Add quant and dequant ops for\n the quantized ops's inputs.\n \"\"\"\n _supported_quantizable_op_type = [\n 'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul'\n ]\n\n def __init__(self,\n scope=None,\n place=None,\n weight_bits=8,\n activation_bits=8,\n activation_quantize_type='abs_max',\n weight_quantize_type='abs_max',\n window_size=10000,\n moving_rate=0.9,\n skip_pattern=['skip_quant'],\n quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'],\n weight_quantize_func=None,\n act_quantize_func=None,\n weight_preprocess_func=None,\n act_preprocess_func=None,\n optimizer_func=None,\n executor=None):\n r\"\"\"\n Constructor.\n\n Args:\n scope(fluid.Scope): When activation use 'range_abs_max' as the quantize\n type, this pass will create some new parameters. The scope is used to\n initialize these new parameters.\n place(fluid.CPUPlace|fluid.CUDAPlace|str): place is used to initialize new\n parameters described above. If it's string, It can be ``cpu``, and ``gpu:x``,\n where ``x`` is the index of the GPUs. \n weight_bits(int): quantization bit number for weights,\n the bias is not quantized.\n activation_bits(int): quantization bit number for activation.\n activation_quantize_type(str): quantization type for activation,\n now support 'abs_max', 'range_abs_max' and 'moving_average_abs_max'.\n If use 'abs_max' mode, the quantization scale will be calculated\n dynamically each step in both training and testing period. If use\n 'range_abs_max', a static quantization scale will be calculated\n during training and used in inference.\n weight_quantize_type(str): quantization type for weights,\n support 'abs_max' and 'channel_wise_abs_max'. The 'range_abs_max'\n usually is not used for weight, since weights are fixed once the\n model is well trained.\n window_size(int): the window size for 'range_abs_max' quantization.\n moving_rate(float): the param for 'moving_average_abs_max' quantization.\n skip_pattern(str or str list): The user-defined quantization skip pattern, which\n will be presented in the name scope of an op. When the skip pattern is\n detected in an op's name scope, the corresponding op will not be quantized. \n quantizable_op_type(list[str]): List the type of ops that will be quantized. \n Default is [\"conv2d\", \"depthwise_conv2d\", \"mul\"]. The quantizable_op_type in\n QuantizationFreezePass and ConvertToInt8Pass must be the same as this.\n weight_quantize_func(function): Function that defines how to quantize weight.\n Using this can quickly test if user's quantization method works or not.\n In this function, user should both define quantization function and\n dequantization function, that is, the function's input is non-quantized\n weight and function returns dequantized weight. If None, will use\n quantization op defined by 'weight_quantize_type'. Default is None.\n act_quantize_func(function): Function that defines how to quantize activation.\n Using this can quickly test if user's quantization method works or not.\n In this function, user should both define quantization and dequantization\n process, that is, the function's input is non-quantized activation and\n function returns dequantized activation. If None, will use quantization\n op defined by 'activation_quantize_type'. Default is None.\n weight_preprocess_func(function): Function that defines how to preprocess\n weight before quantization. Using this can quickly test if user's preprocess\n method works or not. 
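# Illustrative sketch only: the kind of transformation a user-supplied
# weight_quantize_func is expected to express. Per the docstring above, such a
# function receives the non-quantized weight and returns the dequantized weight,
# i.e. it fake-quantizes and dequantizes in one step. A real function passed to
# the pass would be built from Paddle ops; this numpy version, with assumed
# 8-bit per-tensor abs_max settings, only shows the math.
import numpy as np

def toy_fake_quant_dequant_weight(w, bits=8):
    bnt = (1 << (bits - 1)) - 1          # 127 for 8 bits
    scale = np.max(np.abs(w))            # abs_max scale
    q = np.round(w / scale * bnt)        # quantize onto the integer grid
    return q * scale / bnt               # dequantize back to float

w = np.random.uniform(-1, 1, (4, 4)).astype('float32')
w_qdq = toy_fake_quant_dequant_weight(w)
assert np.max(np.abs(w - w_qdq)) <= np.max(np.abs(w)) / 127 + 1e-6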
The function's input is non-quantized weight and\n function returns processed weight to be quantized. If None, the weight will\n be quantized directly. Default is None.\n act_preprocess_func(function): Function that defines how to preprocess\n activation before quantization. Using this can quickly test if user's\n preprocess method works or not. The function's input is non-quantized\n activation and function returns processed activation to be quantized.\n If None, the activation will be quantized directly. Default is None.\n optimizer_func(function): Fuction return a optimizer. When 'is_test' is\n False and user want to use self-defined quantization function and\n preprocess function, this function must be set. Default is None.\n executor(Fluid.Executor): If user want to use self-defined quantization\n function and preprocess function, executor must be set for initialization.\n Default is None.\n\n\n Examples:\n .. code-block:: python\n # The original graph will be rewrite.\n import paddle.fluid as fluid\n from paddle.fluid.contrib.slim.quantization \\\n import QuantizationTransformPass\n from paddle.fluid.contrib.slim.graph import IrGraph\n from paddle.fluid import core\n\n graph = IrGraph(core.Graph(program.desc), for_test=False)\n place = fluid.CPUPlace()\n transform_pass = QuantizationTransformPass(fluid.global_scope(),\n place)\n transform_pass.apply(graph)\n \"\"\"\n self._scope = scope\n self._place = _get_paddle_place(place)\n self._weight_bits = weight_bits\n self._activation_bits = activation_bits\n self._skip_pattern = skip_pattern\n self._weight_quantize_func = weight_quantize_func\n self._act_quantize_func = act_quantize_func\n self._weight_preprocess_func = weight_preprocess_func\n self._act_preprocess_func = act_preprocess_func\n self._optimizer = optimizer_func\n self._exe = executor\n quant_type = [\n 'abs_max', 'channel_wise_abs_max', 'range_abs_max',\n 'moving_average_abs_max'\n ]\n assert activation_quantize_type != 'channel_wise_abs_max', \\\n \"The activation quantization type does not support 'channel_wise_abs_max'.\"\n if activation_quantize_type not in quant_type:\n raise ValueError(\n \"Unknown activation_quantize_type : '%s'. It can only be \"\n \"'abs_max' or 'range_abs_max' or 'moving_average_abs_max'.\" %\n (str(activation_quantize_type)))\n if weight_quantize_type not in quant_type:\n raise ValueError(\n \"Unknown weight_quantize_type: '%s'. It can only be \"\n \"'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' \"\n \"or 'moving_average_abs_max'.\" % (str(weight_quantize_type)))\n\n self._activation_quantize_type = activation_quantize_type\n self._weight_quantize_type = weight_quantize_type\n self._window_size = window_size\n self._moving_rate = moving_rate\n\n self._quantizable_ops = quantizable_op_type\n for op in self._quantizable_ops:\n assert op in QuantizationTransformPass._supported_quantizable_op_type, \\\n op + \" is not supported for quantization.\"\n self._quantizable_grad_ops = [\n '%s_grad' % (op) for op in self._quantizable_ops\n ]\n self._is_test = None\n self._global_step = None\n\n self.create_var_map = {}\n self.create_op_map = {}\n\n def apply(self, graph):\n \"\"\"\n Quantize the graph for training process. 
According to weight and\n activation quantization type, the graph will be added some fake\n quantize operators and fake dequantize operators.\n\n Args:\n graph(IrGraph): the applied graph.\n Returns:\n None\n \"\"\"\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n self._is_test = graph.is_test()\n # marked the variable which has been dequantized.\n dequantized_vars = collections.OrderedDict()\n persistable_vars = [p.name() for p in graph.all_persistable_nodes()]\n processed_vars = []\n\n def _quant_preprocess(op_node):\n user_skipped = False\n if isinstance(self._skip_pattern, list):\n user_skipped = op_node.op().has_attr(\"op_namescope\") and \\\n any(pattern in op_node.op().attr(\"op_namescope\") \\\n for pattern in self._skip_pattern)\n elif isinstance(self._skip_pattern, str):\n user_skipped = op_node.op().has_attr(\"op_namescope\") and \\\n op_node.op().attr(\"op_namescope\").find(\n self._skip_pattern) != -1\n\n if user_skipped:\n op_node.op()._set_attr(\"skip_quant\", True)\n\n def _transform_forward(graph, op):\n op.op()._set_attr(\"quantization_type\", \"qat_with_weight\")\n inputs = op.inputs\n for var_node in inputs:\n if var_node.name() not in op.input_arg_names():\n continue\n if var_node.name() in dequantized_vars:\n dequant_var_node = dequantized_vars[var_node.name()]\n else:\n name = var_node.name()\n if name in processed_vars:\n continue\n is_weight = True if var_node.name() in persistable_vars \\\n else False\n\n # if var node is weight and weight_preprocess_func is not None,\n # will insert weight preprocess func \n # to preorocess weight before quantization\n # if var node is activation and act_preprocess_func is not None, \n # will insert activation preprocess func \n # to preorocess activation before quantization\n if is_weight and self._weight_preprocess_func is not None:\n var_node = self._insert_func(\n graph, self._weight_preprocess_func, var_node, op)\n elif not is_weight and self._act_preprocess_func is not None:\n var_node = self._insert_func(\n graph, self._act_preprocess_func, var_node, op)\n\n # if var node is weight and weight_quantize_func is not None,\n # will insert weight quantize func to quantize and dequantize weight\n # if var node is activation and act_quantize_func is not None,\n # will insert act quantize func to quantize and dequantize activation\n if is_weight and self._weight_quantize_func is not None:\n target_out_node = self._insert_func(\n graph, self._weight_quantize_func, var_node, op)\n processed_vars.append(name)\n continue\n elif not is_weight and self._act_quantize_func is not None:\n target_out_node = self._insert_func(\n graph, self._act_quantize_func, var_node, op)\n processed_vars.append(name)\n continue\n\n quant_bits = self._weight_bits if var_node.name() in persistable_vars \\\n else self._activation_bits\n quant_type = self._weight_quantize_type if is_weight \\\n else self._activation_quantize_type\n if quant_type == 'channel_wise_abs_max': # Weight quantization\n quant_axis = 1 if op.name() in \\\n _channelwise_quant_axis1_ops else 0\n quant_var_node, scale_var_node = self._insert_channel_quant_op(\n graph, var_node, name, quant_bits, quant_axis)\n dequant_var_node = self._insert_channel_dequant_op(\n graph, quant_var_node, [scale_var_node],\n [quant_bits], quant_axis)\n else:\n quant_var_node, scale_var_node = self._insert_quant_op(\n graph, var_node, name, quant_bits, quant_type)\n dequant_var_node = self._insert_dequant_op(\n graph, quant_var_node, scale_var_node, quant_bits)\n 
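# Illustrative sketch, not Paddle code: the skip_pattern test performed by
# _quant_preprocess above, extracted as a plain function over the op_namescope
# string so the str versus list-of-str behaviour is easy to see.
def toy_is_user_skipped(op_namescope, skip_pattern):
    if isinstance(skip_pattern, list):
        return any(pattern in op_namescope for pattern in skip_pattern)
    if isinstance(skip_pattern, str):
        return op_namescope.find(skip_pattern) != -1
    return False

assert toy_is_user_skipped("/block_0/skip_quant/conv2d", "skip_quant")
assert toy_is_user_skipped("/skip_quant2/pool2d", ["skip_quant1", "skip_quant2"])
assert not toy_is_user_skipped("/block_0/conv2d", "skip_quant")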
dequantized_vars[name] = dequant_var_node\n graph.update_input_link(var_node, dequant_var_node, op)\n\n def _transform_backward(graph, op):\n for var_node in op.inputs:\n if var_node.name() not in op.input_arg_names():\n continue\n if var_node.name() in dequantized_vars:\n dequant_var_node = dequantized_vars[var_node.name()]\n graph.update_input_link(var_node, dequant_var_node, op)\n\n if not self._is_test:\n self._create_global_step(graph)\n ops = graph.all_op_nodes()\n # Do the preproccess of quantization, such as skipping some ops\n # for not being quantized.\n for op in ops:\n if op.name() in self._quantizable_ops or \\\n op.name() in self._quantizable_grad_ops:\n _quant_preprocess(op)\n # Insert mapping table to solve the problem in saving inference model.\n graph.out_node_mapping_table = dict()\n # The process of _transform_forward and _transform_backward is needed in two for loops.\n # The loop for transforming the forward graph:\n for op in ops:\n if op.name() in self._quantizable_ops:\n if not self._is_skip_quant(graph, op):\n _transform_forward(graph, op)\n # The loop for renaming the inputs of backward op.\n for op in ops:\n if op.name() in self._quantizable_grad_ops:\n _transform_backward(graph, op)\n graph.resolve_hazard()\n return graph\n\n def _create_global_step(self, graph):\n if self._weight_quantize_type == 'range_abs_max' or \\\n self._activation_quantize_type == 'range_abs_max':\n counter_name = cpt.to_text('@STEP_COUNTER@')\n for node in graph.all_var_nodes():\n if node.name() == counter_name:\n self._global_step = node\n if self._global_step is None:\n global_step_in = graph.create_persistable_node(\n name=counter_name,\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[1],\n var_dtype=core.VarDesc.VarType.INT64)\n _init_var_node(\n global_step_in,\n np.zeros(\n [1], dtype='int64'),\n self._scope,\n self._place)\n global_step_out = graph.create_var_node_from_desc(\n global_step_in.var())\n # The attribute of `op_role` is needed by ParallelExecutor.\n increment_op = graph.create_op_node(\n op_type='increment',\n attrs={\n 'step': 1.0,\n 'op_role':\n core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': global_step_in},\n outputs={'Out': global_step_out})\n graph.link_to(global_step_in, increment_op)\n graph.link_to(increment_op, global_step_out)\n self._global_step = global_step_out\n\n def _insert_quant_op(self, graph, var_node, name, quant_bits, quant_type):\n \"\"\"\n Insert fake_quantize_op in the graph.\n \"\"\"\n if quant_type == 'abs_max':\n return self._insert_quant_abs_max_op(graph, var_node, name,\n quant_bits)\n elif quant_type == 'range_abs_max':\n return self._insert_quant_range_abs_max_op(graph, var_node, name,\n quant_bits)\n elif quant_type == 'moving_average_abs_max':\n return self._insert_quant_moving_average_abs_max_op(\n graph, var_node, name, quant_bits)\n\n def _insert_quant_abs_max_op(self, graph, var_node, name, quant_bits):\n \"\"\"\n Insert fake_quantize_abs_max op in the graph.\n \"\"\"\n assert var_node.is_var(), '{} is not a var'.format(var_node.name())\n\n quant_var_node = graph.create_var_node(\n name=self._quantized_var_name(name),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n scale_var_node = graph.create_persistable_node(\n name=self._quantized_scale_name(name),\n var_type=var_node.type(),\n shape=[1],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_var_node,\n np.zeros(\n 
scale_var_node.shape(), dtype=data_type),\n self._scope,\n self._place)\n quant_op_node = graph.create_op_node(\n op_type='fake_quantize_abs_max',\n attrs={\n 'bit_length': quant_bits,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': var_node},\n outputs={'Out': quant_var_node,\n 'OutScale': scale_var_node})\n graph.link_to(var_node, quant_op_node)\n graph.link_to(quant_op_node, quant_var_node)\n graph.link_to(quant_op_node, scale_var_node)\n return quant_var_node, scale_var_node\n\n def _insert_quant_range_abs_max_op(self, graph, var_node, name, quant_bits):\n \"\"\"\n Insert fake_quantize_range_abs_max on the graph.\n \"\"\"\n assert var_node.is_var(), '{} is not a var'.format(var_node.name())\n\n quant_var_node = graph.create_var_node(\n name=self._quantized_var_name(name),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n\n scale_in_node = graph.create_persistable_node(\n name=self._quantized_scale_name(name),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[1],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_in_node,\n np.array(\n [0.001], dtype=data_type),\n self._scope,\n self._place)\n\n scale_out_node = graph.create_var_node_from_desc(scale_in_node.var())\n inputs = {'X': var_node, 'InScale': scale_in_node}\n outputs = {'Out': quant_var_node, 'OutScale': scale_out_node}\n\n if not self._is_test:\n # The name of scales_var_node maybe 'scales_0', 'scales_1', etc.\n scales_node = graph.create_persistable_node(\n name=unique_name.generate('scales'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[self._window_size],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scales_node,\n np.zeros(\n [self._window_size], dtype=data_type),\n self._scope,\n self._place)\n\n inputs['Iter'] = self._global_step\n outputs['OutScales'] = scales_node\n attrs = {\n 'window_size': self._window_size,\n 'bit_length': quant_bits,\n 'is_test': self._is_test,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n }\n quant_op_node = graph.create_op_node(\n op_type='fake_quantize_range_abs_max',\n attrs=attrs,\n inputs=inputs,\n outputs=outputs)\n\n graph.link_to(var_node, quant_op_node)\n graph.link_to(scale_in_node, quant_op_node)\n graph.link_to(quant_op_node, quant_var_node)\n graph.link_to(quant_op_node, scale_out_node)\n\n if not self._is_test:\n graph.link_to(self._global_step, quant_op_node)\n graph.link_to(quant_op_node, scales_node)\n\n return quant_var_node, scale_out_node\n\n def _insert_quant_moving_average_abs_max_op(self, graph, var_node, name,\n quant_bits):\n \"\"\"Insert fake_quantize_moving_average_abs_max\n \"\"\"\n quant_var_node = graph.create_var_node(\n name=self._quantized_var_name(name),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n scale_in_node = graph.create_persistable_node(\n name=self._quantized_scale_name(name),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[1],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_in_node,\n np.array(\n [0.001], dtype=data_type),\n self._scope,\n self._place)\n\n scale_out_node = graph.create_var_node_from_desc(scale_in_node.var())\n ins = {'X': var_node, 'InScale': scale_in_node}\n outs = {'Out': quant_var_node, 'OutScale': 
scale_out_node}\n if not self._is_test:\n state_in_node = graph.create_persistable_node(\n name=unique_name.generate('state'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=var_node.dtype(),\n shape=[1])\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n state_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n accum_in_node = graph.create_persistable_node(\n name=unique_name.generate('accum'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=var_node.dtype(),\n shape=[1])\n _init_var_node(\n accum_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n state_out_node = graph.create_var_node_from_desc(state_in_node.var(\n ))\n accum_out_node = graph.create_var_node_from_desc(accum_in_node.var(\n ))\n\n ins['InState'] = state_in_node\n ins['InAccum'] = accum_in_node\n outs['OutState'] = state_out_node\n outs['OutAccum'] = accum_out_node\n\n attrs = {\n 'bit_length': quant_bits,\n 'moving_rate': self._moving_rate,\n 'is_test': self._is_test,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n }\n\n quant_op_node = graph.create_op_node(\n op_type='fake_quantize_moving_average_abs_max',\n attrs=attrs,\n inputs=ins,\n outputs=outs)\n\n graph.link_to(var_node, quant_op_node)\n graph.link_to(scale_in_node, quant_op_node)\n graph.link_to(quant_op_node, quant_var_node)\n graph.link_to(quant_op_node, scale_out_node)\n\n if not self._is_test:\n graph.link_to(state_in_node, quant_op_node)\n graph.link_to(accum_in_node, quant_op_node)\n graph.link_to(quant_op_node, state_out_node)\n graph.link_to(quant_op_node, accum_out_node)\n\n return quant_var_node, scale_out_node\n\n def _insert_channel_quant_op(self, graph, var_node, name, quant_bits,\n quant_axis):\n \"\"\"\n Insert fake_channel_wise_quantize_abs_max op in the graph.\n \"\"\"\n assert var_node.is_var(), '{} is not a var'.format(var_node.name())\n\n quant_var_node = graph.create_var_node(\n name=self._quantized_var_name(name),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n scale_var_node = graph.create_persistable_node(\n name=self._quantized_scale_name(name),\n var_type=var_node.type(),\n shape=[var_node.shape()[quant_axis]],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_var_node,\n np.zeros(\n scale_var_node.shape(), dtype=data_type),\n self._scope,\n self._place)\n quant_op_node = graph.create_op_node(\n op_type='fake_channel_wise_quantize_abs_max',\n attrs={\n 'bit_length': quant_bits,\n 'quant_axis': quant_axis,\n 'is_test': self._is_test,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': var_node},\n outputs={'Out': quant_var_node,\n 'OutScale': scale_var_node})\n graph.link_to(var_node, quant_op_node)\n graph.link_to(quant_op_node, quant_var_node)\n graph.link_to(quant_op_node, scale_var_node)\n return quant_var_node, scale_var_node\n\n def _insert_dequant_op(self, graph, var_node, scale_var_node, quant_bits):\n \"\"\"\n Insert fake_dequantize_op in the graph.\n \"\"\"\n assert var_node.is_var(), '{} is not a var'.format(var_node.name())\n\n dequant_var_node = graph.create_var_node(\n name=self._dequantized_var_name(var_node.name()),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n max_range = (1 << (quant_bits - 1)) - 1\n dequant_op_node = graph.create_op_node(\n op_type='fake_dequantize_max_abs',\n 
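# Illustrative sketch with an assumed update rule: the state/accum variables
# created above (both initialised to ones) are the usual way a moving average of
# abs_max is tracked. One common formulation, which this numpy toy follows, is
#   state <- moving_rate * state + 1
#   accum <- moving_rate * accum + max(|x|)
#   scale  = accum / state
# The authoritative behaviour is defined by the
# fake_quantize_moving_average_abs_max kernel itself; this only conveys the idea.
import numpy as np

def toy_moving_average_abs_max(batches, moving_rate=0.9):
    state, accum = 1.0, 1.0
    scale = 0.001                         # matches the initial scale value above
    for x in batches:
        state = moving_rate * state + 1.0
        accum = moving_rate * accum + float(np.max(np.abs(x)))
        scale = accum / state
    return scale

batches = [np.random.uniform(-s, s, (8,)) for s in (1.0, 2.0, 1.5)]
assert 0.0 < toy_moving_average_abs_max(batches) < 2.0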
attrs={\n 'max_range': float(max_range),\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': var_node,\n 'Scale': scale_var_node},\n outputs={'Out': dequant_var_node})\n graph.link_to(var_node, dequant_op_node)\n graph.link_to(scale_var_node, dequant_op_node)\n graph.link_to(dequant_op_node, dequant_var_node)\n return dequant_var_node\n\n def _insert_channel_dequant_op(self, graph, var_node, scale_var_nodes,\n quant_bits, quant_axis):\n \"\"\"\n Insert fake_channel_wise_dequantize_max_abs in the graph.\n \"\"\"\n assert var_node.is_var(), '{} is not a var'.format(var_node.name())\n\n dequant_var_node = graph.create_var_node(\n name=self._dequantized_var_name(var_node.name()),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n dequant_op_node = graph.create_op_node(\n op_type='fake_channel_wise_dequantize_max_abs',\n attrs={\n 'quant_bits': quant_bits,\n 'quant_axis': quant_axis,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': var_node,\n 'Scales': scale_var_nodes},\n outputs={'Out': dequant_var_node})\n graph.link_to(var_node, dequant_op_node)\n for scale_n in scale_var_nodes:\n graph.link_to(scale_n, dequant_op_node)\n graph.link_to(dequant_op_node, dequant_var_node)\n return dequant_var_node\n\n def _create_new_node(self, graph, in_node):\n \"\"\"\n create a node that same with in_node in graph\n Args:\n graph(IrGraph): create node in graph.\n in_node(IrVarNode): create node that same with in_node.\n Returns:\n created new node\n \"\"\"\n key = ''\n for inp in in_node.inputs:\n key = key + inp.name()\n key = key + in_node.name()\n for inp in in_node.outputs:\n key = key + inp.name()\n\n if key in self.create_var_map.keys():\n new_node = self.create_var_map[key]\n elif in_node.is_ctrl_var():\n new_node = graph.create_control_dep_var()\n self.create_var_map[key] = new_node\n else:\n new_node = graph.create_var_node_from_desc(in_node.node.var())\n self.create_var_map[key] = new_node\n return new_node\n\n def _copy_graph(self, graph, source_graph, op_node):\n \"\"\"\n copy op_node in source_graph to graph. 
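# Illustrative numpy sketch of channel-wise abs_max quantization as used by
# _insert_channel_quant_op / _insert_channel_dequant_op: one scale per channel
# along quant_axis (axis 0 for conv2d weights, axis 1 for the ops listed in
# _channelwise_quant_axis1_ops). Only the math is shown; shapes are made up.
import numpy as np

def toy_channel_wise_quant_dequant(w, quant_axis=0, bits=8):
    bnt = (1 << (bits - 1)) - 1
    # reduce over every axis except quant_axis to get one scale per channel
    reduce_axes = tuple(i for i in range(w.ndim) if i != quant_axis)
    scales = np.abs(w).max(axis=reduce_axes, keepdims=True)
    w_q = np.round(w / scales * bnt)
    return w_q * scales / bnt, scales.reshape(-1)

w = np.random.uniform(-1, 1, (16, 3, 3, 3)).astype('float32')  # conv2d filter
w_qdq, scales = toy_channel_wise_quant_dequant(w, quant_axis=0)
assert scales.shape == (16,)
assert np.max(np.abs(w - w_qdq)) <= scales.max() / 127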
And will run recursively \n for next ops that link to op_node's outputs.\n Args:\n graph(IrGraph): target graph to copy.\n source_graph(IrGraph): source graph to copy.\n op_node(IrOpNode): op node in source_graph.\n Returns:\n None\n\n \"\"\"\n key = ''\n for inp in op_node.inputs:\n key = key + inp.name()\n key = key + op_node.name()\n for inp in op_node.outputs:\n key = key + inp.name()\n has_created = False\n if key in self.create_op_map.keys():\n new_op_node = self.create_op_map[key]\n has_created = True\n else:\n new_op_node = graph.create_op_node_from_desc(op_node.node.op())\n self.create_op_map[key] = new_op_node\n if has_created:\n return\n for in_node in op_node.inputs:\n new_node = self._create_new_node(graph, in_node)\n graph.link_to(new_node, new_op_node)\n for in_node in op_node.outputs:\n new_node = self._create_new_node(graph, in_node)\n graph.link_to(new_op_node, new_node)\n for var_node in op_node.outputs:\n for next_op_node in var_node.outputs:\n self._copy_graph(graph, source_graph, next_op_node)\n return\n\n def _insert_func(self, graph, func, var_node, op):\n \"\"\"\n Insert a tmp program that returned by func between var_node and op.\n\n Args:\n graph(IrGraph): target graph to insert tmp program.\n func(Function): function to define a tmp program\n var_node(IrVarNode): node in target graph.\n op(IrOpNode): op in target graph.\n Returns:\n op's new input that replaces var_node\n \"\"\"\n tmp_program = Program()\n startup_program = Program()\n with program_guard(tmp_program, startup_program):\n with unique_name.guard(var_node.name() + \"_\"):\n in_node = data(\n var_node.name() + '_tmp_input',\n shape=var_node.shape(),\n dtype='float32')\n out_node = func(in_node)\n graph.out_node_mapping_table[out_node.name] = var_node.name()\n # loss shape must be 1 when minimize\n loss = mean(out_node)\n if not graph._for_test:\n assert self._optimizer, \"optimizer_func must be set when graph is test graph\"\n in_node.stop_gradient = False\n optimizer = self._optimizer()\n optimizer.minimize(loss)\n with scope_guard(self._scope):\n self._exe.run(startup_program)\n\n tmp_graph = IrGraph(\n core.Graph(tmp_program.desc), for_test=graph._for_test)\n in_node = tmp_graph._find_node_by_name(tmp_graph.all_var_nodes(),\n in_node.name)\n out_node = tmp_graph._find_node_by_name(tmp_graph.all_var_nodes(),\n out_node.name)\n\n in_node_params = []\n in_op_node = []\n # copy tmp graph to graph, after that, we can insert tmp graph's copy to graph.\n for node in tmp_graph.all_var_nodes():\n if node.inputs == [] and node.persistable():\n in_node_params.append(node)\n for node in tmp_graph.all_op_nodes():\n if node.inputs == []:\n in_op_node.append(node)\n for node in in_node.outputs:\n self._copy_graph(graph, tmp_graph, node)\n for node in in_node_params:\n for op_node in node.outputs:\n self._copy_graph(graph, tmp_graph, op_node)\n for node in in_op_node:\n self._copy_graph(graph, tmp_graph, node)\n\n target_in_node = graph._find_node_by_name(graph.all_var_nodes(),\n in_node.name())\n target_out_node = graph._find_node_by_name(graph.all_var_nodes(),\n out_node.name())\n loss_node = graph._find_node_by_name(graph.all_var_nodes(), loss.name)\n outputs = target_in_node.outputs\n for node in outputs:\n graph.update_input_link(target_in_node, var_node, node)\n graph.update_input_link(var_node, target_out_node, op)\n\n # update grad\n if not graph._for_test:\n op_out = op.outputs[0]\n op_out_grad = graph._find_node_by_name(graph.all_var_nodes(),\n op_out.name() + \"@GRAD\")\n # find op's gradient op, such 
as conv2d_grad\n op_grad = op_out_grad.outputs[0]\n target_out_grad_node = graph._find_node_by_name(\n graph.all_var_nodes(), target_out_node.name() + \"@GRAD\")\n in_node_grad = graph._find_node_by_name(\n graph.all_var_nodes(), target_in_node.name() + \"@GRAD\")\n in_node_grad_op = in_node_grad.inputs\n # update op_grad's input\n graph.update_input_link(var_node, target_out_node, op_grad)\n\n op_grad_out = None\n # find var_node's corresponding grad node\n for node in op_grad.outputs:\n if var_node.name() + \"@GRAD\" in node.name():\n op_grad_out = node\n # update op_grad's output\n if op_grad_out is not None:\n graph.update_output_link(op_grad_out, target_out_grad_node,\n op_grad)\n else:\n graph.link_to(op_grad, target_out_grad_node)\n\n for node in in_node_grad_op:\n graph.update_input_link(target_in_node, var_node, node)\n if op_grad_out:\n graph.update_output_link(in_node_grad, op_grad_out, node)\n # remove useless nodes\n mean_grad = target_out_grad_node.inputs[0]\n mean_out_grad = mean_grad.inputs[0]\n fill_constant_node = mean_out_grad.inputs[0]\n graph.safe_remove_nodes(mean_grad)\n graph.safe_remove_nodes(mean_out_grad)\n graph.safe_remove_nodes(fill_constant_node)\n graph.safe_remove_nodes(in_node_grad)\n\n graph.safe_remove_nodes(loss_node.inputs[0])\n graph.safe_remove_nodes(loss_node)\n graph.safe_remove_nodes(target_in_node)\n return target_out_node\n\n def _quantized_var_name(self, var_name):\n \"\"\"\n Return quantized variable name for the input `var_name`.\n \"\"\"\n return \"%s.quantized\" % (var_name)\n\n def _dequantized_var_name(self, var_name):\n \"\"\"\n Return dequantized variable name for the input `var_name`.\n \"\"\"\n return \"%s.dequantized\" % (var_name)\n\n def _quantized_scale_name(self, var_name):\n \"\"\"\n Return the scale name of quantized variable for the input `var_name`.\n \"\"\"\n return \"%s.scale\" % (var_name)\n\n def _is_skip_quant(self, graph, op_node):\n \"\"\"\n Analyse whether the op node skips quantization.\n \"\"\"\n is_skip = False\n if op_node.op().has_attr(\"skip_quant\") and \\\n op_node.op().attr(\"skip_quant\"):\n is_skip = True\n # if the inputs of mul and matmul are not all persistable, use\n # AddQuantDequantPass to quantize them.\n if op_node.name() in [\"mul\", \"matmul\"] and \\\n _is_input_all_not_persistable(graph, op_node):\n is_skip = True\n if op_node.op().has_attr(\"quantization_type\") and \\\n op_node.op().attr(\"quantization_type\") == \"qat_without_weight\":\n is_skip = True\n return is_skip\n\n\nclass QuantizationFreezePass(object):\n def __init__(self,\n scope,\n place,\n bias_correction=False,\n weight_bits=8,\n activation_bits=8,\n weight_quantize_type='abs_max',\n quantizable_op_type=None):\n \"\"\"\n The freeze pass is used to adjust the quantize operator order, for example:\n 1) `activation -> quant -> dequant -> conv2d` will be frozen into\n `activation -> quant -> conv2d -> dequant`\n 2) `weight -> quant -> dequant -> conv2d` will be frozen into `weight -> conv2d`,\n and weight will be scaled offline.\n\n Args:\n scope(fluid.Scope): scope is used to get the weight tensor values.\n place(fluid.CPUPlace|fluid.CUDAPlace|str): place is used to restore the weight tensors.\n If it's string, It can be ``cpu``, and ``gpu:x``, where ``x`` is the index of the GPUs.\n bias_correction(bool): whether use bias correction for post-training quantization.\n https://arxiv.org/abs/1810.05723.\n weight_bits(int): quantization bit number for weights.\n activation_bits(int): quantization bit number for activation.\n 
weight_quantize_type(str): quantization type for weights, support 'abs_max' and \n 'channel_wise_abs_max'. The 'range_abs_max' usually is not used for weight, \n since weights are fixed once the model is well trained.\n quantizable_op_type(list[str]): This input param will be removed latter. The pass\n will process all quantized op, so it is not necessary to set the input param.\n \"\"\"\n assert scope is not None, \\\n 'The scope cannot be set None.'\n assert place is not None, \\\n 'The place cannot be set None.'\n self._scope = scope\n self._bias_correction = bias_correction\n self._place = _get_paddle_place(place)\n self._weight_bits = weight_bits\n self._activation_bits = activation_bits\n self._weight_quantize_type = weight_quantize_type\n self._fake_quant_op_names = _fake_quant_op_list\n self._fake_dequant_op_names = _fake_dequant_op_list\n self._op_input_rename_map = collections.OrderedDict()\n self._op_output_rename_map = collections.OrderedDict()\n self._quant_var_scale_map = collections.OrderedDict()\n\n def apply(self, graph):\n \"\"\"\n Adjust quantize/dequantize operators order for the inference process.\n\n Args:\n graph(IrGraph): the applied graph.\n Returns:\n None\n \"\"\"\n # Get input scales in fake quant op and process weights\n persistable_vars = [p.name() for p in graph.all_persistable_nodes()]\n ops = graph.all_op_nodes()\n for op_node in ops:\n op_name = op_node.name()\n if op_name in self._fake_quant_op_names:\n input_arg_name = op_node.input('X')[0]\n if hasattr(graph, 'out_node_mapping_table'):\n if input_arg_name in graph.out_node_mapping_table.keys():\n input_arg_name = graph.out_node_mapping_table[\n input_arg_name]\n if input_arg_name not in persistable_vars:\n scale_v = graph._find_node_by_name(\n op_node.outputs, op_node.output('OutScale')[0])\n self._quant_var_scale_map[input_arg_name] = scale_v\n else:\n # Obtain scale from OutScale var node\n scale_v = self._load_var(op_node.output('OutScale')[0])\n assert scale_v.ndim in [\n 1, 2\n ], \"the dim of scale_v should be 1 or 2\"\n if scale_v.ndim == 2:\n scale_v = scale_v[0]\n if scale_v.size == 1:\n scale_v = scale_v[0]\n else:\n scale_v = scale_v.tolist()\n self._quant_var_scale_map[input_arg_name] = scale_v\n # Quantize weight and restore\n param_v = self._load_var(input_arg_name)\n if isinstance(scale_v, list) and \\\n any(_check_grandchild_op_node(op_node, op)\n for op in _channelwise_quant_axis1_ops):\n quant_axis = 1\n else:\n quant_axis = 0\n quantized_param_v = self._quant(\n param_v.copy(), scale_v, self._weight_bits, quant_axis)\n if self._bias_correction == True:\n quantized_param_v = self._bias_correction_w(\n param_v, quantized_param_v, scale_v, quant_axis)\n self._restore_var(input_arg_name, quantized_param_v)\n self._remove_fake_quant_and_dequant_op(graph, op_node)\n\n # Remove all fake dequant op\n ops = graph.all_op_nodes()\n for op_node in ops:\n op_name = op_node.name()\n if op_name in self._fake_dequant_op_names:\n self._remove_fake_quant_and_dequant_op(graph, op_node)\n\n # Insert post dequant op\n ops = graph.all_op_nodes()\n for op_node in ops:\n op_node_desc = op_node.op()\n if op_node_desc.has_attr(\"quantization_type\") and \\\n op_node_desc.attr(\"quantization_type\") == \"qat_with_weight\":\n if self._weight_quantize_type == 'channel_wise_abs_max':\n self._insert_post_channel_dequant_op(graph, op_node)\n else:\n self._insert_post_dequant_op(graph, op_node)\n\n # Rename inputs of the followed ops after inserting dequant_op after fc/conv\n for op_node in ops:\n for var_node in 
op_node.inputs:\n if var_node.node in self._op_output_rename_map:\n old_in = var_node\n new_in = self._op_output_rename_map[var_node.node]\n graph.update_input_link(old_in, new_in, op_node)\n\n # remove the unused var node in the graph\n self._remove_unused_var_nodes(graph)\n graph.resolve_hazard()\n return graph\n\n def _remove_fake_quant_and_dequant_op(self, graph, op_node):\n k = graph._find_node_by_name(op_node.outputs, op_node.output('Out')[0])\n v = graph._find_node_by_name(op_node.inputs, op_node.input('X')[0])\n if v.node not in self._op_input_rename_map:\n self._op_input_rename_map[k.node] = v\n else:\n self._op_input_rename_map[k.node] = self._op_input_rename_map[\n v.node]\n graph.safe_remove_nodes(op_node)\n\n def _insert_post_channel_dequant_op(self, graph, op_node):\n persistable_vars = [p.name() for p in graph.all_persistable_nodes()]\n for var_node in op_node.inputs:\n name = var_node.name()\n if name not in op_node.input_arg_names():\n continue\n if var_node.node in self._op_input_rename_map:\n old_in = var_node\n new_in = self._op_input_rename_map[var_node.node]\n new_in.clear_outputs()\n graph.update_input_link(old_in, new_in, op_node)\n original_var_name = self._original_var_name(name)\n scale_v = self._quant_var_scale_map[original_var_name]\n if original_var_name in persistable_vars:\n assert isinstance(\n scale_v,\n list), 'The scale of parameter %s is not a list.' % (\n original_var_name)\n channel_scale = np.array(scale_v)\n else:\n assert isinstance(scale_v, IrNode)\n scale_var_node = self._quant_var_scale_map[original_var_name]\n\n if len(op_node.output_arg_names()) != 1:\n raise ValueError(\"Only support one output, but op %s has\"\n \" more than one output.\" % (op_node.name()))\n\n output_var_node = graph._find_node_by_name(\n op_node.outputs, op_node.output_arg_names()[0])\n weight_scale_node = graph.create_persistable_node(\n name=unique_name.generate('channel_scale'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[channel_scale.shape[0]],\n var_dtype=output_var_node.dtype())\n data_type = 'float64' if output_var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(weight_scale_node,\n channel_scale.astype(data_type), self._scope,\n self._place)\n dequant_var_node = graph.create_var_node(\n name=self._dequantized_var_name(output_var_node.name()),\n var_type=output_var_node.type(),\n shape=output_var_node.shape(),\n var_dtype=output_var_node.dtype())\n dequant_op_node = graph.create_op_node(\n op_type='fake_channel_wise_dequantize_max_abs',\n attrs={\n 'quant_bits': [self._weight_bits, self._activation_bits],\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={\n 'X': output_var_node,\n 'Scales': [weight_scale_node, scale_var_node]\n },\n outputs={'Out': dequant_var_node})\n graph.link_to(output_var_node, dequant_op_node)\n graph.link_to(scale_var_node, dequant_op_node)\n graph.link_to(weight_scale_node, dequant_op_node)\n graph.link_to(dequant_op_node, dequant_var_node)\n self._op_output_rename_map[output_var_node.node] = dequant_var_node\n return dequant_var_node\n\n def _insert_post_dequant_op(self, graph, op_node):\n persistable_vars = [p.name() for p in graph.all_persistable_nodes()]\n max_range = 1\n param_range = (1 << (self._weight_bits - 1)) - 1\n act_range = (1 << (self._activation_bits - 1)) - 1\n for var_node in op_node.inputs:\n name = var_node.name()\n if name not in op_node.input_arg_names():\n continue\n if var_node.node in self._op_input_rename_map:\n old_in = var_node\n new_in = 
self._op_input_rename_map[var_node.node]\n new_in.clear_outputs()\n graph.update_input_link(old_in, new_in, op_node)\n original_var_name = self._original_var_name(name)\n scale_v = self._quant_var_scale_map[original_var_name]\n if original_var_name in persistable_vars:\n assert self._is_float(\n scale_v), 'The scale of parameter %s is not a float.' % (\n original_var_name)\n max_range *= param_range / scale_v\n else:\n max_range *= act_range\n assert isinstance(scale_v, IrNode)\n scale_var_node = self._quant_var_scale_map[original_var_name]\n\n if len(op_node.output_arg_names()) != 1:\n raise ValueError(\"Only support one output, but op %s has\"\n \" more than one output.\" % (op_node.name()))\n\n output_var_node = graph._find_node_by_name(\n op_node.outputs, op_node.output_arg_names()[0])\n dequant_var_node = graph.create_var_node(\n name=self._dequantized_var_name(output_var_node.name()),\n var_type=output_var_node.type(),\n shape=output_var_node.shape(),\n var_dtype=output_var_node.dtype())\n dequant_op_node = graph.create_op_node(\n op_type='fake_dequantize_max_abs',\n attrs={\n 'max_range': float(max_range),\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n },\n inputs={'X': output_var_node,\n 'Scale': scale_var_node},\n outputs={'Out': dequant_var_node})\n graph.link_to(output_var_node, dequant_op_node)\n graph.link_to(scale_var_node, dequant_op_node)\n graph.link_to(dequant_op_node, dequant_var_node)\n self._op_output_rename_map[output_var_node.node] = dequant_var_node\n return dequant_var_node\n\n def _load_var(self, name):\n return np.array(self._scope.find_var(name).get_tensor())\n\n def _restore_var(self, name, array):\n tensor = self._scope.find_var(name).get_tensor()\n tensor.set(array, self._place)\n\n def _remove_unused_var_nodes(self, graph):\n all_used_vars = set()\n ops = graph.all_op_nodes()\n for op_node in ops:\n for input_node in op_node.inputs:\n all_used_vars.add(input_node)\n for output_node in op_node.outputs:\n all_used_vars.add(output_node)\n\n all_used_vars = {n.node for n in all_used_vars}\n all_unused_vars = {\n n\n for n in filter(lambda node: node.node not in all_used_vars,\n graph.all_var_nodes())\n }\n graph.safe_remove_nodes(all_unused_vars)\n\n def _original_var_name(self, var_name):\n \"\"\"\n Return the original variable name.\n \"\"\"\n if var_name.endswith('.quantized.dequantized'):\n return var_name[:-len('.quantized.dequantized')]\n if var_name.endswith('.quantized'):\n return var_name[:-len('.quantized')]\n if var_name.endswith('.dequantized'):\n return var_name[:-len('.dequantized')]\n if var_name.endswith('.scale'):\n return var_name[:-len('.scale')]\n else:\n return var_name\n\n def _dequantized_var_name(self, var_name):\n \"\"\"\n Return dequantized variable name for the input `var_name`.\n \"\"\"\n return \"%s.dequantized\" % (var_name)\n\n def _is_float(self, v):\n return isinstance(v, float) or isinstance(v, np.float32) \\\n or isinstance(v, np.float64)\n\n def _quant(self, x, scale, num_bits, quant_axis):\n assert quant_axis in [0, 1], 'quant_axis should be 0 or 1 for now.'\n bnt = (1 << (num_bits - 1)) - 1\n\n def _clip(x, scale):\n x[x > scale] = scale\n x[x < -scale] = -scale\n return x\n\n if isinstance(scale, list):\n for i, s in enumerate(scale):\n if s == 0.0:\n s = 1e-8\n if quant_axis == 0:\n x[i] = _clip(x[i], s)\n x[i] = np.round(x[i] / s * bnt)\n else:\n x[:, i] = _clip(x[:, i], s)\n x[:, i] = np.round(x[:, i] / s * bnt)\n else:\n x = _clip(x, scale)\n x = np.round(x / scale * bnt)\n return x\n\n def 
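# Illustrative numpy sketch of why _insert_post_dequant_op accumulates
# max_range = (param_range / weight_scale) * act_range: for a linear op such as
# mul, running the op on the quantized inputs and then multiplying by
# act_scale / max_range recovers (approximately) the float result. Shapes,
# values and the 8-bit setting are made up for this demonstration.
import numpy as np

bits = 8
param_range = act_range = (1 << (bits - 1)) - 1

x = np.random.uniform(-1, 1, (4, 8)).astype('float32')
w = np.random.uniform(-1, 1, (8, 3)).astype('float32')
x_scale = float(np.max(np.abs(x)))
w_scale = float(np.max(np.abs(w)))

x_q = np.round(x / x_scale * act_range)          # quantized activation
w_q = np.round(w / w_scale * param_range)        # weight quantized offline

max_range = (param_range / w_scale) * act_range
out = np.dot(x_q, w_q) * x_scale / max_range     # post dequant
assert np.allclose(out, np.dot(x, w), atol=0.1)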
_bias_correction_w(self, x, x_quant, scale_v, quant_axis):\n '''\n Bias correction for weight\n '''\n eps = 1e-8\n bnt = (1 << (self._weight_bits - 1)) - 1\n x_dequant = x_quant.copy()\n if isinstance(scale_v, list):\n if quant_axis == 0:\n for i, s in enumerate(scale_v):\n x_dequant[i] = x_dequant[i] * s / bnt\n quant_bias = x - x_dequant\n mean_bias = quant_bias.reshape(quant_bias.shape[0], -1).mean(-1)\n std_orig = x.reshape(x.shape[0], -1).std(-1)\n std_quant = x_dequant.reshape(x_dequant.shape[0], -1).std(-1)\n std_bias = std_orig / (std_quant + eps)\n else:\n for i, s in enumerate(scale_v):\n x_dequant[:, i] = x_quant[:, i] * s / bnt\n quant_bias = x - x_dequant\n mean_bias = np.array([\n quant_bias[:, i].mean() for i in range(quant_bias.shape[1])\n ])\n std_orig = np.array([x[:, i].std() for i in range(x.shape[1])])\n std_quant = np.array(\n [x_dequant[:, i].std() for i in range(x_dequant.shape[1])])\n std_bias = std_orig / (std_quant + eps)\n else:\n x_dequant = x_quant * scale_v / bnt\n mean_bias = (x - x_dequant).mean()\n std_bias = x.std() / (x_dequant.std() + eps)\n if mean_bias.ndim == 1:\n std_bias = np.resize(std_bias, x.shape)\n mean_bias = np.resize(mean_bias, x.shape)\n\n x_dequant = (mean_bias + x_dequant) * std_bias\n quantized_param_v = self._quant(x_dequant, scale_v, self._weight_bits,\n quant_axis)\n return quantized_param_v\n\n\nclass ConvertToInt8Pass(object):\n def __init__(self, scope, place, quantizable_op_type=None):\n \"\"\"\n Convert the weights into int8_t type.\n\n Args:\n scope(fluid.Scope): scope is used to get the weight tensor values.\n place(fluid.CPUPlace|fluid.CUDAPlace|str): place is used to restore the\n 8bits weight tensors. If it's string, It can be ``cpu``, and ``gpu:x``,\n where ``x`` is the index of the GPUs.\n quantizable_op_type(list[str]): This input param will be removed latter. The pass\n will process all quantized op, so it is not necessary to set the input param.\n \"\"\"\n assert scope is not None, \\\n 'The scope cannot be set None.'\n assert place is not None, \\\n 'The place cannot be set None.'\n self._scope = scope\n self._place = _get_paddle_place(place)\n\n def apply(self, graph):\n \"\"\"\n Convert weights' type of the graph. 
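# Illustrative numpy sketch of the per-tensor branch of _bias_correction_w above:
# the dequantized weight is shifted and rescaled so that its mean and standard
# deviation match the original float weight before it is re-quantized. The
# sample weight shape is made up.
import numpy as np

def toy_bias_correct(x, scale, bits=8, eps=1e-8):
    bnt = (1 << (bits - 1)) - 1
    x_q = np.round(np.clip(x, -scale, scale) / scale * bnt)
    x_dequant = x_q * scale / bnt
    mean_bias = (x - x_dequant).mean()
    std_bias = x.std() / (x_dequant.std() + eps)
    return (mean_bias + x_dequant) * std_bias   # corrected weight, ready to re-quantize

w = np.random.normal(0, 0.1, (64,)).astype('float32')
w_corr = toy_bias_correct(w, float(np.max(np.abs(w))))
assert np.isclose(w_corr.std(), w.std(), rtol=1e-4)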
After that, the data type of the\n graph weights is int8_t.\n\n Args:\n graph(IrGraph): the applied graph.\n Returns:\n None\n \"\"\"\n persistable_vars = [p.name() for p in graph.all_persistable_nodes()]\n ops = graph.all_op_nodes()\n input_map = {}\n for op_node in ops:\n if op_node.op().has_attr(\"quantization_type\") and \\\n op_node.op().attr(\"quantization_type\") == \"qat_with_weight\":\n for var_node in op_node.inputs:\n name = var_node.name()\n if name in persistable_vars:\n if name not in input_map:\n int8_var_node = self._convert_to_int8(graph,\n var_node)\n input_map[name] = int8_var_node\n graph.update_input_link(var_node, input_map[name],\n op_node)\n\n # remove the unused var node in the graph\n self._remove_unused_var_nodes(graph)\n graph.resolve_hazard()\n return graph\n\n def _convert_to_int8(self, graph, var_node):\n int8_var_node_name = var_node.name() + \".int8\"\n int8_var_node = graph.create_persistable_node(\n name=cpt.to_text(int8_var_node_name),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=core.VarDesc.VarType.INT8)\n array = self._load_var(var_node.name())\n self._scope.var(int8_var_node_name)\n self._store_var(int8_var_node_name, array, np.int8)\n return int8_var_node\n\n def _load_var(self, name):\n return np.array(self._scope.find_var(name).get_tensor())\n\n def _store_var(self, name, array, dtype):\n tensor = self._scope.find_var(name).get_tensor()\n tensor.set(array.astype(dtype), self._place)\n\n def _remove_unused_var_nodes(self, graph):\n all_used_vars = set()\n ops = graph.all_op_nodes()\n for op_node in ops:\n for input_node in op_node.inputs:\n all_used_vars.add(input_node)\n for output_node in op_node.outputs:\n all_used_vars.add(output_node)\n\n all_used_vars = {n.node for n in all_used_vars}\n all_unused_vars = {\n n\n for n in filter(lambda node: node.node not in all_used_vars,\n graph.all_var_nodes())\n }\n graph.safe_remove_nodes(all_unused_vars)\n\n\nclass TransformForMobilePass(object):\n def __init__(self):\n \"\"\"\n This pass is used to convert the frozen graph for paddle-mobile execution.\n \"\"\"\n self._fake_quant_op_names = _fake_quant_op_list\n self._fake_dequant_op_names = _fake_dequant_op_list\n\n def apply(self, graph):\n \"\"\"\n Because paddle-mobile use `quantize` an `dequantize` as the names of\n quantize operator and dequantize operator, the `apply` function just\n realize this logic.\n\n Args:\n graph(IrGraph): the graph will be transformed.\n Returns:\n None\n \"\"\"\n ops = graph.all_op_nodes()\n for op_node in ops:\n name = op_node.name()\n if name in self._fake_quant_op_names:\n op_node.set_type('quantize')\n quant_node = graph.create_op_node_from_desc(op_node.op())\n for input_node in op_node.inputs:\n graph.link_to(input_node, quant_node)\n for output_node in op_node.outputs:\n graph.link_to(quant_node, output_node)\n graph.safe_remove_nodes(op_node)\n if name in self._fake_dequant_op_names:\n op_node.set_type('dequantize')\n dequant_node = graph.create_op_node_from_desc(op_node.op())\n for input_node in op_node.inputs:\n graph.link_to(input_node, dequant_node)\n for output_node in op_node.outputs:\n graph.link_to(dequant_node, output_node)\n graph.safe_remove_nodes(op_node)\n graph.resolve_hazard()\n return graph\n\n\nclass OutScaleForTrainingPass(object):\n def __init__(self, scope=None, place=None, moving_rate=0.9):\n \"\"\"\n This pass is used for calculating output scales of some operators.\n These output scales may be used by tensorRT or some other inference engines.\n\n Args:\n 
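# Illustrative numpy sketch of what ConvertToInt8Pass._store_var does to a frozen
# weight: in the usual pipeline (QuantizationFreezePass first), the weight values
# are already integers in [-127, 127], so the astype(np.int8) cast is lossless.
# The sample array below is made up.
import numpy as np

frozen_w = np.round(np.random.uniform(-1, 1, (3, 3)) * 127).astype('float32')
w_int8 = frozen_w.astype(np.int8)
assert np.array_equal(w_int8.astype('float32'), frozen_w)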
scope(fluid.Scope): The scope is used to initialize these new parameters.\n place(fluid.CPUPlace|fluid.CUDAPlace|str): The place is used to initialize new parameters.\n If it's string, It can be ``cpu``, and ``gpu:x``, where ``x`` is the\n index of the GPUs.\n moving_rate(float): The decay coefficient of moving average. The default value is 0.9.\n \"\"\"\n self._scope = scope\n self._place = _get_paddle_place(place)\n self._moving_rate = moving_rate\n self._is_test = None\n self._teller_set = _out_scale_op_list\n\n def apply(self, graph):\n \"\"\"\n Insert the `moving_average_abs_max_scale` op in order to calculate output scales\n of operators in the teller_set.\n\n Args:\n graph(IrGraph): the target graph.\n \"\"\"\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n self._is_test = graph.is_test()\n target_ops = []\n for op in graph.all_op_nodes():\n if op.name() in self._teller_set:\n target_ops.append(op)\n for op in target_ops:\n for output_var_name in _get_op_output_var_names(op):\n in_node = graph._find_node_by_name(op.outputs, output_var_name)\n if in_node.dtype() not in \\\n [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]:\n continue\n\n scale_node = graph.create_persistable_node(\n name=self._scale_name(in_node.name()),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[1],\n var_dtype=in_node.dtype())\n data_type = 'float64' if in_node.dtype() \\\n == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n ins = {'X': in_node}\n outs = {'OutScale': scale_node}\n if not self._is_test:\n state_in_node = graph.create_persistable_node(\n name=unique_name.generate('scale_state@'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=in_node.dtype(),\n shape=[1])\n _init_var_node(\n state_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n accum_in_node = graph.create_persistable_node(\n name=unique_name.generate('scale_accum@'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=in_node.dtype(),\n shape=[1])\n _init_var_node(\n accum_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n state_out_node = graph.create_var_node_from_desc(\n state_in_node.var())\n accum_out_node = graph.create_var_node_from_desc(\n accum_in_node.var())\n\n ins['InState'] = state_in_node\n ins['InAccum'] = accum_in_node\n outs['OutState'] = state_out_node\n outs['OutAccum'] = accum_out_node\n\n attrs = {\n 'moving_rate': self._moving_rate,\n 'is_test': self._is_test,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n }\n scale_op_node = graph.create_op_node(\n op_type='moving_average_abs_max_scale',\n attrs=attrs,\n inputs=ins,\n outputs=outs)\n graph.link_to(in_node, scale_op_node)\n graph.link_to(scale_op_node, scale_node)\n if not self._is_test:\n graph.link_to(state_in_node, scale_op_node)\n graph.link_to(accum_in_node, scale_op_node)\n graph.link_to(scale_op_node, state_out_node)\n graph.link_to(scale_op_node, accum_out_node)\n graph.resolve_hazard()\n return graph\n\n def _scale_name(self, var_name):\n \"\"\"\n Return the scale name for the var named `var_name`.\n \"\"\"\n return \"%s@scale\" % (var_name)\n\n\nclass OutScaleForInferencePass(object):\n def __init__(self, scope=None):\n \"\"\"\n This pass is used for setting output scales of some operators.\n These output scales may be used by tensorRT or some other inference engines.\n\n Args:\n scope(fluid.Scope): The scope is used to initialize these new 
parameters.\n \"\"\"\n self._scope = scope\n self._teller_set = _out_scale_op_list\n\n def apply(self, graph):\n \"\"\"\n Get output scales from the scope and set these scales in op_descs\n of operators in the teller_set.\n\n Args:\n graph(IrGraph): the target graph.\n \"\"\"\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n op_nodes = graph.all_op_nodes()\n for op_node in op_nodes:\n if op_node.name() in self._teller_set:\n var_names = _get_op_output_var_names(op_node)\n for var_name in var_names:\n in_node = graph._find_node_by_name(op_node.outputs,\n var_name)\n if in_node.dtype() not in \\\n [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]:\n continue\n\n scale_name = self._scale_name(var_name)\n scale_var = self._scope.find_var(scale_name)\n assert scale_var is not None, \\\n \"Can not find {} variable in the scope\".format(scale_name)\n scale_value = np.array(scale_var.get_tensor())[0]\n\n # For compatibility, we save output threshold by two methods.\n op_node.op()._set_attr(\"out_threshold\", float(scale_value))\n\n argname_index = _get_output_name_index(op_node, var_name)\n assert argname_index is not None, \\\n var_name + \" is not the output of the op\"\n op_node.op()._set_attr(argname_index[0] + str(argname_index[1]) \\\n + \"_threshold\", float(scale_value))\n graph.resolve_hazard()\n return graph\n\n def _scale_name(self, var_name):\n \"\"\"\n Return the scale name for the var named `var_name`.\n \"\"\"\n return \"%s@scale\" % (var_name)\n\n\nclass AddQuantDequantPass(object):\n \"\"\"\n Quantize the ops that do not have weights, and add quant_dequant op for the \n quantized ops's inputs.\n \"\"\"\n _supported_quantizable_op_type = [\n \"pool2d\", \"elementwise_add\", \"concat\", \"softmax\", \"argmax\", \"transpose\",\n \"equal\", \"gather\", \"greater_equal\", \"greater_than\", \"less_equal\",\n \"less_than\", \"mean\", \"not_equal\", \"reshape\", \"reshape2\",\n \"bilinear_interp\", \"nearest_interp\", \"trilinear_interp\", \"slice\",\n \"squeeze\", \"elementwise_sub\", \"mul\", \"matmul\", \"relu\", \"relu6\",\n \"leaky_relu\", \"tanh\", \"swish\", \"scale\", \"transpose\", \"transpose2\",\n \"sigmoid\", \"pad2d\", \"flatten\", \"flatten2\", \"batch_norm\"\n ]\n\n # To be compatible with PaddleSlim, not remove _activation_type for now\n _activation_type = [\"relu\", \"relu6\", \"leaky_relu\", \"tanh\", \"swish\"]\n\n def __init__(self,\n scope=None,\n place=None,\n moving_rate=0.9,\n quant_bits=8,\n skip_pattern=[\"skip_quant\"],\n quantizable_op_type=[\"elementwise_add\", \"pool2d\"],\n is_full_quantized=False):\n \"\"\"\n Constructor.\n\n Args:\n scope(fluid.Scope): The scope is used to initialize these new parameters.\n place(fluid.CPUPlace|fluid.CUDAPlace|str): place is used to initialize new\n parameters described above. If ``place`` is string, it can be It can be ``cpu``\n or ``gpu:x``, where ``x`` is the index of the GPUs.\n moving_rate(float, optional): the param for 'quant_dequant_moving_average_abs_max' \n quantization. Default is 0.9.\n quant_bits(int, optional): quantization bit number for activation. Default is 8.\n skip_pattern(str, optional): The user-defined quantization skip pattern, which\n will be presented in the name scope of an op. When the skip pattern is\n detected in an op's name scope, the corresponding op will not be quantized.\n Default is 'skip_quant'.\n quantizable_op_type(list[str], optional): List the type of ops that will be \n quantized. Default is [\"elementwise_add\", \"pool2d\"]. 
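# Illustrative sketch, plain Python only: the variable-naming convention used by
# AddQuantDequantPass (see _inser_quant_dequant_moving_average_abs_max_op further
# below). Each real input X of a quantized op is replaced by X.quant_dequant,
# whose scale lives in X.quant_dequant.scale; this is the suffix the unit test
# earlier in this document checks with endswith('.quant_dequant'). The sample
# variable name is made up.
def toy_quant_dequant_names(var_name):
    quant_var = "{}.quant_dequant".format(var_name)
    scale_var = "{}.quant_dequant.scale".format(var_name)
    return quant_var, scale_var

assert toy_quant_dequant_names("pool2d_0.tmp_0") == (
    "pool2d_0.tmp_0.quant_dequant", "pool2d_0.tmp_0.quant_dequant.scale")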
\n is_full_quantized(bool, optional): If set is_full_quantized as True, apply \n quantization to all supported quantizable op type. If set is_full_quantized\n as False, only apply quantization to the op type according to the input \n quantizable_op_type.\n \"\"\"\n self._scope = scope\n self._place = _get_paddle_place(place)\n self._moving_rate = moving_rate\n self._quant_bits = quant_bits\n self._is_test = None\n self._skip_pattern = skip_pattern\n\n if is_full_quantized:\n self._quantizable_op_type = \\\n AddQuantDequantPass._supported_quantizable_op_type\n else:\n self._quantizable_op_type = quantizable_op_type\n for op_type in quantizable_op_type:\n assert op_type in AddQuantDequantPass._supported_quantizable_op_type, \\\n op_type + \" is not supported for quantization.\"\n self._quantizable_grad_op_type = [\n '%s_grad' % (op) for op in self._quantizable_op_type\n ]\n\n assert self._scope != None, \"scope must not be None.\"\n assert self._place != None, \"place must not be None.\"\n\n def apply(self, graph):\n \"\"\"\n Add quant_dequant before some ops, such as the 'elementwise_add' and\n 'pool2d' op.\n\n Args:\n graph(IrGraph): the target graph.\n Returns:\n None\n \"\"\"\n assert isinstance(graph,\n IrGraph), 'graph must be the instance of IrGraph.'\n self._is_test = graph.is_test()\n dequantized_vars_map = collections.OrderedDict()\n\n # Forward stage, insert quant_dequant op\n all_op_nodes = graph.all_op_nodes()\n for op_node in all_op_nodes:\n if op_node.name() in self._quantizable_op_type:\n is_skip = False\n if isinstance(self._skip_pattern, list):\n is_skip = op_node.op().has_attr(\"op_namescope\") and \\\n any(pattern in op_node.op().attr(\"op_namescope\") for pattern in self._skip_pattern)\n elif isinstance(self._skip_pattern, str):\n is_skip = op_node.op().has_attr(\"op_namescope\") and \\\n op_node.op().attr(\"op_namescope\").find(self._skip_pattern) != -1\n is_quantized = op_node.op().has_attr(\"quantization_type\") and \\\n op_node.op().attr(\"quantization_type\") == \"qat_with_weight\"\n if is_skip or is_quantized or \\\n (not _is_input_all_not_persistable(graph, op_node)):\n continue\n\n op_node.op()._set_attr(\"quantization_type\",\n \"qat_without_weight\")\n op_node.op()._set_attr(\"activation_bits\", self._quant_bits)\n arg_names = _get_op_input_var_names(op_node)\n for arg_name in arg_names:\n in_node = graph._find_node_by_name(op_node.inputs, arg_name)\n if arg_name in dequantized_vars_map:\n quant_var_node = dequantized_vars_map[arg_name]\n else:\n quant_var_node, _ = \\\n self._inser_quant_dequant_moving_average_abs_max_op(\n graph, in_node, self._quant_bits)\n dequantized_vars_map[arg_name] = quant_var_node\n graph.update_input_link(in_node, quant_var_node, op_node)\n\n # Backward stage, update input link\n for op_node in all_op_nodes:\n if op_node.name() in self._quantizable_grad_op_type:\n for input_name in op_node.input_arg_names():\n if input_name in dequantized_vars_map:\n in_node = graph._find_node_by_name(op_node.inputs,\n input_name)\n dequant_var_node = dequantized_vars_map[input_name]\n graph.update_input_link(in_node, dequant_var_node,\n op_node)\n\n graph.resolve_hazard()\n return graph\n\n def _inser_quant_dequant_moving_average_abs_max_op(self, graph, var_node,\n quant_bits):\n \"\"\"Insert fake_quantize_dequantize_moving_average_abs_max op.\n \"\"\"\n quant_var_node = graph.create_var_node(\n name=\"{}.quant_dequant\".format(var_node.name()),\n var_type=var_node.type(),\n shape=var_node.shape(),\n var_dtype=var_node.dtype())\n scale_in_node = 
graph.create_persistable_node(\n name=\"{}.quant_dequant.scale\".format(var_node.name()),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n shape=[1],\n var_dtype=var_node.dtype())\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n scale_in_node,\n np.array(\n [0.001], dtype=data_type),\n self._scope,\n self._place)\n\n scale_out_node = graph.create_var_node_from_desc(scale_in_node.var())\n ins = {'X': var_node, 'InScale': scale_in_node}\n outs = {'Out': quant_var_node, 'OutScale': scale_out_node}\n if not self._is_test:\n state_in_node = graph.create_persistable_node(\n name=unique_name.generate('quant_dequant.state'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=var_node.dtype(),\n shape=[1])\n data_type = 'float64' if var_node.dtype(\n ) == core.VarDesc.VarType.FP64 else 'float32'\n _init_var_node(\n state_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n accum_in_node = graph.create_persistable_node(\n name=unique_name.generate('quant_dequant.accum'),\n var_type=core.VarDesc.VarType.LOD_TENSOR,\n var_dtype=var_node.dtype(),\n shape=[1])\n _init_var_node(\n accum_in_node,\n np.ones(\n [1], dtype=data_type),\n self._scope,\n self._place)\n state_out_node = graph.create_var_node_from_desc(state_in_node.var(\n ))\n accum_out_node = graph.create_var_node_from_desc(accum_in_node.var(\n ))\n\n ins['InState'] = state_in_node\n ins['InAccum'] = accum_in_node\n outs['OutState'] = state_out_node\n outs['OutAccum'] = accum_out_node\n\n attrs = {\n 'bit_length': quant_bits,\n 'moving_rate': self._moving_rate,\n 'is_test': self._is_test,\n 'op_role': core.op_proto_and_checker_maker.OpRole.Forward\n }\n\n quant_op_node = graph.create_op_node(\n op_type='fake_quantize_dequantize_moving_average_abs_max',\n attrs=attrs,\n inputs=ins,\n outputs=outs)\n\n graph.link_to(var_node, quant_op_node)\n graph.link_to(scale_in_node, quant_op_node)\n graph.link_to(quant_op_node, quant_var_node)\n graph.link_to(quant_op_node, scale_out_node)\n\n if not self._is_test:\n graph.link_to(state_in_node, quant_op_node)\n graph.link_to(accum_in_node, quant_op_node)\n graph.link_to(quant_op_node, state_out_node)\n graph.link_to(quant_op_node, accum_out_node)\n\n return quant_var_node, scale_out_node\n", "# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport unittest\nimport numpy as np\nfrom paddle.fluid.tests.unittests.op_test import skip_check_grad_ci\nfrom paddle.fluid.tests.unittests.test_elementwise_mul_op import ElementwiseMulOp\nfrom paddle import enable_static\n\n\nclass TestMKLDNNElementwiseMulOp(ElementwiseMulOp):\n    def init_kernel_type(self):\n        self.use_mkldnn = True\n\n    def init_dtype(self):\n        self.dtype = np.float32\n\n\nclass TestMKLDNNElementwiseMulOp2(TestMKLDNNElementwiseMulOp):\n    def init_input_output(self):\n        self.x = np.random.random((100, )).astype(self.dtype)\n        self.y = np.random.random((100, )).astype(self.dtype)\n        self.out = np.multiply(self.x, self.y)\n\n\nclass TestMKLDNNElementwiseMulOp3(TestMKLDNNElementwiseMulOp):\n    def init_input_output(self):\n        self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)\n        self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)\n        self.out = np.multiply(self.x, self.y)\n\n\nclass TestMKLDNNElementwiseMulOp4(TestMKLDNNElementwiseMulOp):\n    def init_input_output(self):\n        self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype)\n        self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype)\n        self.out = np.multiply(self.x, self.y)\n\n    # TODO(jczaja): Enable when grad is ready\n    def test_check_grad_normal(self):\n        pass\n\n    def test_check_grad_ingore_y(self):\n        pass\n\n\nclass TestMKLDNNElementwiseMulOp5(TestMKLDNNElementwiseMulOp):\n    def init_input_output(self):\n        self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype)\n        self.y = np.random.uniform(1, 2, [100]).astype(self.dtype)\n        self.out = np.multiply(self.x, self.y)\n\n\n''' INT8 Tests '''\n\n\n@skip_check_grad_ci(\n    reason=\"oneDNN's int8 elementwise_ops don't implement grad kernel.\")\nclass TestInt8(ElementwiseMulOp):\n    def init_kernel_type(self):\n        self.use_mkldnn = True\n        self._cpu_only = True\n\n    def init_dtype(self):\n        self.dtype = np.int8\n\n    def init_input_output(self):\n        self.x = np.random.randint(0, 3, (12, 9)).astype(\"int8\")\n        self.y = np.random.randint(0, 3, (12, 9)).astype(\"int8\")\n        self.out = np.multiply(self.x, self.y)\n\n    def init_scales(self):\n        self.attrs['Scale_x'] = 1.0\n        self.attrs['Scale_y'] = 1.0\n        self.attrs['Scale_out'] = 1.0\n\n    def test_check_output(self):\n        # TODO(wangzhongpu): support mkldnn op in dygraph mode\n        self.init_scales()\n        self.check_output(check_dygraph=(self.use_mkldnn == False))\n\n    def test_check_grad_normal(self):\n        pass\n\n    def test_check_grad_ingore_x(self):\n        pass\n\n    def test_check_grad_ingore_y(self):\n        pass\n\n\nclass TestInt8Scales(TestInt8):\n    def quantize(self, tensor, dt=\"int8\"):\n        max_int = 127.0 if dt == \"int8\" else 255.0\n        scale = max_int / np.abs(np.amax(tensor))\n        quantized = np.round(scale * tensor).astype(dt)\n        return scale, quantized\n\n    def init_input_output(self):\n        self.x_f = np.random.random((100, )).astype(\"float\")\n        self.y_f = np.random.random((100, )).astype(\"float\")\n        self.out_f = 
np.multiply(self.x_f, self.y_f)\n\n self.scale_x, self.x = self.quantize(self.x_f)\n self.scale_y, self.y = self.quantize(self.y_f)\n self.scale_o, self.out = self.quantize(self.out_f)\n\n def init_scales(self):\n self.attrs['Scale_x'] = self.scale_x\n self.attrs['Scale_y'] = self.scale_y\n self.attrs['Scale_out'] = self.scale_o\n\n def test_check_output(self):\n # TODO(wangzhongpu): support mkldnn op in dygraph mode\n self.init_scales()\n int_atol = 1 # different quantization techniques\n self.check_output(\n check_dygraph=(self.use_mkldnn == False), atol=int_atol)\n\n\nclass TestUint8Scales(TestInt8Scales):\n def init_input_output(self):\n self.x_f = np.random.random((100, )).astype(\"float\")\n self.y_f = np.random.random((100, )).astype(\"float\")\n self.out_f = np.multiply(self.x_f, self.y_f)\n\n self.scale_x, self.x = self.quantize(self.x_f, \"uint8\")\n self.scale_y, self.y = self.quantize(self.y_f, \"uint8\")\n self.scale_o, self.out = self.quantize(self.out_f, \"uint8\")\n\n def init_dtype(self):\n self.dtype = np.uint8\n\n\nif __name__ == '__main__':\n enable_static()\n unittest.main()\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\n\nimport paddle.fluid as fluid\nimport paddle.fluid.framework as framework\nimport paddle.fluid.optimizer as optimizer\nimport paddle.fluid.core as core\nimport paddle.compat as cpt\nimport numpy as np\nfrom paddle.fluid.backward import append_backward\nfrom paddle.fluid.framework import Program, program_guard, convert_np_dtype_to_dtype_\nimport paddle\npaddle.enable_static()\n\n\nclass TestOptimizer(unittest.TestCase):\n def test_sgd_optimizer(self):\n def check_sgd_optimizer(optimizer_attr):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr=optimizer_attr)\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)\n opts, _ = sgd_optimizer.minimize(mean_out, init_program)\n return opts\n\n opts = check_sgd_optimizer({'learning_rate': 1.1})\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"sgd\"])\n\n opts = check_sgd_optimizer({'learning_rate': 1.0})\n self.assertEqual(len(opts), 1)\n self.assertEqual([op.type for op in opts], [\"sgd\"])\n\n\nclass TestOptimizerBackwardApplygrad(unittest.TestCase):\n def 
test_sgd_optimizer(self):\n def check_sgd_optimizer(optimizer_attr):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr=optimizer_attr)\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)\n with framework.program_guard(program, init_program):\n p_g = sgd_optimizer.backward(mean_out)\n opts = sgd_optimizer.apply_gradients(p_g)\n return opts\n\n opts = check_sgd_optimizer({'learning_rate': 1.1})\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"sgd\"])\n\n opts = check_sgd_optimizer({'learning_rate': 1.0})\n self.assertEqual(len(opts), 1)\n self.assertEqual([op.type for op in opts], [\"sgd\"])\n\n\nclass TestMomentumOptimizer(unittest.TestCase):\n class MockMomentum(optimizer.MomentumOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_velocity_str(self):\n return self._velocity_acc_str\n\n def test_vanilla_momentum_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n learning_rate = 0.01\n momentum_optimizer = self.MockMomentum(\n learning_rate=learning_rate, momentum=0.2)\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = momentum_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n sgd_op = opts[-1]\n self.assertEqual([op.type for op in opts], [\"scale\", \"momentum\"])\n self.assertFalse(sgd_op.attr('use_nesterov'))\n\n # Check accumulators\n accumulators = momentum_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 1)\n self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)\n velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]\n self.assertEqual(len(velocity_acc), 1)\n self.assertTrue(mul_x.name in velocity_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 2)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n self.assertEqual(init_ops[1].type, \"fill_constant\")\n 
self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)\n\n def test_nesterov_momentum_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n momentum_optimizer = self.MockMomentum(\n learning_rate=learning_rate, momentum=0.2, use_nesterov=True)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = momentum_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n sgd_op = opts[-1]\n self.assertEqual([op.type for op in opts], [\"scale\", \"momentum\"])\n self.assertTrue(sgd_op.attr('use_nesterov'))\n\n # Check accumulators\n accumulators = momentum_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 1)\n self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)\n velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]\n self.assertEqual(len(velocity_acc), 1)\n self.assertTrue(mul_x.name in velocity_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 2)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n self.assertEqual(init_ops[1].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)\n\n\nclass TestAdagradOptimizer(unittest.TestCase):\n class MockAdagrad(optimizer.AdagradOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_moment_str(self):\n return self._moment_acc_str\n\n def test_adagrad_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n adagrad_optimizer = self.MockAdagrad(\n learning_rate=learning_rate, epsilon=1.0e-6)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = adagrad_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n 
self.assertEqual([op.type for op in opts], [\"scale\", \"adagrad\"])\n\n # Check accumulators\n accumulators = adagrad_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 1)\n self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators)\n moment_acc = accumulators[adagrad_optimizer.get_moment_str()]\n self.assertEqual(len(moment_acc), 1)\n self.assertTrue(mul_x.name in moment_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 2)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n self.assertEqual(init_ops[1].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)\n\n\nclass TestAdamOptimizer(unittest.TestCase):\n class MockAdam(optimizer.AdamOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_moment1_str(self):\n return self._moment1_acc_str\n\n def get_moment2_str(self):\n return self._moment2_acc_str\n\n def test_adam_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n adam_optimizer = self.MockAdam(\n learning_rate=learning_rate, beta1=0.9, beta2=0.999)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(adam_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = adam_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"adam\"])\n\n # Check accumulators\n accumulators = adam_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 4)\n self.assertTrue(adam_optimizer.get_moment1_str() in accumulators)\n self.assertTrue(adam_optimizer.get_moment2_str() in accumulators)\n moment1_acc = accumulators[adam_optimizer.get_moment1_str()]\n moment2_acc = accumulators[adam_optimizer.get_moment2_str()]\n self.assertEqual(len(moment1_acc), 1)\n self.assertEqual(len(moment2_acc), 1)\n self.assertTrue(mul_x.name in moment1_acc)\n self.assertTrue(mul_x.name in moment2_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 5)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n\n\nclass TestAdamaxOptimizer(unittest.TestCase):\n class MockAdamax(optimizer.AdamaxOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_moment_str(self):\n return self._moment_acc_str\n\n def get_inf_norm_str(self):\n return self._inf_norm_acc_str\n\n def test_adamax_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 
10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n adamax_optimizer = self.MockAdamax(\n learning_rate=learning_rate, beta1=0.9, beta2=0.999)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = adamax_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 3)\n self.assertEqual([op.type for op in opts], [\"scale\", \"adamax\", \"scale\"])\n\n # Check accumulators\n accumulators = adamax_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 3)\n self.assertTrue(adamax_optimizer.get_moment_str() in accumulators)\n self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators)\n moment_acc = accumulators[adamax_optimizer.get_moment_str()]\n inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()]\n self.assertEqual(len(moment_acc), 1)\n self.assertEqual(len(inf_norm_acc), 1)\n self.assertTrue(mul_x.name in moment_acc)\n self.assertTrue(mul_x.name in inf_norm_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 4)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n\n\nclass TestDpsgdOptimizer(unittest.TestCase):\n def test_dpsgd_optimizer(self):\n def check_dpsgd_optimizer(optimizer_attr):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr=optimizer_attr)\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n dpsgd_optimizer = optimizer.DpsgdOptimizer(\n learning_rate=0.01, clip=100.0, batch_size=16.0, sigma=0.0)\n opts, _ = dpsgd_optimizer.minimize(mean_out, init_program)\n return opts\n\n opts = check_dpsgd_optimizer({\n 'learning_rate': 1.1,\n 'clip': 100.0,\n 'batch_size': 16.0,\n 'sigma': 4.0\n })\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"dpsgd\"])\n\n\nclass TestDecayedAdagradOptimizer(unittest.TestCase):\n class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_moment_str(self):\n return self._moment_acc_str\n\n def test_decayed_adagrad_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = 
block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n decayed_adagrad_optimizer = self.MockDecayedAdagrad(\n learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = decayed_adagrad_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"decayed_adagrad\"])\n\n # Check accumulators\n accumulators = decayed_adagrad_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 1)\n self.assertTrue(\n decayed_adagrad_optimizer.get_moment_str() in accumulators)\n moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()]\n self.assertEqual(len(moment_acc), 1)\n self.assertTrue(mul_x.name in moment_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 2)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n self.assertEqual(init_ops[1].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)\n\n\nclass TestFtrlOptimizer(unittest.TestCase):\n class MockFtrl(optimizer.FtrlOptimizer):\n def get_accumulators(self):\n return self._accumulators\n\n def get_squared_str(self):\n return self._squared_acc_str\n\n def get_linear_str(self):\n return self._linear_acc_str\n\n def test_ftrl_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n learning_rate = 0.01\n ftrl_optimizer = self.MockFtrl(\n learning_rate=learning_rate, l1=0.0, l2=0.0, lr_power=-0.5)\n params_grads = append_backward(mean_out)\n self.assertEqual(len(params_grads), 1)\n self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0)\n with framework.program_guard(program, init_program):\n opts = ftrl_optimizer.apply_gradients(params_grads)\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"ftrl\"])\n\n # Check accumulators\n accumulators = ftrl_optimizer.get_accumulators()\n self.assertEqual(len(accumulators), 2)\n 
self.assertTrue(ftrl_optimizer.get_squared_str() in accumulators)\n self.assertTrue(ftrl_optimizer.get_linear_str() in accumulators)\n squared_acc = accumulators[ftrl_optimizer.get_squared_str()]\n linear_acc = accumulators[ftrl_optimizer.get_linear_str()]\n self.assertEqual(len(squared_acc), 1)\n self.assertEqual(len(linear_acc), 1)\n self.assertTrue(mul_x.name in squared_acc)\n self.assertTrue(mul_x.name in linear_acc)\n\n # Check init_program\n init_ops = init_program.global_block().ops\n self.assertEqual(len(init_ops), 3)\n self.assertEqual(init_ops[0].type, \"fill_constant\")\n self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)\n\n\nclass TestLookaheadOptimizer(unittest.TestCase):\n def test_lookahead_optimizer(self):\n init_program = framework.Program()\n program = framework.Program()\n block = program.global_block()\n init_block = init_program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\",\n shape=[5, 10],\n lod_level=0,\n name=\"mul.x\",\n optimize_attr={'learning_rate': 1.1})\n init_mul_x = init_block.create_parameter(\n dtype=\"float32\", shape=[5, 10], lod_level=0, name=\"mul.x\")\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n block.append_op(\n type=\"mean\", inputs={\"X\": mul_out}, outputs={\"Out\": mean_out})\n\n sgd = optimizer.SGD(learning_rate=0.01)\n lookahead = optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=5)\n with framework.program_guard(program, init_program):\n opts, _ = lookahead.minimize(mean_out)\n self.assertEqual(len(opts), 2)\n self.assertEqual([op.type for op in opts], [\"scale\", \"sgd\"])\n\n\nclass TestRecomputeOptimizer(unittest.TestCase):\n def net(self, return_input=False, with_dropout=False):\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\", shape=[5, 10], lod_level=0, name=\"mul.x\")\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n if with_dropout == True:\n mul_out_drop = block.create_var(\n dtype=\"float32\",\n shape=[5, 8],\n lod_level=0,\n name=\"mul.out.dropout\")\n mul_out_mask = block.create_var(\n dtype=\"uint8\", shape=[5, 8], lod_level=0, name=\"mul.out.mask\")\n b1 = block.create_parameter(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b1\")\n b1_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b1_out\")\n b2 = block.create_parameter(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b2\")\n b2_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b2_out\")\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n if with_dropout == True:\n block.append_op(\n type='dropout',\n inputs={'X': [mul_out]},\n outputs={'Out': [mul_out_drop],\n 'Mask': [mul_out_mask]},\n attrs={'dropout_prob': 0.5, })\n block.append_op(\n type=\"elementwise_add\",\n inputs={\"X\": mul_out_drop,\n \"Y\": 
b1},\n outputs={\"Out\": b1_out})\n else:\n block.append_op(\n type=\"elementwise_add\",\n inputs={\"X\": mul_out,\n \"Y\": b1},\n outputs={\"Out\": b1_out})\n block.append_op(\n type=\"elementwise_add\",\n inputs={\"X\": b1_out,\n \"Y\": b2},\n outputs={\"Out\": b2_out})\n block.append_op(\n type=\"mean\", inputs={\"X\": b2_out}, outputs={\"Out\": mean_out})\n\n if return_input == True:\n return mul_x, mul_out, b1_out, b2_out, mean_out\n return mul_out, b1_out, b2_out, mean_out\n\n def test_no_checkpoint(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 12)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\",\n \"elementwise_add_grad\", \"mul_grad\", \"sgd\", \"sgd\", \"sgd\"\n ])\n\n def test_one_checkpoint(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b1_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 13)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\", \"mul\",\n \"elementwise_add_grad\", \"mul_grad\", \"sgd\", \"sgd\", \"sgd\"\n ])\n\n def test_str_checkpoints(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b1_out.name])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 13)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\", \"mul\",\n \"elementwise_add_grad\", \"mul_grad\", \"sgd\", \"sgd\", \"sgd\"\n ])\n\n def test_multi_checkpoint(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([mul_out, b2_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 13)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n 
\"fill_constant\", \"mean_grad\", \"elementwise_add\",\n \"elementwise_add_grad\", \"elementwise_add_grad\", \"mul_grad\", \"sgd\",\n \"sgd\", \"sgd\"\n ])\n\n def test_adjacent_checkpoint(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([mul_out, b1_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 12)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\",\n \"elementwise_add_grad\", \"mul_grad\", \"sgd\", \"sgd\", \"sgd\"\n ])\n\n def test_out_of_order_checkpoint(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b2_out, mul_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 13)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add\",\n \"elementwise_add_grad\", \"elementwise_add_grad\", \"mul_grad\", \"sgd\",\n \"sgd\", \"sgd\"\n ])\n\n def test_input_as_checkpoints(self):\n mul_x, mul_out, b1_out, b2_out, mean_out = self.net(return_input=True)\n self.assertEqual(len(mean_out.block.ops), 4)\n self.assertEqual([op.type for op in mean_out.block.ops],\n [\"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([mul_x, b2_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 14)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"mul\", \"elementwise_add\",\n \"elementwise_add_grad\", \"elementwise_add_grad\", \"mul_grad\", \"sgd\",\n \"sgd\", \"sgd\"\n ])\n\n def test_apply_gradients(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b1_out])\n # apply backward\n params_grads = recompute_optimizer.backward(\n mean_out,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None)\n\n # apply gradient\n program = mean_out.block.program\n with framework.program_guard(program, None):\n optimize_ops = recompute_optimizer.apply_gradients(params_grads)\n\n self.assertEqual(len(mean_out.block.ops), 13)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"elementwise_add\", \"elementwise_add\", \"mean\",\n \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\", \"mul\",\n \"elementwise_add_grad\", \"mul_grad\", \"sgd\", \"sgd\", \"sgd\"\n ])\n\n 
def test_load(self):\n mul_out, b1_out, b2_out, mean_out = self.net()\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b1_out])\n try:\n state_dict = {}\n recompute_optimizer.load(state_dict)\n except NotImplementedError as e:\n self.assertEqual(\n \"load function is not supported by Recompute Optimizer for now\",\n cpt.get_exception_message(e))\n\n def test_dropout(self):\n \"\"\"\n If there are dropout layers in the forward nets, we should add a\n seed op\n \"\"\"\n mul_out, b1_out, b2_out, mean_out = self.net(with_dropout=True)\n self.assertEqual(len(mean_out.block.ops), 5)\n self.assertEqual(\n [op.type for op in mean_out.block.ops],\n [\"mul\", \"dropout\", \"elementwise_add\", \"elementwise_add\", \"mean\"])\n sgd_optimizer = optimizer.SGD(learning_rate=1.0)\n recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)\n recompute_optimizer._set_checkpoints([b1_out])\n opts, params_grads = recompute_optimizer.minimize(mean_out)\n\n self.assertEqual(len(mean_out.block.ops), 17)\n self.assertEqual([op.type for op in mean_out.block.ops], [\n \"mul\", \"seed\", \"dropout\", \"elementwise_add\", \"elementwise_add\",\n \"mean\", \"fill_constant\", \"mean_grad\", \"elementwise_add_grad\", \"mul\",\n \"dropout\", \"elementwise_add_grad\", \"dropout_grad\", \"mul_grad\",\n \"sgd\", \"sgd\", \"sgd\"\n ])\n\n def test_dropout_with_seed(self):\n \"\"\"\n when we recompute a dropout op, make sure that the recomputed one\n\t is the same as the original var.\n\t \"\"\"\n\n def gen_data():\n return {\n \"x\": np.random.random(size=(100, 3)).astype('float32'),\n \"y\": np.random.randint(\n 2, size=(100, 1)).astype('int64')\n }\n\n def mlp(input_x, input_y):\n drop_res = fluid.layers.dropout(\n input_x, dropout_prob=0.5, name=\"dropout_with_seed_cpu\")\n prediction = fluid.layers.fc(input=[drop_res],\n size=2,\n act='softmax')\n cost = fluid.layers.cross_entropy(input=prediction, label=input_y)\n sum_cost = fluid.layers.reduce_mean(cost)\n return drop_res, prediction, sum_cost\n\n main_program = Program()\n startup_program = Program()\n scope = fluid.Scope()\n with fluid.scope_guard(scope):\n with program_guard(main_program, startup_program):\n input_x = fluid.layers.data(\n name=\"x\", shape=[3], dtype='float32')\n input_y = fluid.layers.data(name=\"y\", shape=[1], dtype='int64')\n drop_res, prediction, cost = mlp(input_x, input_y)\n sgd = fluid.optimizer.Adam(learning_rate=0.01)\n sgd = fluid.optimizer.RecomputeOptimizer(sgd)\n sgd._set_checkpoints([prediction])\n sgd.minimize(cost)\n\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n feed_data = gen_data()\n drop_vec = exe.run(feed=feed_data,\n program=fluid.default_main_program(),\n fetch_list=[\n \"dropout_with_seed_cpu.tmp_1\",\n \"dropout_with_seed_cpu.tmp_1.subprog_0\"\n ])\n self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist())\n\n\[email protected](not core.is_compiled_with_cuda(),\n \"core is not compiled with CUDA\")\nclass TestRecomputeOptimizerCUDA(unittest.TestCase):\n def test_dropout_with_seed(self):\n \"\"\"\n when we recompute a dropout op, make sure that the recomputed one\n is the same as the original var.\n \"\"\"\n\n def gen_data():\n return {\n \"x\": np.random.random(size=(100, 3)).astype('float32'),\n \"y\": np.random.randint(\n 2, size=(100, 1)).astype('int64')\n }\n\n def mlp(input_x, input_y):\n drop_res = fluid.layers.dropout(\n input_x, 
dropout_prob=0.5, name=\"dropout_with_seed_gpu\")\n prediction = fluid.layers.fc(input=[drop_res],\n size=2,\n act='softmax')\n cost = fluid.layers.cross_entropy(input=prediction, label=input_y)\n sum_cost = fluid.layers.reduce_mean(cost)\n return drop_res, prediction, sum_cost\n\n main_program = Program()\n startup_program = Program()\n scope = fluid.Scope()\n with fluid.scope_guard(scope):\n with program_guard(main_program, startup_program):\n input_x = fluid.layers.data(\n name=\"x\", shape=[3], dtype='float32')\n input_y = fluid.layers.data(name=\"y\", shape=[1], dtype='int64')\n drop_res, prediction, cost = mlp(input_x, input_y)\n sgd = fluid.optimizer.Adam(learning_rate=0.01)\n sgd = fluid.optimizer.RecomputeOptimizer(sgd)\n sgd._set_checkpoints([prediction])\n sgd.minimize(cost)\n\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n feed_data = gen_data()\n drop_vec = exe.run(feed=feed_data,\n program=fluid.default_main_program(),\n fetch_list=[\n \"dropout_with_seed_gpu.tmp_1\",\n \"dropout_with_seed_gpu.tmp_1.subprog_0\"\n ])\n self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist())\n\n\nclass TestGradientMergeOptimizer(unittest.TestCase):\n def net(self):\n program = framework.Program()\n block = program.global_block()\n mul_x = block.create_parameter(\n dtype=\"float32\", shape=[5, 10], lod_level=0, name=\"mul.x\")\n mul_y = block.create_var(\n dtype=\"float32\", shape=[10, 8], lod_level=0, name=\"mul.y\")\n mul_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"mul.out\")\n b1 = block.create_parameter(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b1\")\n b1_out = block.create_var(\n dtype=\"float32\", shape=[5, 8], lod_level=0, name=\"b1_out\")\n mean_out = block.create_var(\n dtype=\"float32\", shape=[1], lod_level=0, name=\"mean.out\")\n block.append_op(\n type=\"mul\",\n inputs={\"X\": mul_x,\n \"Y\": mul_y},\n outputs={\"Out\": mul_out},\n attrs={\"x_num_col_dims\": 1})\n block.append_op(\n type=\"elementwise_add\",\n inputs={\"X\": mul_out,\n \"Y\": b1},\n outputs={\"Out\": b1_out})\n block.append_op(\n type=\"mean\", inputs={\"X\": b1_out}, outputs={\"Out\": mean_out})\n return mean_out\n\n def test_program_desc(self, ):\n cost = self.net()\n main_program = cost.block.program\n init_program = framework.Program()\n self.assertEqual(main_program.num_blocks, 1)\n self.assertEqual(len(cost.block.ops), 3)\n self.assertEqual([op.type for op in cost.block.ops],\n [\"mul\", \"elementwise_add\", \"mean\"])\n\n opt = optimizer.SGD(learning_rate=1.0)\n opt = optimizer.GradientMergeOptimizer(opt, k_steps=4)\n with framework.program_guard(main_program, init_program):\n ops, params_grads = opt.minimize(cost)\n\n self.assertEqual(main_program.num_blocks, 2)\n\n # main block\n self.assertEqual(len(cost.block.ops), 13)\n self.assertEqual(\n [op.type for op in cost.block.ops],\n [\n 'mul',\n 'elementwise_add',\n 'mean',\n 'fill_constant',\n 'mean_grad',\n 'elementwise_add_grad',\n 'mul_grad',\n 'increment', # step += 1\n 'elementwise_mod', # step %= k_steps\n 'equal', # cond_var == (step == 0)\n 'elementwise_add',\n 'elementwise_add',\n 'conditional_block',\n ])\n\n # optimize block\n self.assertEqual(len(main_program.block(1).ops), 6)\n self.assertEqual([op.type for op in main_program.block(1).ops], [\n 'scale', 'scale', 'sgd', 'sgd', 'fill_constant', 'fill_constant'\n ])\n\n\nclass TestOptimizerDtype(unittest.TestCase):\n '''\n The dtype of optimizer should be inferred by parameters, and the 
learning rate\n is cteated with the same dtype.\n '''\n\n def check_with_dtype(self, dtype):\n class MyLayer(paddle.nn.Layer):\n def __init__(self, dtype):\n super(MyLayer, self).__init__()\n self._w = self.create_parameter([2, 3], dtype=dtype)\n self._b = self.create_parameter([2, 3], dtype=dtype)\n\n def forward(self, x):\n return x * self._w + self._b\n\n with paddle.fluid.dygraph.guard():\n model = MyLayer(dtype)\n x = paddle.rand([10, 2, 3], dtype=dtype)\n loss = model(x)\n adam = paddle.optimizer.Adam(parameters=model.parameters())\n loss.backward()\n adam.step()\n self.assertEqual(adam._dtype, convert_np_dtype_to_dtype_(dtype))\n\n def test_float64(self):\n self.check_with_dtype('float64')\n\n def test_float32(self):\n self.check_with_dtype('float32')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\nfrom paddle.fluid.core import AnalysisConfig\n\n\nclass TensorRTMatMulDims2Test(InferencePassTest):\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\", shape=[24, 24], dtype=\"float32\")\n matmul_out = fluid.layers.matmul(\n x=data,\n y=data,\n transpose_x=self.transpose_x,\n transpose_y=self.transpose_y,\n alpha=self.alpha)\n out = fluid.layers.batch_norm(matmul_out, is_test=True)\n\n self.feeds = {\"data\": np.ones([24, 24]).astype(\"float32\"), }\n self.enable_trt = True\n self.trt_parameters = TensorRTMatMulDims2Test.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.fetch_list = [out]\n\n def set_params(self):\n self.transpose_x = True\n self.transpose_y = True\n self.alpha = 2.0\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n self.check_output_with_option(use_gpu)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTMatMulTest(InferencePassTest):\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 6, 24, 24], dtype=\"float32\")\n matmul_out = fluid.layers.matmul(\n x=data,\n y=data,\n transpose_x=self.transpose_x,\n transpose_y=self.transpose_y,\n alpha=self.alpha)\n out = fluid.layers.batch_norm(matmul_out, is_test=True)\n\n self.feeds = {\"data\": np.ones([1, 6, 24, 24]).astype(\"float32\"), }\n self.enable_trt = True\n self.trt_parameters = TensorRTMatMulTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.fetch_list = [out]\n\n def set_params(self):\n self.transpose_x = False\n self.transpose_y = False\n self.alpha = 1.0\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n 
self.check_output_with_option(use_gpu)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTMatMulTransposeXTest(TensorRTMatMulTest):\n def set_params(self):\n self.transpose_x = True\n self.transpose_y = False\n self.alpha = 1.0\n\n\nclass TensorRTMatMulTransposeYTest(TensorRTMatMulTest):\n def set_params(self):\n self.transpose_x = False\n self.transpose_y = True\n self.alpha = 1.0\n\n\nclass TensorRTMatMulScaleTest(TensorRTMatMulTest):\n def set_params(self):\n self.transpose_x = False\n self.transpose_y = False\n self.alpha = 2.0\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.random", "numpy.random.seed" ], [ "numpy.sum", "numpy.random.seed" ], [ "numpy.resize", "numpy.ones", "numpy.round", "numpy.array", "numpy.zeros" ], [ "numpy.amax", "numpy.random.random", "numpy.multiply", "numpy.round", "numpy.random.uniform", "numpy.random.randint" ], [ "numpy.random.random", "numpy.random.randint" ], [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcvenkat/Python-Programs
[ "2ff66bbd5b07c8e093b11360e1dcac06740a5024" ]
[ "ARIMA1- Air Passengers.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 29 11:20:51 2020\r\n\r\n@author: 766810\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom statsmodels.tsa.seasonal import seasonal_decompose\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\n\r\ndf = pd.read_csv('airpassengers.csv', parse_dates = ['year'], index_col = ['year'])\r\ndf.head()\r\nplt.xlabel('Date')\r\nplt.ylabel('Number of air passengers')\r\nplt.plot(df)\r\n\r\n#Rolling Statistics\r\nrolling_mean = df.rolling(window = 12).mean()\r\nrolling_std = df.rolling(window = 12).std()\r\nplt.plot(df, color = 'blue', label = 'Original')\r\nplt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')\r\nplt.plot(rolling_std, color = 'black', label = 'Rolling Std')\r\nplt.legend(loc = 'best')\r\nplt.title('Rolling Mean & Rolling Standard Deviation')\r\nplt.show()\r\n\r\n\r\n#ADF Statistics\r\nresult = adfuller(df['Passengers'])\r\nprint('ADF Statistic: {}'.format(result[0]))\r\nprint('p-value: {}'.format(result[1]))\r\nprint('Critical Values:')\r\nfor key, value in result[4].items():\r\n print('\\t{}: {}'.format(key, value))\r\n \r\ndf_log = np.log(df)\r\nplt.plot(df_log)\r\n\r\n\r\ndef get_stationarity(timeseries):\r\n \r\n # rolling statistics\r\n rolling_mean = timeseries.rolling(window=12).mean()\r\n rolling_std = timeseries.rolling(window=12).std()\r\n \r\n # rolling statistics plot\r\n original = plt.plot(timeseries, color='blue', label='Original')\r\n mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')\r\n std = plt.plot(rolling_std, color='black', label='Rolling Std')\r\n plt.legend(loc='best')\r\n plt.title('Rolling Mean & Standard Deviation')\r\n plt.show(block=False)\r\n \r\n # Dickey–Fuller test:\r\n result = adfuller(timeseries['Passengers'])\r\n print('ADF Statistic: {}'.format(result[0]))\r\n print('p-value: {}'.format(result[1]))\r\n print('Critical Values:')\r\n for key, value in result[4].items():\r\n print('\\t{}: {}'.format(key, value))\r\n\r\n\r\nrolling_mean = df_log.rolling(window=12).mean()\r\ndf_log_minus_mean = df_log - rolling_mean\r\ndf_log_minus_mean.dropna(inplace=True)\r\nget_stationarity(df_log_minus_mean)\r\n\r\n\r\nrolling_mean_exp_decay = df_log.ewm(halflife=12, min_periods=0, adjust=True).mean()\r\ndf_log_exp_decay = df_log - rolling_mean_exp_decay\r\ndf_log_exp_decay.dropna(inplace=True)\r\nget_stationarity(df_log_exp_decay)\r\n\r\ndf_log_shift = df_log - df_log.shift()\r\ndf_log_shift.dropna(inplace=True)\r\nget_stationarity(df_log_shift)\r\n\r\n\r\ndecomposition = seasonal_decompose(df_log) \r\nmodel = ARIMA(df_log, order=(2,1,2))\r\nresults = model.fit(disp=-1)\r\nplt.plot(df_log_shift)\r\nplt.plot(results.fittedvalues, color='red')\r\n\r\n\r\npredictions_ARIMA_diff = pd.Series(results.fittedvalues, copy=True)\r\npredictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\r\npredictions_ARIMA_log = pd.Series(df_log['Passengers'].iloc[0], index=df_log.index)\r\npredictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)\r\npredictions_ARIMA = np.exp(predictions_ARIMA_log)\r\nplt.plot(df)\r\nplt.plot(predictions_ARIMA)\r\n\r\nresults.plot_predict(1,264)\r\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "pandas.read_csv", "pandas.Series", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "pandas.plotting.register_matplotlib_converters", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amongstar/https-github.com-tflearn-tflearn
[ "af57b1759c0d251313c5bcde8cbb7274bf4b08c3", "8af77b5aebcb8aba0f1b855201aed732906c6de8" ]
[ "examples/nlp/cnn_sentence_classification.py", "examples/extending_tensorflow/summaries.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nSimple example using convolutional neural network to classify IMDB\nsentiment dataset.\n\nReferences:\n - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,\n and Christopher Potts. (2011). Learning Word Vectors for Sentiment\n Analysis. The 49th Annual Meeting of the Association for Computational\n Linguistics (ACL 2011).\n - Kim Y. Convolutional Neural Networks for Sentence Classification[C]. \n Empirical Methods in Natural Language Processing, 2014.\n\nLinks:\n - http://ai.stanford.edu/~amaas/data/sentiment/\n - http://emnlp2014.org/papers/pdf/EMNLP2014181.pdf\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport tensorflow as tf\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_1d, global_max_pool\nfrom tflearn.layers.merge_ops import merge\nfrom tflearn.layers.estimator import regression\nfrom tflearn.data_utils import to_categorical, pad_sequences\nfrom tflearn.datasets import imdb\n\n# IMDB Dataset loading\ntrain, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,\n valid_portion=0.1)\ntrainX, trainY = train\ntestX, testY = test\n\n# Data preprocessing\n# Sequence padding\ntrainX = pad_sequences(trainX, maxlen=100, value=0.)\ntestX = pad_sequences(testX, maxlen=100, value=0.)\n# Converting labels to binary vectors\ntrainY = to_categorical(trainY, nb_classes=2)\ntestY = to_categorical(testY, nb_classes=2)\n\n# Building convolutional network\nnetwork = input_data(shape=[None, 100], name='input')\nnetwork = tflearn.embedding(network, input_dim=10000, output_dim=128)\nbranch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer=\"L2\")\nbranch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer=\"L2\")\nbranch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer=\"L2\")\nnetwork = merge([branch1, branch2, branch3], mode='concat', axis=1)\nnetwork = tf.expand_dims(network, 2)\nnetwork = global_max_pool(network)\nnetwork = dropout(network, 0.5)\nnetwork = fully_connected(network, 2, activation='softmax')\nnetwork = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='target')\n# Training\nmodel = tflearn.DNN(network, tensorboard_verbose=0)\nmodel.fit(trainX, trainY, n_epoch = 5, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)\n", "\"\"\"\nThis example introduces the use of TFLearn functions to easily summarize\nvariables into tensorboard.\n\nTFLearn can summarize:\n- Loss / Accuracy: The model loss and accuracy over training steps.\n- Activations: Histogram of operation output values.(Requires to add each\n activation to monitor into tf.Graphkeys.ACTIVATIONS collection).\n- Gradients: Histogram of trainable variables gradient.\n- Weights: Histogram of trainable variables weights.\n- Weight Decay: Decay of trainable variables with regularizer. (Requires\n to add each decay into tf.Graphkeys.REGULARIZATION_LOSSES collection)\n- Sparsity: Sparsity of trainable variables.\n\nIt is useful to also be able to periodically monitor various variables\nduring training, e.g. confusion matrix entries or AUC metrics. This\ncan be done using \"validation_monitors\", an argument to regression or\nTrainOp; this argument takes a list of Tensor variables, and passes\nthem to the trainer, where they are evaluated each time a validation\nstep happens. 
The evaluation results are then summarized, and saved\nfor tensorboard visualization.\n\nSummaries are monitored according to the following verbose levels:\n- 0: Loss & Metric (Best speed).\n- 1: Loss, Metric & Gradients.\n- 2: Loss, Metric, Gradients & Weights.\n- 3: Loss, Metric, Gradients, Weights, Activations & Sparsity (Best\n Visualization).\n\nNote: If you are using TFLearn layers, summaries are automatically handled,\nso you do not need to manually add them.\n\n\"\"\"\n\nimport tensorflow as tf\nimport tflearn\n\n# Loading MNIST dataset\nimport tflearn.datasets.mnist as mnist\ntrainX, trainY, testX, testY = mnist.load_data(one_hot=True)\n\n# Define a dnn using Tensorflow\nwith tf.Graph().as_default():\n\n # Model variables\n X = tf.placeholder(\"float\", [None, 784])\n Y = tf.placeholder(\"float\", [None, 10])\n\n # Multilayer perceptron, with `tanh` functions activation monitor\n def dnn(x):\n with tf.name_scope('Layer1'):\n W1 = tf.Variable(tf.random_normal([784, 256]), name=\"W1\")\n b1 = tf.Variable(tf.random_normal([256]), name=\"b1\")\n x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))\n # Add this `tanh` op to activations collection or monitoring\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)\n # Add weights regularizer (Regul. summary automatically added)\n tflearn.add_weights_regularizer(W1, 'L2', weight_decay=0.001)\n\n with tf.name_scope('Layer2'):\n W2 = tf.Variable(tf.random_normal([256, 256]), name=\"W2\")\n b2 = tf.Variable(tf.random_normal([256]), name=\"b2\")\n x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))\n # Add this `tanh` op to activations collection or monitoring\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)\n # Add weights regularizer (Regul. summary automatically added)\n tflearn.add_weights_regularizer(W2, 'L2', weight_decay=0.001)\n\n with tf.name_scope('Layer3'):\n W3 = tf.Variable(tf.random_normal([256, 10]), name=\"W3\")\n b3 = tf.Variable(tf.random_normal([10]), name=\"b3\")\n x = tf.add(tf.matmul(x, W3), b3)\n\n return x\n\n net = dnn(X)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(net, Y))\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),\n name=\"acc\")\n\n # construct two varaibles to add as additional \"valiation monitors\"\n # these varaibles are evaluated each time validation happens (eg at a snapshot)\n # and the results are summarized and output to the tensorboard events file,\n # together with the accuracy and loss plots.\n #\n # Here, we generate a dummy variable given by the sum over the current\n # network tensor, and a constant variable. In practice, the validation\n # monitor may present useful information, like confusion matrix\n # entries, or an AUC metric.\n with tf.name_scope('CustomMonitor'):\n test_var = tf.reduce_sum(tf.cast(net, tf.float32), name=\"test_var\")\n test_const = tf.constant(32.0, name=\"custom_constant\")\n\n # Define a train op\n trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,\n validation_monitors=[test_var, test_const],\n metric=accuracy, batch_size=128)\n\n # Tensorboard logs stored in /tmp/tflearn_logs/. 
Using verbose level 2.\n trainer = tflearn.Trainer(train_ops=trainop,\n tensorboard_dir='/tmp/tflearn_logs/',\n tensorboard_verbose=2)\n # Training for 10 epochs.\n trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},\n n_epoch=10, show_metric=True, run_id='Summaries_example')\n\n # Run the following command to start tensorboard:\n # >> tensorboard --logdir=/tmp/tflearn_logs/\n # Navigate with your web browser to http://0.0.0.0:6006/\n" ]
[ [ "tensorflow.expand_dims" ], [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.Graph", "tensorflow.constant", "tensorflow.matmul", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.name_scope", "tensorflow.argmax", "tensorflow.add_to_collection", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
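Each record in this dump follows the same per-file layout: a repository name, one commit hexsha per file, and parallel lists of file paths, raw source strings, the third-party calls listed for each file (apis), and the library releases each file is thought to run against (possible_versions). The sketch below is one minimal way to consume that layout; the rows are written inline as plain Python dicts because the dump does not prescribe a loader, so the field names are the only detail carried over from the records themselves and the inline values are copied from nearby entries (DeepSM and syncmrt).

from collections import Counter

# Inline rows mirroring the per-file record layout of this dump; values are
# copied from records that appear nearby, trimmed to the fields used here.
rows = [
    {
        "repo_name": "Vivoe/DeepSM",
        "file_path": ["bin/evaluate_step_placement.py"],
        "apis": [["torch.load", "torch.save", "sklearn.metrics.roc_auc_score"]],
    },
    {
        "repo_name": "stylekilla/syncmrt",
        "file_path": ["QsWidgets/QsMpl/tools.py"],
        "apis": [["matplotlib.backend_tools.ToolToggleBase.__init__"]],
    },
]

# Tally which top-level libraries the detected calls belong to, across all files.
library_counts = Counter(
    call.split(".")[0]
    for row in rows
    for per_file_calls in row["apis"]
    for call in per_file_calls
)
print(library_counts.most_common())  # e.g. [('torch', 2), ('sklearn', 1), ('matplotlib', 1)]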
Vivoe/DeepSM
[ "bc35f2bfc3758199466079ec54de1d5297374921" ]
[ "bin/evaluate_step_placement.py" ]
[ "import os\nimport argparse\n\nimport numpy as np\nfrom sklearn import metrics\n\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.utils.data as datautils\n\nfrom deepSM import StepPlacement\nfrom deepSM import SMDUtils\nfrom deepSM import post_processing\nfrom deepSM import utils\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('placement_model', type=str)\nparser.add_argument('dataset_name', type=str)\nparser.add_argument('--n_batches', type=int, default=2000)\nparser.add_argument('--chunk_size', type=int, default=100)\nparser.add_argument('--batch_size', type=int, default=128)\n\nargs = parser.parse_args()\n\nprint(\"Testing model\", args.placement_model)\nprint(\"Datset name:\", args.dataset_name)\n\ntest_dataset = SMDUtils.get_dataset_from_file(\n args.dataset_name + '_test',\n 'placement',\n chunk_size=args.chunk_size)\n\ntest_loader = datautils.DataLoader(\n test_dataset,\n num_workers = 4,\n batch_size = args.batch_size)\n\nmodel = StepPlacement.RegularizedRecurrentStepPlacementModel()\nmodel.load_state_dict(torch.load(args.placement_model))\nmodel.cuda()\n\noutputs, labels = model.predict(test_loader, max_batches=args.n_batches)\n\npmodel_str = args.placement_model.split('/')[-1][:-3]\ntorch.save(outputs, f'outputs_{args.dataset_name}_{pmodel_str}.torch')\n\ndef evaluate(outputs, labels):\n\n def zscore(x):\n return (x - x.mean()) / np.std(x)\n\n preds = zscore(outputs) > 1.5\n acc = metrics.accuracy_score(labels, preds)\n print(\"Accuracy:\", acc)\n\n print(\"Percent positive:\", preds.mean())\n\n roc = metrics.roc_auc_score(labels, outputs)\n print(\"ROC-AUC:\", roc)\n\n precision, recall, thresh = metrics.precision_recall_curve(labels, outputs)\n\n prauc = metrics.auc(recall, precision)\n print(\"PR-AUC:\", prauc)\n\n f1 = metrics.f1_score(labels, preds)\n print(\"F1 score:\", f1)\n\nprint(\"Smoothed preds results:\")\nsmoothed_outputs = post_processing.smooth_outputs(outputs)\nevaluate(smoothed_outputs, labels)\n\nprint(\"Naive preds results:\")\nevaluate(outputs, labels)\n\n\nutils.notify(\"DONE\")\n" ]
[ [ "sklearn.metrics.roc_auc_score", "torch.load", "torch.utils.data.DataLoader", "sklearn.metrics.precision_recall_curve", "numpy.std", "torch.save", "sklearn.metrics.auc", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
craiggua/NaturalLanguageGen
[ "de7c83ed08eded17528f8e55c07a969e0f409e8a" ]
[ "code/LanguageGenChars_Predict.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPurpose: Character level Natural Language Generation (NLG). This file loads a\r\n previously trained character NLG model from LanguageGenChars_train.py, \r\n and predicts subsequent chars.\r\n\r\nTo run: \r\n 1) Set constants below to be the same as the languagegenchars_train.py file\r\n 2) At Anaconda command prompt enter\r\n >> python languagegenchars_predict.py\r\n\r\n\"\"\"\r\n\r\n# ---\r\n# Libs\r\n\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime, timedelta\r\n\r\nimport re\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import LSTM, Dense, Dropout\r\nfrom tensorflow.keras import utils as keras_utils\r\n\r\n\r\n# ---\r\n# Constants\r\n\r\n# Set CURR_DIR to the subdir with this PY file. Everything else is relative to this subdir.\r\nCURR_DIR = \"C:\\\\NaturalLanguageGen\\\\code\"\r\n\r\n# Predictions reuses the previously cleaned file.\r\nINPUT_FILE = '..\\\\data\\\\Complete_Shakespeare_cleaned.txt'\r\n\r\nMODEL_WEIGHTS_FILE = \"..\\\\Saved_Model\\\\training_GenChars\\\\cp_Epoch_{epoch:02d}_Loss_{loss:.3f}.ckpt\"\r\nMODEL_WEIGHTS_DIR = os.path.dirname(MODEL_WEIGHTS_FILE)\r\n\r\n# The constants below MUST be the SAME as the model trained in LanguageGenChars_training.py.\r\nSEQ_LEN = 100\r\n#BATCH_SIZE = 256\r\nUNITS = 128\r\n\r\nNUM_CHARS_PREDICT = 200\r\n\r\n# ---\r\n# Funcs\r\n\r\ndef clean_text(text):\r\n \"\"\"\r\n Purpose: Pass a string, this func will remove everything and only leave \r\n A-Z, a-z and sentence endings. It will also remove brackets [] and \r\n everything between those brackets like [_Exit._], [_Exeunt._], etc.\r\n \"\"\"\r\n\r\n # Remove brackets and the text within the brackets. \r\n text = \"\".join(re.split(\"\\(|\\)|\\[|\\]\", text)[::2])\r\n\r\n # Remove quotes and replace with no space. \r\n text = re.sub(r\"[\\'\\\"\\‘\\’\\`\\ʹ]\", \"\", text) \r\n \r\n # Keep only a-z and sentence endings, everything else gets a space. \r\n new_string = re.sub(\"[^a-zA-Z.?!;]\", \" \", text).strip()\r\n \r\n # Remove consective spaces and leave only one space.\r\n new_string = re.sub(\" +\", \" \", new_string)\r\n \r\n new_string = new_string.lower()\r\n \r\n return(new_string)\r\n\r\n\r\n# ---\r\n# Main\r\n\r\nstart_time = datetime.now()\r\n\r\nos.chdir(CURR_DIR)\r\n\r\n# Load the previously cleaned file.\r\nwith open(INPUT_FILE, 'r', encoding='utf-8') as file:\r\n text = file.read()\r\n\r\n# Load less data for optional model evaluations below. \r\ntext = text[0:int(len(text)/4)]\r\n\r\n# NOTE: No need to clean here since the previously cleaned TXT file from \r\n# the training file is reused here. Specified above as INPUT_FILE.\r\n\r\n\r\n# NN's and other ML algorithms work with numeric data vs. text. Here set() \r\n# gets unique characters. Next, each unique character is assigned an integer\r\n# in the order in which the characters were sorted. \r\nchars = sorted(list(set(text)))\r\nchar_num_map = dict((c, i) for i, c in enumerate(chars))\r\n\r\ninput_char_len = len(text)\r\nvocab_len = len(chars)\r\n\r\nprint(\"Total number of characters overall:\", input_char_len)\r\nprint(\"Total unique characters:\", vocab_len)\r\n\r\n# Do a 1-time conversion to convert each char to it's integer representation.\r\nint_text = []\r\nint_text = [char_num_map[char] for char in text]\r\n\r\nx_seq_num = []\r\ny_pred_num = []\r\n\r\n# x_seq_num is a list of lists. The inner list is a sequence of SEQ_LEN. 
For \r\n# each input sequence, save a corresponding integer in y_pred_num[] to be \r\n# predicted for that sequence.\r\nfor i in range(0, input_char_len - SEQ_LEN, 1):\r\n \r\n # Define an input sequence of integers. \r\n x_seq_num.append(int_text[i:i + SEQ_LEN])\r\n\r\n # Holds 1 predicted integer associated with 1 x_seq_num sequence above. \r\n y_pred_num.append(int_text[i + SEQ_LEN])\r\n \r\n \r\nnum_sequences = len(x_seq_num)\r\nprint(\"\\nNumber of sequences:\", num_sequences)\r\n\r\n# Numpy reshape will reshape x_seq_num to have samples, sequence length and \r\n# input dimensions. This input is expected by our NN. \r\nX = np.reshape(x_seq_num, (num_sequences, SEQ_LEN, 1))\r\n\r\n# Normalize the integers to be within a range of zero to one. When a NN is fit \r\n# on scaled data that uses a small range of values (like zero to 1) the \r\n# network can be more effective learning the output. \r\nX = X/float(vocab_len) \r\n \r\n# One-hot encode the char numbers to be predicted. \r\ny = keras_utils.to_categorical(y_pred_num)\r\n\r\n# Define the model. \r\n# Note, If no activation function is chosen it defaults to activation = 'tanh', \r\n# however added this param to be explicit. See model.get_config() output below \r\n# for details.\r\n# Simple Model. \r\n# Must be the SAME as the model trained in LanguageGenChars_train.py.\r\nmodel = Sequential()\r\nmodel.add(LSTM(UNITS, activation='tanh', input_shape=(X.shape[1], X.shape[2]), return_sequences = False, name = \"layer_1\"))\r\nmodel.add(Dropout(0.2, name = \"layer_2\"))\r\nmodel.add(Dense(y.shape[1], activation='softmax', name = \"layer_3\"))\r\n\r\n\r\n'''\r\n# Deeper model. 100 sequence length, 3 LSTM layers, each having 700 units, trained \r\n# across 100 epochs. Takes 45 mins PER epoch on P3.2xlarge EC2 instance, very costly!\r\nmodel = Sequential()\r\nmodel.add(LSTM(UNITS, activation='tanh', input_shape=(X.shape[1], X.shape[2]), return_sequences=True))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(LSTM(UNITS, return_sequences=True))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(LSTM(UNITS))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(y.shape[1], activation='softmax'))\r\n'''\r\n\r\nmodel.summary()\r\n\r\n# Compile the model above. \r\nmodel.compile(loss = 'categorical_crossentropy', \r\n optimizer='adam',\r\n #optimizer = RMSprop(learning_rate=0.01), # Maybe try a different optimizer and learning rate. \r\n metrics = ['accuracy'])\r\n\r\nprint(\"\\nModel Config:\\n\", model.get_config() )\r\n\r\n# Optional - Evaluate the Untrained model. \r\nprint(\"\\nEvaluating the untrained model...\")\r\nloss, acc = model.evaluate(X, y, verbose=2)\r\nprint(\"\\nUntrained model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\nmodel_weights = tf.train.latest_checkpoint(MODEL_WEIGHTS_DIR) \r\nprint(\"\\nLoading best model weight file: %s\" % model_weights)\r\nmodel.load_weights(model_weights)\r\n\r\n# Required - Re-evaluate the trained model to get it going before making predictions.\r\nprint(\"\\nEvaluating the trained model...\")\r\nloss, acc = model.evaluate(X, y, verbose=2)\r\nprint(\"\\nTrained model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\n\r\n# Make a prediction. \r\n\r\nnum_to_char = dict((i, c) for i, c in enumerate(chars))\r\n\r\n# x_seq_num is a list of lists. The inner list is numbers of SEQ_LEN long.\r\n# Get a random starting point in the inner list for 1 numeric sequence to make \r\n# a prediction below. 
\r\nstart = np.random.randint(0, len(x_seq_num) - 1)\r\nsequence = x_seq_num[start]\r\n\r\nprint(\"\\n-----\\nTry predict method 1\\n-----\")\r\nprint(\"Random Seed:\")\r\nprint(\"\\\"\", ''.join([num_to_char[value] for value in sequence]), \"\\\"\")\r\nprint(\"\\nNLG chars:\")\r\n\r\ngen_text = \"\"\r\n\r\nfor i in range(NUM_CHARS_PREDICT):\r\n \r\n # Reshape to samples, sequence length and input dimensions.\r\n x = np.reshape(sequence, (1, len(sequence), 1))\r\n \r\n # If the training file normalized the numbers between 0 to 1 then add that here.\r\n x = x / float(vocab_len) \r\n \r\n prediction = model.predict(x, verbose=0)\r\n \r\n # Prediction is for all chars. The total chars is in input_char_len above. \r\n # Need to get the highest prediction with argmax. Next, convert that \r\n # prediction index location to the predicted char. \r\n index = np.argmax(prediction)\r\n pred_char = num_to_char[index]\r\n \r\n # Save the generated text to print below. \r\n gen_text = gen_text + pred_char\r\n\r\n # Add the argmax predicted index location to our sequence then truncate the\r\n # beginning of the sequence list by 1 so that the sequence list remains \r\n # SEQ_LEN long. \r\n sequence.append(index)\r\n sequence = sequence[1:len(sequence)]\r\n\r\nprint(gen_text)\r\n\r\n\r\nstart = np.random.randint(0, len(x_seq_num) - 1)\r\nsequence = x_seq_num[start]\r\n\r\nprint(\"\\n-----\\nTry predict method 2\\n-----\")\r\nprint(\"Random Seed:\")\r\nprint(\"\\\"\", ''.join([num_to_char[value] for value in sequence]), \"\\\"\")\r\nprint(\"\\nNLG chars:\")\r\n\r\ngen_text = \"\"\r\n\r\nfor i in range(NUM_CHARS_PREDICT):\r\n x = np.reshape(sequence, (1, len(sequence), 1))\r\n \r\n # If the training file normalized the numbers between 0 to 1 then add that here.\r\n x = x / float(vocab_len) \r\n \r\n prediction = model.predict(x, verbose=0)\r\n \r\n # Predictions for each char in the vocabulary. With this prediction method, \r\n # get a random prediction with random.choice(). Next, convert that \r\n # prediction index location to the predicted char. \r\n X = prediction[0] \r\n index = np.random.choice(len(X), p=X)\r\n \r\n pred_char = num_to_char[index]\r\n \r\n # Save the generated text to print below. \r\n gen_text = gen_text + pred_char\r\n\r\n # Add the argmax predicted index location to our sequence then truncate the\r\n # beginning of the sequence list by 1 so that the sequence list remains \r\n # SEQ_LEN long. \r\n sequence.append(index)\r\n sequence = sequence[1:len(sequence)]\r\n\r\nprint(gen_text)\r\n\r\n\r\n# Print stats about the run.\r\nend_time = datetime.now()\r\nelapsed_time = end_time - start_time\r\ntime_diff_mins = elapsed_time / timedelta(minutes=1)\r\nprint(\"\\nTotal runtime %.1f minutes or %.1f hours.\" % (time_diff_mins, time_diff_mins / 60))\r\n\r\n" ]
[ [ "tensorflow.train.latest_checkpoint", "numpy.reshape", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.LSTM", "numpy.argmax", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential", "tensorflow.keras.utils.to_categorical" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
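Because file_path and code are parallel lists within a record, a record can be materialized back into a working tree by pairing the two element-wise. The following is a hedged sketch of that round trip; the output directory name (reconstructed) is chosen here purely for illustration, and the record is trimmed to the fields the sketch needs, with the source string shortened rather than copied in full.

import os

# Hypothetical record trimmed to the parallel fields needed here; the path is
# taken from the NaturalLanguageGen record above, the source is abbreviated.
record = {
    "repo_name": "craiggua/NaturalLanguageGen",
    "file_path": ["code/LanguageGenChars_Predict.py"],
    "code": ["# -*- coding: utf-8 -*-\n# (file contents as stored in the record)\n"],
}

OUT_ROOT = "reconstructed"  # illustrative output directory, not part of the dump

for rel_path, source in zip(record["file_path"], record["code"]):
    dest = os.path.join(OUT_ROOT, record["repo_name"], rel_path)
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    # newline="" keeps the stored line endings exactly as recorded.
    with open(dest, "w", encoding="utf-8", newline="") as fh:
        fh.write(source)
    print("wrote", dest)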
stylekilla/syncmrt
[ "816bb57d80d6595719b8b9d7f027f4f17d0a6c0a" ]
[ "QsWidgets/QsMpl/tools.py" ]
[ "from matplotlib.backend_tools import ToolBase, ToolToggleBase, Cursors\nfrom PyQt5.QtCore import QObject, pyqtSignal\nimport logging\n\n\nclass ToolPickPoint(ToolToggleBase,QObject):\n\t\"\"\" Marker selection tool. \"\"\"\n\t# Tool options for matplotlib.\n\tdescription = 'Pick a point on the image'\n\timage = 'pick.png'\n\tcursor = Cursors.SELECT_REGION\n\tradio_group = 'default'\n\t# Qt5 signals.\n\tnewPoint = pyqtSignal(object,float,float)\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._idPress = None\n\n\tdef enable(self, event):\n\t\t\"\"\"Connect press/release events and lock the canvas\"\"\"\n\t\tself.figure.canvas.widgetlock(self)\n\t\t# Add marker on button release.\n\t\tself._idPress = self.figure.canvas.mpl_connect('button_release_event', self.newMarker)\n\n\tdef disable(self,*args):\n\t\t\"\"\"Release the canvas and disconnect press/release events\"\"\"\n\t\tself.figure.canvas.widgetlock.release(self)\n\t\tself.figure.canvas.mpl_disconnect(self._idPress)\n\n\tdef trigger(self, sender, event, data=None):\n\t\t# What happens when it is triggered?\n\t\tToolToggleBase.trigger(self, sender, event, data)\n\n\tdef newMarker(self, event):\n\t\t# Need to emit axis plus location.\n\t\t# Store the data.\n\t\tif (event.button == 1):\n\t\t\tself.newPoint.emit(event.inaxes,event.xdata,event.ydata)\n\nclass ToolPickIso(ToolToggleBase,QObject):\n\t\"\"\" Marker selection tool. \"\"\"\n\t# Tool options for matplotlib.\n\tdescription = 'Pick the isocenter to treat'\n\timage = 'pickIso.png'\n\tcursor = Cursors.SELECT_REGION\n\tradio_group = 'default'\n\t# Qt5 signals.\n\tnewIsocenter = pyqtSignal(object,float,float)\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._idPress = None\n\n\tdef enable(self, event):\n\t\t\"\"\"Connect press/release events and lock the canvas\"\"\"\n\t\tself.figure.canvas.widgetlock(self)\n\t\t# Add marker on button release.\n\t\tself._idPress = self.figure.canvas.mpl_connect('button_release_event', self.newIso)\n\n\tdef disable(self,*args):\n\t\t\"\"\"Release the canvas and disconnect press/release events\"\"\"\n\t\tself.figure.canvas.widgetlock.release(self)\n\t\tself.figure.canvas.mpl_disconnect(self._idPress)\n\n\tdef trigger(self, sender, event, data=None):\n\t\t# What happens when it is triggered?\n\t\tToolToggleBase.trigger(self, sender, event, data)\n\n\tdef newIso(self, event):\n\t\t# Need to emit axis plus location.\n\t\t# Store the data.\n\t\tif (event.button == 1):\n\t\t\tself.newIsocenter.emit(event.inaxes,event.xdata,event.ydata)\n\nclass ToolClearPoints(ToolBase,QObject):\n\t\"\"\" Clear markers tool. \"\"\"\n\tdescription = 'Clear the points in the images'\n\timage = 'clear.svg'\n\tradio_group = 'default'\n\tdefault_toggled = False\n\t# Qt5 signals.\n\tclearPoints = pyqtSignal()\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._button_pressed = None\n\t\tself._xypress = None\n\t\tself._idPress = None\n\t\tself._idRelease = None\n\n\tdef trigger(self, sender, event, data=None):\n\t\tself.clearPoints.emit()" ]
[ [ "matplotlib.backend_tools.ToolToggleBase.trigger", "matplotlib.backend_tools.ToolToggleBase.__init__" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangjq933/HowtoSim_Script
[ "90fb8cca87d47d2c45b8ff5d07a35e8a6c846685" ]
[ "aggregate_CDF.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 19 19:24:49 2019\r\n@author: mlin\r\n\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nfrom collections import OrderedDict\r\nimport numpy as np\r\nimport copy\r\n\r\nclass ffd(): \r\n def __init__(self, ffd_file, incident_Power_W=1):\r\n self.incident_Power_W=incident_Power_W\r\n \r\n with open(ffd_file) as f:\r\n self.theta=[int(i) for i in f.readline().split()] \r\n self.phi=[int(i) for i in f.readline().split()]\r\n f.readline()\r\n self.frequency=float(f.readline().split()[1])\r\n \r\n theta_range=np.linspace(*self.theta)\r\n phi_range= np.linspace(*self.phi)\r\n \r\n self._dtheta=theta_range[1]-theta_range[0]\r\n self._dphi=phi_range[1]-phi_range[0]\r\n self._theta=np.array([i for i in theta_range for j in phi_range]) \r\n \r\n EF=np.loadtxt(ffd_file, skiprows=4)\r\n \r\n Etheta=np.vectorize(complex)(EF[:,0], EF[:,1])\r\n Ephi=np.vectorize(complex)(EF[:,2], EF[:,3])\r\n self._EF=np.column_stack((Etheta, Ephi)) \r\n self._calculate()\r\n \r\n def __eq__(self, other):\r\n if self.theta!=other.theta:\r\n return False \r\n if self.phi!=other.phi:\r\n return False \r\n if self.frequency!=other.frequency:\r\n return False \r\n return True\r\n \r\n def __add__(self, other):\r\n if self==other:\r\n x=copy.deepcopy(self)\r\n x._EF+=other._EF\r\n x.incident_Power_W+=other.incident_Power_W\r\n x._calculate() \r\n return x\r\n \r\n def _calculate(self):\r\n pd=np.sum(np.power(np.absolute(self._EF), 2),1)/377/2\r\n self.U=max(pd)\r\n self.cell_area=np.radians(self._dtheta)*np.radians(self._dphi)*np.sin(np.radians(self._theta))\r\n #self.radiated_power=sum(self.cell_area*pd)\r\n #uniform_power=self.radiated_power/sum(self.cell_area)\r\n #self.peak_directivity=self.U/uniform_power\r\n \r\n self.realized_gain=10*np.log10(pd/(self.incident_Power_W/4/np.pi))\r\n self.peak_realized_gain=max(self.realized_gain)\r\n\r\n def compare(self, other):\r\n x=np.abs(self._EF)\r\n dx=np.abs(other._EF-self._EF)\r\n return np.amax(dx/x) \r\n \r\n def __call__(self, mag, phase):\r\n x=copy.deepcopy(self)\r\n x._EF=np.sqrt(mag)*np.exp(1j*np.radians(phase))*self._EF\r\n x.incident_Power_W=mag\r\n x._calculate()\r\n return x \r\n \r\n def getCDF(self):\r\n x, y=[], []\r\n accumulated_area=0\r\n for gain, area in sorted(zip(self.realized_gain, self.cell_area)):\r\n x.append(gain)\r\n accumulated_area+=area\r\n y.append(accumulated_area)\r\n return x, y/y[-1]\r\n \r\n def plotRealizedGain(self):\r\n plt.figure(figsize=(8, 4))\r\n size=(self.theta[2], self.phi[2])\r\n gain_map=self.realized_gain.reshape(size)\r\n plt.title('Map of Realized Gain(dB)')\r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)')\r\n maxV=np.max(gain_map)\r\n [row, col] = np.where(gain_map==maxV)\r\n plt.plot(col, row, 'w*')\r\n plt.annotate(round(maxV,3), (col+3, row+3), color='white')\r\n plt.imshow(gain_map, cmap='jet')\r\n plt.colorbar()\r\n CS=plt.contour(gain_map) \r\n plt.clabel(CS, inline=1, fontsize=10)\r\n \r\nclass aggregatebeam():\r\n def __init__(self, *args):\r\n self.args=args\r\n self.max_gain=np.copy(args[0].realized_gain)\r\n self.beam_occupy=0*np.copy(self.max_gain)\r\n \r\n for beamid, i in enumerate(self.args[1:], 1):\r\n for n in range(len(self.max_gain)):\r\n if i.realized_gain[n]>self.max_gain[n]:\r\n self.beam_occupy[n]=beamid\r\n self.max_gain[n]=i.realized_gain[n]\r\n\r\n self.map_size=(args[0].theta[2], args[0].phi[2])\r\n\r\n \r\n def plotCDF(self):\r\n x, y=[], []\r\n accumulated_area=0\r\n for gain, area in sorted(zip(self.max_gain, 
self.args[0].cell_area)):\r\n x.append(gain)\r\n accumulated_area+=area\r\n y.append(accumulated_area)\r\n \r\n plt.figure()\r\n plt.title('Cumulative Distribution Function') \r\n plt.xlabel('Realized Gain (dB)')\r\n plt.ylabel('CDF')\r\n plt.grid(True)\r\n plt.plot(x, y/y[-1])\r\n plt.show()\r\n return (x, y/y[-1])\r\n\r\n \r\n def plotGainMap(self):\r\n gain_map=self.max_gain.reshape(self.map_size)\r\n \r\n plt.figure(figsize=(8, 4))\r\n plt.title('Gain Map(dB)')\r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)')\r\n maxV=np.max(gain_map)\r\n [row, col] = np.where(gain_map==maxV)\r\n plt.plot(col, row, 'w*')\r\n plt.annotate(round(maxV,3), (col+3, row+3), color='white')\r\n plt.imshow(gain_map, cmap='jet')\r\n plt.colorbar()\r\n CS=plt.contour(gain_map)\r\n plt.clabel(CS, inline=1, fontsize=10) \r\n\r\n \r\n def plotBeamMap(self):\r\n beam_map=self.beam_occupy.reshape(self.map_size)\r\n \r\n plt.figure(figsize=(8, 4))\r\n plt.title('Beam Map') \r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)') \r\n plt.imshow(beam_map, cmap='rainbow')\r\n plt.colorbar() \r\n plt.contour(beam_map)\r\n \r\ndef plotCDFtable(table, png=None):\r\n '''table={'A':(gain , cdf), 'B':(gain, cdf), }'''\r\n \r\n plt.figure()\r\n plt.title('Cumulative Distribution Function') \r\n plt.xlabel('Realized Gain (dB)')\r\n plt.ylabel('CDF')\r\n plt.grid(True)\r\n for i in table:\r\n plt.plot(*table[i], label=i)\r\n plt.legend()\r\n if png:\r\n plt.savefig(png) \r\n plt.show()\r\n\r\n \r\n \r\n#%%\r\npath='D:\\OneDrive - ANSYS, Inc/Workshop/2019/2019_Q4_5G_Array_Modula_Analysis/28000000000/'\r\nx1=ffd(path+'4x2_array1_Module_0_Bump_h1.ffd')\r\nx2=ffd(path+'4x2_array1_Module_0_Bump_h2.ffd')\r\nx3=ffd(path+'4x2_array1_Module_0_Bump_h3.ffd')\r\nx4=ffd(path+'4x2_array1_Module_0_Bump_h4.ffd')\r\n\r\n\r\n#%%\r\n\r\nbeam0=x1(1,0) +x2(1,0) +x3(1,0) +x4(1,0)\r\n#beam0.plotRealizedGain()\r\nbeam1=x1(1,0) +x2(1,75) +x3(1,150) +x4(1,225)\r\n#beam1.plotRealizedGain()\r\nbeam2=x1(1,0) +x2(1,150) +x3(1,300) +x4(1,450)\r\n#beam2.plotRealizedGain()\r\n\r\ntable=OrderedDict()\r\nz0=aggregatebeam(beam0, beam1, beam2)\r\ntable['z0']=z0.plotCDF()\r\nz1=aggregatebeam(beam0, beam1)\r\ntable['z1']=z1.plotCDF()\r\nz2=aggregatebeam(beam1, beam2)\r\ntable['z2']=z2.plotCDF()\r\nz3=aggregatebeam(beam0, beam2)\r\ntable['z3']=z3.plotCDF()\r\nplotCDFtable(table, 'd:/demo/aaa.png')\r\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.amax", "matplotlib.pyplot.imshow", "numpy.radians", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.plot", "numpy.max", "numpy.where", "numpy.copy", "numpy.column_stack", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.log10", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.absolute", "matplotlib.pyplot.clabel", "numpy.abs", "matplotlib.pyplot.colorbar", "numpy.vectorize", "matplotlib.pyplot.contour", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
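The possible_versions entries pair each file with the library releases it is expected to run under; an empty list (as in the two records above) appears to mean that no release constraint was recorded for that library. Below is a small, hedged sketch of filtering on that field. The rows are again inlined as dicts with values copied from records shown earlier in this dump, and the target release is an arbitrary example rather than anything the dump prescribes.

# Hypothetical rows carrying only the fields needed for the filter; the version
# lists are copied from the NaturalLanguageGen and HowtoSim_Script records.
rows = [
    {
        "file_path": "code/LanguageGenChars_Predict.py",
        "possible_versions": {"tensorflow": ["1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6"]},
    },
    {
        "file_path": "aggregate_CDF.py",
        "possible_versions": {"tensorflow": []},
    },
]

TARGET_TF = "2.4"  # example target release; swap in whatever release is of interest

def runs_on(row, library, release):
    """True if the file lists the release, or lists no constraint for the library."""
    declared = row["possible_versions"].get(library, [])
    return not declared or release in declared

compatible = [row["file_path"] for row in rows if runs_on(row, "tensorflow", TARGET_TF)]
print(compatible)  # ['code/LanguageGenChars_Predict.py', 'aggregate_CDF.py']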
akac0297/PETLAB
[ "950cc153ce230d12d752ad0d11111e7fc22d9e7d", "950cc153ce230d12d752ad0d11111e7fc22d9e7d", "950cc153ce230d12d752ad0d11111e7fc22d9e7d", "950cc153ce230d12d752ad0d11111e7fc22d9e7d", "950cc153ce230d12d752ad0d11111e7fc22d9e7d", "950cc153ce230d12d752ad0d11111e7fc22d9e7d" ]
[ "Radiomics/Radiomics MPE images.py", "PET tumour segmentation code/PET_Segmentation-Copy1.py", "PET tumour segmentation code/PET_tumour_seg_test.py", "PET tumour segmentation code/PET_segmentation_WES_006_2.py", "MRI segmentation/Sphere generation WES_003_2.py", "PET tumour segmentation code/PET_segmentation_WES_010_0.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\nimport SimpleITK as sitk\nimport pandas as pd\nimport numpy as np\nimport radiomics\n\ndef radiomics_analysis(image_filepath, mask_filepath,img_label):\n img = sitk.ReadImage(image_filepath)\n mask = sitk.ReadImage(mask_filepath)\n \n #Z-score normalisation for MRI\n if img_label==(\"B50T_CAD_ADC_3000\" or \"B50_800 ADC\" or \"MPE\"):\n z_norm=False\n else:\n z_norm=True\n \n if z_norm==True:\n img_arr=sitk.GetArrayFromImage(img)\n img_mean=np.mean(img_arr)\n img_std=np.std(img_arr)\n img=sitk.Cast(img,sitk.sitkInt16)\n img=(img-img_mean)/img_std\n elif z_norm==False:\n img_arr=sitk.GetArrayFromImage(img)\n img=sitk.Cast(img,sitk.sitkInt16)\n\n #Grey-level discretisation for MRI \n img_arr = sitk.GetArrayFromImage(img) \n mask=sitk.Resample(mask,img)\n \n bin_number=512\n min_arr=np.min(img_arr)\n max_arr=np.max(img_arr)\n img_arr[img_arr!=np.max(img_arr)]=np.floor(bin_number*(img_arr[img_arr!=np.max(img_arr)]-min_arr)/(max_arr-min_arr))+1\n img_arr[img_arr==np.max(img_arr)]=bin_number\n \n new_img_arr = img_arr\n new_img=sitk.GetImageFromArray(new_img_arr)\n new_img.CopyInformation(img)\n img=new_img\n \n extractor = radiomics.firstorder.RadiomicsFirstOrder(img, mask)\n dict1 = extractor.execute()\n extractor_2 = radiomics.shape.RadiomicsShape(img, mask)\n dict2 = extractor_2.execute()\n extractor_3 = radiomics.glcm.RadiomicsGLCM(img, mask)\n dict3 = extractor_3.execute()\n extractor_4 = radiomics.glszm.RadiomicsGLSZM(img, mask)\n dict4 = extractor_4.execute()\n extractor_5 = radiomics.glrlm.RadiomicsGLRLM(img, mask)\n dict5 = extractor_5.execute()\n extractor_6 = radiomics.ngtdm.RadiomicsNGTDM(img, mask)\n dict6 = extractor_6.execute()\n extractor_7 = radiomics.gldm.RadiomicsGLDM(img, mask)\n dict7 = extractor_7.execute()\n \n dict1.update(dict2)\n dict1.update(dict3)\n dict1.update(dict4)\n dict1.update(dict5)\n dict1.update(dict6)\n dict1.update(dict7)\n new_img_label=img_label\n if img_label==\"B50T_CAD_ADC_3000\":\n new_img_label=\"B50T_CAD_ADC_3000 no norm\"\n if img_label==\"B50_800 ADC\":\n new_img_label=\"B50_800 ADC no norm\"\n dict1.update({'image label': new_img_label})\n\n return(dict1)\n\nmask_list=['new_seg_003_2_mri.nii.gz', 'new_seg_004_4_mri.nii.gz', 'new_seg_004_5_mri.nii.gz', 'new_seg_004_6_mri.nii.gz', 'new_seg_005_4_mri.nii.gz', 'new_seg_005_5_mri.nii.gz', 'new_seg_005_6_mri.nii.gz', 'new_seg_006_4_mri.nii.gz', 'new_seg_006_5_mri.nii.gz', 'new_seg_006_6_mri.nii.gz', 'new_seg_007_4_mri.nii.gz', 'new_seg_007_5_mri.nii.gz', 'new_seg_007_6_mri.nii.gz', 'new_seg_008_4_mri.nii.gz', 'new_seg_008_5_mri.nii.gz', 'new_seg_008_6_mri.nii.gz', 'new_seg_009_6_mri.nii.gz', 'new_seg_009_7_mri.nii.gz', 'new_seg_009_8_mri.nii.gz', 'new_seg_010_4_mri.nii.gz', 'new_seg_010_5_mri.nii.gz', 'new_seg_010_6_mri.nii.gz', 'new_seg_012_4_mri.nii.gz', 'new_seg_012_5_mri.nii.gz', 'new_seg_012_6_mri.nii.gz', 'new_seg_013_4_mri.nii.gz', 'new_seg_013_5_mri.nii.gz', 'new_seg_013_6_mri.nii.gz', 'new_seg_014_4_mri.nii.gz', 'new_seg_014_5_mri.nii.gz', 'new_seg_014_6_mri.nii.gz', 'new_seg_015_4_mri.nii.gz', 'new_seg_015_5_mri.nii.gz', 'new_seg_015_6_mri.nii.gz', 'new_seg_016_3_mri.nii.gz', 'new_seg_016_4_mri.nii.gz', 'new_seg_016_5_mri.nii.gz', 'new_seg_017_3_mri.nii.gz', 'new_seg_018_4_mri.nii.gz', 'new_seg_018_5_mri.nii.gz', 'new_seg_018_6_mri.nii.gz', 'new_seg_019_3_mri.nii.gz', 'new_seg_019_4_mri.nii.gz', 'new_seg_019_5_mri.nii.gz', 'new_seg_021_2_mri.nii.gz', 'new_seg_021_3_mri.nii.gz', 'new_seg_021_4_mri.nii.gz', 'new_seg_023_2_mri.nii.gz', 
'new_seg_023_3_mri.nii.gz', 'new_seg_023_4_mri.nii.gz', 'new_seg_024_3_mri.nii.gz', 'new_seg_024_4_mri.nii.gz', 'new_seg_024_5_mri.nii.gz']\nMPE_list=['max_img_WES_003_2.nii.gz', 'MPE_sub_WES_004_4.nii.gz', 'MPE_sub_WES_004_5.nii.gz', 'MPE_sub_WES_004_6.nii.gz', 'MPE_sub_WES_005_4.nii.gz', 'MPE_sub_WES_005_5.nii.gz', 'MPE_sub_WES_005_6.nii.gz', 'max_img_WES_006_4.nii.gz', 'max_img_WES_006_5.nii.gz', 'max_img_WES_006_6.nii.gz', 'max_img_WES_007_4.nii.gz', 'max_img_WES_007_5.nii.gz', 'max_img_WES_007_6.nii.gz', 'MPE_sub_WES_008_4.nii.gz', 'MPE_sub_WES_008_5.nii.gz', 'MPE_sub_WES_008_6.nii.gz', 'MPE_sub_WES_009_6.nii.gz', 'MPE_sub_WES_009_7.nii.gz', 'MPE_sub_WES_009_8.nii.gz', 'MPE_sub_WES_010_4.nii.gz', 'MPE_sub_WES_010_5.nii.gz', 'MPE_sub_WES_010_6.nii.gz', 'MPE_sub_WES_012_4.nii.gz', 'MPE_sub_WES_012_5.nii.gz', 'MPE_sub_WES_012_6.nii.gz', 'max_img_WES_013_4.nii.gz', 'max_img_WES_013_5.nii.gz', 'max_img_WES_013_6.nii.gz', 'max_img_WES_014_4.nii.gz', 'max_img_WES_014_5.nii.gz', 'max_img_WES_014_6.nii.gz', 'max_img_WES_015_4.nii.gz', 'max_img_WES_015_5.nii.gz', 'max_img_WES_015_6.nii.gz', 'max_img_WES_016_3.nii.gz', 'max_img_WES_016_4.nii.gz', 'max_img_WES_016_5.nii.gz', 'max_img_WES_017_3.nii.gz', 'max_img_WES_018_4.nii.gz', 'max_img_WES_018_5.nii.gz', 'max_img_WES_018_6.nii.gz', 'max_img_WES_019_3.nii.gz', 'max_img_WES_019_4.nii.gz', 'max_img_WES_019_5.nii.gz', 'max_img_WES_021_2.nii.gz', 'max_img_WES_021_3.nii.gz', 'max_img_WES_021_4.nii.gz', 'max_img_WES_023_2.nii.gz', 'max_img_WES_023_3.nii.gz', 'max_img_WES_023_4.nii.gz', 'max_img_WES_024_3.nii.gz', 'max_img_WES_024_4.nii.gz', 'max_img_WES_024_5.nii.gz']\nsphere_list=['image_sphere_WES_003_2.nii.gz' 'image_sphere_WES_004_4.nii.gz', 'image_sphere_WES_004_5.nii.gz', 'image_sphere_WES_004_6.nii.gz', 'image_sphere_WES_005_4.nii.gz', 'image_sphere_WES_005_5.nii.gz', 'image_sphere_WES_005_6.nii.gz', 'image_sphere_WES_006_4.nii.gz', 'image_sphere_WES_006_5.nii.gz', 'image_sphere_WES_006_6.nii.gz', 'image_sphere_WES_007_4.nii.gz', 'image_sphere_WES_007_5.nii.gz', 'image_sphere_WES_007_6.nii.gz', 'image_sphere_WES_008_4.nii.gz', 'image_sphere_WES_008_5.nii.gz', 'image_sphere_WES_008_6.nii.gz', 'image_sphere_WES_009_6.nii.gz', 'image_sphere_WES_009_7.nii.gz', 'image_sphere_WES_009_8.nii.gz', 'image_sphere_WES_010_4.nii.gz', 'image_sphere_WES_010_5.nii.gz', 'image_sphere_WES_010_6.nii.gz', 'image_sphere_WES_012_4.nii.gz', 'image_sphere_WES_012_5.nii.gz', 'image_sphere_WES_012_6.nii.gz', 'image_sphere_WES_013_4.nii.gz', 'image_sphere_WES_013_5.nii.gz', 'image_sphere_WES_013_6.nii.gz', 'image_sphere_WES_014_4.nii.gz', 'image_sphere_WES_014_5.nii.gz', 'image_sphere_WES_014_6.nii.gz', 'image_sphere_WES_015_4.nii.gz', 'image_sphere_WES_015_5.nii.gz', 'image_sphere_WES_015_6.nii.gz', 'image_sphere_WES_016_3.nii.gz', 'image_sphere_WES_016_4.nii.gz', 'image_sphere_WES_016_5.nii.gz', 'image_sphere_WES_017_3.nii.gz', 'image_sphere_WES_018_4.nii.gz', 'image_sphere_WES_018_5.nii.gz', 'image_sphere_WES_018_6.nii.gz', 'image_sphere_WES_019_3.nii.gz', 'image_sphere_WES_019_4.nii.gz', 'image_sphere_WES_019_5.nii.gz', 'image_sphere_WES_021_2.nii.gz', 'image_sphere_WES_021_3.nii.gz', 'image_sphere_WES_021_4.nii.gz', 'image_sphere_WES_023_2.nii.gz', 'image_sphere_WES_023_3.nii.gz', 'image_sphere_WES_023_4.nii.gz', 'image_sphere_WES_024_3.nii.gz', 'image_sphere_WES_024_4.nii.gz', 'image_sphere_WES_024_5.nii.gz']\n\ndf=pd.DataFrame()\nfor image in range(0,10):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], 
mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_0_to_9.csv\")\n\ndf=pd.DataFrame()\nfor image in range(0,10):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_0_to_9.csv\")\n\ndf=pd.DataFrame()\nfor image in range(10,20):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_10_to_19.csv\")\n\ndf=pd.DataFrame()\nfor image in range(10,20):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_10_to_19.csv\")\n\ndf=pd.DataFrame()\nfor image in range(20,30):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_20_to_29.csv\")\n\ndf=pd.DataFrame()\nfor image in range(20,30):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_20_to_29.csv\")\n\ndf=pd.DataFrame()\nfor image in range(30,40):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_30_to_39.csv\")\n\ndf=pd.DataFrame()\nfor image in range(30,40):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_30_to_39.csv\")\n\ndf=pd.DataFrame()\nfor image in range(40,53):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_40_to_53.csv\")\n\ndf=pd.DataFrame()\nfor image in range(40,53):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n 
df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_40_to_53.csv\")\n\ndf1=pd.read_csv(\"./df_MPE_tumour_0_to_9.csv\")\ndf2=pd.read_csv(\"./df_MPE_tumour_10_to_19.csv\")\ndf3=pd.read_csv(\"./df_MPE_tumour_20_to_29.csv\")\ndf4=pd.read_csv(\"./df_MPE_tumour_30_to_39.csv\")\ndf5=pd.read_csv(\"./df_MPE_tumour_40_to_53.csv\")\ndf=df1.append(df2)\ndf=df.append(df3)\ndf=df.append(df4)\ndf=df.append(df5)\ndf.to_csv(\"./df_MPE_tumours.csv\")\n\ndf1=pd.read_csv(\"./df_MPE_sphere_0_to_9.csv\")\ndf2=pd.read_csv(\"./df_MPE_sphere_10_to_19.csv\")\ndf3=pd.read_csv(\"./df_MPE_sphere_20_to_29.csv\")\ndf4=pd.read_csv(\"./df_MPE_sphere_30_to_39.csv\")\ndf5=pd.read_csv(\"./df_MPE_sphere_40_to_53.csv\")\ndf=df1.append(df2)\ndf=df.append(df3)\ndf=df.append(df4)\ndf=df.append(df5)\ndf.to_csv(\"./df_MPE_spheres.csv\")\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\nImport modules\n\"\"\"\n\nimport pathlib\nimport numpy as np\n\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\n\nfrom platipy.imaging.registration.registration import (\n initial_registration,\n fast_symmetric_forces_demons_registration,\n transform_propagation,\n apply_field\n)\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\n\n# In[2]:\n\n\n#PET/CT tumour segmentation:\n#read in PET and CT code and register them. Visualise with correct cut\nimage_ct_0=sitk.ReadImage(\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\")\nimage_pt_0_raw=sitk.ReadImage(\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_PT_AC_4.nii.gz\")\n\nimage_pt_0=sitk.Resample(image_pt_0_raw, image_ct_0)\n\n\n# In[3]:\n\n\nprint(image_ct_0.GetSize())\nprint(image_pt_0_raw.GetSpacing())\n\n\n# In[4]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[240,290,256], window=[-250, 500]) #original cut = [174,290,256]\nfig = vis.show()\n\n\n# In[5]:\n\n\nvis = ImageVisualiser(image_pt_0, colormap=plt.cm.magma, cut=[240,290,256], window=[0.1, 10000])\nfig = vis.show()\n\n\n# In[6]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[240,290,256], window=[-250, 500])\nvis.add_scalar_overlay(image_pt_0, colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nfig = vis.show()\n\n\n# In[7]:\n\n\nimage_ct_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_007/IMAGES/WES_007_6_20180925_CT_15.nii.gz\")\ncontour_rbreast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_007/STRUCTURES/WES_007_6_20180925_RTSTRUCT_WHOLE_BREAST_CTV_0.nii.gz\")\n\n\n# In[8]:\n\n\nvis = ImageVisualiser(image_ct_plan, axis='z', cut=60, window=[-250, 500], figure_size_in=8)\nvis.add_contour({'R BREAST' :contour_rbreast_plan})\nfig = vis.show()\n\n\n# In[11]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n image_ct_0,\n image_ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[12]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[174,290,256], window=[-250, 500]) #this doesn't look ideal\nvis.add_comparison_overlay(image_ct_plan_to_0_rigid)\nfig = vis.show()\n\n\n# In[13]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n image_ct_0,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[14]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[240,290,256], 
window=[-250, 500]) #cut=[240,290,256]\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[15]:\n\n\ncontour_rbreast_plan_to_0_rigid = transform_propagation(\n image_ct_0,\n contour_rbreast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_rbreast_plan_to_0_dir = apply_field(\n contour_rbreast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[16]:\n\n\nvis = ImageVisualiser(image_ct_0, axis='z', cut=240, window=[-250, 500])\nvis.add_scalar_overlay(image_pt_0, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_rbreast_plan_to_0_dir, name='R BREAST', color='g') #not being overlaid..\nfig = vis.show()\n\n\n# In[17]:\n\n\n#use structure information for breast to mask out all but the breast area (create new array with everything but\n#this area set to 0)\nmasked_pet_rbreast = sitk.Mask(image_pt_0, contour_rbreast_plan_to_0_dir)\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_rbreast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,3500,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[18]:\n\n\nsitk.WriteImage(masked_pet_rbreast, \"masked_pet_rbreast_WES_007_1.nii.gz\")\n\n\n# In[19]:\n\n\ntest=sitk.ReadImage(\"masked_pet_rbreast_WES_007_1.nii.gz\")\n\n\n# In[21]:\n\n\nvis=ImageVisualiser(test)\nfig=vis.show(test)\n\n\n# In[17]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n\n\n# In[18]:\n\n\nmasked_pet_rbreast_arr=sitk.GetArrayFromImage(masked_pet_rbreast)\nmasked_pet_rbreast_arr=masked_pet_rbreast_arr.flatten() #need to round the values to the nearest integer?\nmasked_pet_rbreast_arr=np.rint(masked_pet_rbreast_arr)\nmode=mode1(masked_pet_rbreast_arr[masked_pet_rbreast_arr>0])\nprint(mode)\n\n\n# In[19]:\n\n\np = np.percentile(masked_pet_rbreast_arr[masked_pet_rbreast_arr>0], 95) # return 95th percentile\nprint(p)\n\n\n# In[20]:\n\n\nmax_val=np.max(masked_pet_rbreast_arr)\nprint(max_val)\n\n\n# In[21]:\n\n\nfrom scipy import stats\n\narr=np.array([[1,2,3],[5,2,4]])\nm=stats.mode(arr, axis=None)\nprint(m)\n\n\n# In[3]:\n\n\ndef PET_segmentation(ct=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n pet=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_PT_AC_4.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_6_20180925_CT_15.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_007/STRUCTURES/WES_007_6_20180925_RTSTRUCT_WHOLE_BREAST_CTV_0.nii.gz\",\n mask_output_file=\"masked_pet_rbreast_WES_007_1.nii.gz\"):\n \n ct=sitk.ReadImage(ct)\n pet_raw=sitk.ReadImage(pet)\n\n pet=sitk.Resample(pet_raw, ct)\n \n ct_plan = sitk.ReadImage(ct_plan)\n contour_breast_plan = sitk.ReadImage(contour_breast_plan)\n \n #vis = ImageVisualiser(ct_plan, axis='z', cut=60, window=[-250, 500], figure_size_in=8)\n #vis.add_contour({'BREAST' :contour_breast_plan})\n #fig = vis.show()\n \n image_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n \n image_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n 
image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n )\n \n #vis = ImageVisualiser(ct, cut=[240,290,256], window=[-250, 500])\n #vis.add_comparison_overlay(image_ct_plan_to_0_dir)\n #fig = vis.show()\n \n contour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n )\n\n contour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n )\n \n #vis = ImageVisualiser(ct, axis='z', cut=240, window=[-250, 500])\n #vis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\n #vis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\n #fig = vis.show()\n \n masked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\n sitk.WriteImage(masked_pet_breast, mask_output_file)\n\n values = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\n fig, ax = plt.subplots(1,1)\n ax.hist(values, bins=np.linspace(1,7000,50), histtype='stepfilled', lw=2)\n ax.set_yscale('log')\n ax.grid()\n ax.set_axisbelow(True)\n ax.set_xlabel('PET value')\n ax.set_ylabel('Frequency')\n fig.show()\n \n def mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n \n masked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\n masked_pet_breast_arr=masked_pet_breast_arr.flatten()\n masked_pet_breast_arr=np.rint(masked_pet_breast_arr)\n mode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\n p = np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th percentile\n max_val=np.max(masked_pet_breast_arr)\n\n return mode, p, max_val\n\n\n# In[ ]:\n\n\nmode,p,max_val=PET_segmentation(ct=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n pet=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_1_20180814_PT_AC_4.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_6_20180925_CT_15.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_007/STRUCTURES/WES_007_6_20180925_RTSTRUCT_WHOLE_BREAST_CTV_0.nii.gz\",\n mask_output_file=\"masked_pet_rbreast_WES_007_1.nii.gz\")\n\n\n# In[1]:\n\n\nprint(mode,p,max_val)\n\n\n# In[4]:\n\n\nmode1,p1,max_val1=PET_segmentation(ct=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_2_20180321_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n pet=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_2_20180321_PT_AC_4.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_6_20180925_CT_15.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_007/STRUCTURES/WES_007_6_20180925_RTSTRUCT_WHOLE_BREAST_CTV_0.nii.gz\",\n mask_output_file=\"masked_pet_rbreast_WES_007_2.nii.gz\")\n\n\n# In[5]:\n\n\nprint(mode1,p1,max_val1)\n\n\n# In[6]:\n\n\nmode2,p2,max_val2=PET_segmentation(ct=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_4_20180502_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n pet=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_4_20180502_PT_AC_4.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_007/IMAGES/WES_007_6_20180925_CT_15.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_007/STRUCTURES/WES_007_6_20180925_RTSTRUCT_WHOLE_BREAST_CTV_0.nii.gz\",\n mask_output_file=\"masked_pet_rbreast_WES_007_4.nii.gz\")\n\n\n# In[7]:\n\n\nprint(mode2,p2,max_val2)\n\n\n# In[8]:\n\n\nmode,p,max_val=PET_segmentation(ct=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_0_20180202_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n 
pet=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_0_20180202_PT_AC_7.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_6_20180319_CT_10.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_005/STRUCTURES/WES_005_6_RTSTRUCT_WHOLE_BREAST_CTV.nii.gz\",\n mask_output_file=\"masked_pet_breast_WES_005_0.nii.gz\")\n\n\n# In[12]:\n\n\nprint(mode,p,max_val)\n\n\n# In[9]:\n\n\nmode1,p1,max_val1=PET_segmentation(ct=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_2_20170911_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\",\n pet=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_2_20170911_PT_AC_4.nii.gz\", \n ct_plan=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_6_20180319_CT_10.nii.gz\", \n contour_breast_plan=\"/home/alicja/Documents/WES_005/STRUCTURES/WES_005_6_RTSTRUCT_WHOLE_BREAST_CTV.nii.gz\",\n mask_output_file=\"masked_pet_breast_WES_005_2.nii.gz\")\n\n\n# In[2]:\n\n\nct=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_2_20170911_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_2_20170911_PT_AC_4.nii.gz\"\nct_plan=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_6_20180319_CT_10.nii.gz\"\ncontour_breast_plan=\"/home/alicja/Documents/WES_005/STRUCTURES/WES_005_6_RTSTRUCT_WHOLE_BREAST_CTV.nii.gz\"\nmask_output_file=\"masked_pet_breast_WES_005_2.nii.gz\"\n\nct=sitk.ReadImage(ct)\npet_raw=sitk.ReadImage(pet)\n\npet=sitk.Resample(pet_raw, ct)\n\nct_plan = sitk.ReadImage(ct_plan)\ncontour_breast_plan = sitk.ReadImage(contour_breast_plan)\n\n\n# In[11]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[12]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[ ]:\n\n\nvis = ImageVisualiser(ct, cut=[240,290,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[13]:\n\n\ncontour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[19]:\n\n\nvis = ImageVisualiser(ct, cut=[200,270,246], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[20]:\n\n\nvis = ImageVisualiser(ct, axis='z', cut=190, window=[-250, 500])\nvis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\nfig = vis.show()\n\n\n# In[27]:\n\n\nmasked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\nsitk.WriteImage(masked_pet_breast, \"masked_pet_rbreast_WES_005_2.nii.gz\")\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,17500,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[22]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n 
\nmasked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmasked_pet_breast_arr=masked_pet_breast_arr.flatten()\nmasked_pet_breast_arr=np.rint(masked_pet_breast_arr)\nmode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\np = np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th percentile\nmax_val=np.max(masked_pet_breast_arr)\n\n\n# In[23]:\n\n\nprint(mode,p,max_val)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[10]:\n\n\nct=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_4_20170718_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_4_20170718_PT_AC_4.nii.gz\"\nct_plan=\"/home/alicja/Documents/WES_005/IMAGES/WES_005_6_20180319_CT_10.nii.gz\"\ncontour_breast_plan=\"/home/alicja/Documents/WES_005/STRUCTURES/WES_005_6_RTSTRUCT_WHOLE_BREAST_CTV.nii.gz\"\n\n\n# In[11]:\n\n\nct=sitk.ReadImage(ct)\npet_raw=sitk.ReadImage(pet)\n\npet=sitk.Resample(pet_raw, ct)\n\nct_plan = sitk.ReadImage(ct_plan)\ncontour_breast_plan = sitk.ReadImage(contour_breast_plan)\n\n\n# In[12]:\n\n\nct[:,:,240:] = -1000\n\n\n# In[13]:\n\n\nvis = ImageVisualiser(ct, cut=[200,270,256], window=[-250, 500])\nfig = vis.show()\n\n\n# In[14]:\n\n\npet[:,:,240:] = -1000\n\n\n# In[15]:\n\n\nvis = ImageVisualiser(pet, cut=[200,270,256], window=[0, 3000])\nfig = vis.show()\n\n\n# In[16]:\n\n\nct_plan[:,:,100:] = -1000\nvis = ImageVisualiser(ct_plan, cut=[90,250,176], window=[-250, 500])\nfig = vis.show()\n\n\n# In[17]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[18]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[19]:\n\n\nvis = ImageVisualiser(ct, cut=[200,270,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[20]:\n\n\ncontour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[21]:\n\n\nvis = ImageVisualiser(ct, axis='z', cut=190, window=[-250, 500])\nvis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\nfig = vis.show()\n\n\n# In[22]:\n\n\nmasked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\nsitk.WriteImage(masked_pet_breast, \"masked_pet_rbreast_WES_005_4.nii.gz\")\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,17500,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[23]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n \nmasked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmasked_pet_breast_arr=masked_pet_breast_arr.flatten()\nmasked_pet_breast_arr=np.rint(masked_pet_breast_arr)\nmode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\np = 
np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th percentile\nmax_val=np.max(masked_pet_breast_arr)\n\n\n# In[24]:\n\n\nprint(mode,p,max_val)\n\n\n# In[4]:\n\n\nfrom platipy.imaging.utils.tools import get_com\n\n\n# In[2]:\n\n\nct=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_0_20180306_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_0_20180306_PT_AC_4.nii.gz\"\nct_plan=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_6_20181109_CT_3.nii.gz\"\ncontour_breast_plan=\"/home/alicja/Documents/WES_006/STRUCTURES/WES_006_6_20181109_RTSTRUCT_COMBINED_SCFAX_4.nii.gz\"\n\n\n# In[3]:\n\n\nct_plan=sitk.ReadImage(ct_plan)\ncontour_breast_plan=sitk.ReadImage(contour_breast_plan)\nct=sitk.ReadImage(ct)\npet_raw=sitk.ReadImage(pet)\n\n\n# In[5]:\n\n\nvis = ImageVisualiser(ct_plan, cut=get_com(contour_breast_plan), window=[-250, 500])\nvis.add_contour(contour_breast_plan)\nfig = vis.show()\n\n\n# In[6]:\n\n\npet=sitk.Resample(pet_raw, ct)\n\n#ct_plan = sitk.ReadImage(ct_plan)\n#contour_breast_plan = sitk.ReadImage(contour_breast_plan)\n\n\n# In[7]:\n\n\nvis = ImageVisualiser(ct_plan, cut=[90,250,176], window=[-250, 500])\nfig = vis.show()\n\n\n# In[8]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[9]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[10]:\n\n\nvis = ImageVisualiser(ct, cut=[190,270,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[11]:\n\n\ncontour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[12]:\n\n\nM_filter=sitk.MinimumMaximumImageFilter()\nM_filter.Execute(contour_breast_plan_to_0_dir)\nprint(M_filter.GetMaximum())\nprint(M_filter.GetMinimum())\n\n\n# In[13]:\n\n\n#pet=sitk.ReadImage(pet)\npet=sitk.Resample(pet,ct)\n\n\n# In[14]:\n\n\nvis = ImageVisualiser(ct, axis='z', cut=get_com(contour_breast_plan_to_0_dir), window=[-250, 500])\nvis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\nfig = vis.show()\n\n\n# In[15]:\n\n\nmasked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\nsitk.WriteImage(masked_pet_breast,\"masked_pet_rbreast_WES_006_0.nii.gz\")\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,17500,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[16]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n 
\nmasked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmasked_pet_breast_arr=masked_pet_breast_arr.flatten()\nmasked_pet_breast_arr=np.rint(masked_pet_breast_arr)\nmode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\np = np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th percentile\nmax_val=np.max(masked_pet_breast_arr)\n\n\n# In[17]:\n\n\nprint(mode, p, max_val)\n\n\n# In[ ]:\n\n\n\n\n\n# In[18]:\n\n\nct=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_1_20180905_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_1_20180905_PT_AC_4.nii.gz\"\nct_plan=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_6_20181109_CT_3.nii.gz\"\ncontour_breast_plan=\"/home/alicja/Documents/WES_006/STRUCTURES/WES_006_6_20181109_RTSTRUCT_COMBINED_SCFAX_4.nii.gz\"\n\n\n# In[19]:\n\n\nct_plan=sitk.ReadImage(ct_plan)\ncontour_breast_plan=sitk.ReadImage(contour_breast_plan)\nct=sitk.ReadImage(ct)\npet_raw=sitk.ReadImage(pet)\n\n\n# In[20]:\n\n\nvis = ImageVisualiser(ct_plan, cut=get_com(contour_breast_plan), window=[-250, 500])\nvis.add_contour(contour_breast_plan)\nfig = vis.show()\n\n\n# In[21]:\n\n\npet=sitk.Resample(pet_raw, ct)\n\n\n# In[22]:\n\n\nvis = ImageVisualiser(ct_plan, cut=[90,250,176], window=[-250, 500])\nfig = vis.show()\n\n\n# In[23]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[24]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[25]:\n\n\nvis = ImageVisualiser(ct, cut=[190,270,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[26]:\n\n\ncontour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[27]:\n\n\nvis = ImageVisualiser(ct, axis='z', cut=get_com(contour_breast_plan_to_0_dir), window=[-250, 500])\nvis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\nfig = vis.show()\n\n\n# In[28]:\n\n\nmasked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\nsitk.WriteImage(masked_pet_breast,\"masked_pet_rbreast_WES_006_1.nii.gz\")\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,10000,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[29]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n\n\n# In[30]:\n\n\nmasked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmasked_pet_breast_arr=masked_pet_breast_arr.flatten()\nmasked_pet_breast_arr=np.rint(masked_pet_breast_arr)\nmode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\np = np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th 
percentile\nmax_val=np.max(masked_pet_breast_arr)\n\nprint(mode,p,max_val)\n\n\n# In[ ]:\n\n\n\n\n\n# In[31]:\n\n\nct=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_3_20180417_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_3_20180417_PT_AC_4.nii.gz\"\nct_plan=\"/home/alicja/Documents/WES_006/IMAGES/WES_006_6_20181109_CT_3.nii.gz\"\ncontour_breast_plan=\"/home/alicja/Documents/WES_006/STRUCTURES/WES_006_6_20181109_RTSTRUCT_COMBINED_SCFAX_4.nii.gz\"\n\n\n# In[32]:\n\n\nct_plan=sitk.ReadImage(ct_plan)\ncontour_breast_plan=sitk.ReadImage(contour_breast_plan)\nct=sitk.ReadImage(ct)\npet_raw=sitk.ReadImage(pet)\n\n\n# In[33]:\n\n\nvis = ImageVisualiser(ct_plan, cut=get_com(contour_breast_plan), window=[-250, 500])\nvis.add_contour(contour_breast_plan)\nfig = vis.show()\n\n\n# In[34]:\n\n\npet=sitk.Resample(pet_raw, ct)\n\n\n# In[35]:\n\n\nvis = ImageVisualiser(ct_plan, cut=[90,250,176], window=[-250, 500])\nfig = vis.show()\n\n\n# In[36]:\n\n\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n ct,\n ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[37]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n ct,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[38]:\n\n\nvis = ImageVisualiser(ct, cut=[190,270,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[39]:\n\n\ncontour_breast_plan_to_0_rigid = transform_propagation(\n ct,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[40]:\n\n\nvis = ImageVisualiser(ct, axis='z', cut=get_com(contour_breast_plan_to_0_dir), window=[-250, 500])\nvis.add_scalar_overlay(pet, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='BREAST', color='g')\nfig = vis.show()\n\n\n# In[41]:\n\n\nmasked_pet_breast = sitk.Mask(pet, contour_breast_plan_to_0_dir)\nsitk.WriteImage(masked_pet_breast,\"masked_pet_rbreast_WES_006_3.nii.gz\")\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,7500,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[42]:\n\n\ndef mode1(x):\n values, counts = np.unique(x, return_counts=True)\n m = counts.argmax()\n return values[m], counts[m]\n\n\n# In[43]:\n\n\nmasked_pet_breast_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmasked_pet_breast_arr=masked_pet_breast_arr.flatten()\nmasked_pet_breast_arr=np.rint(masked_pet_breast_arr)\nmode=mode1(masked_pet_breast_arr[masked_pet_breast_arr>0])\np = np.percentile(masked_pet_breast_arr[masked_pet_breast_arr>0], 95) # return 95th percentile\nmax_val=np.max(masked_pet_breast_arr)\n\nprint(mode,p,max_val)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[29]:\n\n\n\"\"\"\nThings to try:\n- visualise the registration results at each time point for verification\n- extract the mean and maximum (masked) PET image values at each time point\n\"\"\"\n\ndays = [0, 250, 400]\nvalues = [410, 300, 
270]\n\nfig, ax = plt.subplots(1,1)\nax.plot(days, values)\nfig.show()\n\n", "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nImport modules\n\"\"\"\nimport numpy as np\n\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\nfrom platipy.imaging import ImageVisualiser\nfrom platipy.imaging.label.utils import get_com\n\nfrom platipy.imaging.registration.linear import linear_registration\nfrom platipy.imaging.registration.deformable import fast_symmetric_forces_demons_registration\nfrom platipy.imaging.registration.utils import apply_transform\nfrom platipy.imaging.registration.utils import apply_linear_transform\n\nimage_ct_0=sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/WES_004/IMAGES/WES_004_TIMEPOINT_1_CT_AC.nii.gz\")\nimage_pt_0_raw=sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/WES_004/IMAGES/WES_004_TIMEPOINT_1_PET.nii.gz\")\nimage_ct_plan = sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/WES_004/IMAGES/WES_004_CT_RTSIM.nii.gz\")\ncontour_breast_plan = sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/WES_004/LABELS/WES_004_RTSIM_LABEL_CHESTWALL_LT_CTV.nii.gz\")\n\nimage_ct_0=sitk.Resample(image_ct_0,image_ct_plan)\nimage_pt_0=sitk.Resample(image_pt_0_raw,image_ct_0)\n\nbreast_plan=sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/contour_breast_plan_PET_004.nii.gz\")\n#breast_plan_dilate=sitk.BinaryDilate(breast_plan,(3,3,3))\n#sitk.WriteImage(breast_plan_dilate,\"/home/alicja/PET_LAB_PROCESSED/dilated_PET_breast_004.nii.gz\")\n\nbreast_plan_dilate=sitk.ReadImage(\"/home/alicja/PET_LAB_PROCESSED/dilated_PET_breast_004.nii.gz\")\n\nbreast_plan_dilate=sitk.Resample(breast_plan_dilate,image_ct_0)\n\n#vis = ImageVisualiser(image_ct_0, axis='z', cut=get_com(breast_plan_dilate), window=[-250, 500])\n#vis.add_scalar_overlay(image_pt_0, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\n#vis.add_contour(breast_plan_dilate, name='R BREAST', color='g')\n#fig = vis.show()\n#fig.savefig(f\"/home/alicja/PET_LAB_PROCESSED/PET_masked_contour_dilate.jpeg\",dpi=400)\n\nPET_breast=sitk.Mask(image_pt_0,breast_plan_dilate)\n#sitk.WriteImage(PET_breast,\"/home/alicja/PET_LAB_PROCESSED/PET_breast_masked.nii.gz\")\n\ndef getPETseg(PET_breast,image_pt_0):\n mask_arr=sitk.GetArrayFromImage(PET_breast)\n mask_arr=mask_arr.flatten() \n\n p = np.percentile(mask_arr[mask_arr>0], 98)\n print(\"percentile: \", p)\n tum = sitk.Mask(image_pt_0, PET_breast>p)\n tum = sitk.Cast(tum, sitk.sitkInt64)\n tum_cc = sitk.RelabelComponent(sitk.ConnectedComponent(tum))\n tum = (tum_cc==1)\n sitk.WriteImage(tum, \"/home/alicja/PET_LAB_PROCESSED/PET_TUMOUR_test.nii.gz\")\n\n return(tum)\n\ntum = getPETseg(PET_breast,image_pt_0)\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\n\"\"\"\nImport modules\n\"\"\"\n\nimport pathlib\nimport numpy as np\n\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\nfrom platipy.imaging.utils.tools import get_com\n\n\nfrom platipy.imaging.registration.registration import (\n initial_registration,\n fast_symmetric_forces_demons_registration,\n transform_propagation,\n apply_field\n)\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\n\n# In[8]:\n\n\npatient_no=\"06\"\ntimepoint=\"2\"\n\nct=\"WES_006_2_20180905_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"WES_006_2_20180905_PT_AC_4.nii.gz\"\nct_plan=\"WES_006_3_20181109_CT_3.nii.gz\"\nbreast_struct=\"WES_006_3_0_RTSTRUCT_CHEST_WALL_CTV.nii.gz\" #if present\n\nimage_ct_0=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + 
\"/IMAGES/\"+ct)\nimage_pt_0_raw=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/IMAGES/\"+pet)\nimage_ct_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/IMAGES/\"+ct_plan)\ncontour_breast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/STRUCTURES/\"+breast_struct)\n#L contour_breast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_012/STRUCTURES/WES_012_3_0_RTSTRUCT_BREAST_LT_PTV.nii.gz\")\n#R contour_breast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_010/STRUCTURES/WES_010_3_0_RTSTRUCT_BREAST_RT_PTV.nii.gz\")\n\nimage_pt_0=sitk.Resample(image_pt_0_raw, image_ct_0)\n\n\n# In[9]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nfig = vis.show()\n\n\n# In[10]:\n\n\nvis = ImageVisualiser(image_pt_0, colormap=plt.cm.magma, cut=[180,220,256], window=[0.1, 10000])\nfig = vis.show()\n\n\n# In[11]:\n\n\nvis = ImageVisualiser(image_ct_plan, axis='z', cut=60, window=[-250, 500], figure_size_in=8)\nvis.add_contour({'BREAST' :contour_breast_plan})\nfig = vis.show()\n\n\n# In[12]:\n\n\n#register planning CT to CT\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n image_ct_0,\n image_ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[13]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_rigid)\nfig = vis.show()\n\n\n# In[14]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n image_ct_0,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[15]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[16]:\n\n\n#register breast structure to CT\ncontour_breast_plan_to_0_rigid = transform_propagation(\n image_ct_0,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[17]:\n\n\ncontour_breast_plan_to_0_dir_arr=sitk.GetArrayFromImage(contour_breast_plan_to_0_dir)\n#contour_breast_plan_to_0_dir_arr[:,:,220:]=0\n#contour_breast_plan_to_0_dir_arr[:163,:,:]=0\n#contour_breast_plan_to_0_dir_arr[186:,:,:]=0\ncontour_breast_plan_to_0_dir2=sitk.GetImageFromArray(contour_breast_plan_to_0_dir_arr)\ncontour_breast_plan_to_0_dir2.CopyInformation(contour_breast_plan_to_0_dir)\ncontour_breast_plan_to_0_dir=contour_breast_plan_to_0_dir2\n\n\n# In[18]:\n\n\nbreast_contour_dilate=sitk.BinaryDilate(contour_breast_plan_to_0_dir, (2,2,2)) #if using different structure\n\n\n# In[19]:\n\n\nsitk.WriteImage(breast_contour_dilate,\"breast_contour_dilate_\"+patient_no+\"_\"+timepoint+\".nii.gz\")\n\n\n# In[21]:\n\n\nvis = ImageVisualiser(image_ct_0, axis='z', cut=180, window=[-250, 500])\nvis.add_scalar_overlay(image_pt_0, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='R BREAST', color='g') #or breast_contour_dilate\nfig = vis.show()\n\n\n# In[22]:\n\n\n#use structure information for breast to mask out all but the breast area (create new array with everything but\n#this area set to 0)\nmasked_pet_breast = sitk.Mask(image_pt_0, 
contour_breast_plan_to_0_dir) #or breast_contour_dilate\n\n\n# In[23]:\n\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,30000,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[24]:\n\n\nsitk.WriteImage(masked_pet_breast, \"masked_pet_breast_WES_0\" + patient_no + \"_\" + timepoint + \".nii.gz\")\n\n\n# In[25]:\n\n\n#get 95th percentile, then mask the breast volume\nmasked_pet_breast=sitk.Resample(masked_pet_breast, image_pt_0_raw)\nmask_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmask_arr=mask_arr.flatten() \n\np = np.percentile(mask_arr[mask_arr>0], 95)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\n\n# In[ ]:\n\n\n\n\n\n# In[26]:\n\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_95pc.nii.gz\")\n\n\n# In[27]:\n\n\np = np.percentile(mask_arr[mask_arr>0], 90)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_90pc.nii.gz\")\n\n\n# In[28]:\n\n\np = np.percentile(mask_arr[mask_arr>0], 97)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_97pc.nii.gz\")\n\n\n# In[29]:\n\n\ntum=sitk.Cast(tum,sitk.sitkInt16)\nnew_tums = sitk.RelabelComponent(sitk.ConnectedComponent(tum))\nnew_tum = (new_tums==1)\nsitk.WriteImage(new_tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_97pc.nii.gz\")\n\n\n# In[ ]:\n\n\n\n\n", "#!/usr/bin/env python3\n# coding: utf-8\n\nimport SimpleITK as sitk\nimport numpy as np\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\nfrom platipy.imaging.utils.tools import get_com\nfrom platipy.imaging.registration.registration import initial_registration\nfrom platipy.imaging.registration.registration import fast_symmetric_forces_demons_registration\nfrom platipy.imaging.registration.registration import apply_field\nfrom platipy.imaging.registration.registration import smooth_and_resample\n\nimage_template = sitk.ReadImage(\"TEMPLATE_MRI_T2W_TSE_2D_SPAIR.nii.gz\")\ncontour_lb_template = sitk.ReadImage(\"TEMPLATE_CONTOUR_L_BREAST.nii.gz\")\ncontour_rb_template = sitk.ReadImage(\"TEMPLATE_CONTOUR_R_BREAST.nii.gz\")\n\nimg_label=\"T2_TSE_TRA_SPAIR\"\npatient_no=\"03\"\ntimepoint=\"2\"\nlaterality = \"L\"\nradius = 21.58 #calculated from tumour volume\nimage_test = sitk.ReadImage(\"/home/alicja/Documents/WES_0\"+patient_no+\"/IMAGES/WES_003_2_20170207_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz\")\n\ndef visualiseImages(image_template,contour_lb_template,contour_rb_template,image_test,laterality):\n if laterality == \"L\":\n vis = ImageVisualiser(image_template, cut=get_com(contour_lb_template), window=(0,200), figure_size_in=5)\n elif laterality == \"R\":\n vis = ImageVisualiser(image_template, cut=get_com(contour_rb_template), window=(0,200), figure_size_in=5)\n\n vis.add_contour({\"contour_lb_template\":contour_lb_template, \"contour_rb_template\":contour_rb_template})\n fig = vis.show()\n\n vis = ImageVisualiser(image_test, window=(0,200), figure_size_in=5)\n fig = vis.show()\n\nvisualiseImages(image_template,contour_lb_template,contour_rb_template,image_test,laterality)\n\ndef RegisterContoursToImage(image_test,image_template,contour_lb_template, contour_rb_template):\n image_template_reg_linear, tfm_template_linear = initial_registration(\n 
image_test,\n image_template,\n default_value=0,\n options={\n 'shrink_factors': (16,8,4),\n 'smooth_sigmas': [0,0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n )\n\n contour_lb_template_reg_linear = apply_field(\n contour_lb_template,\n #reference_image=image_test,\n transform=tfm_template_linear,\n default_value=0,\n interp=1\n )\n\n contour_rb_template_reg_linear = apply_field(\n contour_rb_template,\n #reference_image=image_test,\n transform=tfm_template_linear,\n default_value=0,\n interp=1\n )\n\n vis = ImageVisualiser(image_test, window=(0,200), figure_size_in=5)\n vis.add_comparison_overlay(image_template_reg_linear)\n vis.add_contour({\"contour_lb_template_reg_linear\":contour_lb_template_reg_linear})\n fig1 = vis.show()\n\n vis = ImageVisualiser(image_test, window=(0,200), figure_size_in=5)\n vis.add_comparison_overlay(image_template_reg_linear)\n vis.add_contour({\"contour_rb_template_reg_linear\":contour_rb_template_reg_linear})\n fig2 = vis.show()\n\n _, tfm_template_deformable = fast_symmetric_forces_demons_registration(\n image_test,\n image_template_reg_linear,\n resolution_staging=[12, 6, 3],\n iteration_staging=[20, 20, 20],\n isotropic_resample=True,\n initial_displacement_field=None,\n smoothing_sigma_factor=1,\n smoothing_sigmas=False,\n default_value=0,\n ncores=8,\n interp_order=2\n )\n\n contour_lb_template_reg_deformable = apply_field(\n contour_lb_template_reg_linear,\n #reference_image=image_test,\n transform=tfm_template_deformable,\n default_value=0,\n interp=1\n )\n\n contour_rb_template_reg_deformable = apply_field(\n contour_rb_template_reg_linear,\n #reference_image=image_test,\n transform=tfm_template_deformable,\n default_value=0,\n interp=1\n )\n\n vis = ImageVisualiser(image_test, cut=get_com(contour_lb_template_reg_deformable), window=(0,200), figure_size_in=5)\n #vis.add_comparison_overlay(image_template_reg_deformable)\n vis.add_contour({\"contour_lb_template_reg_deformable\":contour_lb_template_reg_deformable, \n \"contour_rb_template_reg_deformable\":contour_rb_template_reg_deformable})\n fig = vis.show()\n\n return(contour_lb_template_reg_deformable,contour_rb_template_reg_deformable)\n\ncontour_lb_template_reg_deformable,contour_rb_template_reg_deformable = RegisterContoursToImage(image_test,image_template,contour_lb_template, contour_rb_template)\n\ndef WriteSmoothContour(contour_lb_template_reg_deformable,contour_rb_template_reg_deformable,laterality=\"L\"):\n contour_lb_template_reg_deformable_smooth = sitk.BinaryMorphologicalClosing(contour_lb_template_reg_deformable, (15,15,15))\n contour_rb_template_reg_deformable_smooth = sitk.BinaryMorphologicalClosing(contour_rb_template_reg_deformable, (15,15,15))\n\n sitk.WriteImage(contour_lb_template_reg_deformable_smooth,\"contour_lb_template_reg_deformable_smooth_WES_0\"+patient_no+\"_\"+timepoint+img_label+\".nii.gz\")\n sitk.WriteImage(contour_rb_template_reg_deformable_smooth,\"contour_rb_template_reg_deformable_smooth_WES_0\"+patient_no+\"_\"+timepoint+img_label+\".nii.gz\")\n\n if laterality == \"L\":\n vis = ImageVisualiser(image_test, cut=get_com(contour_lb_template_reg_deformable), window=(0,200), figure_size_in=5) \n elif laterality == \"R\":\n vis = ImageVisualiser(image_test, cut=get_com(contour_rb_template_reg_deformable), window=(0,200), figure_size_in=5) \n #vis.add_comparison_overlay(image_template_reg_deformable)\n 
vis.add_contour({\"contour_lb_template_reg_deformable_smooth\":contour_lb_template_reg_deformable_smooth,\n \"contour_rb_template_reg_deformable_smooth\":contour_rb_template_reg_deformable_smooth})\n fig = vis.show()\n return(contour_lb_template_reg_deformable_smooth,contour_rb_template_reg_deformable_smooth)\n\ncontour_lb_template_reg_deformable_smooth,contour_rb_template_reg_deformable_smooth=WriteSmoothContour(contour_lb_template_reg_deformable,contour_rb_template_reg_deformable,laterality=\"L\")\n\ndef insert_sphere(arr,sp_radius,sp_centre):\n sp_radius=int(sp_radius)\n for x in range(sp_centre[0]-sp_radius,sp_centre[0]+sp_radius+1):\n for y in range(sp_centre[1]-sp_radius,sp_centre[1]+sp_radius+1):\n for z in range(sp_centre[2]-sp_radius,sp_centre[2]+sp_radius+1):\n dist_squared=sp_radius**2-abs(sp_centre[0]-x)**2-abs(sp_centre[1]-y)**2-abs(sp_centre[2]-z)**2\n sign=np.sign(dist_squared)\n dist=np.sqrt(abs(dist_squared))*sign\n if dist>=0:\n arr[x,y,z]=1\n return(arr)\n\ndef generateSphere(contour_template_reg_deformable=contour_lb_template_reg_deformable,\ncontour_template_reg_deformable_smooth=contour_lb_template_reg_deformable_smooth,radius=radius):\n blank_image_res = smooth_and_resample(contour_template_reg_deformable, smoothing_sigma=0,shrink_factor=1,isotropic_resample=True)\n\n centre = get_com(blank_image_res)\n print(\"Centre is\", centre)\n\n blank_arr = sitk.GetArrayFromImage(blank_image_res*0)\n sphere_arr = insert_sphere(blank_arr, sp_radius=radius, sp_centre=centre)\n image_sphere = sitk.GetImageFromArray(sphere_arr)\n image_sphere.CopyInformation(blank_image_res)\n image_sphere = sitk.Resample(image_sphere, contour_template_reg_deformable)\n\n vis = ImageVisualiser(image_test, cut=get_com(contour_template_reg_deformable), window=(0,200), figure_size_in=5)\n vis.add_contour(\n {\"contour_template_reg_deformable_smooth\":contour_template_reg_deformable_smooth,\n \"image_sphere\":image_sphere\n }\n )\n fig = vis.show()\n\n sitk.WriteImage(image_sphere,\"image_sphere_WES_0\"+patient_no+\"_\"+timepoint+\".nii.gz\")\n return(image_sphere)\n\nimage_sphere=generateSphere(contour_template_reg_deformable=contour_lb_template_reg_deformable,\ncontour_template_reg_deformable_smooth=contour_lb_template_reg_deformable_smooth,radius=radius)\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[9]:\n\n\n\"\"\"\nImport modules\n\"\"\"\n\nimport pathlib\nimport numpy as np\n\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\nfrom platipy.imaging.visualisation.tools import ImageVisualiser\nfrom platipy.imaging.utils.tools import get_com\n\n\nfrom platipy.imaging.registration.registration import (\n initial_registration,\n fast_symmetric_forces_demons_registration,\n transform_propagation,\n apply_field\n)\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\n\n# In[ ]:\n\n\npatient_no=\"10\"\ntimepoint=\"0\"\n\nct=\"WES_010_0_20180827_CT_10_PETCT_WBHDIN_ONC_3.nii.gz\"\npet=\"WES_010_0_20180827_PT_AC_4.nii.gz\"\nct_plan=\"WES_010_3_20190410_CT_3.nii.gz\"\nbreast_struct=\"WES_010_3_0_RTSTRUCT_WHOLE_BREAST_CTV.nii.gz\" #if present\n\nimage_ct_0=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/IMAGES/\"+ct)\nimage_pt_0_raw=sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/IMAGES/\"+pet)\nimage_ct_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/IMAGES/\"+ct_plan)\ncontour_breast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_0\" + patient_no + \"/STRUCTURES/\"+breast_struct)\n#L contour_breast_plan = 
sitk.ReadImage(\"/home/alicja/Documents/WES_012/STRUCTURES/WES_012_3_0_RTSTRUCT_BREAST_LT_PTV.nii.gz\")\n#R contour_breast_plan = sitk.ReadImage(\"/home/alicja/Documents/WES_010/STRUCTURES/WES_010_3_0_RTSTRUCT_BREAST_RT_PTV.nii.gz\")\n\nimage_pt_0=sitk.Resample(image_pt_0_raw, image_ct_0)\n\n\n# In[ ]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nfig = vis.show()\n\n\n# In[ ]:\n\n\nvis = ImageVisualiser(image_pt_0, colormap=plt.cm.magma, cut=[180,220,256], window=[0.1, 10000])\nfig = vis.show()\n\n\n# In[ ]:\n\n\nvis = ImageVisualiser(image_ct_plan, axis='z', cut=60, window=[-250, 500], figure_size_in=8)\nvis.add_contour({'BREAST' :contour_breast_plan})\nfig = vis.show()\n\n\n# In[12]:\n\n\n#register planning CT to CT\nimage_ct_plan_to_0_rigid, tfm_plan_to_0_rigid = initial_registration(\n image_ct_0,\n image_ct_plan,\n options={\n 'shrink_factors': [8,4],\n 'smooth_sigmas': [0,0],\n 'sampling_rate': 0.5,\n 'final_interp': 2,\n 'metric': 'mean_squares',\n 'optimiser': 'gradient_descent_line_search',\n 'number_of_iterations': 25},\n reg_method='Rigid')\n\n\n# In[13]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_rigid)\nfig = vis.show()\n\n\n# In[14]:\n\n\nimage_ct_plan_to_0_dir, tfm_plan_to_0_dir = fast_symmetric_forces_demons_registration(\n image_ct_0,\n image_ct_plan_to_0_rigid,\n resolution_staging=[4,2],\n iteration_staging=[10,10]\n)\n\n\n# In[15]:\n\n\nvis = ImageVisualiser(image_ct_0, cut=[180,220,256], window=[-250, 500])\nvis.add_comparison_overlay(image_ct_plan_to_0_dir)\nfig = vis.show()\n\n\n# In[16]:\n\n\n#register breast structure to CT\ncontour_breast_plan_to_0_rigid = transform_propagation(\n image_ct_0,\n contour_breast_plan,\n tfm_plan_to_0_rigid,\n structure=True\n)\n\ncontour_breast_plan_to_0_dir = apply_field(\n contour_breast_plan_to_0_rigid,\n tfm_plan_to_0_dir,\n structure=True\n)\n\n\n# In[17]:\n\n\ncontour_breast_plan_to_0_dir_arr=sitk.GetArrayFromImage(contour_breast_plan_to_0_dir)\n#contour_breast_plan_to_0_dir_arr[:,:,220:]=0\n#contour_breast_plan_to_0_dir_arr[:163,:,:]=0\n#contour_breast_plan_to_0_dir_arr[186:,:,:]=0\ncontour_breast_plan_to_0_dir2=sitk.GetImageFromArray(contour_breast_plan_to_0_dir_arr)\ncontour_breast_plan_to_0_dir2.CopyInformation(contour_breast_plan_to_0_dir)\ncontour_breast_plan_to_0_dir=contour_breast_plan_to_0_dir2\n\n\n# In[18]:\n\n\nbreast_contour_dilate=sitk.BinaryDilate(contour_breast_plan_to_0_dir, (0,0,0)) #if using different structure\n\n\n# In[19]:\n\n\nsitk.WriteImage(breast_contour_dilate,\"breast_contour_dilate_\"+patient_no+\"_\"+timepoint+\".nii.gz\")\n\n\n# In[20]:\n\n\nvis = ImageVisualiser(image_ct_0, axis='z', cut=240, window=[-250, 500])\nvis.add_scalar_overlay(image_pt_0, name='PET SUV', colormap=plt.cm.magma, min_value=0.1, max_value=10000)\nvis.add_contour(contour_breast_plan_to_0_dir, name='R BREAST', color='g') #or breast_contour_dilate\nfig = vis.show()\n\n\n# In[21]:\n\n\n#use structure information for breast to mask out all but the breast area (create new array with everything but\n#this area set to 0)\nmasked_pet_breast = sitk.Mask(image_pt_0, contour_breast_plan_to_0_dir) #or breast_contour_dilate\n\n\n# In[22]:\n\n\nvalues = sitk.GetArrayViewFromImage(masked_pet_breast).flatten()\n\nfig, ax = plt.subplots(1,1)\nax.hist(values, bins=np.linspace(1,30000,50), histtype='stepfilled', lw=2)\nax.set_yscale('log')\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel('PET 
value')\nax.set_ylabel('Frequency')\nfig.show()\n\n\n# In[23]:\n\n\nsitk.WriteImage(masked_pet_breast, \"masked_pet_breast_WES_0\" + patient_no + \"_\" + timepoint + \".nii.gz\")\n\n\n# In[24]:\n\n\n#get 95th percentile, then mask the breast volume\nmasked_pet_breast=sitk.Resample(masked_pet_breast, image_pt_0_raw)\nmask_arr=sitk.GetArrayFromImage(masked_pet_breast)\nmask_arr=mask_arr.flatten() \n\np = np.percentile(mask_arr[mask_arr>0], 95)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\n\n# In[ ]:\n\n\n\n\n\n# In[25]:\n\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_95pc.nii.gz\")\n\n\n# In[26]:\n\n\np = np.percentile(mask_arr[mask_arr>0], 90)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_90pc.nii.gz\")\n\n\n# In[27]:\n\n\np = np.percentile(mask_arr[mask_arr>0], 97)\nprint(p)\n\ntum = sitk.Mask(image_pt_0_raw, masked_pet_breast>p)\n\nsitk.WriteImage(tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_97pc.nii.gz\")\n\n\n# In[28]:\n\n\ntum=sitk.Cast(tum,sitk.sitkInt16)\nnew_tums = sitk.RelabelComponent(sitk.ConnectedComponent(tum))\nnew_tum = (new_tums==1)\nsitk.WriteImage(new_tum, \"pet_seg_0\"+patient_no+\"_\"+timepoint+\"_97pc.nii.gz\")\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.min", "pandas.DataFrame", "numpy.max", "numpy.std", "numpy.mean" ], [ "numpy.linspace", "numpy.unique", "numpy.rint", "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.max", "scipy.stats.mode", "numpy.array" ], [ "numpy.percentile" ], [ "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.linspace" ], [ "numpy.sign" ], [ "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uri-granta/GPflow
[ "94b432847cb82c7627a57987f5c5ddd7fc400414" ]
[ "tests/gpflow/posteriors/test_bo_integration.py" ]
[ "# Copyright 2022 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, DefaultDict, Dict, Iterator, Mapping, Set, Tuple, Type, TypeVar\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom _pytest.fixtures import SubRequest\n\nimport gpflow\nfrom gpflow.base import RegressionData\nfrom gpflow.config import default_float\nfrom gpflow.inducing_variables import InducingPoints, InducingVariables\nfrom gpflow.kernels import Kernel, Matern52\nfrom gpflow.likelihoods import Exponential, Likelihood\nfrom gpflow.models import GPR, SGPR, SVGP, VGP, GPModel, training_loss_closure\nfrom gpflow.models.vgp import update_vgp_data\nfrom gpflow.posteriors import AbstractPosterior, PrecomputeCacheType\n\n_CreateModel = Callable[[RegressionData], GPModel]\n_C = TypeVar(\"_C\", bound=_CreateModel)\n\n_MULTI_OUTPUT = \"multi_output\"\n_MODEL_FACTORIES: Dict[_CreateModel, Mapping[str, Any]] = {}\n\n# This exists to make it easy to disable tf.function, for debugging.\n_COMPILE = True\n_MAXITER = 10\n_DEFAULT_ATOL = 1e-10\n_DEFAULT_RTOL = 1e-7\n\n\[email protected](name=\"register_posterior_bo_integration_test\")\ndef _register_posterior_bo_integration_test(\n request: SubRequest,\n tested_posteriors: DefaultDict[str, Set[Type[AbstractPosterior]]],\n) -> Callable[[AbstractPosterior], None]:\n def _register_posterior(posterior: AbstractPosterior) -> None:\n tested_posteriors[request.function.__name__].add(posterior.__class__)\n\n return _register_posterior\n\n\ndef model_factory(\n *flags: str, atol: float = _DEFAULT_ATOL, rtol: float = _DEFAULT_RTOL\n) -> Callable[[_C], _C]:\n \"\"\" Decorator for adding a function to the `_MODEL_FACTORIES` list. 
\"\"\"\n\n properties = {\n \"atol\": atol,\n \"rtol\": rtol,\n **{flag: True for flag in flags},\n }\n\n def register(create_model: _C) -> _C:\n _MODEL_FACTORIES[create_model] = properties\n return create_model\n\n return register\n\n\ndef create_kernel() -> Kernel:\n return Matern52()\n\n\ndef create_likelihood() -> Likelihood:\n return Exponential()\n\n\ndef create_inducing_points(data: RegressionData) -> InducingPoints:\n n_features = data[0].shape[1]\n n_inducing_points = 5\n rng = np.random.default_rng(20220208)\n Z = tf.constant(rng.random((n_inducing_points, n_features)))\n return InducingPoints(Z)\n\n\ndef create_q(\n inducing_variable: InducingVariables, *, row_scale: int = 1, column_scale: int = 1\n) -> Tuple[bool, tf.Tensor, tf.Tensor]:\n n_inducing_points = inducing_variable.num_inducing\n rng = np.random.default_rng(20220133)\n q_diag = True\n q_mu = tf.constant(rng.random((row_scale * n_inducing_points, column_scale)))\n q_sqrt = tf.constant(rng.random((row_scale * n_inducing_points, column_scale))) ** 2\n return q_diag, q_mu, q_sqrt\n\n\n@model_factory(rtol=1e-3)\ndef create_gpr(data: RegressionData) -> GPR:\n return GPR(data=data, kernel=create_kernel())\n\n\n@model_factory(rtol=1e-4)\ndef create_sgpr(data: RegressionData) -> SGPR:\n return SGPR(data=data, kernel=create_kernel(), inducing_variable=create_inducing_points(data))\n\n\n@model_factory(rtol=5e-3)\ndef create_vgp(data: RegressionData) -> VGP:\n return VGP(data=data, kernel=create_kernel(), likelihood=create_likelihood())\n\n\n@model_factory()\ndef create_svgp__independent_single_output(data: RegressionData) -> SVGP:\n inducing_variable = create_inducing_points(data)\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=create_kernel(),\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__fully_correlated_multi_output(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)\n inducing_variable = create_inducing_points(data)\n q_diag, q_mu, q_sqrt = create_q(inducing_variable, row_scale=n_outputs)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__independent_multi_output(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)\n inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(\n create_inducing_points(data)\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable, column_scale=n_outputs)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__fallback_independent_latent_posterior(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n rng = np.random.default_rng(20220131)\n kernel = gpflow.kernels.LinearCoregionalization(\n [create_kernel()],\n W=tf.constant(rng.standard_normal((n_outputs, 1))),\n )\n inducing_variable = gpflow.inducing_variables.FallbackSeparateIndependentInducingVariables(\n [create_inducing_points(data)]\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n 
inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__linear_coregionalization(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n rng = np.random.default_rng(20220131)\n kernel = gpflow.kernels.LinearCoregionalization(\n [create_kernel()], W=tf.constant(rng.standard_normal((n_outputs, 1)))\n )\n inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(\n create_inducing_points(data)\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\[email protected](params=_MODEL_FACTORIES)\ndef _create_model(request: SubRequest) -> _CreateModel:\n return request.param\n\n\[email protected]\ndef _multi_output(_create_model: _CreateModel) -> bool:\n return _MULTI_OUTPUT in _MODEL_FACTORIES[_create_model]\n\n\[email protected]\ndef _rtol(_create_model: _CreateModel) -> float:\n return _MODEL_FACTORIES[_create_model][\"rtol\"]\n\n\[email protected]\ndef _atol(_create_model: _CreateModel) -> float:\n return _MODEL_FACTORIES[_create_model][\"atol\"]\n\n\[email protected]\ndef _f_minimum(_multi_output: bool) -> tf.Tensor:\n return (\n tf.constant(\n [\n [0.2, 0.4],\n [0.4, 0.6],\n [0.6, 0.8],\n ],\n dtype=default_float(),\n )\n if _multi_output\n else tf.constant([[0.3, 0.5]], dtype=default_float())\n )\n\n\[email protected]\ndef _f(_f_minimum: tf.Tensor) -> Callable[[tf.Tensor], tf.Tensor]:\n def f(X: tf.Tensor) -> tf.Tensor:\n err = X[:, None, :] - _f_minimum[None, :, :]\n err_sq = err ** 2\n return tf.reduce_sum(err_sq, axis=-1)\n\n return f\n\n\[email protected]\ndef _data(\n _f: Callable[[tf.Tensor], tf.Tensor], _f_minimum: tf.Tensor\n) -> Tuple[tf.Variable, tf.Variable]:\n n_initial_data = 3\n n_outputs, n_features = _f_minimum.shape\n\n rng = np.random.default_rng(20220126)\n X = tf.Variable(\n rng.random((n_initial_data, n_features)),\n shape=[None, n_features],\n dtype=default_float(),\n trainable=False,\n )\n Y = tf.Variable(\n _f(X),\n shape=[None, n_outputs],\n dtype=default_float(),\n trainable=False,\n )\n\n return X, Y\n\n\[email protected]\ndef _extend_data(\n _data: Tuple[tf.Variable, tf.Variable], _f: Callable[[tf.Tensor], tf.Tensor]\n) -> Callable[[GPModel], Iterator[int]]:\n n_iterations = 3\n rng = np.random.default_rng(20220127)\n X, Y = _data\n n_features = X.shape[1]\n\n def iterate(model: GPModel) -> Iterator[int]:\n for i in range(n_iterations):\n X_new = tf.constant(rng.random((1, n_features)))\n Y_new = _f(X_new)\n X_i = tf.concat([X, X_new], axis=0)\n Y_i = tf.concat([Y, Y_new], axis=0)\n\n if isinstance(model, VGP):\n update_vgp_data(model, (X_i, Y_i))\n else:\n X.assign(X_i)\n Y.assign(Y_i)\n yield i\n\n return iterate\n\n\[email protected]\ndef _X_new(_data: Tuple[tf.Variable, tf.Variable]) -> tf.Tensor:\n rng = np.random.default_rng(20220128)\n X, _Y = _data\n n_features = X.shape[1]\n return tf.constant(rng.random((3, n_features)))\n\n\[email protected]\ndef _optimize(_data: Tuple[tf.Variable, tf.Variable]) -> Callable[[GPModel], None]:\n def optimize(model: GPModel) -> None:\n gpflow.optimizers.Scipy().minimize(\n training_loss_closure(model, _data, compile=_COMPILE),\n variables=model.trainable_variables,\n options=dict(maxiter=_MAXITER),\n method=\"BFGS\",\n compile=_COMPILE,\n )\n\n return optimize\n\n\ndef test_posterior_bo_integration__predict_f(\n register_posterior_bo_integration_test: 
Callable[[AbstractPosterior], None],\n _create_model: _CreateModel,\n _data: Tuple[tf.Variable, tf.Variable],\n _extend_data: Callable[[GPModel], Iterator[int]],\n _X_new: tf.Tensor,\n _rtol: float,\n _atol: float,\n) -> None:\n \"\"\"\n Check that data added incrementally is correctly reflected in `predict_f`.\n \"\"\"\n _X, Y = _data\n n_rows_new = _X_new.shape[0]\n n_outputs = Y.shape[1]\n\n model = _create_model(_data)\n posterior = model.posterior(PrecomputeCacheType.VARIABLE)\n register_posterior_bo_integration_test(posterior)\n predict_f = posterior.predict_f\n if _COMPILE:\n predict_f = tf.function(predict_f)\n\n for _ in _extend_data(model):\n posterior.update_cache()\n compiled_mean, compiled_var = predict_f(_X_new)\n\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)\n\n eager_model = _create_model(_data)\n eager_mean, eager_var = eager_model.predict_f(_X_new)\n\n np.testing.assert_allclose(eager_mean, compiled_mean, rtol=_rtol, atol=_atol)\n np.testing.assert_allclose(eager_var, compiled_var, rtol=_rtol, atol=_atol)\n\n\ndef test_posterior_bo_integration__optimization(\n register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],\n _create_model: _CreateModel,\n _data: Tuple[tf.Variable, tf.Variable],\n _extend_data: Callable[[GPModel], Iterator[int]],\n _X_new: tf.Tensor,\n _optimize: Callable[[GPModel], None],\n _rtol: float,\n _atol: float,\n) -> None:\n \"\"\"\n Check that data added incrementally is considered when optimizing a model.\n \"\"\"\n _X, Y = _data\n n_rows_new = _X_new.shape[0]\n n_outputs = Y.shape[1]\n\n model = _create_model(_data)\n posterior = model.posterior(PrecomputeCacheType.VARIABLE)\n register_posterior_bo_integration_test(posterior)\n predict_f = posterior.predict_f\n if _COMPILE:\n predict_f = tf.function(predict_f)\n\n # Add all the data first, and then `optimize`, so that both models are optimized the same number\n # of times and with the same data, so they converge to the same result.\n\n for _ in _extend_data(model):\n pass\n\n _optimize(model)\n posterior.update_cache()\n compiled_mean, compiled_var = predict_f(_X_new)\n\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)\n\n eager_model = _create_model(_data)\n _optimize(eager_model)\n eager_mean, eager_var = eager_model.predict_f(_X_new)\n\n np.testing.assert_allclose(eager_mean, compiled_mean, rtol=_rtol, atol=_atol)\n np.testing.assert_allclose(eager_var, compiled_var, rtol=_rtol, atol=_atol)\n" ]
[ [ "numpy.testing.assert_equal", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.function", "numpy.testing.assert_allclose", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] } ]
tirkarthi/odin-ai
[ "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "7900bef82ad8801d0c73880330d5b24d9ff7cd06" ]
[ "odin/ml/plda.py", "odin/fuel/_image_cifar.py", "odin/preprocessing/audio/audio.py", "odin/networks/skip_connection.py", "examples/logistic_regression.py", "odin/bay/distributions/combined.py", "odin/backend/metrics.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\"\nauthor: 'Omid Sadjadi, Timothee Kheyrkhah'\nemail: '[email protected]'\n\"\"\"\nimport time\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\nfrom scipy.linalg import cholesky, eigh, inv, solve, svd\nfrom six import string_types\n\nfrom odin.backend import calc_white_mat, length_norm\nfrom odin.ml.base import BaseEstimator, Evaluable, TransformerMixin\nfrom odin.ml.scoring import (VectorNormalizer, compute_class_avg,\n compute_within_cov)\nfrom odin.utils import unique\n\n\ndef logdet(A):\n u = cholesky(A)\n y = 2 * np.log(np.diag(u)).sum()\n return y\n\nclass PLDA(BaseEstimator, TransformerMixin, Evaluable):\n r\"\"\" Probabilistic LDA\n\n Parameters\n ----------\n n_phi : int\n number of dimension for the latent space\n\n centering : bool (default: True)\n mean normalization the data before EM\n\n wccn : bool (default: True)\n within class covariance normalization before EM\n\n unit_length : bool (default: True)\n normalize vector length of each sample to 1 before EM\n\n n_iter : {integer, 'auto'}\n if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)\n compared to the `improve_threshold`\n\n improve_threshold : scalar\n Only used in case `n_iter='auto'`\n\n labels : {list of string, or None} (default: None)\n labels information for `evaluate` method\n\n seed : int\n random seed for reproducibility\n\n verbose : int (default: 0)\n verbose level, 0 for turning off all logging activities,\n 1 for basics notification, 2 for fitting progress.\n if `2`, compute log-likelihood during fitting EM, this will\n significantly slows down the process, only suggested for debugging\n\n Attributes\n ----------\n Sigma_ : [feat_dim, feat_dim]\n Phi_ : [feat_dim, n_phi]\n Sb_ : [feat_dim, feat_dim]\n St_ : [feat_dim, feat_dim]\n Lambda : []\n Uk : []\n Q_hat : []\n X_model_ : [num_class, feat_dim]\n class-dependence feature vectors\n \"\"\"\n\n def __init__(self, n_phi=None,\n centering=True, wccn=True, unit_length=True,\n n_iter='auto', improve_threshold=1e-1,\n labels=None, dtype='float64', random_state=None,\n verbose=0):\n super(PLDA, self).__init__()\n # ====== check n_phi ====== #\n if n_phi is not None:\n n_phi = int(n_phi)\n self.n_phi_ = n_phi\n # ====== check num_iter ====== #\n if isinstance(n_iter, string_types):\n n_iter = n_iter.lower()\n assert n_iter == 'auto', 'Invalid `n_iter` value: %s' % n_iter\n elif isinstance(n_iter, Number):\n assert n_iter > 0, \"`n_iter` must greater than 0, but given: %d\" % n_iter\n self.n_iter_ = n_iter\n self.improve_threshold_ = float(improve_threshold)\n # ====== other ====== #\n self.feat_dim_ = None\n self._labels = labels\n self.verbose_ = int(verbose)\n # for normalization\n self._normalizer = VectorNormalizer(\n centering=centering, wccn=wccn, unit_length=unit_length,\n lda=False, concat=False)\n self._dtype = np.dtype(dtype)\n # ====== check random state ====== #\n if random_state is None:\n self._rand_state = np.random.RandomState(None)\n elif isinstance(random_state, Number):\n self._rand_state = np.random.RandomState(seed=random_state)\n elif isinstance(random_state, np.random.RandomState):\n self._rand_state = random_state\n else:\n raise ValueError(\"Invalid argument for `random_state`: %s\" % str(random_state))\n # Attributes\n self.Sigma_ = None\n self.Phi_ = None\n self.Sb_ = None\n self.St_ = None\n\n # ==================== properties ==================== #\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def feat_dim(self):\n return 
self.feat_dim_\n\n @property\n def normalizer(self):\n return self._normalizer\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_classes(self):\n return len(self._labels)\n\n @property\n def is_fitted(self):\n if not hasattr(self, 'Lambda_') or \\\n not hasattr(self, 'Uk_') or \\\n not hasattr(self, 'Q_hat_') or \\\n not hasattr(self, 'X_model_'):\n return False\n return True\n\n # ==================== Pickling ==================== #\n def __getstate__(self):\n if not self.is_fitted:\n raise RuntimeError(\"The PLDA have not been fitted, nothing to pickle!\")\n return (self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,\n self._normalizer, self._dtype, self._rand_state,\n self.Sigma_, self.Phi_, self.Sb_, self.St_,\n self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_)\n\n def __setstate__(self, states):\n (self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,\n self._normalizer, self._dtype, self._rand_state,\n self.Sigma_, self.Phi_, self.Sb_, self.St_,\n self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_) = states\n\n # ==================== helpers ==================== #\n def initialize(self, X, labels):\n feat_dim = X.shape[1]\n if self.feat_dim is None or self._num_classes is None:\n self.feat_dim_ = int(feat_dim)\n if self._labels is None:\n self._labels = labels\n if self.feat_dim <= self.n_phi_:\n raise RuntimeError(\"`feat_dim=%d` must be greater than `n_phi=%d`\" %\n (self.feat_dim, self.n_phi_))\n # ====== initialize ====== #\n # covariance matrix of the residual term\n # self.Sigma_ = 1. / self.feat_dim * np.eye(self.feat_dim, dtype=self.dtype)\n self.Sigma_ = (1. / self.feat_dim * np.eye(self.feat_dim) +\n self._rand_state.randn(self.feat_dim, self.feat_dim)\n ).astype(self.dtype)\n # self.Sigma_ = np.cov(X.T).astype(self.dtype)\n # self.Sigma_ = (np.cov(X.T) +\n # self._rand_state.randn(self.feat_dim, self.feat_dim)\n # ).astype(self.dtype)\n # self.Sigma_ = 100 * self._rand_state.randn(\n # self.feat_dim, self.feat_dim).astype(self.dtype)\n # factor loading matrix (Eignevoice matrix) [feat_dim, n_phi]\n # self.Phi_ = np.r_[np.eye(self.n_phi_),\n # np.zeros((self.feat_dim - self.n_phi_, self.n_phi_))]\n # self.Phi_ = self._rand_state.randn(self.feat_dim, self.n_phi_).astype(self.dtype)\n self.Phi_ = self.normalizer.transform(\n self._rand_state.randn(self.n_phi_, self.feat_dim)\n ).T.astype(self.dtype)\n self.Sb_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)\n self.St_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)\n # ====== validate the dimension ====== #\n if self.feat_dim != feat_dim:\n raise ValueError(\"Mismatch the input feature dimension, %d != %d\" %\n (self.feat_dim, feat_dim))\n if self.num_classes != len(labels):\n raise ValueError(\"Mismatch the number of output classes, %d != %d\" %\n (self.num_classes, len(labels)))\n\n # ==================== sklearn ==================== #\n def _update_caches(self):\n # ====== update cached matrices for scoring ====== #\n iSt = inv(self.St_) # [feat_dim, feat_dim]\n iS = inv(self.St_ - np.dot(np.dot(self.Sb_, iSt), self.Sb_))\n Q = iSt - iS # [feat_dim, feat_dim]\n P = np.dot(np.dot(iSt, self.Sb_), iS) # [feat_dim, feat_dim]\n U, s, V = svd(P, full_matrices=False)\n self.Lambda_ = np.diag(s[:self.n_phi_]) # [n_phi, n_phi]\n self.Uk_ = U[:, :self.n_phi_] # [feat_dim, n_phi]\n self.Q_hat_ = np.dot(np.dot(self.Uk_.T, Q), self.Uk_) # [n_phi, n_phi]\n\n def fit_maximum_likelihood(self, X, y):\n # ====== preprocessing ====== #\n if 
isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n if isinstance(y, (tuple, list)):\n y = np.asarray(y)\n # ====== normalizing and initializing ====== #\n X = self.normalizer.fit(X, y).transform(X)\n classes = np.unique(y)\n self.initialize(X, labels=classes)\n # ====== ml ====== #\n Sw = compute_within_cov(X, y, classes)\n self.St_ = np.cov(X.T)\n self.Sb_ = self.St_ - Sw\n # ====== the default class_avg ====== #\n self._update_caches()\n model_vecs = compute_class_avg(X, y, classes=classes)\n self.X_model_ = np.dot(model_vecs, self.Uk_)\n return self\n\n def fit(self, X, y):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n y : [num_samples]\n \"\"\"\n # ====== preprocessing ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n if isinstance(y, (tuple, list)):\n y = np.asarray(y)\n assert X.shape[0] == y.shape[0], \\\n \"Number of samples mismatch in `X` and `y`, %d != %d\" % \\\n (X.shape[0], y.shape[0])\n # ====== normalize and initialize ====== #\n y_counts = np.bincount(y) # sessions per speaker\n classes = np.unique(y)\n X = self.normalizer.fit(X, y).transform(X)\n self.initialize(X, labels=classes)\n # ====== Initializing ====== #\n F = np.zeros((self.num_classes, self.feat_dim))\n for clz in np.unique(y):\n # Speaker indices\n F[clz, :] = X[y == clz, :].sum(axis=0)\n if self.verbose_ > 0:\n print('Re-estimating the Eigenvoice subspace with {} factors ...'.format(self.n_phi_))\n X_sqr = np.dot(X.T, X)\n # ====== iteration ====== #\n iter = 0\n last_llk_value = None\n while True:\n e_time = time.time()\n # expectation\n Ey, Eyy = self.expectation_plda(F, y_counts)\n e_time = time.time() - e_time\n # maximization\n m_time = time.time()\n self.maximization_plda(X, X_sqr, F, Ey, Eyy)\n m_time = time.time() - m_time\n # log-likelihood\n llk = 'None'\n llk_value = None\n if self.verbose_ > 1 or isinstance(self.n_iter_, string_types):\n llk_value = self.compute_llk(X)\n llk = '%.2f' % llk_value\n if self.verbose_ > 0:\n print('#iter:%-3d \\t [llk = %s] \\t [E-step = %.2f s] [M-step = %.2f s]' %\n (iter + 1, llk, e_time, m_time))\n # check breaking condition\n iter += 1\n if isinstance(self.n_iter_, Number):\n if iter >= self.n_iter_:\n break\n elif iter > 2 and last_llk_value is not None:\n if llk_value - last_llk_value < self.improve_threshold_:\n break\n last_llk_value = llk_value\n # ====== Update the eigenvoice space ====== #\n self.Sb_ = self.Phi_.dot(self.Phi_.T)\n self.St_ = self.Sb_ + self.Sigma_\n # ====== the default class_avg ====== #\n self._update_caches()\n model_vecs = compute_class_avg(X, y, classes=classes)\n self.X_model_ = np.dot(model_vecs, self.Uk_)\n\n def expectation_plda(self, F, cls_counts):\n \"\"\"\n Parameters\n ----------\n F : [num_classes, feat_dim]\n cls_count : [num_classes]\n \"\"\"\n # computes the posterior mean and covariance of the factors\n num_classes = F.shape[0]\n Eyy = np.zeros(shape=(self.n_phi_, self.n_phi_))\n Ey_clz = np.zeros(shape=(num_classes, self.n_phi_))\n # initialize common terms to save computations\n uniqFreqs = unique(cls_counts, keep_order=True)\n n_uniq = len(uniqFreqs)\n invTerms = np.empty(shape=(n_uniq, self.n_phi_, self.n_phi_))\n PhiT_invS = solve(self.Sigma_.T, self.Phi_).T # [n_phi, feat_dim]\n PhiT_invS_Phi = np.dot(PhiT_invS, self.Phi_) # [n_phi, n_phi]\n I = np.eye(self.n_phi_)\n\n for ix in range(n_uniq):\n nPhiT_invS_Phi = uniqFreqs[ix] * PhiT_invS_Phi\n invTerms[ix] = inv(I + 
nPhiT_invS_Phi)\n\n for clz in range(num_classes):\n num_samples = cls_counts[clz]\n PhiT_invS_y = np.dot(PhiT_invS, F[clz, :])\n idx = np.flatnonzero(uniqFreqs == num_samples)[0]\n Cyy = invTerms[idx]\n Ey_clz[clz, :] = np.dot(Cyy, PhiT_invS_y)\n Eyy += num_samples * Cyy\n\n Eyy += np.dot((Ey_clz * cls_counts[:, None]).T, Ey_clz)\n return Ey_clz, Eyy\n\n def compute_llk(self, X):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n \"\"\"\n num_samples = X.shape[0]\n S = np.dot(self.Phi_, self.Phi_.T) + self.Sigma_ # [feat_dim, feat_dim]\n llk = -0.5 * (self.feat_dim * num_samples * np.log(2 * np.pi) +\n num_samples * logdet(S) +\n np.sum(X * solve(S, X.T).T))\n return llk\n\n def maximization_plda(self, X, X_sqr, F, Ey, Eyy):\n \"\"\"\n ML re-estimation of the Eignevoice subspace and the covariance of the\n residual noise (full).\n\n Paremters\n ---------\n X : [num_samples, feat_dim]\n X_cov : [feat_dim, feat_dim]\n F : [num_classes, feat_dim]\n Ey : [num_classes, n_phi]\n Eyy : [n_phi, n_phi]\n \"\"\"\n num_samples = X.shape[0]\n Ey_FT = np.dot(Ey.T, F) # [n_phi, feat_dim]\n self.Phi_ = solve(Eyy.T, Ey_FT).T # [feat_dim, n_phi]\n self.Sigma_ = 1. / num_samples * (X_sqr - np.dot(self.Phi_, Ey_FT))\n\n def transform(self, X):\n if not self.is_fitted:\n raise RuntimeError(\"This model hasn't been fitted!\")\n # ====== check X ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n # ====== transform into latent space ====== #\n X_norm = self.normalizer.transform(X)\n X_project = np.dot(X_norm, self.Uk_) # [num_samples, n_phi]\n return X_project\n # return np.dot(X_project, self.Q_hat_)\n # h = np.dot(X_project, self.Q_hat_) * X_project\n # return h\n\n def predict_log_proba(self, X, X_model=None):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n X_model : [num_classes, feat_dim]\n if None, use class average extracted based on fitted data\n\n Return\n ------\n log-probabilities matrix [num_samples, num_classes]\n \"\"\"\n if not self.is_fitted:\n raise RuntimeError(\"This model hasn't been fitted!\")\n # ====== check X_model ====== #\n if X_model is None:\n X_model = self.X_model_\n else:\n # [num_classes, n_phi]\n X_model = np.dot(self.normalizer.transform(X_model), self.Uk_)\n if X_model.shape[0] != self.num_classes:\n warnings.warn(\"The model matrix contains %d classes, but the \"\n \"fitted number of classes is %d\" %\n (X_model.shape[0], self.num_classes))\n # ====== check X ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n # ====== transform the input matrices ====== #\n X = np.dot(self.normalizer.transform(X), self.Uk_) # [num_samples, n_phi]\n # [num_classes, 1]\n score_h1 = np.sum(np.dot(X_model, self.Q_hat_) * X_model, axis=1, keepdims=True)\n # [num_samples, 1]\n score_h2 = np.sum(np.dot(X, self.Q_hat_) * X, axis=1, keepdims=True)\n # [num_samples, num_classes]\n score_h1h2 = 2 * np.dot(X, np.dot(X_model, self.Lambda_).T)\n # [num_samples, num_classes]\n scores = score_h1h2 + score_h1.T + score_h2\n return scores\n", "from __future__ import absolute_import, division, print_function\n\nimport os\nimport pickle\nimport shutil\nimport tarfile\nfrom urllib.request import urlretrieve\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom odin.fuel._image_base import ImageDataset, get_partition\nfrom odin.utils import get_file, md5_checksum, md5_folder, one_hot\n\n\nclass CIFAR(ImageDataset):\n r\"\"\" CIFAR10 \"\"\"\n\n URL = {\n 
10: r\"https://www.cs.toronto.edu/%7Ekriz/cifar-10-python.tar.gz\",\n 100: r\"https://www.cs.toronto.edu/%7Ekriz/cifar-100-python.tar.gz\"\n }\n\n MD5 = {\n 10: r\"c58f30108f718f92721af3b95e74349a\",\n 100: r\"eb9058c3a382ffc7106e4002c42a8d85\"\n }\n\n MD5_EXTRACT = {\n 10: r\"341026eedb2822e04c43dfb5a62e1d19\",\n 100: r\"fb755dd51de7edcbd1a5f794883159d0\"\n }\n\n DIR_NAME = {10: \"cifar-10-batches-py\", 100: \"cifar-100-python\"}\n\n def __init__(self, version, path=\"~/tensorflow_datasets/cifar\"):\n path = os.path.abspath(os.path.expanduser(path))\n if not os.path.exists(path):\n os.makedirs(path)\n version = int(version)\n assert version in (10, 100), \"Only support CIFAR-10 and CIFAR-100\"\n ## download and extract\n url = CIFAR.URL[version]\n basename = os.path.basename(url)\n zip_path = os.path.join(path, basename)\n if os.path.exists(\n zip_path) and md5_checksum(zip_path) != CIFAR.MD5[version]:\n os.remove(zip_path)\n if not os.path.exists(zip_path):\n from tqdm import tqdm\n prog = tqdm(desc=f\"Downloading file '{basename}'\", unit=\"kB\")\n\n def dl_progress(count, block_size, total_size):\n kB = count * block_size / 1024.\n prog.update(kB - prog.n)\n\n urlretrieve(url, zip_path, reporthook=dl_progress)\n prog.clear()\n prog.close()\n # extract\n data_dir = os.path.join(path, CIFAR.DIR_NAME[version])\n if os.path.exists(\n data_dir) and md5_folder(data_dir) != CIFAR.MD5_EXTRACT[version]:\n shutil.rmtree(data_dir)\n if not os.path.exists(data_dir):\n with tarfile.open(zip_path, \"r:gz\") as f:\n print(\"Extract zip file to \")\n f.extractall(path)\n ## load data\n X_train = []\n y_train = []\n y_train_coarse = []\n X_test = []\n y_test = []\n y_test_coarse = []\n for i in os.listdir(data_dir):\n if '.' not in i:\n with open(os.path.join(data_dir, i), 'rb') as f:\n data = pickle.load(f, encoding='bytes')\n if b'batch_label' not in data: # metadata\n continue\n # labels\n if b\"labels\" in data:\n lab = data[b'labels']\n elif b\"fine_labels\" in data:\n lab = data[b'fine_labels']\n lab_coarse = data[\n b'coarse_labels'] if b'coarse_labels' in data else []\n # store the data\n if b'test' in data[b'batch_label'] or 'test' in i:\n X_test.append(data[b'data'])\n y_test += lab\n y_test_coarse += lab_coarse\n else:\n X_train.append(data[b'data'])\n y_train += lab\n y_train_coarse += lab_coarse\n\n X_train = np.concatenate(X_train, axis=0)\n y_train = np.array(y_train)\n self.X_test = np.concatenate(X_test, axis=0)\n self.y_test = np.array(y_test)\n self.X_valid = X_train[:5000]\n self.y_valid = y_train[:5000]\n self.X_train = X_train[5000:]\n self.y_train = y_train[5000:]\n if len(y_train_coarse) > 0:\n y_train_coarse = np.array(y_train_coarse)\n self.y_valid_coarse = y_train_coarse[:5000]\n self.y_train_coarse = y_train_coarse[5000:]\n self.y_test_coarse = np.array(y_test_coarse)\n\n @property\n def is_binary(self):\n return False\n\n @property\n def shape(self):\n return (32, 32, 3)\n\n def create_dataset(self,\n batch_size=64,\n drop_remainder=False,\n shuffle=1000,\n prefetch=tf.data.experimental.AUTOTUNE,\n cache='',\n parallel=None,\n partition='train',\n inc_labels=False,\n seed=1) -> tf.data.Dataset:\n r\"\"\"\n Arguments:\n partition : {'train', 'valid', 'test'}\n inc_labels : a Boolean or Scalar. 
If True, return both image and label,\n otherwise, only image is returned.\n If a scalar is provided, it indicate the percent of labelled data\n in the mask.\n\n Return :\n tensorflow.data.Dataset :\n image - `(tf.float32, (None, 28, 28, 1))`\n label - `(tf.float32, (None, 10))`\n mask - `(tf.bool, (None, 1))` if 0. < inc_labels < 1.\n where, `mask=1` mean labelled data, and `mask=0` for unlabelled data\n \"\"\"\n X, y = get_partition(partition,\n train=(self.X_train, self.y_train),\n valid=(self.X_valid, self.y_valid),\n test=(self.X_test, self.y_test))\n inc_labels = float(inc_labels)\n gen = tf.random.experimental.Generator.from_seed(seed=seed)\n assert X.shape[0] == y.shape[0]\n X = np.reshape(X, (-1, 3, 32, 32))\n X = np.transpose(X, (0, 2, 3, 1))\n y = one_hot(y, self.n_labels)\n\n def _process(*data):\n image = tf.cast(data[0], tf.float32)\n image = self.normalize_255(image)\n if inc_labels:\n label = tf.cast(data[1], tf.float32)\n if 0. < inc_labels < 1.: # semi-supervised mask\n mask = gen.uniform(shape=(1,)) < inc_labels\n return dict(inputs=(image, label), mask=mask)\n return image, label\n return image\n\n ds = tf.data.Dataset.from_tensor_slices(X)\n if inc_labels > 0.:\n ds = tf.data.Dataset.zip((ds, tf.data.Dataset.from_tensor_slices(y)))\n ds = ds.map(_process, parallel)\n if cache is not None:\n ds = ds.cache(str(cache))\n # shuffle must be called after cache\n if shuffle is not None and shuffle > 0:\n ds = ds.shuffle(int(shuffle))\n ds = ds.batch(batch_size, drop_remainder)\n if prefetch is not None:\n ds = ds.prefetch(prefetch)\n return ds\n\n\n# ===========================================================================\n# Shortcuts\n# ===========================================================================\nclass CIFAR10(CIFAR):\n\n def __init__(self, path=\"~/tensorflow_datasets/cifar\"):\n super().__init__(10, path=path)\n\n @property\n def labels(self):\n return np.array([\n 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck'\n ])\n\n\nclass CIFAR100(CIFAR):\n\n def __init__(self, coarse_labels=False, path=\"~/tensorflow_datasets/cifar\"):\n super().__init__(100, path=path)\n self._coarse_labels = bool(coarse_labels)\n if self._coarse_labels:\n self.y_train = self.y_train_coarse\n self.y_valid = self.y_valid_coarse\n self.y_test = self.y_test_coarse\n\n @property\n def labels(self):\n if self._coarse_labels:\n y = [\n 'aquatic_mammals', 'fish', 'flowers', 'food_containers',\n 'fruit_and_vegetables', 'household_electrical_devices',\n 'household_furniture', 'insects', 'large_carnivores',\n 'large_man-made_outdoor_things', 'large_natural_outdoor_scenes',\n 'large_omnivores_and_herbivores', 'medium_mammals',\n 'non-insect_invertebrates', 'people', 'reptiles', 'small_mammals',\n 'trees', 'vehicles_1', 'vehicles_2'\n ]\n else:\n y = [\n 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee',\n 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus',\n 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle',\n 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'cra',\n 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish',\n 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard',\n 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man',\n 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom',\n 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear',\n 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',\n 'possum', 'rabbit', 
'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea',\n 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake',\n 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper',\n 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor',\n 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale',\n 'willow_tree', 'wolf', 'woman', 'worm'\n ]\n return np.array(y)\n\n\nclass CIFAR20(CIFAR100):\n\n def __init__(self, path=\"~/tensorflow_datasets/cifar\"):\n super().__init__(coarse_labels=True, path=path)\n", "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\n\n\n# returns a list of augmented audio data, stereo or mono\ndef augment_audio(y,\n sr,\n n_augment=0,\n allow_speedandpitch=True,\n allow_pitch=True,\n allow_speed=True,\n allow_dyn=True,\n allow_noise=True,\n allow_timeshift=True,\n tab=\"\",\n quiet=False):\n\n mods = [y] # always returns the original as element zero\n length = y.shape[0]\n\n for i in range(n_augment):\n if not quiet:\n print(tab + \"augment_audio: \", i + 1, \"of\", n_augment)\n y_mod = y\n count_changes = 0\n\n # change speed and pitch together\n if (allow_speedandpitch) and random_onoff():\n length_change = np.random.uniform(low=0.9, high=1.1)\n speed_fac = 1.0 / length_change\n if not quiet:\n print(tab + \" resample length_change = \", length_change)\n tmp = np.interp(np.arange(0, len(y), speed_fac), np.arange(0, len(y)), y)\n #tmp = resample(y,int(length*lengt_fac)) # signal.resample is too slow\n minlen = min(y.shape[0], tmp.shape[0]) # keep same length as original;\n y_mod *= 0 # pad with zeros\n y_mod[0:minlen] = tmp[0:minlen]\n count_changes += 1\n\n # change pitch (w/o speed)\n if (allow_pitch) and random_onoff():\n bins_per_octave = 24 # pitch increments are quarter-steps\n pitch_pm = 4 # +/- this many quarter steps\n pitch_change = pitch_pm * 2 * (np.random.uniform() - 0.5)\n if not quiet:\n print(tab + \" pitch_change = \", pitch_change)\n y_mod = librosa.effects.pitch_shift(y,\n sr,\n n_steps=pitch_change,\n bins_per_octave=bins_per_octave)\n count_changes += 1\n\n # change speed (w/o pitch),\n if (allow_speed) and random_onoff():\n speed_change = np.random.uniform(low=0.9, high=1.1)\n if not quiet:\n print(tab + \" speed_change = \", speed_change)\n tmp = librosa.effects.time_stretch(y_mod, speed_change)\n minlen = min(y.shape[0], tmp.shape[0]) # keep same length as original;\n y_mod *= 0 # pad with zeros\n y_mod[0:minlen] = tmp[0:minlen]\n count_changes += 1\n\n # change dynamic range\n if (allow_dyn) and random_onoff():\n dyn_change = np.random.uniform(low=0.5, high=1.1) # change amplitude\n if not quiet:\n print(tab + \" dyn_change = \", dyn_change)\n y_mod = y_mod * dyn_change\n count_changes += 1\n\n # add noise\n if (allow_noise) and random_onoff():\n noise_amp = 0.005 * np.random.uniform() * np.amax(y)\n if random_onoff():\n if not quiet:\n print(tab + \" gaussian noise_amp = \", noise_amp)\n y_mod += noise_amp * np.random.normal(size=length)\n else:\n if not quiet:\n print(tab + \" uniform noise_amp = \", noise_amp)\n y_mod += noise_amp * np.random.normal(size=length)\n count_changes += 1\n\n # shift in time forwards or backwards\n if (allow_timeshift) and random_onoff():\n timeshift_fac = 0.2 * 2 * (np.random.uniform() - 0.5\n ) # up to 20% of length\n if not quiet:\n print(tab + \" timeshift_fac = \", timeshift_fac)\n start = int(length * timeshift_fac)\n if (start > 0):\n y_mod = np.pad(y_mod, (start, 0), mode='constant')[0:y_mod.shape[0]]\n else:\n y_mod = np.pad(y_mod, (0, -start), 
mode='constant')[0:y_mod.shape[0]]\n count_changes += 1\n\n # last-ditch effort to make sure we made a change (recursive/sloppy, but...works)\n if (0 == count_changes):\n if not quiet:\n print(\"No changes made to signal, trying again\")\n mods.append(\n augment_audio(y, sr, n_augment=1, tab=\" \", quiet=quiet)[1])\n else:\n mods.append(y_mod)\n\n return mods\n\n\n\"\"\" scale frequency axis logarithmically \"\"\"\n\n\ndef logscale_spec(spec, sr=44100, factor=20., alpha=1.0, f0=0.9, fmax=1):\n spec = spec[:, 0:256]\n timebins, freqbins = np.shape(spec)\n scale = np.linspace(0, 1, freqbins) #** factor\n\n # http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=650310&url=http%3A%2F%2Fieeexplore.ieee.org%2Fiel4%2F89%2F14168%2F00650310\n scale = np.array(\n map(\n lambda x: x * alpha if x <= f0 else (fmax - alpha * f0) /\n (fmax - f0) * (x - f0) + alpha * f0, scale))\n scale *= (freqbins - 1) / max(scale)\n\n newspec = np.complex128(np.zeros([timebins, freqbins]))\n allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])\n freqs = [0.0 for i in range(freqbins)]\n totw = [0.0 for i in range(freqbins)]\n for i in range(0, freqbins):\n if (i < 1 or i + 1 >= freqbins):\n newspec[:, i] += spec[:, i]\n freqs[i] += allfreqs[i]\n totw[i] += 1.0\n continue\n else:\n # scale[15] = 17.2\n w_up = scale[i] - np.floor(scale[i])\n w_down = 1 - w_up\n j = int(np.floor(scale[i]))\n\n newspec[:, j] += w_down * spec[:, i]\n freqs[j] += w_down * allfreqs[i]\n totw[j] += w_down\n\n newspec[:, j + 1] += w_up * spec[:, i]\n freqs[j + 1] += w_up * allfreqs[i]\n totw[j + 1] += w_up\n\n for i in range(len(freqs)):\n if (totw[i] > 1e-6):\n freqs[i] /= totw[i]\n\n return newspec, freqs\n", "import inspect\nfrom numbers import Number\n\nimport tensorflow as tf\nfrom six import string_types\nfrom tensorflow.python import keras\n\n\ndef skip_connect(inputs, outputs, mode):\n ishape = inputs.shape\n oshape = outputs.shape\n if len(ishape) != len(oshape):\n n = abs(len(ishape) - len(oshape))\n # first expand\n for _ in range(n):\n if len(ishape) < len(oshape):\n inputs = tf.expand_dims(inputs, axis=1)\n else:\n outputs = tf.expand_dims(outputs, axis=1)\n # now repeat\n for i in range(1, n + 1):\n if len(ishape) < len(oshape):\n inputs = tf.repeat(inputs, outputs.shape[i], axis=i)\n else:\n outputs = tf.repeat(outputs, inputs.shape[i], axis=i)\n ### Concatenation\n if mode == 'concat':\n return tf.concat([outputs, inputs], axis=-1)\n ### Identity, a.k.a residual connection\n elif mode == 'identity':\n return inputs + outputs\n ### No support\n else:\n raise NotImplementedError(\"No support for skip connect mode: '%s'\" % mode)\n return outputs\n\n\nclass SkipConnection(keras.Sequential):\n\n def __init__(self, layers, mode='concat', name=None):\n super().__init__(layers, name=name)\n self.mode = mode\n\n def call(self, inputs, training=None, mask=None):\n outputs = super().call(inputs, training=training, mask=mask)\n return skip_connect(inputs, outputs, self.mode)\n", "import matplotlib\nmatplotlib.use('Agg')\n\nimport os\nos.environ['ODIN'] = 'gpu,float32'\nimport pickle\n\nimport numpy as np\n\nfrom odin import ml\nfrom odin import fuel as F\nfrom odin.utils import ctext, ArgController\nfrom odin import visual as V\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nargs = ArgController(\n).add('--reset', \"re-run the fitting of the model\", False\n).parse()\n# ===========================================================================\n# Const\n# 
===========================================================================\nds = F.MNIST.load()\nprint(ds)\nnb_classes = 10\nPATH = '/tmp/lore.ai'\n# ===========================================================================\n# Model\n# ===========================================================================\nif not os.path.exists(PATH) or args.reset:\n f = ml.LogisticRegression(nb_classes=nb_classes, tol=1e-4,\n fit_intercept=True, path=PATH,\n batch_size=256, dtype='float32')\n cross_validation = (ds['X_valid'], ds['y_valid'])\n f.fit(X=ds['X_train'], y=ds['y_train'],\n cv=cross_validation)\nelse:\n with open(PATH, 'rb') as f:\n f = pickle.load(f)\n# ===========================================================================\n# Evaluation\n# ===========================================================================\nf.evaluate(ds['X_test'], ds['y_test'], path='/tmp/tmp.pdf',\n title=\"MNIST Test Set\",\n xlims=(0., 0.88), ylims=(0., 0.88))\n", "from typing import List\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_probability.python.distributions import kullback_leibler\n\nfrom odin.bay import distributions as tfd\n\n__all__ = ['CombinedDistribution']\n\n\nclass CombinedDistribution(tfd.Distribution):\n r\"\"\" Convert a list of homogeneous distributions into a single distribution\n by concatenating their output along event shape\n\n If the `event_shape` mismatch, it is flattened.\n \"\"\"\n\n def __init__(self, distributions, validate_args=False, name=None):\n parameters = dict(locals())\n batch_shape = [d.batch_shape for d in distributions]\n for shape in batch_shape:\n tf.assert_equal(\n shape, batch_shape,\n \"All distributions must have the same batch_shape but given: %s\" %\n str(batch_shape))\n self._distributions = distributions\n # check if need flatten\n flatten = False\n event_shape = [d.event_shape for d in distributions]\n s0 = event_shape[0]\n for s in event_shape[1:]:\n if len(s) != len(s0) or not all(i == j for i, j in zip(s[:-1], s0[:-1])):\n flatten = True\n self._flatten = flatten\n super(CombinedDistribution, self).__init__(\n dtype=self._distributions[0].dtype,\n reparameterization_type=self._distributions[0].reparameterization_type,\n validate_args=validate_args,\n allow_nan_stats=self._distributions[0].allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @property\n def distributions(self) -> List[tfd.Distribution]:\n return self._distributions\n\n def _batch_shape_tensor(self):\n return self.distributions[0].batch_shape_tensor()\n\n def _batch_shape(self):\n return self.distributions[0].batch_shape\n\n def _event_shape_tensor(self):\n shape = self.distributions[0].event_shape_tensor()\n if self._flatten:\n shape = tf.reduce_prod(shape)\n for d in self.distributions[1:]:\n shape += tf.reduce_prod(d.event_shape_tensor())\n else:\n shape = tf.concat(\n [\n shape[:-1],\n tf.reduce_sum(\n [d.event_shape_tensor()[-1] for d in self.distributions])\n ],\n axis=0,\n )\n return shape\n\n def _event_shape(self):\n if self._flatten:\n return (tf.reduce_sum(\n [tf.reduce_prod(d.event_shape) for d in self.distributions]),)\n else:\n shape = self.distributions[0].event_shape\n return tf.concat(\n [shape[:-1], (sum(d.event_shape[-1] for d in self.distributions),)],\n axis=0)\n\n def __getitem__(self, slices):\n return self.copy(\n distributions=[d.__getitem__(slices) for d in self.distributions])\n\n ######## Helpers\n def _concat_events(self, tensors):\n if self._flatten:\n tensors = tf.concat(\n [\n tf.reshape(\n t, 
tf.concat([t.shape[:-len(d.event_shape)], (-1,)], axis=0))\n for t, d in zip(tensors, self._distributions)\n ],\n axis=-1,\n )\n return tf.concat(tensors, axis=-1)\n\n def _split_evidences(self, x):\n x = tf.nest.flatten(x)\n if len(x) >= len(self.distributions):\n return x\n elif len(x) == 1:\n x = x[0]\n if self._flatten:\n size = np.cumsum(\n [0] + [int(np.prod(d.event_shape)) for d in self.distributions])\n x = [x[..., s:e] for s, e in zip(size, size[1:])]\n x = [\n tf.reshape(i, tf.concat([i.shape[:-1], d.event_shape], axis=0))\n for i, d in zip(x, self.distributions)\n ]\n else:\n size = np.cumsum([0] + [d.event_shape[-1] for d in self.distributions])\n x = [x[..., s:e] for s, e in zip(size, size[1:])]\n return x\n raise RuntimeError(\"Given %s distributions but only %s evidences\" %\n (str(self.distributions), str([i.shape for i in x])))\n\n ######## Methods from Distribution\n def _log_prob(self, x, **kwargs):\n return sum(\n d._log_prob(x, **kwargs)\n for x, d in zip(self._split_evidences(x), self.distributions))\n\n def _log_cdf(self, x, **kwargs):\n return sum(\n d._log_cdf(x, **kwargs)\n for x, d in zip(self._split_evidences(x), self.distributions))\n\n ######## Statistics\n def _sample_n(self, n, seed, **kwargs):\n return self._concat_events(\n [d.sample(n, seed, **kwargs) for d in self._distributions])\n\n def _entropy(self, **kwargs):\n return self._concat_events(\n [d._entropy(**kwargs) for d in self.distributions])\n\n def _mean(self, **kwargs):\n return self._concat_events([d._mean(**kwargs) for d in self.distributions])\n\n def _variance(self, **kwargs):\n return self._concat_events(\n [d._variance(**kwargs) for d in self.distributions])\n\n def _stddev(self, **kwargs):\n return self._concat_events(\n [d._stddev(**kwargs) for d in self.distributions])\n\n def _mode(self, **kwargs):\n return self._concat_events([d._mode(**kwargs) for d in self.distributions])\n\n\n@kullback_leibler.RegisterKL(CombinedDistribution, CombinedDistribution)\ndef _kl_independent(a: CombinedDistribution,\n b: CombinedDistribution,\n name='kl_combined'):\n r\"\"\"Batched KL divergence `KL(a || b)` for CombinedDistribution distributions.\n\n Just the summation of all distributions KL\n \"\"\"\n kl = 0.\n for d1, d2 in zip(a.distributions, b.distributions):\n kl += kullback_leibler.kl_divergence(d1, d2, name=name)\n return kl\n", "# ===========================================================================\n# Others\n# ===========================================================================\nfrom __future__ import absolute_import, division, print_function\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import confusion_matrix as tf_cm\n\nfrom odin.backend.maths import to_llr\nfrom odin.backend.tensor import nonzeros, transpose\nfrom odin.utils import as_tuple, is_number\n\n\n# ===========================================================================\n# Losses\n# ===========================================================================\ndef binary_accuracy(y_true,\n y_pred,\n threshold=0.5,\n reduction=tf.reduce_mean,\n name=None):\n \"\"\" Non-differentiable \"\"\"\n with tf.name_scope(name, \"binary_accuracy\", [y_pred, y_true, threshold]):\n if y_pred.shape.ndims > 1:\n y_pred = tf.reshape(y_pred, (-1,))\n if y_true.shape.ndims > 1:\n y_true = tf.reshape(y_true, (-1,))\n y_pred = tf.greater_equal(y_pred, threshold)\n match_values = tf.cast(tf.equal(tf.cast(y_pred, 'int32'),\n tf.cast(y_true, 'int32')),\n dtype='int32')\n return 
reduction(match_values)\n\n\ndef categorical_accuracy(y_true,\n y_pred,\n top_k=1,\n reduction=tf.reduce_mean,\n name=None):\n \"\"\" Non-differentiable \"\"\"\n with tf.name_scope(name, \"categorical_accuracy\", [y_true, y_pred]):\n if y_true.shape.ndims == y_pred.shape.ndims:\n y_true = tf.argmax(y_true, axis=-1)\n elif y_true.shape.ndims != y_pred.shape.ndims - 1:\n raise TypeError('rank mismatch between y_true and y_pred')\n if top_k == 1:\n # standard categorical accuracy\n top = tf.argmax(y_pred, axis=-1)\n y_true = tf.cast(y_true, top.dtype.base_dtype)\n match_values = tf.equal(top, y_true)\n else:\n match_values = tf.nn.in_top_k(y_pred, tf.cast(y_true, 'int32'), k=top_k)\n match_values = tf.cast(match_values, dtype='float32')\n return reduction(match_values)\n\n\ndef confusion_matrix(y_true, y_pred, labels=None, normalize=False, name=None):\n \"\"\"\n Computes the confusion matrix of given vectors containing\n actual observations and predicted observations.\n\n Parameters\n ----------\n y_true : 1-d or 2-d tensor variable\n true values\n y_pred : 1-d or 2-d tensor variable\n prediction values\n normalize : bool\n if True, normalize each row to [0., 1.]\n labels : array, shape = [nb_classes], int (nb_classes)\n List of labels to index the matrix. This may be used to reorder\n or select a subset of labels.\n If none is given, those that appear at least once\n in ``y_true`` or ``y_pred`` are used in sorted order.\n\n Note\n ----\n if you want to calculate: Precision, Recall, F1 scores from the\n confusion matrix, set `normalize=False`\n\n \"\"\"\n with tf.name_scope(name, 'confusion_matrix', [y_true, y_pred]):\n nb_classes = None\n if y_true.shape.ndims == 2:\n nb_classes = y_true.shape.as_list()[-1]\n y_true = tf.argmax(y_true, -1)\n elif y_true.shape.ndims != 1:\n raise ValueError('actual must be 1-d or 2-d tensor variable')\n if y_pred.shape.ndims == 2:\n nb_classes = y_pred.shape.as_list()[-1]\n y_pred = tf.argmax(y_pred, -1)\n elif y_pred.shape.ndims != 1:\n raise ValueError('pred must be 1-d or 2-d tensor variable')\n # check valid labels\n if labels is None:\n if nb_classes is None:\n raise RuntimeError(\n \"Cannot infer the number of classes for confusion matrix\")\n labels = int(nb_classes)\n elif is_number(labels):\n labels = int(labels)\n elif hasattr(labels, '__len__'):\n labels = len(labels)\n # transpose to match the format of sklearn\n cm = tf_cm(labels=y_true, predictions=y_pred, num_classes=labels)\n if normalize:\n cm = tf.cast(cm, dtype='float32')\n cm = cm / tf.reduce_sum(cm, axis=1, keep_dims=True)\n return cm\n\n\ndef detection_matrix(y_true, y_pred):\n # TODO\n pass\n\n\n# ===========================================================================\n# Speech task metrics\n# ===========================================================================\ndef compute_Cavg(y_llr,\n y_true,\n cluster_idx=None,\n Ptrue=0.5,\n Cfa=1.,\n Cmiss=1.,\n probability_input=False):\n ''' Fast calculation of Cavg (for only 1 clusters)\n\n Parameters\n ----------\n y_llr: (nb_samples, nb_classes)\n log likelihood ratio: llr = log (P(data|target) / P(data|non-target))\n y_true: numpy array of shape (nb_samples,)\n Class labels.\n cluster_idx: list,\n Each element is a list that represents a particular language\n cluster and contains all class labels that belong to the cluster.\n Ptar: float, optional\n Probability of a target trial.\n Cfa: float, optional\n Cost for False Acceptance error.\n Cmiss: float, optional\n Cost for False Rejection error.\n probability_input: boolean\n if 
True, `y_llr` is the output probability from softmax and perform\n llr transform for `y_llr`\n\n Returns\n -------\n cluster_cost: numpy array of shape (n_clusters,)\n It contains average percentage costs for each cluster as defined by\n NIST LRE-15 language detection task. See\n http://www.nist.gov/itl/iad/mig/upload/LRE15_EvalPlan_v22-3.pdf\n total_cost: float\n An average percentage cost over all clusters.\n\n '''\n if probability_input:\n y_llr = to_llr(y_llr)\n thresh = np.log(Cfa / Cmiss) - np.log(Ptrue / (1 - Ptrue))\n nb_classes = y_llr.shape[1].value\n if isinstance(y_true, (list, tuple)):\n y_true = np.asarray(y_true)\n if y_true.shape.ndims == 1:\n y_true = tf.one_hot(y_true, depth=nb_classes, axis=-1)\n y_true = tf.cast(y_true, y_llr.dtype.base_dtype)\n # ====== statistics ====== #\n # invert of y_true, False Negative mask\n y_false = 1. - y_true\n y_positive = tf.cast(tf.greater_equal(y_llr, thresh), y_llr.dtype.base_dtype)\n # invert of y_positive\n y_negative = tf.cast(tf.less(y_llr, thresh), y_llr.dtype.base_dtype)\n distribution = tf.clip_by_value(tf.reduce_sum(y_true, axis=0), 10e-8,\n 10e8) # no zero values\n # ====== Pmiss ====== #\n miss = tf.reduce_sum(y_true * y_negative, axis=0)\n Pmiss = 100 * (Cmiss * Ptrue * miss) / distribution\n # ====== Pfa ====== # This calculation give different results\n fa = tf.reduce_sum(y_false * y_positive, axis=0)\n Pfa = 100 * (Cfa * (1 - Ptrue) * fa) / distribution\n Cavg = tf.reduce_mean(Pmiss) + tf.reduce_mean(Pfa) / (nb_classes - 1)\n return Cavg\n\n\ndef compute_Cnorm(y_true,\n y_score,\n Ptrue=[0.1, 0.5],\n Cfa=1.,\n Cmiss=1.,\n probability_input=False):\n \"\"\" Computes normalized detection cost function (DCF) given\n the costs for false accepts and false rejects as well as a priori\n probability for target speakers.\n\n * This is the actual cost, different from the min cost (minDCF)\n\n (By convention, the more positive the score,\n the more likely is the target hypothesis.)\n\n Parameter\n ---------\n y_true: {array [n_samples], or list of array}\n each array is labels of binary or multi-classes\n detection tasks, each array can be an array of\n classes indices, or one-hot-encoded matrix.\n If multiple array are given, calculating `equalized cost`\n of all partitions, an example of 2 partitions are:\n VAST and MLSR14 files\n y_score: {array [n_samples, n_classes], or list of array}\n the outputs scores, can be probabilities values or log-likelihood\n values by default, the\n Ptrue: float [0.,1.], or list of float\n hypothesized prior probabilities of positive class,\n you can given multiple values by providing an array\n Cfa: float\n weight for False Alarm - False Positive error\n Cmiss: float\n weight for Miss - False Negative error\n\n Return\n ------\n C_norm: array [len(Ptrue)]\n minimum detection cost accordingly for each given value of `Ptrue`.\n C_norm_array: array [len(Ptrue), n_classes]\n minimum detection cost for each class, accordingly to each\n given value of `Ptrue`\n\n \"\"\"\n y_true = as_tuple(y_true, t=np.ndarray)\n y_score = as_tuple(y_score, t=np.ndarray)\n if len(y_true) != len(y_score):\n raise ValueError(\"There are %d partitions for `y_true`, but %d \"\n \"partitions for `y_score`.\" % (len(y_true), len(y_score)))\n if len(set(i.shape[1] for i in y_score)) != 1:\n raise ValueError(\n \"The number of classes among scores array is inconsistent.\")\n nb_partitions = len(y_true)\n # ====== preprocessing ====== #\n y_true = [np.argmax(i, axis=-1) if i.ndim >= 2 else i for i in y_true]\n nb_classes = 
y_score[0].shape[1]\n # threshold\n Ptrue = np.asarray(as_tuple(Ptrue), dtype=float)\n nb_threshold = len(Ptrue)\n # log(beta) is threshold, i.e.\n # if Ptrue=0.5 => beta=1. => threshold=0.\n beta = (Cfa / Cmiss) * ((1 - Ptrue) / Ptrue)\n beta = np.clip(beta, a_min=np.finfo(float).eps, a_max=np.inf)\n # ====== Cavg ====== #\n global_cm_array = np.zeros(shape=(nb_threshold, nb_classes, nb_classes))\n # Apply threshold on the scores and compute the confusion matrix\n for scores, labels in zip(y_score, y_true):\n actual_TP_per_class = np.lib.arraysetops.unique(ar=labels,\n return_counts=True)[1]\n if probability_input: # special case input is probability values\n scores = to_llr(scores)\n for theta_ix, theta in enumerate(np.log(beta)):\n thresholded_scores = (scores > theta).astype(int)\n # compute confusion matrix, this is different from\n # general implementation of confusion matrix above\n cm = np.zeros(shape=(nb_classes, nb_classes), dtype=np.int64)\n for i, (trial, target) in enumerate(zip(thresholded_scores, labels)):\n cm[target, :] += trial\n # miss and fa\n predic_TP_per_class = cm.diagonal()\n # Compute the number of miss per class\n nb_miss_per_class = actual_TP_per_class - predic_TP_per_class\n cm_miss_fa = cm\n cm_miss_fa[np.diag_indices_from(cm)] = nb_miss_per_class\n cm_probabilities = cm_miss_fa / actual_TP_per_class[:, None]\n # update global\n global_cm_array[theta_ix] += cm_probabilities\n # normalize by partitions\n global_cm_array /= nb_partitions\n # Extract probabilities of false negatives from confusion matrix\n p_miss_arr = global_cm_array.diagonal(0, 1, 2)\n p_miss = p_miss_arr.mean(1)\n # Extract probabilities of false positives from confusion matrix\n p_false_alarm_arr = (global_cm_array.sum(1) - p_miss_arr) / (nb_classes - 1)\n p_false_alarm = p_false_alarm_arr.mean(1)\n # Compute costs per languages\n C_Norm_arr = p_miss_arr + beta[:, None] * p_false_alarm_arr\n # Compute overall cost\n C_Norm = p_miss + beta * p_false_alarm\n return C_Norm, C_Norm_arr\n\n\ndef compute_minDCF(Pfa, Pmiss, Cmiss=1, Cfa=1, Ptrue=0.5):\n \"\"\" Estimating the min value of the detection\n cost function (DCF)\n\n Parameters\n ----------\n Pfa: array, [n_samples]\n false alarm rate or false positive rate\n Pmiss: array, [n_samples]\n miss rate or false negative rate\n Cmiss: scalar\n weight for false positive mistakes\n Cfa: scalar\n weight for false negative mistakes\n Ptrue: scalar [0., 1.]\n prior probability of positive cases.\n\n Return\n ------\n min_DCF: scalar\n minimum value of the detection cost function for\n a given detection error trade-off curve\n Pfa_optimum: scalar\n and false alarm trade-off probabilities.\n Pmiss_optimum: scalar\n the correcponding miss\n\n \"\"\"\n assert Pmiss.shape == Pfa.shape\n Pfalse = 1 - Ptrue\n # detection cost function vector\n DCF_vector = (Cmiss * Pmiss * Ptrue) + \\\n (Cfa * Pfa * Pfalse)\n # get the optimal value and corresponding index\n min_idx = np.argmin(DCF_vector)\n min_val = DCF_vector[min_idx]\n return min_val, Pfa[min_idx], Pmiss[min_idx]\n\n\ndef compute_EER(Pfa, Pmiss):\n \"\"\" computes the equal error rate (EER) given\n Pmiss or False Negative Rate\n and\n Pfa or False Positive Rate\n calculated for a range of operating points on the DET curve\n\n @Author: \"Timothee Kheyrkhah, Omid Sadjadi\"\n \"\"\"\n fpr, fnr = Pfa, Pmiss\n diff_pm_fa = fnr - fpr\n x1 = np.flatnonzero(diff_pm_fa >= 0)[0]\n x2 = np.flatnonzero(diff_pm_fa < 0)[-1]\n a = (fnr[x1] - fpr[x1]) / (fpr[x2] - fpr[x1] - (fnr[x2] - fnr[x1]))\n return fnr[x1] + a * 
(fnr[x2] - fnr[x1])\n\n\ndef compute_AUC(x, y, reorder=False):\n \"\"\"Compute Area Under the Curve (AUC) using the trapezoidal rule\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : array, shape = [n]\n x coordinates.\n y : array, shape = [n]\n y coordinates.\n reorder : boolean, optional (default=False)\n If True, assume that the curve is ascending in the case of ties, as for\n an ROC curve. If the curve is non-ascending, the result will be wrong.\n\n Returns\n -------\n auc : float\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n\n \"\"\"\n from sklearn.metrics import auc\n return auc(x, y, reorder)\n\n\ndef roc_curve(y_true,\n y_score,\n pos_label=None,\n sample_weight=None,\n drop_intermediate=True):\n \"\"\"Compute Receiver operating characteristic (ROC)\n\n @copy from sklearn for convenience\n\n Note: this implementation is restricted to the binary classification task.\n\n Parameters\n ----------\n\n y_true : array, shape = [n_samples]\n True binary labels in range {0, 1} or {-1, 1}. If labels are not\n binary, pos_label should be explicitly given.\n y_score : array, shape = [n_samples]\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n pos_label : int or str, default=None\n Label considered as positive and others are considered negative.\n sample_weight : array-like of shape = [n_samples], optional\n Sample weights.\n drop_intermediate : boolean, optional (default=True)\n Whether to drop some suboptimal thresholds which would not appear\n on a plotted ROC curve. This is useful in order to create lighter\n ROC curves.\n\n Returns\n -------\n fpr : array, shape = [>2]\n Increasing false positive rates such that element i is the false\n positive rate of predictions with score >= thresholds[i].\n tpr : array, shape = [>2]\n Increasing true positive rates such that element i is the true\n positive rate of predictions with score >= thresholds[i].\n thresholds : array, shape = [n_thresholds]\n Decreasing thresholds on the decision function used to compute\n fpr and tpr. `thresholds[0]` represents no instances being predicted\n and is arbitrarily set to `max(y_score) + 1`.\n\n Notes\n -----\n Since the thresholds are sorted from low to high values, they\n are reversed upon returning them to ensure they correspond to both ``fpr``\n and ``tpr``, which are sorted in reversed order during their calculation.\n\n References\n ----------\n .. [1] `Wikipedia entry for the Receiver operating characteristic\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)\n >>> fpr\n array([ 0. , 0.5, 0.5, 1. ])\n >>> tpr\n array([ 0.5, 0.5, 1. , 1. 
])\n >>> thresholds\n array([ 0.8 , 0.4 , 0.35, 0.1 ])\n\n \"\"\"\n from sklearn.metrics import roc_curve\n return roc_curve(y_true, y_score, pos_label, sample_weight, drop_intermediate)\n\n\ndef prc_curve(y_true, y_probas, pos_label=None, sample_weight=None):\n \"\"\"Compute precision-recall pairs for different probability thresholds\n\n Note: this implementation is restricted to the binary classification task.\n\n The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of\n true positives and ``fp`` the number of false positives. The precision is\n intuitively the ability of the classifier not to label as positive a sample\n that is negative.\n\n The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of\n true positives and ``fn`` the number of false negatives. The recall is\n intuitively the ability of the classifier to find all the positive samples.\n\n The last precision and recall values are 1. and 0. respectively and do not\n have a corresponding threshold. This ensures that the graph starts on the\n x axis.\n\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\n\n Parameters\n ----------\n y_true : array, shape = [n_samples]\n True targets of binary classification in range {-1, 1} or {0, 1}.\n y_probas : array, shape = [n_samples]\n Estimated probabilities or decision function.\n pos_label : int or str, default=None\n The label of the positive class\n sample_weight : array-like of shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n precision : array, shape = [n_thresholds + 1]\n Precision values such that element i is the precision of\n predictions with score >= thresholds[i] and the last element is 1.\n recall : array, shape = [n_thresholds + 1]\n Decreasing recall values such that element i is the recall of\n predictions with score >= thresholds[i] and the last element is 0.\n thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]\n Increasing thresholds on the decision function used to compute\n precision and recall.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import precision_recall_curve\n >>> y_true = np.array([0, 0, 1, 1])\n >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> precision, recall, thresholds = precision_recall_curve(\n ... y_true, y_scores)\n >>> precision # doctest: +ELLIPSIS\n array([ 0.66..., 0.5 , 1. , 1. ])\n >>> recall\n array([ 1. , 0.5, 0.5, 0. 
])\n >>> thresholds\n array([ 0.35, 0.4 , 0.8 ])\n\n \"\"\"\n from sklearn.metrics import precision_recall_curve\n return precision_recall_curve(y_true, y_probas, pos_label, sample_weight)\n\n\ndef det_curve(y_true, y_score, pos_label=None, sample_weight=None):\n \"\"\"Detection Error Tradeoff\n Compute error rates for different probability thresholds\n\n @Original implementaion from NIST\n The function is adapted to take input format same as\n NIST original code and `sklearn.metrics`\n\n Note: this implementation is restricted to the binary classification task.\n (By convention, the more positive the score,\n the more likely is the target hypothesis.)\n\n Parameters\n ----------\n y_true : array, shape = [n_samples]\n True targets of binary classification in range {-1, 1} or {0, 1}.\n y_score : array, shape = [n_samples]\n Estimated probabilities or decision function.\n pos_label : int, optional (default=None)\n The label of the positive class\n sample_weight : array-like of shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n with `n_samples = n_true_samples + n_false_samples`\n P_fa: array, shape = [n_samples]\n fpr - False Positive rate, or false alarm probabilities\n P_miss : array, shape = [n_samples]\n fnr - False Negative rate, or miss probabilities\n\n References\n ----------\n .. [1] `Wikipedia entry for Detection error tradeoff\n <https://en.wikipedia.org/wiki/Detection_error_tradeoff>`_\n .. [2] `The DET Curve in Assessment of Detection Task Performance\n <http://www.itl.nist.gov/iad/mig/publications/storage_paper/det.pdf>`_\n .. [3] `2008 NIST Speaker Recognition Evaluation Results\n <http://www.itl.nist.gov/iad/mig/tests/sre/2008/official_results/>`_\n .. [4] `DET-Curve Plotting software for use with MATLAB\n <http://www.itl.nist.gov/iad/mig/tools/DETware_v2.1.targz.htm>`_\n\n Examples\n --------\n >>> import numpy as np\n >>> from odin import backend as K\n >>> y_true = np.array([0, 0, 1, 1])\n >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fnr, fpr = K.metrics.det_curve(y_true, y_scores)\n >>> print(fpr)\n array([ 0.5, 0.5, 0. ])\n >>> print(fnr)\n array([ 0. 
, 0.5, 0.5])\n >>> print(thresholds)\n array([ 0.35, 0.4 , 0.8 ])\n \"\"\"\n # ====== ravel everything in cased of multi-classes ====== #\n y_score = y_score.ravel()\n y_true = np.array(y_true)\n if y_true.ndim >= 2:\n y_true = np.argmax(y_true, axis=-1)\n nb_classes = len(np.lib.arraysetops.unique(y_true))\n # multi-classes\n if nb_classes > 2:\n total_samples = nb_classes * len(y_true)\n indices = np.arange(0, total_samples, nb_classes) + y_true\n y_true = np.zeros(total_samples, dtype=np.int)\n y_true[indices] = 1\n # ====== check weights ====== #\n if sample_weight is not None:\n if len(sample_weight) != len(y_score):\n raise ValueError(\"Provided `sample_weight` for %d samples, but got \"\n \"scores for %d samples.\" %\n (len(sample_weight), len(y_score)))\n else:\n sample_weight = np.ones(shape=(len(y_score),), dtype=y_score.dtype)\n # ====== processing ====== #\n if pos_label is not None:\n y_true = (y_true == pos_label).astype(np.int)\n # ====== start ====== #\n sorted_ndx = np.argsort(y_score, kind='mergesort')\n y_true = y_true[sorted_ndx]\n # sort the weights also, dont forget this\n sample_weight = sample_weight[sorted_ndx]\n tgt_weights = sample_weight * y_true\n imp_weights = sample_weight * (1 - y_true)\n # FNR\n Pmiss = np.cumsum(tgt_weights) / np.sum(tgt_weights)\n # FPR\n Pfa = 1 - np.cumsum(imp_weights) / np.sum(imp_weights)\n return Pfa, Pmiss\n\n\n\n\n\n\n# ===========================================================================\n# Distance measurement\n# ===========================================================================\ndef _LevenshteinDistance(s1, s2):\n ''' Implementation of the wikipedia algorithm, optimized for memory\n Reference: http://rosettacode.org/wiki/Levenshtein_distance#Python\n '''\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n distances = range(len(s1) + 1)\n for index2, char2 in enumerate(s2):\n newDistances = [index2 + 1]\n for index1, char1 in enumerate(s1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1], distances[index1 + 1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]\n\n\ntry:\n # Cython implementation about 100 time faster\n import Levenshtein\n Levenshtein_distance = Levenshtein.distance\nexcept ImportError as e:\n # python implementation\n Levenshtein_distance = _LevenshteinDistance\n\nedit_distance = Levenshtein_distance\n\n\ndef LER(y_true, y_pred, return_mean=True):\n ''' This function calculates the Labelling Error Rate (PER) of the decoded\n networks output sequence (out) and a target sequence (tar) with Levenshtein\n distance and dynamic programming. This is the same algorithm as commonly used\n for calculating the word error rate (WER), or phonemes error rate (PER).\n\n Parameters\n ----------\n y_true : ndarray (nb_samples, seq_labels)\n true values of sequences\n y_pred : ndarray (nb_samples, seq_labels)\n prediction values of sequences\n\n Returns\n -------\n return : float\n Labelling error rate\n '''\n if not hasattr(y_true[0], '__len__') or isinstance(y_true[0], str):\n y_true = [y_true]\n if not hasattr(y_pred[0], '__len__') or isinstance(y_pred[0], str):\n y_pred = [y_pred]\n\n results = []\n for ytrue, ypred in zip(y_true, y_pred):\n results.append(Levenshtein_distance(ytrue, ypred) / len(ytrue))\n if return_mean:\n return np.mean(results)\n return results\n" ]
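The last file in the code list above is a NIST-style detection metrics module (DET curve, EER, minDCF, Cavg). Below is a minimal, self-contained sketch of just its det_curve / compute_EER logic for the binary case, rewritten in plain NumPy so it runs without the odin package; the toy labels and scores are the ones from that module's own doctest, and everything else is an illustrative assumption rather than the package's actual implementation.

import numpy as np

def det_curve(y_true, y_score):
    # Sort by score ascending, then accumulate target / impostor mass,
    # mirroring the cumulative-sum approach of the module above (binary case only).
    order = np.argsort(y_score, kind="mergesort")
    y_true = np.asarray(y_true, dtype=float)[order]
    tgt, imp = y_true, 1.0 - y_true
    Pmiss = np.cumsum(tgt) / np.sum(tgt)      # false negative rate
    Pfa = 1.0 - np.cumsum(imp) / np.sum(imp)  # false positive rate
    return Pfa, Pmiss

def compute_EER(Pfa, Pmiss):
    # Linear interpolation around the point where the two error curves cross.
    diff = Pmiss - Pfa
    x1 = np.flatnonzero(diff >= 0)[0]
    x2 = np.flatnonzero(diff < 0)[-1]
    a = (Pmiss[x1] - Pfa[x1]) / (Pfa[x2] - Pfa[x1] - (Pmiss[x2] - Pmiss[x1]))
    return Pmiss[x1] + a * (Pmiss[x2] - Pmiss[x1])

# Toy labels/scores taken from the doctest of the det_curve function above.
Pfa, Pmiss = det_curve([0, 0, 1, 1], np.array([0.1, 0.4, 0.35, 0.8]))
print(compute_EER(Pfa, Pmiss))  # 0.5 for this four-sample example

For multi-class input, per-class sample weights, or an explicit pos_label, the full det_curve function in the module above applies extra preprocessing before the same cumulative sums; the sketch leaves that out.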
[ [ "numpy.diag", "numpy.dot", "numpy.log", "scipy.linalg.svd", "numpy.unique", "numpy.asarray", "numpy.eye", "numpy.dtype", "numpy.flatnonzero", "scipy.linalg.solve", "scipy.linalg.cholesky", "numpy.cov", "numpy.bincount", "scipy.linalg.inv", "numpy.random.RandomState", "numpy.zeros", "numpy.empty" ], [ "tensorflow.random.experimental.Generator.from_seed", "numpy.reshape", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "numpy.concatenate", "numpy.transpose", "numpy.array" ], [ "numpy.amax", "numpy.pad", "numpy.linspace", "numpy.random.normal", "numpy.shape", "numpy.floor", "numpy.fft.fftfreq", "numpy.random.uniform", "numpy.zeros" ], [ "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.repeat" ], [ "matplotlib.use" ], [ "tensorflow.concat", "numpy.cumsum", "tensorflow.reduce_prod", "numpy.prod", "tensorflow.nest.flatten" ], [ "tensorflow.python.ops.confusion_matrix", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "numpy.cumsum", "numpy.lib.arraysetops.unique", "numpy.argmin", "numpy.mean", "numpy.arange", "sklearn.metrics.precision_recall_curve", "numpy.flatnonzero", "numpy.finfo", "numpy.argmax", "numpy.diag_indices_from", "tensorflow.name_scope", "tensorflow.argmax", "numpy.zeros", "numpy.log", "tensorflow.less", "sklearn.metrics.roc_curve", "tensorflow.one_hot", "numpy.argsort", "sklearn.metrics.auc", "numpy.array", "numpy.sum", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.greater_equal" ] ]
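The list just above indexes the fully qualified calls (numpy, scipy, sklearn, tensorflow, matplotlib) made by each file in the preceding code list. How that index was actually produced is not shown in this dump; purely as an illustration, unresolved call names can be pulled from a source string with Python's standard ast module, leaving alias resolution (for example mapping np back to numpy) aside:

# Hedged illustration only: one way to list attribute-style call names from a
# source string; NOT the pipeline that produced the apis field above.
import ast

def called_names(source):
    names = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            # Unwind chained attributes, e.g. np.random.uniform -> np.random.uniform
            parts, func = [], node.func
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name):
                parts.append(func.id)
                names.add(".".join(reversed(parts)))
    return sorted(names)

print(called_names("import numpy as np\nx = np.dot(a, b)"))  # ['np.dot']

Mapping aliases such as np or tf back to their package names would additionally require walking the file's import statements, which is omitted here for brevity.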
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ryorda/tensorflow-viennacl
[ "d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a", "054b515feec0a3fca4cfb1f29adbf423c9027c3a" ]
[ "tensorflow/python/ops/nn_grad_test.py", "tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks.py", "tensorflow/python/kernel_tests/bincount_op_test.py", "tensorflow/contrib/receptive_field/python/util/examples/write_inception_resnet_v2_graph.py", "tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py", "tensorflow/contrib/gan/python/features/python/clip_weights_test.py", "tensorflow/contrib/quantize/python/copy_graph.py", "tensorflow/python/keras/_impl/keras/layers/gru_test.py", "tensorflow/contrib/all_reduce/python/all_reduce_test.py", "tensorflow/contrib/data/python/ops/grouping.py" ]
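The hexsha, file_path, and code lists of a record appear to be parallel arrays, one entry per file. A short sketch of materializing such a record back onto disk follows; the record dict is a hypothetical stand-in for one parsed row (with placeholder file contents), and the /tmp output root is an arbitrary choice.

import os

record = {
    "repo_name": "ryorda/tensorflow-viennacl",
    "file_path": ["tensorflow/python/ops/nn_grad_test.py"],
    "code": ["# placeholder file contents\n"],
}

# Write each (path, source) pair under a per-repo directory.
out_root = os.path.join("/tmp", record["repo_name"].replace("/", "__"))
for rel_path, source in zip(record["file_path"], record["code"]):
    dest = os.path.join(out_root, rel_path)
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w") as handle:
        handle.write(source)
print("wrote", len(record["file_path"]), "file(s) under", out_root)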
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Python ops defined in nn_grad.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_grad\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass Relu6OpTest(test.TestCase):\n def testRelu6GradGrad(self):\n inputs = constant_op.constant([[-2, -1, 1, 3], [5, 7, 8, 9]],\n dtype=dtypes.float32)\n x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])\n r = nn_ops.relu6(inputs)\n r_g = gradients_impl.gradients(r, inputs)[0]\n with self.test_session():\n error = gradient_checker.compute_gradient_error(\n inputs, inputs.get_shape().as_list(),\n r_g, r_g.get_shape().as_list(),\n x_init_value=x_init_value)\n self.assertLess(error, 1e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hooks for use with GTFlow Estimator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.contrib.learn.python.learn import session_run_hook\nfrom tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs\nfrom tensorflow.core.framework.summary_pb2 import Summary\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.summary_io import SummaryWriterCache\n\n\nclass FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):\n \"\"\"Hook to save feature importance summaries.\"\"\"\n\n def __init__(self, model_dir, every_n_steps=1):\n \"\"\"Create a FeatureImportanceSummarySaver Hook.\n\n This hook creates scalar summaries representing feature importance\n for each feature column during training.\n\n Args:\n model_dir: model base output directory.\n every_n_steps: frequency, in number of steps, for logging summaries.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n \"\"\"\n if model_dir is None:\n raise ValueError(\"model dir must be specified.\")\n self._model_dir = model_dir\n self._every_n_steps = every_n_steps\n self._last_triggered_step = None\n\n def begin(self):\n self._global_step_tensor = training_util.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use FeatureImportanceSummarySaver.\")\n graph = ops.get_default_graph()\n self._feature_names_tensor = graph.get_tensor_by_name(\n \"gbdt/feature_names:0\")\n self._feature_usage_counts_tensor = graph.get_tensor_by_name(\n \"gbdt/feature_usage_counts:0\")\n self._feature_gains_tensor = graph.get_tensor_by_name(\n \"gbdt/feature_gains:0\")\n\n def before_run(self, run_context):\n del run_context # Unused by feature importance summary saver hook.\n requests = {\n \"global_step\": self._global_step_tensor,\n \"feature_names\": self._feature_names_tensor,\n \"feature_usage_counts\": self._feature_usage_counts_tensor,\n \"feature_gains\": self._feature_gains_tensor\n }\n return SessionRunArgs(requests)\n\n def after_run(self, run_context, run_values):\n del run_context # Unused by feature importance summary saver hook.\n\n # Read result tensors.\n global_step = run_values.results[\"global_step\"]\n feature_names = run_values.results[\"feature_names\"]\n feature_usage_counts = run_values.results[\"feature_usage_counts\"]\n feature_gains = run_values.results[\"feature_gains\"]\n\n # Ensure summaries are logged at desired frequency\n if (self._last_triggered_step is not None and\n global_step < self._last_triggered_step + self._every_n_steps):\n return\n\n # Validate tensors.\n if (len(feature_names) != len(feature_usage_counts) or\n len(feature_names) 
!= len(feature_gains)):\n raise RuntimeError(\n \"Feature names and importance measures have inconsistent lengths.\")\n\n # Compute total usage.\n total_usage_count = 0.0\n for usage_count in feature_usage_counts:\n total_usage_count += usage_count\n usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0\n\n # Compute total gain.\n total_gain = 0.0\n for gain in feature_gains:\n total_gain += gain\n gain_norm = 1.0 / total_gain if total_gain else 1.0\n\n # Output summary for each feature.\n self._last_triggered_step = global_step\n for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,\n feature_gains):\n output_dir = os.path.join(self._model_dir, name.decode(\"utf-8\"))\n summary_writer = SummaryWriterCache.get(output_dir)\n usage_count_summary = Summary(value=[\n Summary.Value(\n tag=\"feature_importance/usage_counts\", simple_value=usage_count)\n ])\n usage_fraction_summary = Summary(value=[\n Summary.Value(\n tag=\"feature_importance/usage_fraction\",\n simple_value=usage_count * usage_count_norm)\n ])\n summary_writer.add_summary(usage_count_summary, global_step)\n summary_writer.add_summary(usage_fraction_summary, global_step)\n gains_summary = Summary(value=[\n Summary.Value(tag=\"feature_importance/gains\", simple_value=gain)\n ])\n gains_fraction_summary = Summary(value=[\n Summary.Value(\n tag=\"feature_importance/gains_fraction\",\n simple_value=gain * gain_norm)\n ])\n summary_writer.add_summary(gains_summary, global_step)\n summary_writer.add_summary(gains_fraction_summary, global_step)\n\n\nclass FeedFnHook(session_run_hook.SessionRunHook):\n \"\"\"Runs feed_fn and sets the feed_dict accordingly.\"\"\"\n\n def __init__(self, feed_fn):\n self.feed_fn = feed_fn\n\n def before_run(self, run_context):\n del run_context # unused by FeedFnHook.\n return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn)\n\n\nclass StopAfterNTrees(session_run_hook.SessionRunHook):\n \"\"\"Stop training after building N full trees.\"\"\"\n\n def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor):\n self._num_trees = n\n # num_attempted_trees_tensor and num_finalized_trees_tensor are both\n # tensors.\n self._num_attempted_trees_tensor = num_attempted_trees_tensor\n self._num_finalized_trees_tensor = num_finalized_trees_tensor\n\n def before_run(self, run_context):\n del run_context # unused by StopTrainingAfterNTrees.\n return session_run_hook.SessionRunArgs({\n \"num_attempted_trees\": self._num_attempted_trees_tensor,\n \"num_finalized_trees\": self._num_finalized_trees_tensor,\n })\n\n def after_run(self, run_context, run_values):\n num_attempted_trees = run_values.results[\"num_attempted_trees\"]\n num_finalized_trees = run_values.results[\"num_finalized_trees\"]\n assert num_attempted_trees is not None\n assert num_finalized_trees is not None\n # Stop when the required number of finalized trees is reached, or when we\n # try enough times to build a tree but keep failing.\n if (num_finalized_trees >= self._num_trees or\n num_attempted_trees > 2 * self._num_trees):\n logging.info(\"Requesting stop since we have reached %d trees.\",\n num_finalized_trees)\n run_context.request_stop()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for math_ops.bincount.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import googletest\n\nclass BincountTest(test_util.TensorFlowTestCase):\n\n def test_empty(self):\n with self.test_session(use_gpu=True):\n self.assertAllEqual(\n math_ops.bincount([], minlength=5).eval(), [0, 0, 0, 0, 0])\n self.assertAllEqual(math_ops.bincount([], minlength=1).eval(), [0])\n self.assertAllEqual(math_ops.bincount([], minlength=0).eval(), [])\n self.assertEqual(\n math_ops.bincount([], minlength=0, dtype=np.float32).eval().dtype,\n np.float32)\n self.assertEqual(\n math_ops.bincount([], minlength=3, dtype=np.float64).eval().dtype,\n np.float64)\n\n def test_values(self):\n with self.test_session(use_gpu=True):\n self.assertAllEqual(\n math_ops.bincount([1, 1, 1, 2, 2, 3]).eval(), [0, 3, 2, 1])\n arr = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]\n self.assertAllEqual(math_ops.bincount(arr).eval(), [0, 5, 4, 3, 2, 1])\n arr += [0, 0, 0, 0, 0, 0]\n self.assertAllEqual(math_ops.bincount(arr).eval(), [6, 5, 4, 3, 2, 1])\n\n self.assertAllEqual(math_ops.bincount([]).eval(), [])\n self.assertAllEqual(math_ops.bincount([0, 0, 0]).eval(), [3])\n self.assertAllEqual(math_ops.bincount([5]).eval(), [0, 0, 0, 0, 0, 1])\n self.assertAllEqual(\n math_ops.bincount(np.arange(10000)).eval(), np.ones(10000))\n\n def test_maxlength(self):\n with self.test_session(use_gpu=True):\n self.assertAllEqual(math_ops.bincount([5], maxlength=3).eval(), [0, 0, 0])\n self.assertAllEqual(math_ops.bincount([1], maxlength=3).eval(), [0, 1])\n self.assertAllEqual(math_ops.bincount([], maxlength=3).eval(), [])\n\n def test_random_with_weights(self):\n num_samples = 10000\n with self.test_session(use_gpu=True):\n np.random.seed(42)\n for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:\n arr = np.random.randint(0, 1000, num_samples)\n if dtype == dtypes.int32 or dtype == dtypes.int64:\n weights = np.random.randint(-100, 100, num_samples)\n else:\n weights = np.random.random(num_samples)\n self.assertAllClose(\n math_ops.bincount(arr, weights).eval(),\n np.bincount(arr, weights))\n\n def test_random_without_weights(self):\n num_samples = 10000\n with self.test_session(use_gpu=True):\n np.random.seed(42)\n for dtype in [np.int32, np.float32]:\n arr = np.random.randint(0, 1000, num_samples)\n weights = np.ones(num_samples).astype(dtype)\n self.assertAllClose(\n math_ops.bincount(arr, None).eval(),\n np.bincount(arr, weights))\n\n def test_zero_weights(self):\n with self.test_session(use_gpu=True):\n self.assertAllEqual(\n 
math_ops.bincount(np.arange(1000), np.zeros(1000)).eval(),\n np.zeros(1000))\n\n def test_negative(self):\n # unsorted_segment_sum will only report InvalidArgumentError on CPU\n with self.test_session():\n with self.assertRaises(errors.InvalidArgumentError):\n math_ops.bincount([1, 2, 3, -1, 6, 8]).eval()\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Simple script to write Inception-ResNet-v2 model to graph file.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nfrom nets import inception\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import app\n\ncmd_args = None\n\n\ndef main(unused_argv):\n # Model definition.\n g = ops.Graph()\n with g.as_default():\n images = array_ops.placeholder(\n dtypes.float32, shape=(1, None, None, 3), name='input_image')\n inception.inception_resnet_v2_base(images)\n\n graph_io.write_graph(g.as_graph_def(), cmd_args.graph_dir,\n cmd_args.graph_filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--graph_dir',\n type=str,\n default='/tmp',\n help='Directory where graph will be saved.')\n parser.add_argument(\n '--graph_filename',\n type=str,\n default='graph.pbtxt',\n help='Filename of graph that will be saved.')\n cmd_args, unparsed = parser.parse_known_args()\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the GTFlow prediction Ops.\n\nThe tests cover tree traversal and additive models for single and\nmulti class problems.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.boosted_trees.proto import learner_pb2\nfrom tensorflow.contrib.boosted_trees.proto import tree_config_pb2\nfrom tensorflow.contrib.boosted_trees.python.ops import model_ops\nfrom tensorflow.contrib.boosted_trees.python.ops import prediction_ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.platform import googletest\n\n\ndef _append_to_leaf(leaf, c_id, w):\n \"\"\"Helper method for building tree leaves.\n\n Appends weight contributions for the given class index to a leaf node.\n\n Args:\n leaf: leaf node to append to.\n c_id: class Id for the weight update.\n w: weight contribution value.\n \"\"\"\n leaf.sparse_vector.index.append(c_id)\n leaf.sparse_vector.value.append(w)\n\n\ndef _append_multi_values_to_leaf(leaf, c_ids, w):\n \"\"\"Helper method for building tree leaves with sparse vector of values.\n\n Appends weight contributions for the given class index to a leaf node.\n\n Args:\n leaf: leaf node to append to.\n c_ids: list of class ids\n w: corresponding weight contributions for the classes in c_ids\n \"\"\"\n for i in range(len(c_ids)):\n leaf.sparse_vector.index.append(c_ids[i])\n leaf.sparse_vector.value.append(w[i])\n\n\ndef _append_multi_values_to_dense_leaf(leaf, w):\n \"\"\"Helper method for building tree leaves with dense vector of values.\n\n Appends weight contributions to a leaf. 
w is assumed to be for all classes.\n\n Args:\n leaf: leaf node to append to.\n w: corresponding weight contributions for all classes.\n \"\"\"\n for x in w:\n leaf.vector.value.append(x)\n\n\ndef _set_float_split(split, feat_col, thresh, l_id, r_id):\n \"\"\"Helper method for building tree float splits.\n\n Sets split feature column, threshold and children.\n\n Args:\n split: split node to update.\n feat_col: feature column for the split.\n thresh: threshold to split on forming rule x <= thresh.\n l_id: left child Id.\n r_id: right child Id.\n \"\"\"\n split.feature_column = feat_col\n split.threshold = thresh\n split.left_id = l_id\n split.right_id = r_id\n\n\ndef _set_categorical_id_split(split, feat_col, feat_id, l_id, r_id):\n \"\"\"Helper method for building tree categorical id splits.\n\n Sets split feature column, feature id and children.\n\n Args:\n split: categorical id split node.\n feat_col: feature column for the split.\n feat_id: feature id forming rule x == id.\n l_id: left child Id.\n r_id: right child Id.\n \"\"\"\n split.feature_column = feat_col\n split.feature_id = feat_id\n split.left_id = l_id\n split.right_id = r_id\n\n\nclass PredictionOpsTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n \"\"\"Sets up the prediction tests.\n\n Create a batch of two examples having one dense float, two sparse float and\n one sparse int features.\n The data looks like the following:\n | Instance | Dense0 | SparseF0 | SparseF1 | SparseI0 |\n | 0 | 7 | -3 | | 9,1 |\n | 1 | -2 | | 4 | |\n \"\"\"\n super(PredictionOpsTest, self).setUp()\n self._dense_float_tensor = np.array([[7.0], [-2.0]])\n self._sparse_float_indices1 = np.array([[0, 0]])\n self._sparse_float_values1 = np.array([-3.0])\n self._sparse_float_shape1 = np.array([2, 1])\n self._sparse_float_indices2 = np.array([[1, 0]])\n self._sparse_float_values2 = np.array([4.0])\n self._sparse_float_shape2 = np.array([2, 1])\n self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])\n self._sparse_int_values1 = np.array([9, 1])\n self._sparse_int_shape1 = np.array([2, 2])\n self._seed = 123\n\n def _get_predictions(self,\n tree_ensemble_handle,\n learner_config,\n apply_dropout=False,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=False):\n return prediction_ops.gradient_trees_prediction(\n tree_ensemble_handle,\n self._seed, [self._dense_float_tensor],\n [self._sparse_float_indices1, self._sparse_float_indices2],\n [self._sparse_float_values1, self._sparse_float_values2],\n [self._sparse_float_shape1, self._sparse_float_shape2],\n [self._sparse_int_indices1], [self._sparse_int_values1],\n [self._sparse_int_shape1],\n learner_config=learner_config,\n apply_dropout=apply_dropout,\n apply_averaging=apply_averaging,\n center_bias=center_bias,\n reduce_dim=reduce_dim)\n\n def testEmptyEnsemble(self):\n with self.test_session():\n # Empty tree ensenble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"empty\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n self.assertAllEqual([[0], [0]], result.eval())\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n 
def testBiasEnsembleSingleClass(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"bias\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n self.assertAllClose([[-0.4], [-0.4]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testBiasEnsembleMultiClass(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n leaf = tree.nodes.add().leaf\n _append_to_leaf(leaf, 0, -0.4)\n _append_to_leaf(leaf, 1, 0.9)\n\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"multiclass\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 3\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testFullEnsembleSingleClass(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)\n\n # Depth 3 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)\n _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n\n # The first example will get bias -0.4 from first tree and\n # leaf 4 
payload of -0.9 hence -1.3, the second example will\n # get the same bias -0.4 and leaf 3 payload (sparse feature missing)\n # of 1.2 hence 0.8.\n self.assertAllClose([[-1.3], [0.8]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testExcludeNonFinalTree(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)\n\n # Depth 3 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = False\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)\n _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n\n # All the examples should get only the bias since the second tree is\n # non-finalized\n self.assertAllClose([[-0.4], [-0.4]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testIncludeNonFinalTree(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)\n\n # Depth 3 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = False\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)\n _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER\n\n result, dropout_info = self._get_predictions(\n 
tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n\n # The first example will get bias -0.4 from first tree and\n # leaf 4 payload of -0.9 hence -1.3, the second example will\n # get the same bias -0.4 and leaf 3 payload (sparse feature missing)\n # of 1.2 hence 0.8. Note that the non-finalized tree is included.\n self.assertAllClose([[-1.3], [0.8]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testMetadataMissing(self):\n # Sometimes we want to do prediction on trees that are not added to ensemble\n # (for example in\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree.\n tree1 = tree_ensemble_config.trees.add()\n _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)\n\n # Depth 3 tree.\n tree2 = tree_ensemble_config.trees.add()\n # We are not setting the tree_ensemble_config.tree_metadata in this test.\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)\n _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n\n # The first example will get bias -0.4 from first tree and\n # leaf 4 payload of -0.9 hence -1.3, the second example will\n # get the same bias -0.4 and leaf 3 payload (sparse feature missing)\n # of 1.2 hence 0.8.\n self.assertAllClose([[-1.3], [0.8]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n # For TREE_PER_CLASS strategy, predictions size is num_classes-1\n def testFullEnsembleMultiClassTreePerClassStrategy(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree only for second class.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)\n\n # Depth 2 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_right.split, 1, 4.0,\n 1, 2)\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"ensemble_multi_class\")\n 
resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 3\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.TREE_PER_CLASS)\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=True)\n # The first example will get bias class 1 -0.2 from first tree and\n # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],\n # the second example will get the same bias class 1 -0.2 and leaf 3\n # payload of class 1 1.2 hence [0.0, 1.0].\n self.assertAllClose([[0.5, -0.2], [0, 1.0]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n # For tree-per-class multiclass handling strategies, predictions vec\n # will have the size of the number of classes.\n # This test is when leafs have SPARSE weights stored (class id and\n # contribution).\n def testFullEnsembleMultiNotClassTreePerClassStrategySparseVector(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree only for second class.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)\n\n # Depth 2 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_right.split, 1, 4.0,\n 1, 2)\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)\n _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)\n _append_multi_values_to_leaf(tree2.nodes.add().leaf, [1, 2], [1.2, -0.7])\n _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"ensemble_multi_class\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 3\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.FULL_HESSIAN)\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=False)\n # The first example will get bias class 1 -0.2 from first tree and\n # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],\n # the second example will get the same bias class 1 -0.2 and leaf 3\n # payload of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -0.7].\n self.assertAllClose([[0.5, -0.2, 0.0], [0, 1.0, -0.7]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n # For all non-tree-per class multiclass handling strategies, predictions vec\n # will have the size of the number of classes.\n # This test is when leafs have DENSE weights stored (weight for each class)\n def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Bias tree only for second class.\n tree1 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, 
-0.2, -2])\n\n # Depth 2 tree.\n tree2 = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _set_float_split(tree2.nodes.add()\n .sparse_float_binary_split_default_right.split, 1, 4.0,\n 1, 2)\n _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)\n _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0])\n _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7])\n _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0])\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_weights.append(1.0)\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"ensemble_multi_class\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Prepare learner config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 3\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.FULL_HESSIAN)\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n reduce_dim=False)\n # The first example will get bias class 1 -0.2 and -2 for class 2 from\n # first tree and leaf 2 payload (sparse feature missing) of 0.5 hence\n # 0.5, -0.2], the second example will get the same bias and leaf 3 payload\n # of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -2.7].\n self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval())\n\n # Empty dropout.\n self.assertAllEqual([[], []], dropout_info.eval())\n\n def testDropout(self):\n with self.test_session():\n # Empty tree ensenble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Add 1000 trees with some weights.\n for i in range(0, 999):\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n tree_ensemble_config.tree_weights.append(i + 1)\n\n # Prepare learner/dropout config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5\n learner_config.learning_rate_tuner.dropout.learning_rate = 1.0\n learner_config.num_classes = 2\n\n # Apply dropout.\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n # We expect approx 500 trees were dropped.\n dropout_info = dropout_info.eval()\n self.assertIn(dropout_info[0].size, range(400, 601))\n self.assertEqual(dropout_info[0].size, dropout_info[1].size)\n\n for i in range(dropout_info[0].size):\n dropped_index = dropout_info[0][i]\n dropped_weight = dropout_info[1][i]\n # We constructed the trees so tree number + 1 is the tree weight, so\n # we can check here the weights for dropped trees.\n self.assertEqual(dropped_index + 1, dropped_weight)\n\n # Don't apply dropout.\n result_no_dropout, no_dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=False,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n 
self.assertEqual(result.eval().size, result_no_dropout.eval().size)\n for i in range(result.eval().size):\n self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i])\n\n # We expect none of the trees were dropped.\n self.assertAllEqual([[], []], no_dropout_info.eval())\n\n def testDropoutCenterBiasNoGrowingMeta(self):\n # This is for normal non-batch mode where ensemble does not contain the tree\n # that is being built currently.\n num_trees = 10\n with self.test_session():\n # Empty tree ensemble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Add 10 trees with some weights.\n for i in range(0, num_trees):\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n tree_ensemble_config.tree_weights.append(i + 1)\n\n # Prepare learner/dropout config.\n learner_config = learner_pb2.LearnerConfig()\n # Drop all the trees.\n learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0\n learner_config.learning_rate_tuner.dropout.learning_rate = 1.0\n learner_config.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n result_center, dropout_info_center = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=True,\n reduce_dim=True)\n\n dropout_info = dropout_info.eval()\n dropout_info_center = dropout_info_center.eval()\n\n # With centering, the bias tree is not dropped.\n num_dropped = dropout_info[0].size\n self.assertEqual(num_dropped, num_trees)\n num_dropped_center = dropout_info_center[0].size\n self.assertEqual(num_dropped_center, num_trees - 1)\n\n result = result.eval()\n result_center = result_center.eval()\n for i in range(result.size):\n self.assertNotEqual(result[i], result_center[i])\n\n # First dropped tree is a bias tree 0.\n self.assertEqual(0, dropout_info[0][0])\n # Last dropped tree is the last tree.\n self.assertEqual(num_trees - 1, dropout_info[0][num_dropped - 1])\n\n # First dropped tree is a tree 1.\n self.assertEqual(1, dropout_info_center[0][0])\n # Last dropped tree is the last tree.\n self.assertEqual(num_trees - 1, dropout_info_center[0][num_dropped_center\n - 1])\n\n def testDropoutCenterBiasWithGrowingMeta(self):\n # This is batch mode where ensemble already contains the tree that we are\n # building. 
This tree should never be dropped.\n num_trees = 10\n with self.test_session():\n # Empty tree ensenble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Add 10 trees with some weights.\n for i in range(0, num_trees):\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n tree_ensemble_config.tree_weights.append(i + 1)\n\n # Add growing metadata to indicate batch mode.\n tree_ensemble_config.growing_metadata.num_trees_attempted = num_trees\n tree_ensemble_config.growing_metadata.num_layers_attempted = num_trees\n\n # Prepare learner/dropout config.\n learner_config = learner_pb2.LearnerConfig()\n # Drop all the trees.\n learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0\n learner_config.learning_rate_tuner.dropout.learning_rate = 1.0\n learner_config.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n result_center, dropout_info_center = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=True,\n reduce_dim=True)\n\n dropout_info = dropout_info.eval()\n dropout_info_center = dropout_info_center.eval()\n\n # Last tree is never dropped, the bias tree can be dropped.\n num_dropped = dropout_info[0].size\n self.assertEqual(num_dropped, num_trees - 1)\n num_dropped_center = dropout_info_center[0].size\n self.assertEqual(num_dropped_center, num_trees - 2)\n\n result = result.eval()\n result_center = result_center.eval()\n for i in range(result.size):\n self.assertNotEqual(result[i], result_center[i])\n\n # First dropped tree is a bias tree 0.\n self.assertEqual(0, dropout_info[0][0])\n # Last dropped tree is not the last tree (not tree num_trees-1).\n self.assertNotEqual(num_trees - 1, dropout_info[0][num_dropped - 1])\n # First dropped tree is a tree 1.\n self.assertEqual(1, dropout_info_center[0][0])\n # Last dropped tree is not the last tree in ensemble.\n self.assertNotEqual(num_trees - 1,\n dropout_info_center[0][num_dropped_center - 1])\n\n def testDropoutSeed(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Add 10 trees with some weights.\n for i in range(0, 999):\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n tree_ensemble_config.tree_weights.append(i + 1)\n\n # Prepare learner/dropout config.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5\n learner_config.learning_rate_tuner.dropout.learning_rate = 1.0\n learner_config.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"empty\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n _, dropout_info_1 = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n 
apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n _, dropout_info_2 = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n # Different seed.\n _, dropout_info_3 = prediction_ops.gradient_trees_prediction(\n tree_ensemble_handle,\n 112314, [self._dense_float_tensor],\n [self._sparse_float_indices1, self._sparse_float_indices2],\n [self._sparse_float_values1, self._sparse_float_values2],\n [self._sparse_float_shape1, self._sparse_float_shape2],\n [self._sparse_int_indices1], [self._sparse_int_values1],\n [self._sparse_int_shape1],\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n # First seed with centering bias.\n _, dropout_info_4 = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=True,\n reduce_dim=True)\n\n # The same seed returns the same results.\n self.assertAllEqual(dropout_info_1.eval(), dropout_info_2.eval())\n # Different seeds give diff results.\n self.assertNotEqual(dropout_info_3.eval().shape,\n dropout_info_2.eval().shape)\n # With centering bias and the same seed does not give the same result.\n self.assertNotEqual(dropout_info_4.eval(), dropout_info_1.eval())\n # With centering bias has 1 less tree dropped (bias tree is not dropped).\n self.assertEqual(\n len(dropout_info_4.eval()[0]) + 1, len(dropout_info_1.eval()[0]))\n\n def testDropOutZeroProb(self):\n with self.test_session():\n # Empty tree ensenble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Add 1000 trees with some weights.\n for i in range(0, 999):\n tree = tree_ensemble_config.trees.add()\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n tree_ensemble_config.tree_weights.append(i + 1)\n\n # Dropout with 0 probability.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.dropout.dropout_probability = 0.0\n learner_config.learning_rate_tuner.dropout.learning_rate = 1.0\n learner_config.num_classes = 2\n\n # Apply dropout, but expect nothing dropped.\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=True,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n result_no_dropout, _ = self._get_predictions(\n tree_ensemble_handle,\n learner_config=learner_config.SerializeToString(),\n apply_dropout=False,\n apply_averaging=False,\n center_bias=False,\n reduce_dim=True)\n\n self.assertAllEqual([[], []], dropout_info.eval())\n self.assertAllClose(result.eval(), result_no_dropout.eval())\n\n def testAveragingAllTrees(self):\n with self.test_session():\n # Empty tree ensenble.\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n adjusted_tree_ensemble_config = (\n tree_config_pb2.DecisionTreeEnsembleConfig())\n # Add 100 trees with some weights.\n # When averaging is applied, the tree weights will essentially change to\n # 1, 98/99, 97/99 etc, so lets create the ensemble 
with such weights.\n # too\n total_num = 100\n for i in range(0, total_num):\n tree = tree_ensemble_config.trees.add()\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n tree_ensemble_config.tree_weights.append(1.0)\n # This is how the weight will look after averaging\n copy_tree = adjusted_tree_ensemble_config.trees.add()\n _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)\n\n adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True\n adjusted_tree_ensemble_config.tree_weights.append(\n 1.0 * (total_num - i) / total_num)\n\n # Prepare learner config WITH AVERAGING.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.averaging_config.average_last_percent_trees = 1.0\n\n # No averaging config.\n learner_config_no_averaging = learner_pb2.LearnerConfig()\n learner_config_no_averaging.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n\n # This is how our ensemble will \"look\" during averaging\n adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(\n ),\n name=\"adjusted\")\n\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Do averaging.\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config.SerializeToString(),\n apply_averaging=True,\n reduce_dim=True)\n\n pattern_result, pattern_dropout_info = self._get_predictions(\n adjusted_tree_ensemble_handle,\n learner_config_no_averaging.SerializeToString(),\n apply_averaging=False,\n reduce_dim=True)\n\n self.assertAllEqual(result.eval(), pattern_result.eval())\n self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())\n\n def testAveragingSomeTrees(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n adjusted_tree_ensemble_config = (\n tree_config_pb2.DecisionTreeEnsembleConfig())\n # Add 1000 trees with some weights.\n total_num = 100\n num_averaged = 25\n j = 0\n for i in range(0, total_num):\n tree = tree_ensemble_config.trees.add()\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n tree_ensemble_config.tree_weights.append(1.0)\n\n # This is how the weight will look after averaging - we are adjusting\n # the weights of the last 25 trees\n copy_tree = adjusted_tree_ensemble_config.trees.add()\n _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)\n\n adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True\n if i >= 75:\n adjusted_tree_ensemble_config.tree_weights.append(\n 1.0 * (num_averaged - j) / num_averaged)\n j += 1\n else:\n adjusted_tree_ensemble_config.tree_weights.append(1.0)\n\n # Prepare learner config WITH AVERAGING.\n learner_config_1 = learner_pb2.LearnerConfig()\n learner_config_1.num_classes = 2\n learner_config_1.averaging_config.average_last_percent_trees = 0.25\n\n # This is equivalent.\n learner_config_2 = learner_pb2.LearnerConfig()\n learner_config_2.num_classes = 2\n learner_config_2.averaging_config.average_last_n_trees = 25\n\n # No averaging config.\n learner_config_no_averaging = learner_pb2.LearnerConfig()\n learner_config_no_averaging.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n 
tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n\n # This is how our ensemble will \"look\" during averaging\n adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(\n ),\n name=\"adjusted\")\n\n resources.initialize_resources(resources.shared_resources()).run()\n\n result_1, dropout_info_1 = self._get_predictions(\n tree_ensemble_handle,\n learner_config_1.SerializeToString(),\n apply_averaging=True,\n reduce_dim=True)\n\n result_2, dropout_info_2 = self._get_predictions(\n tree_ensemble_handle,\n learner_config_2.SerializeToString(),\n apply_averaging=True,\n reduce_dim=True)\n\n pattern_result, pattern_dropout_info = self._get_predictions(\n adjusted_tree_ensemble_handle,\n learner_config_no_averaging.SerializeToString(),\n apply_averaging=False,\n reduce_dim=True)\n\n self.assertAllEqual(result_1.eval(), pattern_result.eval())\n self.assertAllEqual(result_2.eval(), pattern_result.eval())\n\n self.assertAllEqual(dropout_info_1.eval(), pattern_dropout_info.eval())\n self.assertAllEqual(dropout_info_2.eval(), pattern_dropout_info.eval())\n\n def testAverageMoreThanNumTreesExist(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n adjusted_tree_ensemble_config = (\n tree_config_pb2.DecisionTreeEnsembleConfig())\n # When we say to average over more trees than possible, it is averaging\n # across all trees.\n total_num = 100\n for i in range(0, total_num):\n tree = tree_ensemble_config.trees.add()\n _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)\n\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n tree_ensemble_config.tree_weights.append(1.0)\n # This is how the weight will look after averaging\n copy_tree = adjusted_tree_ensemble_config.trees.add()\n _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)\n\n adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True\n adjusted_tree_ensemble_config.tree_weights.append(\n 1.0 * (total_num - i) / total_num)\n\n # Prepare learner config WITH AVERAGING.\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n # We have only 100 trees but we ask to average over 250.\n learner_config.averaging_config.average_last_n_trees = 250\n\n # No averaging config.\n learner_config_no_averaging = learner_pb2.LearnerConfig()\n learner_config_no_averaging.num_classes = 2\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"existing\")\n\n # This is how our ensemble will \"look\" during averaging\n adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(\n ),\n name=\"adjusted\")\n\n resources.initialize_resources(resources.shared_resources()).run()\n\n result, dropout_info = self._get_predictions(\n tree_ensemble_handle,\n learner_config.SerializeToString(),\n apply_averaging=True,\n reduce_dim=True)\n\n pattern_result, pattern_dropout_info = self._get_predictions(\n adjusted_tree_ensemble_handle,\n learner_config_no_averaging.SerializeToString(),\n apply_averaging=False,\n reduce_dim=True)\n\n self.assertAllEqual(result.eval(), pattern_result.eval())\n self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())\n\n\nclass PartitionExamplesOpsTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n \"\"\"Sets up the 
prediction tests.\n\n Create a batch of two examples having one dense float, two sparse float and\n one sparse int features.\n The data looks like the following:\n | Instance | Dense0 | SparseF0 | SparseF1 | SparseI0 |\n | 0 | 7 | -3 | | 9,1 |\n | 1 | -2 | | 4 | |\n \"\"\"\n super(PartitionExamplesOpsTest, self).setUp()\n self._dense_float_tensor = np.array([[7.0], [-2.0]])\n self._sparse_float_indices1 = np.array([[0, 0]])\n self._sparse_float_values1 = np.array([-3.0])\n self._sparse_float_shape1 = np.array([2, 1])\n self._sparse_float_indices2 = np.array([[1, 0]])\n self._sparse_float_values2 = np.array([4.0])\n self._sparse_float_shape2 = np.array([2, 1])\n self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])\n self._sparse_int_values1 = np.array([9, 1])\n self._sparse_int_shape1 = np.array([2, 2])\n\n def testEnsembleEmpty(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result = prediction_ops.gradient_trees_partition_examples(\n tree_ensemble_handle, [self._dense_float_tensor], [\n self._sparse_float_indices1, self._sparse_float_indices2\n ], [self._sparse_float_values1, self._sparse_float_values2],\n [self._sparse_float_shape1,\n self._sparse_float_shape2], [self._sparse_int_indices1],\n [self._sparse_int_values1], [self._sparse_int_shape1])\n\n self.assertAllEqual([0, 0], result.eval())\n\n def testTreeNonFinalized(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Depth 3 tree.\n tree1 = tree_ensemble_config.trees.add()\n _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree1.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)\n _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_metadata.add().is_finalized = False\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result = prediction_ops.gradient_trees_partition_examples(\n tree_ensemble_handle, [self._dense_float_tensor], [\n self._sparse_float_indices1, self._sparse_float_indices2\n ], [self._sparse_float_values1, self._sparse_float_values2],\n [self._sparse_float_shape1,\n self._sparse_float_shape2], [self._sparse_int_indices1],\n [self._sparse_int_values1], [self._sparse_int_shape1])\n\n self.assertAllEqual([5, 3], result.eval())\n\n def testTreeFinalized(self):\n with self.test_session():\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n # Depth 3 tree.\n tree1 = tree_ensemble_config.trees.add()\n _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)\n _set_float_split(tree1.nodes.add()\n .sparse_float_binary_split_default_left.split, 0, -20.0,\n 3, 4)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)\n 
_set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,\n 0, 9, 5, 6)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)\n\n tree_ensemble_config.tree_weights.append(1.0)\n tree_ensemble_config.tree_metadata.add().is_finalized = True\n\n tree_ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config.SerializeToString(),\n name=\"full_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n\n result = prediction_ops.gradient_trees_partition_examples(\n tree_ensemble_handle, [self._dense_float_tensor], [\n self._sparse_float_indices1, self._sparse_float_indices2\n ], [self._sparse_float_values1, self._sparse_float_values2],\n [self._sparse_float_shape1,\n self._sparse_float_shape2], [self._sparse_int_indices1],\n [self._sparse_int_values1], [self._sparse_int_shape1])\n\n self.assertAllEqual([0, 0], result.eval())\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tfgan.python.features.clip_weights.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.contrib.gan.python.features.python import clip_weights_impl as clip_weights\n\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import training\n\n\nclass ClipWeightsTest(test.TestCase):\n \"\"\"Tests for `discriminator_weight_clip`.\"\"\"\n\n def setUp(self):\n self.variables = [variables.Variable(2.0)]\n self.tuple = collections.namedtuple(\n 'VarTuple', ['discriminator_variables'])(self.variables)\n\n def _test_weight_clipping_helper(self, use_tuple):\n loss = self.variables[0] * 2.0\n opt = training.GradientDescentOptimizer(1.0)\n if use_tuple:\n opt_clip = clip_weights.weight_clip(opt, self.variables, 0.1)\n else:\n opt_clip = clip_weights.discriminator_weight_clip(opt, self.tuple, 0.1)\n\n train_op1 = opt.minimize(loss, var_list=self.variables)\n train_op2 = opt_clip.minimize(loss, var_list=self.variables)\n\n with self.test_session(use_gpu=True) as sess:\n sess.run(variables.global_variables_initializer())\n self.assertEqual(2.0, self.variables[0].eval())\n sess.run(train_op1)\n self.assertLess(0.1, self.variables[0].eval())\n\n with self.test_session(use_gpu=True) as sess:\n sess.run(variables.global_variables_initializer())\n self.assertEqual(2.0, self.variables[0].eval())\n sess.run(train_op2)\n self.assertNear(0.1, self.variables[0].eval(), 1e-7)\n\n def test_weight_clipping_argsonly(self):\n self._test_weight_clipping_helper(False)\n\n def test_weight_clipping_ganmodel(self):\n self._test_weight_clipping_helper(True)\n\n def _test_incorrect_weight_clip_value_helper(self, use_tuple):\n opt = 
training.GradientDescentOptimizer(1.0)\n\n if use_tuple:\n with self.assertRaisesRegexp(ValueError, 'must be positive'):\n clip_weights.clip_discriminator_weights(opt, self.tuple, weight_clip=-1)\n else:\n with self.assertRaisesRegexp(ValueError, 'must be positive'):\n clip_weights.clip_weights(opt, self.variables, weight_clip=-1)\n\n def test_incorrect_weight_clip_value_argsonly(self):\n self._test_incorrect_weight_clip_value_helper(False)\n\n def test_incorrect_weight_clip_value_tuple(self):\n self._test_incorrect_weight_clip_value_helper(True)\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility to copy a tf.Graph.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training import saver as saver_lib\n\n\ndef CopyGraph(graph):\n \"\"\"Return a copy of graph.\"\"\"\n meta_graph = saver_lib.export_meta_graph(\n graph=graph, collection_list=graph.get_all_collection_keys())\n graph_copy = ops.Graph()\n with graph_copy.as_default():\n _ = saver_lib.import_meta_graph(meta_graph)\n return graph_copy\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for GRU layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.keras._impl import keras\nfrom tensorflow.python.keras._impl.keras import testing_utils\nfrom tensorflow.python.platform import test\n\n\nclass GRULayerTest(test.TestCase):\n\n def test_return_sequences_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n with self.test_session():\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'return_sequences': True},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_dynamic_behavior_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n with self.test_session():\n layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))\n model = keras.models.Sequential()\n model.add(layer)\n model.compile('sgd', 'mse')\n x = np.random.random((num_samples, timesteps, embedding_dim))\n y = np.random.random((num_samples, units))\n model.train_on_batch(x, y)\n\n def test_dropout_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n with self.test_session():\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'dropout': 0.1,\n 'recurrent_dropout': 0.1},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_implementation_mode_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n with self.test_session():\n for mode in [0, 1, 2]:\n testing_utils.layer_test(\n keras.layers.GRU,\n kwargs={'units': units,\n 'implementation': mode},\n input_shape=(num_samples, timesteps, embedding_dim))\n\n def test_statefulness_GRU(self):\n num_samples = 2\n timesteps = 3\n embedding_dim = 4\n units = 2\n layer_class = keras.layers.GRU\n with self.test_session():\n model = keras.models.Sequential()\n model.add(\n keras.layers.Embedding(\n 4,\n embedding_dim,\n mask_zero=True,\n input_length=timesteps,\n batch_input_shape=(num_samples, timesteps)))\n layer = layer_class(\n units, return_sequences=False, stateful=True, weights=None)\n model.add(layer)\n model.compile(optimizer='sgd', loss='mse')\n out1 = model.predict(np.ones((num_samples, timesteps)))\n self.assertEqual(out1.shape, (num_samples, units))\n\n # train once so that the states change\n model.train_on_batch(\n np.ones((num_samples, timesteps)), np.ones((num_samples, units)))\n out2 = model.predict(np.ones((num_samples, timesteps)))\n\n # if the state is not reset, output should be different\n self.assertNotEqual(out1.max(), out2.max())\n\n # check that output changes after states are reset\n # (even though the model itself didn't change)\n layer.reset_states()\n out3 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out2.max(), out3.max())\n\n # check that container-level 
reset_states() works\n model.reset_states()\n out4 = model.predict(np.ones((num_samples, timesteps)))\n np.testing.assert_allclose(out3, out4, atol=1e-5)\n\n # check that the call to `predict` updated the states\n out5 = model.predict(np.ones((num_samples, timesteps)))\n self.assertNotEqual(out4.max(), out5.max())\n\n # Check masking\n layer.reset_states()\n\n left_padded_input = np.ones((num_samples, timesteps))\n left_padded_input[0, :1] = 0\n left_padded_input[1, :2] = 0\n out6 = model.predict(left_padded_input)\n\n layer.reset_states()\n\n right_padded_input = np.ones((num_samples, timesteps))\n right_padded_input[0, -1:] = 0\n right_padded_input[1, -2:] = 0\n out7 = model.predict(right_padded_input)\n\n np.testing.assert_allclose(out7, out6, atol=1e-5)\n\n def test_regularizers_GRU(self):\n embedding_dim = 4\n layer_class = keras.layers.GRU\n with self.test_session():\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_regularizer=keras.regularizers.l1(0.01),\n recurrent_regularizer=keras.regularizers.l1(0.01),\n bias_regularizer='l2',\n activity_regularizer='l1')\n layer.build((None, None, 2))\n self.assertEqual(len(layer.losses), 3)\n layer(keras.backend.variable(np.ones((2, 3, 2))))\n self.assertEqual(len(layer.losses), 4)\n\n def test_constraints_GRU(self):\n embedding_dim = 4\n layer_class = keras.layers.GRU\n with self.test_session():\n k_constraint = keras.constraints.max_norm(0.01)\n r_constraint = keras.constraints.max_norm(0.01)\n b_constraint = keras.constraints.max_norm(0.01)\n layer = layer_class(\n 5,\n return_sequences=False,\n weights=None,\n input_shape=(None, embedding_dim),\n kernel_constraint=k_constraint,\n recurrent_constraint=r_constraint,\n bias_constraint=b_constraint)\n layer.build((None, None, embedding_dim))\n self.assertEqual(layer.kernel.constraint, k_constraint)\n self.assertEqual(layer.recurrent_kernel.constraint, r_constraint)\n self.assertEqual(layer.bias.constraint, b_constraint)\n\n def test_with_masking_layer_GRU(self):\n layer_class = keras.layers.GRU\n with self.test_session():\n inputs = np.random.random((2, 3, 4))\n targets = np.abs(np.random.random((2, 3, 5)))\n targets /= targets.sum(axis=-1, keepdims=True)\n model = keras.models.Sequential()\n model.add(keras.layers.Masking(input_shape=(3, 4)))\n model.add(layer_class(units=5, return_sequences=True, unroll=False))\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)\n\n def test_from_config_GRU(self):\n layer_class = keras.layers.GRU\n for stateful in (False, True):\n l1 = layer_class(units=1, stateful=stateful)\n l2 = layer_class.from_config(l1.get_config())\n assert l1.get_config() == l2.get_config()\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.contrib.all_reduce.python..all_reduce.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\n\nfrom tensorflow.contrib.all_reduce.python import all_reduce as ar\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\nclass AllReduceTest(test_util.TensorFlowTestCase):\n\n def testRingPermutations(self):\n # 0 devices\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, [])\n self.assertEqual(pred_by_c_d, [])\n self.assertEqual(rank_by_c_d, [])\n # 1 worker, 1 subchunk cases\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])\n self.assertEqual(pred_by_c_d, [[0]])\n self.assertEqual(rank_by_c_d, [[0]])\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2])\n self.assertEqual(pred_by_c_d, [[2, 0, 1]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2]])\n # multiple workers, 1 subchunk cases\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2])\n self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]])\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2])\n self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]])\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0])\n self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]])\n self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]])\n # 1 worker, multiple subchunk cases\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])\n self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]])\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3])\n self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2],\n [3, 0, 1, 2], [3, 0, 1, 2]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2],\n [2, 3, 0, 1], [1, 2, 3, 0]])\n # multiple worker, multiple subchunk cases\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3])\n self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6],\n [3, 0, 5, 2, 7, 4, 1, 6]])\n self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7],\n [2, 3, 0, 1, 6, 7, 4, 5]])\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1])\n self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4],\n [1, 2, 7, 0, 5, 6, 3, 4]])\n self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5],\n [2, 1, 0, 3, 6, 5, 
4, 7]])\n\n def _buildInput(self, num_workers, num_gpus):\n t8 = constant_op.constant(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],\n types_pb2.DT_FLOAT)\n input_tensors = []\n device_names = []\n for w in range(0, num_workers):\n for d in range(0, num_gpus):\n dn = \"/replica:0/task:%d/device:GPU:%d\" % (w, d % num_gpus)\n device_names.append(dn)\n with ops.device(dn):\n input_tensors.append(array_ops.identity(t8))\n return input_tensors, device_names\n\n def testBuildRingGatherPassStructure(self):\n # 1 worker, 1 device\n input_tensors, device_names = self._buildInput(1, 1)\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])\n output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,\n pred_by_c_d, rank_by_c_d,\n math_ops.add)\n self.assertEqual(output_tensors, input_tensors)\n # 1 worker, 4 devices, 2 subchunks\n input_tensors, device_names = self._buildInput(1, 4)\n pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])\n output_tensors, pad_len = ar._build_ring_gather(\n input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)\n self.assertEqual(0, pad_len)\n # same number outputs as inputs\n self.assertEqual(len(output_tensors), len(input_tensors))\n num_chunks = 2 * len(input_tensors)\n tlen = input_tensors[0].shape[0].value\n for otl in output_tensors:\n self.assertEqual(len(otl), num_chunks)\n for ot in otl:\n self.assertEqual(ot.shape, [tlen/num_chunks])\n\n def _buildInitialVars(self, shape, dev_list):\n values = []\n num_devices = len(dev_list)\n dim = np.prod(shape)\n for d in range(0, num_devices):\n with ops.device(dev_list[d]):\n npt = np.zeros(shape).astype(np.float32)\n alias = np.frombuffer(npt.data, dtype=np.float32)\n for i in range(0, dim):\n alias[i] = i + 0.01 * d\n var = state_ops.variable_op(shape, types_pb2.DT_FLOAT)\n state_ops.init_variable(var, npt).op.run()\n values.append(var)\n return values\n\n # pylint: disable=g-long-lambda\n\n def _buildRing(self, num_workers, num_gpus, subdiv):\n gpu_perm = range(0, num_gpus)\n return lambda x, un_op: ar.build_ring_all_reduce(\n x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)\n\n def _testAllReduce(self, num_workers, num_gpus, shape, build_f):\n # Use local CPU as device for all inputs.\n num_devices = num_workers * num_gpus\n dev_list = [\"/replica:0/task:0/device:CPU:0\"\n for _ in range(num_devices)]\n with self.test_session():\n input_tensors = self._buildInitialVars(shape, dev_list)\n un_op = lambda x: math_ops.div(\n x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))\n simple_sum = math_ops.add_n(input_tensors)\n simple_sum.op.run()\n output_tensors = build_f(input_tensors, un_op)\n sum_reduced = math_ops.add_n(output_tensors)\n sum_reduced.op.run()\n self.assertAllClose(sum_reduced.eval(), simple_sum.eval())\n\n def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):\n start_time = time.time()\n build_f = self._buildRing(num_workers, num_gpus, subdiv)\n self._testAllReduce(num_workers, num_gpus, shape, build_f)\n elapsed = time.time() - start_time\n tf_logging.info(\"RingAllReduce num_workers=%d num_gpus=%d shape=%s \"\n \"subdiv=%d elapsed=%f\" %\n (num_workers, num_gpus, shape, subdiv, elapsed))\n\n def testRingAllReduce(self):\n self._testRingAllReduce(1, 2, [8], 1)\n self._testRingAllReduce(1, 2, [4, 4], 1)\n self._testRingAllReduce(6, 1, [8], 1)\n self._testRingAllReduce(1, 8, [32], 1)\n self._testRingAllReduce(1, 8, [120], 1)\n self._testRingAllReduce(2, 8, [7, 13], 1)\n self._testRingAllReduce(2, 8, [8, 8], 
2)\n self._testRingAllReduce(2, 8, [8, 8], 4)\n # TODO(tucker): The following test is surprisingly slow.\n # Diagnose and fix before re-enabling.\n # self._testRingAllReduce(4, 8, [8, 8, 2], 4)\n\n def _buildShuffle(self, num_workers, num_gpus, num_shards):\n # Use local CPU for all shuffle shards\n gather_devices = [\"/replica:0/task:0/device:CPU:0\"\n for _ in range(num_shards)]\n return lambda x, un_op: ar.build_shuffle_all_reduce(\n x, gather_devices, math_ops.add_n, un_op)\n\n def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):\n start_time = time.time()\n build_f = self._buildShuffle(num_workers, num_gpus, num_shards)\n self._testAllReduce(num_workers, num_gpus, shape, build_f)\n elapsed = time.time() - start_time\n tf_logging.info(\"ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s \"\n \"elapsed=%f\" % (num_workers, num_gpus, shape, elapsed))\n\n def testShuffleAllReduce(self):\n self._testShuffleAllReduce(1, 2, [8], 1)\n self._testShuffleAllReduce(1, 2, [4, 4], 1)\n self._testShuffleAllReduce(1, 8, [32], 1)\n self._testShuffleAllReduce(1, 8, [120], 1)\n self._testShuffleAllReduce(2, 8, [7, 13], 3)\n self._testShuffleAllReduce(2, 8, [8, 8], 2)\n self._testShuffleAllReduce(2, 8, [8, 8], 4)\n self._testShuffleAllReduce(4, 8, [8, 8, 2], 4)\n\n def _buildRecursiveHD(self, num_workers, num_gpus):\n return lambda x, un_op: ar.build_recursive_hd_all_reduce(\n x, math_ops.add, un_op)\n\n # pylint: enable=g-long-lambda\n\n def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):\n start_time = time.time()\n build_f = self._buildRecursiveHD(num_workers, num_gpus)\n self._testAllReduce(num_workers, num_gpus, shape, build_f)\n elapsed = time.time() - start_time\n tf_logging.info(\"RecursiveHDAllReduce num_workers=%d num_gpus=%d \"\n \"shape=%s elapsed=%f\" %\n (num_workers, num_gpus, shape, elapsed))\n\n def testRecursiveHDAllReduce(self):\n self._testRecursiveHDAllReduce(1, 2, [8])\n self._testRecursiveHDAllReduce(1, 2, [4, 4])\n self._testRecursiveHDAllReduce(1, 8, [32])\n self._testRecursiveHDAllReduce(1, 8, [120])\n self._testRecursiveHDAllReduce(2, 8, [8, 8])\n self._testRecursiveHDAllReduce(4, 8, [8, 8, 2])\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Grouping dataset transformations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.data.python.ops import gen_dataset_ops\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\n\n\ndef group_by_window(key_func,\n reduce_func,\n window_size=None,\n window_size_func=None):\n \"\"\"A transformation that groups windows of elements by key and reduces them.\n\n This transformation maps each consecutive element in a dataset to a key\n using `key_func` and groups the elements by key. It then applies\n `reduce_func` to at most `window_size_func(key)` elements matching the same\n key. All execpt the final window for each key will contain\n `window_size_func(key)` elements; the final window may be smaller.\n\n You may provide either a constant `window_size` or a window size determined by\n the key through `window_size_func`.\n\n Args:\n key_func: A function mapping a nested structure of tensors\n (having shapes and types defined by `self.output_shapes` and\n `self.output_types`) to a scalar `tf.int64` tensor.\n reduce_func: A function mapping a key and a dataset of up to `batch_size`\n consecutive elements matching that key to another dataset.\n window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements matching the same key to combine in a single\n batch, which will be passed to `reduce_func`. Mutually exclusive with\n `window_size_func`.\n window_size_func: A function mapping a key to a `tf.int64` scalar\n `tf.Tensor`, representing the number of consecutive elements matching\n the same key to combine in a single batch, which will be passed to\n `reduce_func`. 
Mutually exclusive with `window_size`.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}.\n\n Raises:\n ValueError: if neither or both of {`window_size`, `window_size_func`} are\n passed.\n \"\"\"\n if (window_size is not None and window_size_func or\n not (window_size is not None or window_size_func)):\n raise ValueError(\"Must pass either window_size or window_size_func.\")\n\n if window_size is not None:\n\n def constant_window_func(unused_key):\n return ops.convert_to_tensor(window_size, dtype=dtypes.int64)\n\n window_size_func = constant_window_func\n\n assert window_size_func is not None\n\n def _apply_fn(dataset):\n \"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"\n return GroupByWindowDataset(dataset, key_func, reduce_func,\n window_size_func)\n\n return _apply_fn\n\n\nclass _VariantDataset(dataset_ops.Dataset):\n \"\"\"A Dataset wrapper for a tf.variant-typed function argument.\"\"\"\n\n def __init__(self, dataset_variant, output_types, output_shapes):\n super(_VariantDataset, self).__init__()\n self._dataset_variant = dataset_variant\n self._output_types = output_types\n self._output_shapes = output_shapes\n\n def _as_variant_tensor(self):\n return self._dataset_variant\n\n @property\n def output_shapes(self):\n return self._output_shapes\n\n @property\n def output_types(self):\n return self._output_types\n\n\nclass GroupByWindowDataset(dataset_ops.Dataset):\n \"\"\"A `Dataset` that groups its input and performs a windowed reduction.\"\"\"\n\n def __init__(self, input_dataset, key_func, reduce_func, window_size_func):\n \"\"\"See `group_by_window()` for details.\"\"\"\n super(GroupByWindowDataset, self).__init__()\n\n self._input_dataset = input_dataset\n\n self._make_key_func(key_func, input_dataset)\n self._make_reduce_func(reduce_func, input_dataset)\n self._make_window_size_func(window_size_func)\n\n def _make_window_size_func(self, window_size_func):\n \"\"\"Make wrapping Defun for window_size_func.\"\"\"\n\n @function.Defun(dtypes.int64)\n def tf_window_size_func(key):\n key.set_shape([])\n window_size = ops.convert_to_tensor(\n window_size_func(key), dtype=dtypes.int64)\n if window_size.dtype != dtypes.int64:\n raise ValueError(\n \"`window_size_func` must return a single tf.int64 tensor.\")\n return window_size\n\n self._window_size_func = tf_window_size_func\n self._window_size_func.add_to_graph(ops.get_default_graph())\n\n def _make_key_func(self, key_func, input_dataset):\n \"\"\"Make wrapping Defun for key_func.\"\"\"\n\n @function.Defun(*nest.flatten(input_dataset.output_types))\n def tf_key_func(*args):\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\n # Pass in shape information from the input_dataset.\n for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):\n arg.set_shape(shape)\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\n # pylint: disable=protected-access\n if dataset_ops._should_unpack_args(nested_args):\n ret = key_func(*nested_args)\n # pylint: enable=protected-access\n else:\n ret = key_func(nested_args)\n ret = ops.convert_to_tensor(ret, dtype=dtypes.int64)\n if ret.dtype != dtypes.int64:\n raise ValueError(\"`key_func` must return a single tf.int64 tensor.\")\n return ret\n\n self._key_func = tf_key_func\n self._key_func.add_to_graph(ops.get_default_graph())\n\n def _make_reduce_func(self, reduce_func, input_dataset):\n \"\"\"Make wrapping Defun for reduce_func.\"\"\"\n\n @function.Defun(dtypes.int64, 
dtypes.variant)\n def tf_reduce_func(key, window_dataset_variant):\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\n key.set_shape([])\n window_dataset = _VariantDataset(window_dataset_variant,\n input_dataset.output_types,\n input_dataset.output_shapes)\n if not isinstance(window_dataset, dataset_ops.Dataset):\n raise TypeError(\"`window_dataset` must return a `Dataset` object.\")\n output_dataset = reduce_func(key, window_dataset)\n if not isinstance(output_dataset, dataset_ops.Dataset):\n raise TypeError(\"`reduce_func` must return a `Dataset` object.\")\n self._output_types = output_dataset.output_types\n self._output_shapes = output_dataset.output_shapes\n return output_dataset._as_variant_tensor() # pylint: disable=protected-access\n\n self._reduce_func = tf_reduce_func\n self._reduce_func.add_to_graph(ops.get_default_graph())\n\n @property\n def output_shapes(self):\n return self._output_shapes\n\n @property\n def output_types(self):\n return self._output_types\n\n def _as_variant_tensor(self):\n return gen_dataset_ops.group_by_window_dataset(\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\n self._key_func.captured_inputs,\n self._reduce_func.captured_inputs,\n self._window_size_func.captured_inputs,\n key_func=self._key_func,\n reduce_func=self._reduce_func,\n window_size_func=self._window_size_func,\n output_types=nest.flatten(self.output_types),\n output_shapes=nest.flatten(self.output_shapes))\n" ]
[ [ "tensorflow.python.ops.nn_ops.relu6", "tensorflow.python.platform.test.main", "numpy.array", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.training.summary_io.SummaryWriterCache.get", "tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs", "tensorflow.python.training.training_util.get_global_step", "tensorflow.core.framework.summary_pb2.Summary.Value", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.ops.get_default_graph" ], [ "numpy.random.random", "numpy.random.seed", "numpy.arange", "numpy.ones", "tensorflow.python.ops.math_ops.bincount", "numpy.bincount", "tensorflow.python.platform.googletest.main", "numpy.zeros", "numpy.random.randint" ], [ "tensorflow.python.platform.app.run", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.framework.ops.Graph" ], [ "tensorflow.python.ops.resources.shared_resources", "tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_prediction", "tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_partition_examples", "tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig", "tensorflow.python.platform.googletest.main", "numpy.array", "tensorflow.contrib.boosted_trees.proto.tree_config_pb2.DecisionTreeEnsembleConfig" ], [ "tensorflow.contrib.gan.python.features.python.clip_weights_impl.clip_discriminator_weights", "tensorflow.contrib.gan.python.features.python.clip_weights_impl.discriminator_weight_clip", "tensorflow.contrib.gan.python.features.python.clip_weights_impl.weight_clip", "tensorflow.python.ops.variables.Variable", "tensorflow.python.training.training.GradientDescentOptimizer", "tensorflow.contrib.gan.python.features.python.clip_weights_impl.clip_weights", "tensorflow.python.ops.variables.global_variables_initializer" ], [ "tensorflow.python.framework.ops.Graph", "tensorflow.python.training.saver.import_meta_graph" ], [ "tensorflow.python.keras._impl.keras.layers.Masking", "tensorflow.python.keras._impl.keras.constraints.max_norm", "numpy.random.random", "tensorflow.python.keras._impl.keras.layers.Embedding", "tensorflow.python.keras._impl.keras.testing_utils.layer_test", "numpy.ones", "tensorflow.python.platform.test.main", "tensorflow.python.keras._impl.keras.layers.GRU", "numpy.testing.assert_allclose", "tensorflow.python.keras._impl.keras.regularizers.l1", "tensorflow.python.keras._impl.keras.models.Sequential" ], [ "tensorflow.contrib.all_reduce.python.all_reduce._build_ring_gather", "tensorflow.contrib.all_reduce.python.all_reduce.build_shuffle_all_reduce", "tensorflow.python.ops.math_ops.add_n", "tensorflow.python.ops.state_ops.init_variable", "tensorflow.python.platform.tf_logging.info", "numpy.frombuffer", "tensorflow.python.platform.test.main", "tensorflow.contrib.all_reduce.python.all_reduce._ring_permutations", "numpy.prod", "tensorflow.python.framework.ops.device", "tensorflow.contrib.all_reduce.python.all_reduce.build_ring_all_reduce", "tensorflow.python.ops.array_ops.identity", "numpy.zeros", "tensorflow.python.ops.state_ops.variable_op", "tensorflow.contrib.all_reduce.python.all_reduce.build_recursive_hd_all_reduce", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.function.Defun", "tensorflow.python.data.ops.dataset_ops._should_unpack_args", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.python.framework.ops.convert_to_tensor", 
"tensorflow.python.data.util.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10", "1.4" ] } ]
arminnh/deep-q-learning
[ "e6ec12cfa2468b86f60a6cb2635f5feb12dcd7a6", "e6ec12cfa2468b86f60a6cb2635f5feb12dcd7a6" ]
[ "src/universe-driving/drive.py", "src/reinforcement-learning-test.py" ]
[ "#https://github.com/openai/universe-starter-agent/blob/master/envs.py\n\nimport gym\nimport universe\nimport socketio\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\nfrom TORCH_DQN import DQN\nfrom enum import Enum\nimport torchvision.transforms as T\nimport ast\nimport torch\nfrom env import create_flash_env\n\nclass Moves(Enum):\n LEFT = 0\n RIGHT = 1\n ACCELERATE = 2\n BRAKE = 3\n TURBO = 4\n\n def __str__(self):\n if self == Moves.ACCELERATE:\n return \"up\"\n elif self == Moves.BRAKE:\n return \"down\"\n elif self == Moves.LEFT:\n return \"left\"\n elif self == Moves.RIGHT:\n return \"right\"\n elif self == Moves.TURBO:\n return \"x\"\n\nclass SelfDrivingAgent:\n\n def __init__(self):\n #4 moves\n self.DQN = DQN(len(Moves))\n\n self.state = None\n self.lastScreen = None\n\ndef main():\n # Create env\n env, w, h = create_flash_env('flashgames.DuskDrive-v0')\n _ = env.reset()\n\n agent = SelfDrivingAgent()\n #print(observation_n)\n agent.state = torch.zeros((1,128,200)).numpy()\n agent.lastScreen = torch.zeros((1,128,200)).numpy()\n\n next_state = torch.zeros((1,128,200)).numpy()\n count = 1\n while True:\n action = agent.DQN.act(agent.state)\n\n observation_n, reward_n, done_n, info = env.step(action)\n if \"global/episode_reward\" in info:\n count += 1\n # we have finished an episode\n if count in [100,200,300,400,500,600,700,800,900] or count % 1000 == 0:\n #save\n agent.DQN.save(\"agent_ep{}\".format(count))\n\n #print(\"learning\")\n agent.DQN.remember(agent.state, action, reward_n, next_state, False)\n #print(observation_n)\n next_state = observation_n - agent.lastScreen\n agent.lastScreen = observation_n\n\n agent.state = next_state\n agent.DQN.replay(128)\n\n env.render()\n\nmain()", "# code based on https://www.oreilly.com/learning/introduction-to-reinforcement-learning-and-openai-gym\nimport gym\nimport numpy as np\n\n# Environment for a simple text game\nenv = gym.make(\"Taxi-v2\")\n# initialize environment\nenv.reset()\nenv.render()\n\nprint(\"observation space\", env.observation_space.n)\nprint(\"action space\", env.action_space.n)\n\n# Random agent\n# state = env.reset()\n# counter = 0\n# reward = None\n# while reward != 20:\n# state, reward, done, info = env.step(env.action_space.sample())\n# counter += 1\n# print(\"Random agent reward after\", counter, \"steps\")\n\n# Q table as a numpy array. 
A value for each pair of (state, action),\n# so size of Q table = |states|*|actions|\n# If we were to create a Q table for atari games and use images as state, we would need\n# to represent images of resolution 160 by 192 with let's say 32 different grayscale values\n# => not feasible\nQ = np.zeros([env.observation_space.n, env.action_space.n])\n\n# learning rate\nalpha = 0.7\n# discount factor\ngamma = 0.9\nepisodes = 1000\n\n# basic Q learning algorithm\nfor episode in range(episodes):\n done = False\n # sum of rewards for an episode\n R = 0\n reward = 0\n # reset the environment for the new episode\n state = env.reset()\n while not done:\n # Select the action that currently has the best Q value\n action = np.argmax(Q[state])\n # Take the action and observe the results\n state2, reward, done, info = env.step(action)\n # Update the (state, action) pair in the Q table (Bellman equation)\n Q[state, action] = (1-alpha) * Q[state, action] + alpha * (reward + gamma* np.max(Q[state2]))\n\n R += reward\n state = state2\n\n if (episode+1) % 25 == 0:\n print(\"Episode:\", episode+1, \"\\tReward:\", R)\n\n# Do a run of the game after learning the Q-values\ndone = False\nstate = env.reset()\nwhile not done:\n state, reward, done, info = env.step(np.argmax(Q[state]))\n env.render()\n" ]
[ [ "torch.zeros" ], [ "numpy.max", "numpy.argmax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
selfemergence/NEAT-multiprocessing
[ "3dc57c6ec18658253398ae0b361b72f78e3fd0c9" ]
[ "experiments/mario/gym-nes-mario-bros-master/src/run-mario.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# By Lilian Besson (Naereen)\n# https://github.com/Naereen/gym-nes-mario-bros\n# MIT License https://lbesson.mit-license.org/\n#\nfrom __future__ import division, print_function # Python 2 compatibility\n\nimport os\nimport sys\nfrom collections import deque\nfrom time import sleep\n\nPARALLEL_EMULATORS = 1 # XXX Turn down parallel emulators if needed\n# PARALLEL_EMULATORS = 4 # Nb of cores, to have exactly one emulator process by cores\nPARALLEL_EMULATORS = int(os.getenv('N', PARALLEL_EMULATORS))\nif PARALLEL_EMULATORS > 1:\n print(\"WARNING: It's not working with more than one emulator in parallel!\")\n\n\n# FIXME use joblib for something smart?\n# from joblib import Parallel, delayed\n# Parallel(n_jobs=PARALLEL_EMULATORS)(\n# delayed(delayed_play)(XXX)\n# for repeatId in range(PARALLEL_EMULATORS)\n# )\n\n\nimport gym\nfrom gym import wrappers\nimport nesgym\nfrom nesgym.wrappers import CROPPED_WIDTH, CROPPED_HEIGHT\nimport numpy as np\n\nfrom dqn.model import DoubleDQN\nfrom dqn.utils import PiecewiseSchedule\n\n\ndqn_model_name = \"DQN_MarioBros_v1\"\n\n\ndef get_env():\n print(\"Creating gym environment...\") # DEBUG\n env = gym.make('nesgym/MarioBros-v0')\n env = nesgym.wrap_nes_env(env)\n expt_dir = '/tmp/mario/'\n env = wrappers.Monitor(env, os.path.join(expt_dir, \"gym\"), force=True)\n return env\n\n\ndef get_envs(N=1):\n print(\"Creating {} gym environments...\".format(N)) # DEBUG\n envs = []\n for n in range(N):\n print(\"Creating gym environment #{}/{}...\".format(n + 1, N)) # DEBUG\n env = gym.make('nesgym/MarioBros-v0')\n env = nesgym.wrap_nes_env(env)\n expt_dir = '/tmp/mario-{}/'.format(n)\n env = wrappers.Monitor(env, os.path.join(expt_dir, \"gym\"), force=True)\n envs.append(env)\n sleep(1)\n return envs\n\n# Keep a log of the max score seen so far, to plot it as a function of time steps\ndef log_max_seen_score(step, max_seen_score, max_seen_score_csv):\n with open(max_seen_score_csv, 'a') as f:\n f.write(\"\\n{}, {}\".format(step, max_seen_score))\n\n\ndef mario_main(N=1, dqn_model_name=dqn_model_name):\n envs = get_envs(N=N)\n\n # env = envs[0].env.env.env.env\n env = envs[0]\n while hasattr(env, 'env'):\n env = env.env\n env0 = env\n\n last_observations = [ 0 for env in envs ]\n # FIXME finish the support for running emulators in parallel\n for emulatornumber, env in enumerate(envs):\n last_observations[emulatornumber] = env.reset()\n\n try:\n # _emulatornumber = envs[0].env.env.env.env._emulatornumber\n _emulatornumber = env0._emulatornumber\n except:\n _emulatornumber = 0\n dqn_model_name = \"{}-{}\".format(dqn_model_name, _emulatornumber)\n\n max_timesteps = 10000000 # 10 millions steps for 216996 parameters...\n max_seen_score = 0\n\n # Create the log file if needed\n max_seen_score_csv = \"max_seen_score_{}.csv\".format(_emulatornumber)\n if not os.path.isfile(max_seen_score_csv):\n with open(max_seen_score_csv, 'w') as f:\n f.write(\"step, max_seen_score\")\n\n exploration_schedule = PiecewiseSchedule(\n [\n (0, 1.0),\n (1e5, 0.1),\n (max_timesteps / 2, 0.01),\n ], outside_value=0.01\n )\n\n dqn = DoubleDQN(\n image_shape=(CROPPED_WIDTH, CROPPED_HEIGHT, 1),\n num_actions=envs[0].action_space.n,\n # # --- XXX heavy simulations\n # training_starts=10000,\n # target_update_freq=5000,\n # training_batch_size=32,\n # training_freq=4,\n # # --- XXX light simulations?\n training_starts=20,\n target_update_freq=10,\n training_freq=4,\n training_batch_size=4,\n # --- Other parameters...\n frame_history_len=8, # XXX is it 
more efficient with history?\n replay_buffer_size=10000, # XXX reduce if MemoryError\n # frame_history_len=8, # XXX is it more efficient with history?\n # replay_buffer_size=100000, # XXX reduce if MemoryError\n exploration=exploration_schedule,\n name=dqn_model_name\n )\n\n # How to save the DQN to a file after every training\n # in order to resume from previous step if training was stopped?\n if os.path.isfile(dqn_model_name + '.h5'):\n try:\n dqn.load_weights(dqn_model_name + '.h5')\n print(\"Successfully loaded the DQN weights from file '{}'...\".format(dqn_model_name + '.h5')) # DEBUG\n except (ValueError, NotImplementedError, AttributeError):\n print(\"Unable to load the DQN weights from file '{}'...\".format(dqn_model_name + '.h5')) # DEBUG\n\n dqn.save_model()\n dqn.plot_model()\n\n reward_sum_episode = 0\n num_episodes = 0\n episode_rewards = deque(maxlen=100)\n\n for step in range(max_timesteps):\n if step > 0 and step % 100 == 0:\n print(\"step: \", step,\n \"; episodes:\", num_episodes,\n \"; epsilon:\", exploration_schedule.value(step),\n \"; learning rate:\", dqn.get_learning_rate(),\n \"; last 100 training loss mean\", dqn.get_avg_loss()\n )\n if len(episode_rewards) > 0:\n print(\"last 100 episode mean rewards: \", np.mean(np.array(episode_rewards)))\n\n # also print summary of the model!\n dqn.summary()\n # and save the model!\n dqn.save_weights(dqn_model_name + '.h5')\n\n # --- Parallel loops for different environments\n for emulatornumber, env in enumerate(envs):\n last_obs = last_observations[emulatornumber]\n\n # XXX Enable this to see the Python view of the screen (PIL.imshow)\n # env.render()\n\n if len(envs) > 1:\n print(\"Emulator #\", emulatornumber) # DEBUG\n\n action = dqn.choose_action(step, last_obs)\n obs, reward, done, info = env.step(action)\n reward_sum_episode += reward\n\n if done and reward < 0:\n reward = 0 # force this manually to avoid bug of getting -400 10 times in a row!\n dqn.learn(step, action, reward, done, info)\n\n print(\"Step {:>6}, action {:>2} (#{:>2}), gave reward {:>6}, score {:>6} and max score {:>6}, life {:>2} and level {:>2}.\".format(step, env0.actions[action], action, reward, info['score'], max_seen_score, info['life'], info['level'])) # DEBUG\n\n if info['score'] > max_seen_score:\n max_seen_score = info['score']\n print(\"!!New total score record!!\", max_seen_score)\n log_max_seen_score(step, max_seen_score, max_seen_score_csv)\n if done:\n last_obs = env.reset()\n if info['frame'] > 0: # we actually played a few frames\n print(\"\\ndone, reward_sum_episode =\", reward_sum_episode)\n episode_rewards.append(reward_sum_episode)\n reward_sum_episode = 0\n num_episodes += 1\n else:\n last_obs = obs\n\n last_observations[emulatornumber] = last_obs\n\n print(\"Simulation is done, exiting now\")\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n # FIXME finish the support for running emulators in parallel\n mario_main(N=PARALLEL_EMULATORS)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
susanwe/world-models
[ "0f246a430683e6ab741726df0a97f35830044356", "0f246a430683e6ab741726df0a97f35830044356" ]
[ "models/vae.py", "data/carracing.py" ]
[ "\n\"\"\"\nVariational encoder model, used as a visual model\nfor our model of the world.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Decoder(nn.Module):\n \"\"\" VAE decoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(Decoder, self).__init__()\n self.latent_size = latent_size\n self.img_channels = img_channels\n\n self.fc1 = nn.Linear(latent_size, 1024)\n self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)\n self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)\n self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)\n self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)\n\n def forward(self, x): # pylint: disable=arguments-differ\n x = F.relu(self.fc1(x))\n x = x.unsqueeze(-1).unsqueeze(-1)\n x = F.relu(self.deconv1(x))\n x = F.relu(self.deconv2(x))\n x = F.relu(self.deconv3(x))\n reconstruction = torch.sigmoid(self.deconv4(x))\n return reconstruction\n\nclass Encoder(nn.Module): # pylint: disable=too-many-instance-attributes\n \"\"\" VAE encoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(Encoder, self).__init__()\n self.latent_size = latent_size\n #self.img_size = img_size\n self.img_channels = img_channels\n\n self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)\n self.conv2 = nn.Conv2d(32, 64, 4, stride=2)\n self.conv3 = nn.Conv2d(64, 128, 4, stride=2)\n self.conv4 = nn.Conv2d(128, 256, 4, stride=2)\n\n self.fc_mu = nn.Linear(2*2*256, latent_size)\n self.fc_logsigma = nn.Linear(2*2*256, latent_size)\n\n\n def forward(self, x): # pylint: disable=arguments-differ\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = x.view(x.size(0), -1)\n\n mu = self.fc_mu(x)\n logsigma = self.fc_logsigma(x)\n\n return mu, logsigma\n\nclass VAE(nn.Module):\n \"\"\" Variational Autoencoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(VAE, self).__init__()\n self.encoder = Encoder(img_channels, latent_size)\n self.decoder = Decoder(img_channels, latent_size)\n\n def forward(self, x): # pylint: disable=arguments-differ\n mu, logsigma = self.encoder(x)\n sigma = logsigma.exp()\n eps = torch.randn_like(sigma)\n z = eps.mul(sigma).add_(mu)\n\n recon_x = self.decoder(z)\n return recon_x, mu, logsigma\n", "\"\"\"\nGenerating data from the CarRacing gym environment.\n!!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!!\n\"\"\"\nimport argparse\nfrom os.path import join, exists\nimport gym\nimport numpy as np\nfrom utils.misc import sample_continuous_policy\n\ndef generate_data(rollouts, data_dir, noise_type): # pylint: disable=R0914\n \"\"\" Generates data \"\"\"\n assert exists(data_dir), \"The data directory does not exist...\"\n\n env = gym.make(\"CarRacing-v0\")\n seq_len = 1000\n\n for i in range(rollouts):\n env.reset()\n env.env.viewer.window.dispatch_events()\n if noise_type == 'white':\n a_rollout = [env.action_space.sample() for _ in range(seq_len)]\n elif noise_type == 'brown':\n a_rollout = sample_continuous_policy(env.action_space, seq_len, 1. 
/ 50)\n\n s_rollout = []\n r_rollout = []\n d_rollout = []\n\n t = 0\n while True:\n action = a_rollout[t]\n t += 1\n\n # The CarRacing-v0 environment has a step limit of 1000, this can be seen in env.spec.max_episode_steps\n s, r, done, _ = env.step(action)\n env.env.viewer.window.dispatch_events()\n s_rollout += [s]\n r_rollout += [r]\n d_rollout += [done]\n if done:\n # Because these are random policies, most of them will not be done before the step limit of 1000\n print(\"> End of rollout {}, {} frames...\".format(i, len(s_rollout)))\n np.savez(join(data_dir, 'rollout_{}'.format(i)),\n observations=np.array(s_rollout),\n rewards=np.array(r_rollout),\n actions=np.array(a_rollout),\n terminals=np.array(d_rollout))\n break\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--rollouts', type=int, help=\"Number of rollouts\")\n parser.add_argument('--dir', type=str, help=\"Where to place rollouts\")\n parser.add_argument('--policy', type=str, choices=['white', 'brown'],\n help='Noise type used for action sampling.',\n default='brown')\n args = parser.parse_args()\n generate_data(args.rollouts, args.dir, args.policy)\n" ]
[ [ "torch.randn_like", "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.ConvTranspose2d" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tidepool-org/data-science-models
[ "cd06e9aad95a0bc6cc2a81871e567c88159b86d3" ]
[ "tidepool_data_science_models/models/icgm_sensor_generator.py" ]
[ "\"\"\"\nCreates iCGM Sensors given a trueBG trace\n\nThe Dexcom G6 Specifications in this file are publicly available from:\n “EVALUATION OF AUTOMATIC CLASS III DESIGNATION FOR\n Dexcom G6 Continuous Glucose Monitoring System.” n.d.\n https://www.accessdata.fda.gov/cdrh_docs/reviews/DEN170088.pdf.\n\n\"\"\"\n\n# %% Libraries\nimport numpy as np\nfrom scipy.optimize import brute, fmin\nfrom tidepool_data_science_models.models.icgm_sensor import iCGMSensor\nimport tidepool_data_science_models.models.icgm_sensor_generator_functions as sf\nimport multiprocessing\nmultiprocessing.set_start_method(\"fork\")\n\n\n# %% Definitions\nclass iCGMSensorGenerator(object):\n \"\"\"iCGM Sensor Generator object which fits a Johnsonsu distribution to a true_bg_trace\n and generates sensors using this distribution\"\"\"\n\n def __init__(\n self,\n sc_thresholds=None, # This is required only for iCGM sensors for now (A-G)\n batch_training_size=30,\n use_g6_accuracy_in_loss=False,\n bias_type=\"percentage_of_value\",\n bias_drift_type=\"random\",\n random_seed=0,\n verbose=False,\n true_bg_trace=None,\n true_dataset_name=\"default\",\n ):\n \"\"\"\n Sensor Generator Initialization\n\n Parameters\n ----------\n sc_thresholds : float array\n The 7 special control thresholds A-G\n use_g6_accuracy_in_loss : bool\n Whether or not to use the G6 accuracy loss during fit\n bias_type : str\n Type of overall bias used which defines the normalization factor\n bias_drift_type : str\n Type of drift used in the sensor bias (random, linear, none)\n random_seed : int\n Random seed used throughout generator for reproducible sensors and values\n verbose : bool\n Verbosity setting for the brute force distribution parameter search\n true_bg_trace : float array\n The time-series of true bgs the iCGM distribution is fit to\n true_dataset_name : str\n Name of the true bg dataset used to fit\n \"\"\"\n\n if sc_thresholds is None:\n sc_thresholds = [\n 0.85,\n 0.70,\n 0.80,\n 0.98,\n 0.99,\n 0.99,\n 0.87,\n ] # This is required only for iCGM sensors (Criteria A-G)\n\n self.sc_thresholds = sc_thresholds\n self.batch_training_size = batch_training_size\n self.use_g6_accuracy_in_loss = use_g6_accuracy_in_loss\n self.bias_type = bias_type\n self.bias_drift_type = bias_drift_type\n self.random_seed = random_seed\n self.verbose = verbose\n self.true_bg_trace = true_bg_trace\n self.true_dataset_name = true_dataset_name\n\n # pick delay based upon data in:\n # Vettoretti et al., 2019, Sensors 2019, 19, 5320\n if use_g6_accuracy_in_loss:\n self.delay = 5 # time delay between iCGM value and true value\n else:\n self.delay = 10\n\n self.johnson_parameter_search_range, self.search_range_inputs = sf.get_search_range()\n\n # set the random seed for reproducibility\n np.random.seed(seed=random_seed)\n\n self.icgm_traces = None\n self.individual_sensor_properties = None\n self.batch_sensor_brute_search_results = None\n self.batch_sensor_properties = None\n self.dist_params = None\n\n return\n\n def fit(self, true_bg_trace=None):\n \"\"\"Fits the optimal sensor characteristics fit to a true_bg_trace using a brute search range\n\n Parameters\n ----------\n true_bg_trace : float array\n The true_bg_trace (mg/dL) used to fit a johnsonsu distribution\n training_size : int\n Number of sensors used when fitting the optimal distribution of sensor characteristics\n\n \"\"\"\n\n if true_bg_trace is None:\n raise Exception(\"No true_bg_trace given\")\n\n self.true_bg_trace = true_bg_trace\n\n batch_sensor_brute_search_results = brute(\n 
sf.johnsonsu_icgm_sensor,\n self.johnson_parameter_search_range,\n args=(\n true_bg_trace,\n self.sc_thresholds,\n self.batch_training_size,\n self.bias_type,\n self.bias_drift_type,\n self.delay,\n self.random_seed,\n self.verbose,\n self.use_g6_accuracy_in_loss,\n ),\n workers=-1,\n full_output=True,\n finish=fmin, # fmin will look for a local minimum around the grid point\n )\n\n self.batch_sensor_brute_search_results = batch_sensor_brute_search_results\n self.dist_params = self.batch_sensor_brute_search_results[0]\n\n return\n\n def generate_sensors(self, n_sensors, sensor_start_datetime, sensor_start_time_index=0):\n\n if self.dist_params is None:\n raise Exception(\"iCGM Sensor Generator has not been fit() to a true_bg_trace distribution.\")\n\n (\n a,\n b,\n mu,\n sigma,\n noise_coefficient,\n bias_drift_range_min,\n bias_drift_range_max,\n bias_drift_oscillations,\n ) = self.dist_params\n\n bias_drift_range = [bias_drift_range_min, bias_drift_range_max]\n\n # STEP 3 apply the results\n # Convert to a generate_sensor(global_params) --> Sensor(obj)\n self.icgm_traces, self.individual_sensor_properties = sf.generate_icgm_sensors(\n self.true_bg_trace,\n dist_params=self.dist_params[:4],\n n_sensors=n_sensors,\n bias_type=self.bias_type,\n bias_drift_type=self.bias_drift_type,\n bias_drift_range=bias_drift_range,\n bias_drift_oscillations=bias_drift_oscillations,\n noise_coefficient=noise_coefficient,\n delay=self.delay,\n random_seed=self.random_seed,\n )\n\n sensors = []\n\n for sensor_num in range(n_sensors):\n sensor_properties = self.individual_sensor_properties.loc[sensor_num]\n sensors.append(\n iCGMSensor(\n sensor_properties=sensor_properties,\n time_index=sensor_start_time_index,\n current_datetime=sensor_start_datetime,\n )\n )\n\n self.n_sensors = n_sensors\n self.sensors = sensors # Array of sensor objects\n\n return sensors\n" ]
[ [ "scipy.optimize.brute", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
tomroesch/iqplot
[ "e13f9ac888d75093da05353ba80786804ec99418" ]
[ "iqplot/dist.py" ]
[ "\"\"\"Visualization of how data are distributed, split or colored by a\ncategorical variable.\"\"\"\n\nimport copy\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport colorcet\n\nimport bokeh.models\nimport bokeh.plotting\n\nfrom . import utils\n\n\ndef ecdf(\n data=None,\n q=None,\n cats=None,\n q_axis=\"x\",\n palette=None,\n order=None,\n p=None,\n show_legend=True,\n legend_label=None,\n legend_location=\"right\",\n legend_orientation=\"vertical\",\n tooltips=None,\n complementary=False,\n kind=\"collection\",\n style=\"dots\",\n conf_int=False,\n ptiles=[2.5, 97.5],\n n_bs_reps=10000,\n click_policy=\"hide\",\n marker=\"circle\",\n marker_kwargs=None,\n line_kwargs=None,\n conf_int_kwargs=None,\n horizontal=None,\n val=None,\n **kwargs,\n):\n \"\"\"\n Make an ECDF plot.\n\n Parameters\n ----------\n data : Pandas DataFrame, 1D Numpy array, or xarray\n DataFrame containing tidy data for plotting. If a Numpy array,\n a single category is assumed and a box plot generated from\n data.\n q : hashable\n Name of column to use as quantitative variable if `data` is a\n Pandas DataFrame. Otherwise, `q` is used as the quantitative\n axis label.\n cats : hashable or list of hashables\n Name of column(s) to use as categorical variable(s).\n q_axis : str, either 'x' or 'y', default 'x'\n Axis along which the quantitative value varies.\n palette : list colors, or single color string \n If a list, color palette to use. If a single string representing\n a color, all glyphs are colored with that color. Default is\n colorcet.b_glasbey_category10 from the colorcet package.\n order : list or None\n If not None, must be a list of unique group names when the input\n data frame is grouped by `cats`. The order of the list specifies\n the ordering of the categorical variables in the legend. If\n None, the categories appear in the order in which they appeared\n in the inputted data frame.\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n show_legend : bool, default False\n If True, display legend.\n legend_label : str, default None\n If `cats` is None and `show_legend` is True, then if\n `legend_label` is not None, a legend is created for the glyph\n on the plot and labeled with `legend_label`. Otherwise, no\n legend is created if `cats` is None.\n legend_location : str, default 'right'\n Location of legend. If one of \"right\", \"left\", \"above\", or\n \"below\", the legend is placed outside of the plot area. If one\n of \"top_left\", \"top_center\", \"top_right\", \"center_right\",\n \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\",\n or \"center\", the legend is placed within the plot area. If a\n 2-tuple, legend is placed according to the coordinates in the\n tuple.\n legend_orientation : str, default 'vertical'\n Either 'horizontal' or 'vertical'.\n tooltips : list of 2-tuples\n Specification for tooltips as per Bokeh specifications. For\n example, if we want `col1` and `col2` tooltips, we can use\n `tooltips=[('label 1': '@col1'), ('label 2': '@col2')]`. Ignored\n if `style` is 'staircase'.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution function.\n kind : str, default 'collection'\n If 'collection', the figure is populated with a collection of\n ECDFs coded with colors based on the categorical variables. 
If\n 'colored', the figure is populated with a single ECDF with\n circles colored based on the categorical variables.\n style : str, default 'dots'\n The style of ECDF to make.\n\n - dots: Each data point is plotted as a dot.\n - staircase: ECDF is plotted as a traditional staircase.\n - formal: Strictly adhere to the definition of an ECDF.\n conf_int : bool, default False\n If True, display confidence interval of ECDF.\n ptiles : list, default [2.5, 97.5]\n The percentiles to use for the confidence interval. Ignored if\n `conf_int` is False.\n n_bs_reps : int, default 1000\n Number of bootstrap replicates to do to compute confidence\n interval. Ignored if `conf_int` is False.\n click_policy : str, default 'hide'\n Either 'hide', 'mute', or None; how the glyphs respond when the\n corresponding category is clicked in the legend.\n marker : str, default 'circle'\n Name of marker to be used in the plot (ignored if `style` is\n 'staircase'). Must be one of['asterisk', 'circle',\n 'circle_cross', 'circle_x', 'cross', 'dash', 'diamond',\n 'diamond_cross', 'hex', 'inverted_triangle', 'square',\n 'square_cross', 'square_x', 'triangle', 'x']\n marker_kwargs : dict\n Keyword arguments to be passed to `p.circle()`.\n line_kwargs : dict\n Kwargs to be passed to `p.line()`, `p.ray()`, and `p.segment()`.\n conf_int_kwargs : dict\n kwargs to pass into patches depicting confidence intervals.\n horizontal : bool or None, default None\n Deprecated. Use `q_axis`.\n val : hashable\n Deprecated, use `q`.\n kwargs\n Any kwargs to be passed to `bokeh.plotting.figure()` when making\n the plot.\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDFs.\n \"\"\"\n # Protect against mutability of dicts\n marker_kwargs = copy.copy(marker_kwargs)\n line_kwargs = copy.copy(line_kwargs)\n conf_int_kwargs = copy.copy(conf_int_kwargs)\n\n q = utils._parse_deprecations(q, q_axis, val, horizontal, \"y\")\n\n if style == \"formal\" and complementary:\n raise NotImplementedError(\"Complementary formal ECDFs not yet implemented.\")\n\n if palette is None:\n palette = colorcet.b_glasbey_category10\n elif type(palette) == str:\n palette = [palette]\n\n data, q, cats, show_legend = utils._data_cats(\n data, q, cats, show_legend, legend_label\n )\n\n cats, cols = utils._check_cat_input(\n data, cats, q, None, None, tooltips, palette, order, marker_kwargs\n )\n\n kwargs = utils._fig_dimensions(kwargs)\n\n if conf_int and \"y_axis_type\" in kwargs and kwargs[\"y_axis_type\"] == \"log\":\n warnings.warn(\n \"Cannot reliably draw confidence intervals with a y-axis on a log scale because zero cannot be represented. Omitting confidence interval.\"\n )\n conf_int = False\n if (\n conf_int\n and \"x_axis_type\" in kwargs\n and kwargs[\"x_axis_type\"] == \"log\"\n and (data[q] <= 0).any()\n ):\n warnings.warn(\n \"Cannot draw confidence intervals with a x-axis on a log scale because some values are negative. 
Any negative values will be omitted from the ECDF.\"\n )\n conf_int = False\n\n if marker_kwargs is None:\n marker_kwargs = {}\n if line_kwargs is None:\n line_kwargs = {}\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n if q_axis == \"y\":\n if \"x_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"x_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"x_axis_label\"] = \"ECDF\"\n else:\n if \"y_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"y_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"y_axis_label\"] = \"ECDF\"\n\n if q_axis == \"y\":\n if \"y_axis_label\" not in kwargs:\n kwargs[\"y_axis_label\"] = q\n else:\n if \"x_axis_label\" not in kwargs:\n kwargs[\"x_axis_label\"] = q\n\n if style in [\"formal\", \"staircase\"] and \"line_width\" not in line_kwargs:\n line_kwargs[\"line_width\"] = 2\n\n if conf_int_kwargs is None:\n conf_int_kwargs = {}\n if \"fill_alpha\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_alpha\"] = 0.5\n if \"line_alpha\" not in conf_int_kwargs and \"line_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"line_alpha\"] = 0\n\n df = data.copy()\n if kind == \"collection\":\n if style == \"dots\":\n df[y] = df.groupby(cats)[q].transform(_ecdf_y, complementary=complementary)\n elif kind == \"colored\":\n df[y] = df[q].transform(_ecdf_y, complementary=complementary)\n cols += [y]\n else:\n raise RuntimeError(\"`kind` must be in `['collection', 'colored']\")\n\n _, df[\"__label\"] = utils._source_and_labels_from_cats(df, cats)\n cols += [\"__label\"]\n\n if order is not None:\n if type(cats) in [list, tuple]:\n df[\"__sort\"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)\n else:\n df[\"__sort\"] = df.apply(lambda r: order.index(r[cats]), axis=1)\n df = df.sort_values(by=\"__sort\")\n\n if p is None:\n p = bokeh.plotting.figure(**kwargs)\n\n if style == \"dots\":\n marker_fun = utils._get_marker(p, marker)\n\n if tooltips is not None:\n if style in [\"formal\", \"staircase\"]:\n warnings.warn(\n \"Cannot have tooltips for formal ECDFs because there are no points to hover over. 
Omitting tooltips\"\n )\n else:\n p.add_tools(bokeh.models.HoverTool(tooltips=tooltips))\n\n markers = []\n lines = []\n patches = []\n labels = []\n\n if kind == \"collection\":\n # Explicitly loop to enable click policies on the legend\n # (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n labels.append(g[\"__label\"].iloc[0])\n if conf_int:\n conf_int_kwargs[\"fill_color\"] = palette[i % len(palette)]\n # conf_int_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n p, patch = _ecdf_conf_int(\n p,\n g[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n patches.append(patch)\n\n marker_kwargs[\"color\"] = palette[i % len(palette)]\n # marker_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n line_kwargs[\"color\"] = palette[i % len(palette)]\n # line_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n if style == \"staircase\":\n p, new_line = _staircase_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n line_kwargs=line_kwargs,\n )\n lines.append(new_line)\n elif style == \"dots\":\n if q_axis == \"y\":\n markers.append(marker_fun(source=g, x=y, y=q, **marker_kwargs))\n else:\n markers.append(marker_fun(source=g, x=q, y=y, **marker_kwargs))\n elif style == \"formal\":\n p, circle, segment = _formal_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n marker_kwargs=marker_kwargs,\n line_kwargs=line_kwargs,\n )\n markers.append(circle)\n lines.append(segment)\n elif kind == \"colored\":\n if style in [\"formal\", \"staircase\"]:\n raise RuntimeError(\n \"Cannot have a formal or staircase ECDF with `kind='colored'`.\"\n )\n\n if conf_int:\n if \"fill_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_color\"] = \"gray\"\n\n p, patch = _ecdf_conf_int(\n p,\n df[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n # Explicitly loop to enable click policies on the legend (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n source = bokeh.models.ColumnDataSource(g[cols])\n mkwargs = marker_kwargs\n # mkwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n mkwargs[\"color\"] = palette[i % len(palette)]\n labels.append(g[\"__label\"].iloc[0])\n if q_axis == \"y\":\n markers.append(marker_fun(source=source, x=y, y=q, **mkwargs))\n else:\n markers.append(marker_fun(source=source, x=q, y=y, **mkwargs))\n\n return _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n markers,\n lines,\n patches,\n )\n\n\ndef histogram(\n data=None,\n q=None,\n cats=None,\n palette=None,\n order=None,\n q_axis=\"x\",\n p=None,\n rug=True,\n rug_height=0.05,\n show_legend=None,\n legend_label=None,\n legend_location=\"right\",\n legend_orientation=\"vertical\",\n bins=\"freedman-diaconis\",\n density=False,\n kind=\"step_filled\",\n click_policy=\"hide\",\n line_kwargs=None,\n fill_kwargs=None,\n rug_kwargs=None,\n horizontal=None,\n val=None,\n **kwargs,\n):\n \"\"\"\n Make a plot of histograms.\n\n Parameters\n ----------\n data : Pandas DataFrame, 1D Numpy array, or xarray\n DataFrame containing tidy data for plotting. If a Numpy array,\n a single category is assumed and a box plot generated from\n data.\n q : hashable\n Name of column to use as quantitative variable if `data` is a\n Pandas DataFrame. 
Otherwise, `q` is used as the quantitative\n axis label.\n cats : hashable or list of hashables\n Name of column(s) to use as categorical variable(s).\n q_axis : str, either 'x' or 'y', default 'x'\n Axis along which the quantitative value varies.\n palette : list colors, or single color string \n If a list, color palette to use. If a single string representing\n a color, all glyphs are colored with that color. Default is\n colorcet.b_glasbey_category10 from the colorcet package.\n order : list or None\n If not None, must be a list of unique group names when the input\n data frame is grouped by `cats`. The order of the list specifies\n the ordering of the categorical variables in the legend. If\n None, the categories appear in the order in which they appeared\n in the inputted data frame.\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n legend_label : str, default None\n If `cats` is None and `show_legend` is True, then if\n `legend_label` is not None, a legend is created for the glyph\n on the plot and labeled with `legend_label`. Otherwise, no\n legend is created if `cats` is None.\n legend_location : str, default 'right'\n Location of legend. If one of \"right\", \"left\", \"above\", or\n \"below\", the legend is placed outside of the plot area. If one\n of \"top_left\", \"top_center\", \"top_right\", \"center_right\",\n \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\",\n or \"center\", the legend is placed within the plot area. If a\n 2-tuple, legend is placed according to the coordinates in the\n tuple.\n legend_orientation : str, default 'vertical'\n Either 'horizontal' or 'vertical'.\n bins : int, array_like, or str, default 'freedman-diaconis'\n If int or array_like, setting for `bins` kwarg to be passed to\n `np.histogram()`. If 'exact', then each unique value in the\n data gets its own bin. If 'integer', then integer data is\n assumed and each integer gets its own bin. If 'sqrt', uses the\n square root rule to determine number of bins. If\n `freedman-diaconis`, uses the Freedman-Diaconis rule for number\n of bins.\n rug : bool, default True\n If True, also include a rug plot. If, however, `bins` is 'exact'\n or 'integer', the `rug` kwarg is ignored.\n rug_height : float, default 0.05\n Height of the rug plot as a fraction of the highest point in the\n histograms.\n density : bool, default False\n If True, normalize the histograms. Otherwise, base the\n histograms on counts.\n kind : str, default 'step_filled'\n The kind of histogram to display. Allowed values are 'step' and\n 'step_filled'.\n click_policy : str, default 'hide'\n Either 'hide', 'mute', or None; how the glyphs respond when the\n corresponding category is clicked in the legend.\n line_kwargs : dict\n Keyword arguments to pass to `p.line()` in constructing the\n histograms. By default, {\"line_width\": 2}.\n fill_kwargs : dict\n Keyword arguments to pass to `p.patch()` when making the fill\n for the step-filled histogram. Ignored if `kind = 'step'`. By\n default {\"fill_alpha\": 0.3, \"line_alpha\": 0}.\n rug_kwargs : dict\n Keyword arguments to pass to `p.multi_line()` when making the\n rug plot.\n horizontal : bool or None, default None\n Deprecated. 
Use `q_axis`.\n val : hashable\n Deprecated, use `q`.\n kwargs\n Any kwargs to be passed to `bokeh.plotting.figure()` when making\n the plot.\n\n Returns\n -------\n output : Bokeh figure\n Figure populated with histograms.\n \"\"\"\n # Protect against mutability of dicts\n line_kwargs = copy.copy(line_kwargs)\n fill_kwargs = copy.copy(fill_kwargs)\n rug_kwargs = copy.copy(rug_kwargs)\n\n if type(bins) == str and bins in [\"integer\", \"exact\"]:\n rug = False\n\n q = utils._parse_deprecations(q, q_axis, val, horizontal, \"y\")\n\n if palette is None:\n palette = colorcet.b_glasbey_category10\n elif type(palette) == str:\n palette = [palette]\n\n df, q, cats, show_legend = utils._data_cats(\n data, q, cats, show_legend, legend_label\n )\n\n if show_legend is None:\n if cats is None:\n show_legend = False\n else:\n show_legend = True\n\n if type(bins) == str and bins not in [\n \"integer\",\n \"exact\",\n \"sqrt\",\n \"freedman-diaconis\",\n ]:\n raise RuntimeError(\"Invalid bin specification.\")\n\n if cats is None:\n df[\"__cat\"] = \"__dummy_cat\"\n if show_legend:\n raise RuntimeError(\"No legend to show if `cats` is None.\")\n if order is not None:\n raise RuntimeError(\"No `order` is allowed if `cats` is None.\")\n cats = \"__cat\"\n\n cats, cols = utils._check_cat_input(\n df, cats, q, None, None, None, palette, order, kwargs\n )\n\n kwargs = utils._fig_dimensions(kwargs)\n\n if line_kwargs is None:\n line_kwargs = {\"line_width\": 2}\n if fill_kwargs is None:\n fill_kwargs = {}\n if \"fill_alpha\" not in fill_kwargs:\n fill_kwargs[\"fill_alpha\"] = 0.3\n if \"line_alpha\" not in fill_kwargs:\n fill_kwargs[\"line_alpha\"] = 0\n\n _, df[\"__label\"] = utils._source_and_labels_from_cats(df, cats)\n cols += [\"__label\"]\n\n if order is not None:\n if type(cats) in [list, tuple]:\n df[\"__sort\"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)\n else:\n df[\"__sort\"] = df.apply(lambda r: order.index(r[cats]), axis=1)\n df = df.sort_values(by=\"__sort\")\n\n if type(bins) == str and bins == \"exact\":\n a = np.unique(df[q])\n if len(a) == 1:\n bins = np.array([a[0] - 0.5, a[0] + 0.5])\n else:\n bins = np.concatenate(\n (\n (a[0] - (a[1] - a[0]) / 2,),\n (a[1:] + a[:-1]) / 2,\n (a[-1] + (a[-1] - a[-2]) / 2,),\n )\n )\n elif type(bins) == str and bins == \"integer\":\n if np.any(df[q] != np.round(df[q])):\n raise RuntimeError(\"'integer' bins chosen, but data are not integer.\")\n bins = np.arange(df[q].min() - 1, df[q].max() + 1) + 0.5\n\n if p is None:\n kwargs = utils._fig_dimensions(kwargs)\n\n if \"x_axis_label\" not in kwargs:\n kwargs[\"x_axis_label\"] = q\n\n if \"y_axis_label\" not in kwargs:\n if density:\n kwargs[\"y_axis_label\"] = \"density\"\n else:\n kwargs[\"y_axis_label\"] = \"count\"\n if \"y_range\" not in kwargs:\n kwargs[\"y_range\"] = bokeh.models.DataRange1d(start=0)\n\n p = bokeh.plotting.figure(**kwargs)\n\n # Explicitly loop to enable click policies on the legend (not possible with factors)\n max_height = 0\n lines = []\n labels = []\n patches = []\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n e0, f0 = _compute_histogram(g[q], bins, density)\n\n max_height = max(f0.max(), max_height)\n\n line_kwargs[\"color\"] = palette[i % len(palette)]\n\n if q_axis == \"y\":\n lines.append(p.line(f0, e0, **line_kwargs))\n else:\n lines.append(p.line(e0, f0, **line_kwargs))\n labels.append(g[\"__label\"].iloc[0])\n\n if kind == \"step_filled\":\n x2 = [e0.min(), e0.max()]\n y2 = [0, 0]\n fill_kwargs[\"color\"] = palette[i % len(palette)]\n if q_axis 
== \"y\":\n p, patch = utils._fill_between(p, f0, e0, y2, x2, **fill_kwargs)\n else:\n p, patch = utils._fill_between(p, e0, f0, x2, y2, **fill_kwargs)\n patches.append(patch)\n\n # Put in the rug plot\n if rug:\n if rug_kwargs is None:\n rug_kwargs = dict(alpha=0.5, line_width=0.5)\n elif type(rug_kwargs) != dict:\n raise RuntimeError(\"`rug_kwargs` must be a dictionary.\")\n if \"alpha\" not in rug_kwargs and \"line_alpha\" not in rug_kwargs:\n rug_kwargs[\"alpha\"] = 0.5\n if \"line_width\" not in rug_kwargs:\n rug_kwargs[\"line_width\"] = 0.5\n\n y = [0, max_height * rug_height]\n\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n xs = [[q_val, q_val] for q_val in g[q].values]\n ys = [y] * len(g)\n if \"color\" not in rug_kwargs and \"line_color\" not in rug_kwargs:\n p.multi_line(xs, ys, color=palette[i % len(palette)], **rug_kwargs)\n else:\n p.multi_line(xs, ys, **rug_kwargs)\n\n return _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n [],\n lines,\n patches,\n )\n\n\ndef _staircase_ecdf(p, data, complementary=False, q_axis=\"x\", line_kwargs={}):\n \"\"\"\n Create a plot of an ECDF.\n\n Parameters\n ----------\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n data : array_like\n One-dimensional array of data. Nan's are ignored.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution functon.\n q_axis : str, default 'x'\n Which axis has the quantitative variable.\n line_kwargs : dict\n kwargs to be passed into p.line and p.ray.\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDF.\n \"\"\"\n # Extract data\n data = utils._convert_data(data)\n\n # Data points on ECDF\n x, y = _ecdf_vals(data, True, complementary)\n\n # Line of steps\n if q_axis == \"y\":\n line = p.line(y, x, **line_kwargs)\n elif q_axis == \"x\":\n line = p.line(x, y, **line_kwargs)\n\n # Rays for ends\n if q_axis == \"y\":\n if complementary:\n p.ray(x=1, y=x[0], length=0, angle=-np.pi / 2, **line_kwargs)\n p.ray(x=0, y=x[-1], length=0, angle=np.pi / 2, **line_kwargs)\n else:\n p.ray(x=0, y=x[0], length=0, angle=-np.pi / 2, **line_kwargs)\n p.ray(x=1, y=x[-1], length=0, angle=np.pi / 2, **line_kwargs)\n elif q_axis == \"x\":\n if complementary:\n p.ray(x=x[0], y=1, length=0, angle=np.pi, **line_kwargs)\n p.ray(x=x[-1], y=0, length=0, angle=0, **line_kwargs)\n else:\n p.ray(x=x[0], y=0, length=0, angle=np.pi, **line_kwargs)\n p.ray(x=x[-1], y=1, length=0, angle=0, **line_kwargs)\n\n return p, line\n\n\ndef _formal_ecdf(\n p, data, complementary=False, q_axis=\"x\", marker_kwargs={}, line_kwargs={}\n):\n \"\"\"\n Create a plot of an ECDF.\n\n Parameters\n ----------\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n data : array_like\n One-dimensional array of data. 
Nan's are ignored.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution functon.\n marker_kwargs : dict\n Any kwargs to be passed to p.circle().\n line_kwargs : dict\n Any kwargs to be passed to p.segment() and p.ray().\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDF.\n \"\"\"\n # Extract data\n data = utils._convert_data(data)\n\n # Data points on ECDF\n x, y = _ecdf_vals(data, complementary)\n\n # Copy of marker kwargs for unfilled points\n unfilled_kwargs = marker_kwargs.copy()\n unfilled_kwargs[\"fill_color\"] = \"white\"\n\n if q_axis == \"y\":\n segment = p.segment(y[:-1], x[:-1], y[1:], x[:-1], **line_kwargs)\n p.ray(x=0, y=x[0], angle=-np.pi / 2, length=0, **line_kwargs)\n p.ray(x=1, y=x[-1], angle=np.pi / 2, length=0, **line_kwargs)\n circle = p.circle(y, x, **marker_kwargs)\n p.circle([0], [0], **unfilled_kwargs)\n p.circle(y[:-1], x[1:], **unfilled_kwargs)\n elif q_axis == \"x\":\n segment = p.segment(x[:-1], y[:-1], x[1:], y[:-1], **line_kwargs)\n p.ray(x=x[0], y=0, angle=np.pi, length=0, **line_kwargs)\n p.ray(x=x[-1], y=1, angle=0, length=0, **line_kwargs)\n circle = p.circle(x, y, **marker_kwargs)\n p.circle([0], [0], **unfilled_kwargs)\n p.circle(x[1:], y[:-1], **unfilled_kwargs)\n\n return p, circle, segment\n\n\ndef _ecdf_vals(data, staircase=False, complementary=False):\n \"\"\"Get x, y, values of an ECDF for plotting.\n Parameters\n ----------\n data : ndarray\n One dimensional Numpy array with data.\n staircase : bool, default False\n If True, generate x and y values for ECDF (staircase). If\n False, generate x and y values for ECDF as dots.\n complementary : bool\n If True, return values for ECCDF.\n\n Returns\n -------\n x : ndarray\n x-values for plot\n y : ndarray\n y-values for plot\n \"\"\"\n x = np.sort(data)\n y = np.arange(1, len(data) + 1) / len(data)\n\n if staircase:\n x, y = _to_staircase(x, y)\n if complementary:\n y = 1 - y\n elif complementary:\n y = 1 - y + 1 / len(y)\n\n return x, y\n\n\ndef _to_staircase(x, y):\n \"\"\"Convert to formal ECDF.\"\"\"\n # Set up output arrays\n x_staircase = np.empty(2 * len(x))\n y_staircase = np.empty(2 * len(x))\n\n # y-values for steps\n y_staircase[0] = 0\n y_staircase[1::2] = y\n y_staircase[2::2] = y[:-1]\n\n # x- values for steps\n x_staircase[::2] = x\n x_staircase[1::2] = x\n\n return x_staircase, y_staircase\n\n\ndef _ecdf_conf_int(\n p,\n data,\n complementary=False,\n q_axis=\"x\",\n n_bs_reps=1000,\n ptiles=[2.5, 97.5],\n **kwargs,\n):\n \"\"\"Add an ECDF confidence interval to a plot.\n\n This method of computing a confidence interval can be thought of as\n computing confidence intervals of the *inverse* ECDF in the sense\n that we compute a confidence interval for the x-values for each of\n the discrete values of the ECDF. This is equivalent to computing\n bootstrap confidence intervals for the ECDF. Here is why.\n\n Imagine we draw bootstrap samples and for each we make an ECDF.\n Let's say we make 5 such ECDFs and we wish to compute a 60%\n confidence interval. (You can generalize to arbitrary number of\n ECDFs and confidence interval.)\n\n Each of these 5 ECDFs can be defined as starting at the same point\n and ending at the same point. Specifically, they start at\n x = min(data), y = 0 and end at x = max(data), y = 1. Furthermore,\n they are all monotonically increasing functions.\n\n Now, let's say we are constructing a confidence interval for the\n ECDF at position x. 
To do so, we put a dot on the second ECDF from\n the top at x and a dot on the second ECDF from the bottom. This\n gives us the middle 60% of ECDF values.\n\n Now, say we are constructing a confidence interval for the IECDF. We\n go to ECDF value y and we find the second ECDF from the left and\n place a dot on it. We also put a dot on the second ECDF from the\n right.\n\n Because all ECDFs are monotonic and start and end at the same\n points, the dot we put on the second-leftmost ECDF is also on the\n second curve from the top for some other x. Similarly, the\n second-rightmost ECDF is also on the second curve from the bottom\n for some other x. (You can sketch this out, and it becomes clear.)\n\n So, any dot we put on an ECDF for computing a confidence interval\n for an IECDF is also a dot we would put on an ECDF for computing a\n confidence of the ECDF. If we want to compute the confidence\n interval over the whole domain of x-values, we will cover the same\n set of points if we compute the confidence interval of the ECDF or\n the IECDF. So, we end up filling between the same two sets of\n curves.\n\n It turns out that the IECDF formulation is actually much easier to\n implement.\n \"\"\"\n data = utils._convert_data(data)\n\n bs_reps = np.array(\n [np.sort(np.random.choice(data, size=len(data))) for _ in range(n_bs_reps)]\n )\n\n # Compute the confidence intervals\n iecdf_low, iecdf_high = np.percentile(np.array(bs_reps), ptiles, axis=0)\n\n # y-values for ECDFs\n y = np.arange(1, len(data) + 1) / len(data)\n\n # Make them staircases\n x_low, y_plot = _to_staircase(x=iecdf_low, y=y)\n x_high, _ = _to_staircase(x=iecdf_high, y=y)\n\n if q_axis == \"y\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=1 - y_plot, y1=x_low, x2=1 - y_plot, y2=x_high, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=y_plot, y1=x_low, x2=y_plot, y2=x_high, **kwargs\n )\n elif q_axis == \"x\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=1 - y_plot, x2=x_high, y2=1 - y_plot, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=y_plot, x2=x_high, y2=y_plot, **kwargs\n )\n else:\n raise RuntimeError(\"`q_axis` must be either 'x' or 'y'.\")\n\n return p, patch\n\n\ndef _ecdf_y(data, complementary=False):\n \"\"\"Give y-values of an ECDF for an unsorted column in a data frame.\n\n Parameters\n ----------\n data : Pandas Series\n Series (or column of a DataFrame) from which to generate ECDF\n values\n complementary : bool, default False\n If True, give the ECCDF values.\n\n Returns\n -------\n output : Pandas Series\n Corresponding y-values for an ECDF when plotted with dots.\n\n Notes\n -----\n .. 
This only works for plotting an ECDF with points, not for formal\n or staircase ECDFs\n \"\"\"\n if complementary:\n return 1 - data.rank(method=\"first\") / len(data) + 1 / len(data)\n else:\n return data.rank(method=\"first\") / len(data)\n\n\ndef _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n markers,\n lines,\n patches,\n):\n \"\"\"Add a legend to a histogram or ECDF plot.\n \"\"\"\n if show_legend:\n if len(markers) > 0:\n if len(lines) > 0:\n if len(patches) > 0:\n items = [\n (label, [marker, line, patch])\n for label, marker, line, patch in zip(\n labels, markers, lines, patches\n )\n ]\n else:\n items = [\n (label, [marker, line])\n for label, marker, line in zip(labels, lines, markers)\n ]\n else:\n if len(patches) > 0:\n items = [\n (label, [marker, patch])\n for label, marker, patch in zip(labels, markers, patches)\n ]\n else:\n items = [\n (label, [marker]) for label, marker in zip(labels, markers)\n ]\n else:\n if len(patches) > 0:\n items = [\n (label, [line, patch])\n for label, line, patch in zip(labels, lines, patches)\n ]\n else:\n items = [(label, [line]) for label, line in zip(labels, lines)]\n\n if len(p.legend) == 1:\n for item in items:\n p.legend.items.append(\n bokeh.models.LegendItem(label=item[0], renderers=item[1])\n )\n else:\n if len(p.legend) > 1:\n warnings.warn(\n \"Ambiguous which legend to add glyphs to. Creating new legend.\"\n )\n if legend_location in [\"right\", \"left\", \"above\", \"below\"]:\n legend = bokeh.models.Legend(\n items=items, location=\"center\", orientation=legend_orientation\n )\n p.add_layout(legend, legend_location)\n elif (\n legend_location\n in [\n \"top_left\",\n \"top_center\",\n \"top_right\",\n \"center_right\",\n \"bottom_right\",\n \"bottom_center\",\n \"bottom_left\",\n \"center_left\",\n \"center\",\n ]\n or type(legend_location) == tuple\n ):\n legend = bokeh.models.Legend(\n items=items,\n location=legend_location,\n orientation=legend_orientation,\n )\n p.add_layout(legend, \"center\")\n else:\n raise RuntimeError(\n 'Invalid `legend_location`. Must be a 2-tuple specifying location or one of [\"right\", \"left\", \"above\", \"below\", \"top_left\", \"top_center\", \"top_right\", \"center_right\", \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\", \"center\"]'\n )\n\n p.legend.click_policy = click_policy\n\n return p\n\n\ndef _compute_histogram(data, bins, density):\n if type(bins) == str and bins == \"sqrt\":\n bins = int(np.ceil(np.sqrt(len(data))))\n elif type(bins) == str and bins == \"freedman-diaconis\":\n h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(len(data))\n if h == 0.0:\n bins = 3\n else:\n bins = int(np.ceil((data.max() - data.min()) / h))\n\n f, e = np.histogram(data, bins=bins, density=density)\n e0 = np.empty(2 * len(e))\n f0 = np.empty(2 * len(e))\n e0[::2] = e\n e0[1::2] = e\n f0[0] = 0\n f0[-1] = 0\n f0[1:-1:2] = f\n f0[2:-1:2] = f\n\n return e0, f0\n" ]
[ [ "numpy.unique", "numpy.sort", "numpy.percentile", "numpy.concatenate", "numpy.round", "numpy.array", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
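The `_ecdf_conf_int` docstring in the entry above reasons that bootstrap confidence bands for an ECDF can be obtained as percentiles of the inverse ECDF, i.e. percentiles of the sorted bootstrap samples taken column-wise. The short NumPy-only sketch below illustrates that idea on synthetic data; the toy sample, seed, and sample size are assumptions for illustration and are not part of the plotting module itself.

import numpy as np

rng = np.random.default_rng(0)                  # assumed seed, purely illustrative
data = rng.normal(size=50)                      # toy sample standing in for a DataFrame column

n_bs_reps = 1000
ptiles = [2.5, 97.5]                            # the module's default percentiles

# Each bootstrap replicate, once sorted, is the inverse ECDF evaluated at
# y = 1/n, 2/n, ..., 1, so column-wise percentiles give the band's x-positions.
bs_reps = np.sort(rng.choice(data, size=(n_bs_reps, data.size)), axis=1)
iecdf_low, iecdf_high = np.percentile(bs_reps, ptiles, axis=0)
y = np.arange(1, data.size + 1) / data.size

print(iecdf_low[:3], iecdf_high[:3], y[:3])

These per-column percentiles are the same x-positions the module converts to staircases before filling between the two curves.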
ccacNMorris/dat129_ccac
[ "587e35f7886d1e883ad988cbe2ec027eb9cf3043" ]
[ "icon.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 2 18:34:05 2020\n\n@author: leemshari\n\"\"\"\n# I wanted to create an n cubed icon\n#My goal was to get this to get this to print with & or @ signs but I was unable to \n#get this array to look decent with anything but integers.\n#I changed the dtype to str and the int_array[a,b] to @ but the array would be off \nimport numpy as np\n#function to pass in an array and tuple so that we can get image\ndef icon (tuple_list,int_array):\n#tuple is basically coordinates for the image to populate in\n for a,b in tuple_list:\n#I set the value that will populated at the coordinates to 3\n int_array[a,b] = 3\n\n return int_array\n\n#function to manipulate arrary and elements in it\ndef roll_rotate(a_tuple,a_array):\n#We want the array with the image to rotate and roll \n b_array = icon(a_tuple,a_array)\n#Numpy has different functions already built into it to manipulate arrays\n print(np.roll(b_array,1))\n \n print('')\n \n print(np.flipud(b_array)) \n \n#Inention was to scale array up to 15x15 array \ndef resize(b_tuple,b_array):\n#Need to grab image again so that it can be manipulated \n c_array = icon(b_tuple,b_array)\n#Output makes the icon unreadable unfortunately but this numpy function will make it bigger \n print(np.resize(c_array,(15,15)))\n\ndef main():\n#Tuple that will be passed into the functions above\n image = ((0,6),(0,7),(0,8),(1,8),(2,7),(2,8),(3,8),(4,1),(4,6),(4,7),(4,8),\n (5,1),(5,2),(5,3),(5,4),(5,5),(6,1),(6,5),(7,1),(7,5),(8,1),(8,5),(9,1),(9,5))\n#Array full of zeros that will be populated with 3s at correct coordinates\n image_array = np.zeros((10,10), dtype = int)\n#printing image with tuple and array passed in\n print(icon(image,image_array))\n \n print('')\n#Calling function to manipulate array \n roll_rotate(image,image_array)\n \n print('')\n#Calling function to scale array up \n resize(image,image_array)\n\nmain()\n\n\n" ]
[ [ "numpy.resize", "numpy.flipud", "numpy.zeros", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
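The comments in `icon.py` above mention that printing the icon with `@` characters kept breaking the array's alignment. One possible workaround, sketched here purely as an illustration (the coordinates and glyph choice are assumptions, not part of the original file), is to keep the array numeric and map values to characters only at print time.

import numpy as np

arr = np.zeros((10, 10), dtype=int)
arr[0, 6:9] = 3                      # a few of the coordinates from icon.py's tuple
arr[1, 8] = 3

# Render at print time: every cell is one character wide, so rows stay aligned.
for row in arr:
    print(''.join('@' if v else '.' for v in row))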
KaijuML/data2text-macro-plan-py
[ "17cebc5db507723d601d21a075adea59b0bd9ffb" ]
[ "onmt/translate/translation.py" ]
[ "\"\"\" Translation main class \"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport torch\nfrom onmt.inputters.text_dataset import TextMultiField\n\n\nclass TranslationBuilder(object):\n \"\"\"\n Build a word-based translation from the batch output\n of translator and the underlying dictionaries.\n\n Replacement based on \"Addressing the Rare Word\n Problem in Neural Machine Translation\" :cite:`Luong2015b`\n\n Args:\n data (onmt.inputters.Dataset): Data.\n fields (List[Tuple[str, torchtext.data.Field]]): data fields\n n_best (int): number of translations produced\n replace_unk (bool): replace unknown words using attention\n has_tgt (bool): will the batch have gold targets\n \"\"\"\n\n def __init__(self, data, fields, n_best=1, replace_unk=False,\n has_tgt=False, phrase_table=\"\"):\n self.data = data\n self.fields = fields\n self._has_text_src = isinstance(\n dict(self.fields)[\"src\"], TextMultiField)\n self.n_best = n_best\n self.replace_unk = replace_unk\n self.phrase_table = phrase_table\n self.has_tgt = has_tgt\n\n def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):\n tgt_field = dict(self.fields)[\"tgt\"].base_field\n eos_idx = tgt_field.vocab.stoi[tgt_field.eos_token]\n vocab = tgt_field.vocab\n tokens = []\n for tok in pred:\n tokens.append(str(tok.item()))\n if tokens[-1] == eos_idx:\n tokens = tokens[:-1]\n break\n if self.replace_unk and attn is not None and src is not None:\n for i in range(len(tokens)):\n if tokens[i] == tgt_field.unk_token:\n _, max_index = attn[i][:len(src_raw)].max(0)\n tokens[i] = src_raw[max_index.item()]\n if self.phrase_table != \"\":\n with open(self.phrase_table, \"r\") as f:\n for line in f:\n if line.startswith(src_raw[max_index.item()]):\n tokens[i] = line.split('|||')[1].strip()\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert(len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, pred_score, attn, gold_score, indices = list(zip(\n *sorted(zip(translation_batch[\"predictions\"],\n translation_batch[\"scores\"],\n translation_batch[\"attention\"],\n translation_batch[\"gold_score\"],\n batch.indices.data),\n key=lambda x: x[-1])))\n\n # Sorting\n inds, perm = torch.sort(batch.indices)\n if self._has_text_src:\n src = batch.src[0][:, :, 0].index_select(1, perm)\n else:\n src = None\n tgt = batch.tgt[:, :, 0].index_select(1, perm) \\\n if self.has_tgt else None\n\n translations = []\n for b in range(batch_size):\n if self._has_text_src:\n src_vocab = self.data.src_vocabs[inds[b]] \\\n if self.data.src_vocabs else None\n src_raw = self.data.examples[inds[b]].src[0]\n else:\n src_vocab = None\n src_raw = None\n pred_sents = [self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n preds[b][n], attn[b][n])\n for n in range(self.n_best)]\n gold_sent = None\n if tgt is not None:\n gold_sent = self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n tgt[1:, b] if tgt is not None else None, None)\n\n translation = Translation(\n src[:, b] if src is not None else None,\n src_raw, pred_sents, attn[b], pred_score[b],\n gold_sent, gold_score[b]\n )\n translations.append(translation)\n\n return translations\n\n\nclass Translation(object):\n \"\"\"Container for a translated sentence.\n\n Attributes:\n src (LongTensor): Source word IDs.\n src_raw (List[str]): Raw source words.\n pred_sents (List[List[str]]): Words from the 
n-best translations.\n pred_scores (List[List[float]]): Log-probs of n-best translations.\n attns (List[FloatTensor]) : Attention distribution for each\n translation.\n gold_sent (List[str]): Words from gold translation.\n gold_score (List[float]): Log-prob of gold translation.\n \"\"\"\n\n __slots__ = [\"src\", \"src_raw\", \"pred_sents\", \"attns\", \"pred_scores\",\n \"gold_sent\", \"gold_score\"]\n\n def __init__(self, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n \"\"\"\n Log translation.\n \"\"\"\n\n msg = ['\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)]\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n msg.append('PRED {}: {}\\n'.format(sent_number, pred_sent))\n msg.append(\"PRED SCORE: {:.4f}\\n\".format(best_score))\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n msg.append('GOLD {}: {}\\n'.format(sent_number, tgt_sent))\n msg.append((\"GOLD SCORE: {:.4f}\\n\".format(self.gold_score)))\n if len(self.pred_sents) > 1:\n msg.append('\\nBEST HYP:\\n')\n for score, sent in zip(self.pred_scores, self.pred_sents):\n msg.append(\"[{:.4f}] {}\\n\".format(score, sent))\n\n return \"\".join(msg)\n" ]
[ [ "torch.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
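`TranslationBuilder.from_batch` in the entry above uses `torch.sort` on the batch indices to recover a permutation and then `index_select` to put batch-sorted tensors back into example order. A minimal standalone sketch of that pattern follows; the toy tensors are assumptions, not OpenNMT data.

import torch

indices = torch.tensor([2, 0, 3, 1])      # example indices as they appear in the batch
src = torch.arange(12).view(3, 4)         # fake (seq_len, batch) tensor, one column per example

inds, perm = torch.sort(indices)          # perm[i] = batch column holding the i-th smallest index
src_in_order = src.index_select(1, perm)  # columns now follow ascending example index

print(inds)          # tensor([0, 1, 2, 3])
print(src_in_order)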
jonathansick/skyoffset
[ "369f54d8a237f48cd56f550e80bf1d39b355bfcd" ]
[ "skyoffset/diffplot.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nPlot distributions of difference pixels.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport astropy.io.fits\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport matplotlib.gridspec as gridspec\n\n\ndef plot_diffs(mosaic_doc, plot_dir):\n \"\"\"Make diff pixels histogram plots for all differences in the given\n mosaic document.\n \n Parameters\n ----------\n mosaic_doc : dict\n The document from MosaicDB for this mosaic.\n plot_dir : str\n Directory to save plots to.\n \"\"\"\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n for pair_key, diff in mosaic_doc['couplings']['diff_paths'].iteritems():\n median = mosaic_doc['couplings']['diffs'][pair_key]\n sigma = mosaic_doc['couplings']['sigmas'][pair_key]\n plot_path = os.path.join(plot_dir, pair_key)\n plot_diff(diff, median, sigma, plot_path)\n\n\ndef plot_diff(diff_path, median, sigma, plot_path):\n \"\"\"Plot histogram of the difference image.\"\"\"\n fits = astropy.io.fits.open(diff_path)\n pixels = fits[0].data\n pixels = pixels[np.isfinite(pixels)].ravel()\n\n fig = Figure(figsize=(3.5, 3.5))\n canvas = FigureCanvas(fig)\n gs = gridspec.GridSpec(1, 1, left=0.15, right=0.95, bottom=0.15, top=0.95,\n wspace=None, hspace=None, width_ratios=None, height_ratios=None)\n ax = fig.add_subplot(gs[0])\n ax.hist(pixels, 1000, histtype='stepfilled',\n edgecolor='None', facecolor='dodgerblue')\n ax.axvline(median, ls='-', c='k', lw=2)\n ax.axvline(median - sigma, ls='--', c='k', lw=1)\n ax.axvline(median + sigma, ls='--', c='k', lw=1)\n ax.text(0.1, 0.9, r\"$%.2f \\pm %.2f$\" % (median, sigma),\n ha='left', va='top',\n transform=ax.transAxes)\n ax.set_xlim(median - 3 * sigma, median + 3 * sigma)\n gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)\n canvas.print_figure(plot_path + \".pdf\", format=\"pdf\")\n\n fits.close()\n" ]
[ [ "matplotlib.backends.backend_agg.FigureCanvasAgg", "matplotlib.gridspec.GridSpec", "numpy.isfinite", "matplotlib.figure.Figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
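`plot_diff` in the entry above builds its figure with the object-oriented Agg pattern (`Figure` plus `FigureCanvasAgg`) so histograms can be written to disk without a GUI backend. Below is a minimal, self-contained sketch of that pattern using synthetic pixels; the fake data and the output filename are assumptions for illustration only.

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

pixels = np.random.normal(loc=0.1, scale=0.5, size=10000)   # stand-in for difference pixels
median, sigma = np.median(pixels), np.std(pixels)

fig = Figure(figsize=(3.5, 3.5))
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.hist(pixels, 200, histtype='stepfilled', edgecolor='None', facecolor='dodgerblue')
ax.axvline(median, ls='-', c='k', lw=2)
ax.axvline(median - sigma, ls='--', c='k', lw=1)
ax.axvline(median + sigma, ls='--', c='k', lw=1)
canvas.print_figure('diff_hist.pdf', format='pdf')           # hypothetical output path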
Alenichel/CodiglioniNichelini_recsys-polimi-2019
[ "ca97e71da7612644833c20155a0d4d3a57850527", "ca97e71da7612644833c20155a0d4d3a57850527" ]
[ "src/run_utils.py", "src/Base/Evaluation/metrics.py" ]
[ "#!/usr/bin/env python3\n\nimport os\nfrom enum import Enum\nimport numpy as np\nimport scipy.sparse as sps\nfrom sklearn.preprocessing import LabelEncoder\nfrom tqdm import tqdm, trange\nfrom cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython\nfrom csv_utils import load_csv, export_csv\nfrom multiprocessing import Pool\nfrom collections import namedtuple\n\n\nclass DataFiles:\n TRAIN = 'data/data_train.csv'\n TARGET_USERS_TEST = 'data/data_target_users_test.csv'\n ICM_ASSET = 'data/data_ICM_asset.csv'\n ICM_PRICE = 'data/data_ICM_price.csv'\n ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'\n UCM_AGE = 'data/data_UCM_age.csv'\n UCM_REGION = 'data/data_UCM_region.csv'\n CLUSTERS = 'data/user_clustered.csv'\n\n\nclass SplitType(Enum):\n PROBABILISTIC = 1\n LOO = 2\n LOO_CYTHON = 3\n\n\ndef set_seed(seed):\n print('seed = {0}'.format(seed))\n os.environ['RECSYS_SEED'] = str(seed)\n np.random.seed(seed)\n\n\ndef get_seed():\n env = os.getenv('RECSYS_SEED')\n if env:\n return int(env)\n return -1\n\n\ndef build_urm():\n urm_data = load_csv(DataFiles.TRAIN)\n urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]\n users, items, ratings = map(np.array, zip(*urm_data))\n return sps.csr_matrix((ratings, (users, items)))\n\n\ndef clusterize():\n data = load_csv(DataFiles.CLUSTERS)\n data = [[int(row[i]) for i in range(len(row))] for row in data]\n _, user_ids, cluster_ids = map(list, zip(*data))\n assert len(user_ids) == len(cluster_ids)\n data_len = len(user_ids)\n clusters = dict()\n for n in range(max(cluster_ids) + 1):\n clusters[n] = list()\n for i in range(data_len):\n user_id = user_ids[i]\n cluster_id = cluster_ids[i]\n clusters[cluster_id].append(user_id)\n return clusters\n\n\ndef get_cold_users(urm_train, return_warm=False):\n profile_lengths = np.ediff1d(urm_train.indptr)\n cold_users = np.where(profile_lengths == 0)[0]\n if return_warm:\n warm_users = np.where(profile_lengths > 0)[0]\n return cold_users, warm_users\n return cold_users\n\n\ndef build_price_icm(n_items):\n price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)\n price_icm_values = __encode_values(price_icm_values)\n n_features = max(price_icm_values) + 1\n shape = (n_items, n_features)\n ones = np.ones(len(price_icm_values))\n price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)\n return price_icm\n\n\ndef build_asset_icm(n_items):\n asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)\n asset_icm_values += 1\n asset_icm_values = __encode_values(asset_icm_values)\n n_features = max(asset_icm_values) + 1\n shape = (n_items, n_features)\n ones = np.ones(len(asset_icm_values))\n asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)\n return asset_icm\n\n\ndef build_subclass_icm(n_items):\n subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)\n n_features = max(subclass_icm_features) + 1\n shape = (n_items, n_features)\n subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)\n return subclass_icm\n\n\ndef build_icm(n_items):\n price_icm = build_price_icm(n_items)\n asset_icm = build_asset_icm(n_items)\n subclass_icm = build_subclass_icm(n_items)\n return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()\n\n\ndef build_age_ucm(n_users):\n 
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)\n n_features = max(age_ucm_features) + 1\n shape = (n_users, n_features)\n age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)\n return age_ucm\n\n\ndef build_region_ucm(n_users):\n region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)\n n_features = max(region_ucm_features) + 1\n shape = (n_users, n_features)\n region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)\n return region_ucm\n\n\ndef build_ucm(n_users):\n age_ucm = build_age_ucm(n_users)\n region_ucm = build_region_ucm(n_users)\n return sps.hstack((age_ucm, region_ucm))\n\n\ndef build_target_users():\n target_users = load_csv(DataFiles.TARGET_USERS_TEST)\n return [int(x[0]) for x in target_users]\n\n\ndef build_all_matrices():\n urm = build_urm()\n n_users, n_items = urm.shape\n icm = build_icm(n_items)\n ucm = build_ucm(n_users)\n target_users = build_target_users()\n return urm, icm, ucm, target_users\n\n\ndef train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):\n if split_type == SplitType.PROBABILISTIC:\n return __train_test_split(urm, split)\n elif split_type == SplitType.LOO:\n return __train_test_loo_split(urm)\n elif split_type == SplitType.LOO_CYTHON:\n return __train_test_loo_split_cython(urm)\n\n\ndef evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):\n from evaluation import evaluate_algorithm\n if cython:\n if verbose:\n print('Ignoring argument excluded_users')\n from cython_modules.evaluation import evaluate_cython\n if verbose:\n print('Using Cython evaluation')\n return evaluate_cython(recommender, urm_test, verbose=verbose)\n else:\n return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)\n\n\ndef evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):\n assert type(urm_tests) == list\n assert len(urm_tests) >= 1\n assert type(n_processes) == int\n if n_processes == 0:\n n_processes = len(urm_tests)\n with Pool(processes=n_processes) as pool:\n args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]\n maps = pool.starmap(evaluate, args, chunksize=1)\n maps = [x['MAP'] for x in maps]\n return np.mean(maps)\n\n\ndef export(target_users, recommender):\n print('Exporting recommendations...')\n data = list()\n for u_id in tqdm(target_users, desc='Export'):\n data.append((u_id, recommender.recommend(u_id, at=10)))\n export_csv(('user_id', 'item_list'), data)\n print('OK')\n\n\ndef __train_test_split(urm, split=0.8):\n print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))\n urm = urm.tocoo()\n num_interactions = urm.nnz\n shape = urm.shape\n train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])\n urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)\n urm_train = urm_train.tocsr()\n test_mask = np.logical_not(train_mask)\n urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)\n urm_test = urm_test.tocsr()\n return urm_train, urm_test\n\n\ndef __train_test_loo_split(urm):\n print('Using LeaveOneOut')\n urm = urm.tocsr()\n num_users = urm.shape[0]\n num_items = urm.shape[1]\n urm_train = urm.copy()\n urm_test = 
sps.lil_matrix((num_users, num_items), dtype=int)\n for user_id in trange(num_users, desc='LeaveOneOut'):\n start_pos = urm_train.indptr[user_id]\n end_pos = urm_train.indptr[user_id + 1]\n user_profile = urm_train.indices[start_pos:end_pos]\n if user_profile.size > 0:\n item_id = np.random.choice(user_profile, 1)\n urm_train[user_id, item_id] = 0\n urm_test[user_id, item_id] = 1\n urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)\n urm_train.eliminate_zeros()\n urm_test.eliminate_zeros()\n return urm_train, urm_test\n\n\ndef __load_icm_csv(filename, third_type):\n data = load_csv(filename)\n data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]\n items, features, values = map(np.array, zip(*data))\n return items, features, values\n\n\ndef __encode_values(values):\n le = LabelEncoder()\n le.fit(values)\n return le.transform(values)\n\n\ngroup_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])\n\n\ndef user_segmenter(urm_train, n_groups=10):\n groups = dict()\n users = dict()\n profile_length = np.ediff1d(urm_train.indptr)\n group_size = int(profile_length.size/n_groups)\n sorted_users = np.argsort(profile_length)\n for group_id in range(n_groups):\n start_pos = group_id * group_size\n end_pos = min((group_id + 1) * group_size, len(profile_length))\n users_in_group = sorted_users[start_pos:end_pos]\n for user in users_in_group:\n users[user] = group_id\n users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)\n users_not_in_group = sorted_users[users_not_in_group_flag]\n groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)\n return groups, users\n\n\ndef multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):\n urm, icm, ucm, target_users = build_all_matrices()\n trains = list()\n tests = list()\n for seed in seeds:\n set_seed(seed)\n urm_train, urm_test = train_test_split(urm)\n trains.append(urm_train)\n tests.append(urm_test)\n return trains, tests, seeds\n\n\nif __name__ == '__main__':\n from evaluation import evaluate_by_cluster\n from cf import ItemCFKNNRecommender\n from basic_recommenders import TopPopRecommender\n\n np.random.seed(42)\n urm, icm, ucm, target_users = build_all_matrices()\n urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)\n top_pop = TopPopRecommender()\n top_pop.fit(urm_train)\n cf = ItemCFKNNRecommender(fallback_recommender=top_pop)\n cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')\n evaluate_by_cluster(cf, urm_test, clusterise())", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Maurizio Ferrari Dacrema, Massimo Quadrana\n\"\"\"\n\n\nimport numpy as np\nimport unittest\n\nclass Metrics_Object(object):\n \"\"\"\n Abstract class that should be used as superclass of all metrics requiring an object, therefore a state, to be computed\n \"\"\"\n def __init__(self):\n pass\n\n def add_recommendations(self, recommended_items_ids):\n raise NotImplementedError()\n\n def get_metric_value(self):\n raise NotImplementedError()\n\n def merge_with_other(self, other_metric_object):\n raise NotImplementedError()\n\n\n\nclass Coverage_Item(Metrics_Object):\n \"\"\"\n Item coverage represents the percentage of the overall items which were recommended\n https://gab41.lab41.org/recommender-systems-its-not-all-about-the-accuracy-562c7dceeaff\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Coverage_Item, self).__init__()\n self.recommended_mask = np.zeros(n_items, 
dtype=np.bool)\n self.n_ignore_items = len(ignore_items)\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_mask[recommended_items_ids] = True\n\n def get_metric_value(self):\n return self.recommended_mask.sum()/(len(self.recommended_mask)-self.n_ignore_items)\n\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Coverage_Item, \"Coverage_Item: attempting to merge with a metric object of different type\"\n\n self.recommended_mask = np.logical_or(self.recommended_mask, other_metric_object.recommended_mask)\n\n\n\n\nclass Coverage_User(Metrics_Object):\n \"\"\"\n User coverage represents the percentage of the overall users for which we can make recommendations.\n If there is at least one recommendation the user is considered as covered\n https://gab41.lab41.org/recommender-systems-its-not-all-about-the-accuracy-562c7dceeaff\n \"\"\"\n\n def __init__(self, n_users, ignore_users):\n super(Coverage_User, self).__init__()\n self.users_mask = np.zeros(n_users, dtype=np.bool)\n self.n_ignore_users = len(ignore_users)\n\n def add_recommendations(self, recommended_items_ids, user_id):\n self.users_mask[user_id] = len(recommended_items_ids)>0\n\n def get_metric_value(self):\n return self.users_mask.sum()/(len(self.users_mask)-self.n_ignore_users)\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Coverage_User, \"Coverage_User: attempting to merge with a metric object of different type\"\n\n self.users_mask = np.logical_or(self.users_mask, other_metric_object.users_mask)\n\n\n\n\nclass MAP(Metrics_Object):\n \"\"\"\n Mean Average Precision, defined as the mean of the AveragePrecision over all users\n\n \"\"\"\n\n def __init__(self):\n super(MAP, self).__init__()\n self.cumulative_AP = 0.0\n self.n_users = 0\n\n def add_recommendations(self, is_relevant, pos_items):\n self.cumulative_AP += average_precision(is_relevant, pos_items)\n self.n_users += 1\n\n def get_metric_value(self):\n return self.cumulative_AP/self.n_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is MAP, \"MAP: attempting to merge with a metric object of different type\"\n\n self.cumulative_AP += other_metric_object.cumulative_AP\n self.n_users += other_metric_object.n_users\n\n\n\n\n\nclass MRR(Metrics_Object):\n \"\"\"\n Mean Reciprocal Rank, defined as the mean of the Reciprocal Rank over all users\n\n \"\"\"\n\n def __init__(self):\n super(MRR, self).__init__()\n self.cumulative_RR = 0.0\n self.n_users = 0\n\n def add_recommendations(self, is_relevant):\n self.cumulative_RR += rr(is_relevant)\n self.n_users += 1\n\n def get_metric_value(self):\n return self.cumulative_RR/self.n_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is MAP, \"MRR: attempting to merge with a metric object of different type\"\n\n self.cumulative_RR += other_metric_object.cumulative_RR\n self.n_users += other_metric_object.n_users\n\n\n\n\n\nclass Gini_Diversity(Metrics_Object):\n \"\"\"\n Gini diversity index, computed from the Gini Index but with inverted range, such that high values mean higher diversity\n This implementation ignores zero-occurrence items\n\n # From https://github.com/oliviaguest/gini\n # based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif\n # from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n #\n # 
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.459.8174&rep=rep1&type=pdf\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Gini_Diversity, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n recommended_counter = self.recommended_counter.copy()\n\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n recommended_counter_mask[recommended_counter == 0] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n n_items = len(recommended_counter)\n\n recommended_counter_sorted = np.sort(recommended_counter) # values must be sorted\n index = np.arange(1, n_items+1) # index per array element\n\n #gini_index = (np.sum((2 * index - n_items - 1) * recommended_counter_sorted)) / (n_items * np.sum(recommended_counter_sorted))\n gini_diversity = 2*np.sum((n_items + 1 - index)/(n_items+1) * recommended_counter_sorted/np.sum(recommended_counter_sorted))\n\n return gini_diversity\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Gini_Diversity, \"Gini_Diversity: attempting to merge with a metric object of different type\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\nclass Diversity_Herfindahl(Metrics_Object):\n \"\"\"\n The Herfindahl index is also known as Concentration index, it is used in economy to determine whether the market quotas\n are such that an excessive concentration exists. It is here used as a diversity index, if high means high diversity.\n\n It is known to have a small value range in recommender systems, between 0.9 and 1.0\n\n The Herfindahl index is a function of the square of the probability an item has been recommended to any user, hence\n The Herfindahl index is equivalent to MeanInterList diversity as they measure the same quantity.\n\n # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.459.8174&rep=rep1&type=pdf\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Diversity_Herfindahl, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n recommended_counter = self.recommended_counter.copy()\n\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n if recommended_counter.sum() != 0:\n herfindahl_index = 1 - np.sum((recommended_counter / recommended_counter.sum()) ** 2)\n else:\n herfindahl_index = np.nan\n\n return herfindahl_index\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Diversity_Herfindahl, \"Diversity_Herfindahl: attempting to merge with a metric object of different type\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\n\nclass Shannon_Entropy(Metrics_Object):\n \"\"\"\n Shannon Entropy is a well known metric to measure the amount of information of a certain string of data.\n Here is applied to the 
global number of times an item has been recommended.\n\n It has a lower bound and can reach values over 12.0 for random recommenders.\n A high entropy means that the distribution is random uniform across all users.\n\n Note that while a random uniform distribution\n (hence all items with SIMILAR number of occurrences)\n will be highly diverse and have high entropy, a perfectly uniform distribution\n (hence all items with EXACTLY IDENTICAL number of occurrences)\n will have 0.0 entropy while being the most diverse possible.\n\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Shannon_Entropy, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n assert np.all(self.recommended_counter >= 0.0), \"Shannon_Entropy: self.recommended_counter contains negative counts\"\n\n recommended_counter = self.recommended_counter.copy()\n\n # Ignore from the computation both ignored items and items with zero occurrence.\n # Zero occurrence items will have zero probability and will not change the result, butt will generate nans if used in the log\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n recommended_counter_mask[recommended_counter == 0] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n n_recommendations = recommended_counter.sum()\n\n recommended_probability = recommended_counter/n_recommendations\n\n shannon_entropy = -np.sum(recommended_probability * np.log2(recommended_probability))\n\n return shannon_entropy\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Gini_Diversity, \"Shannon_Entropy: attempting to merge with a metric object of different type\"\n\n assert np.all(self.recommended_counter >= 0.0), \"Shannon_Entropy: self.recommended_counter contains negative counts\"\n assert np.all(other_metric_object.recommended_counter >= 0.0), \"Shannon_Entropy: other.recommended_counter contains negative counts\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\n\nimport scipy.sparse as sps\n\n\n\nclass Novelty(Metrics_Object):\n \"\"\"\n Novelty measures how \"novel\" a recommendation is in terms of how popular the item was in the train set.\n\n Due to this definition, the novelty of a cold item (i.e. with no interactions in the train set) is not defined,\n in this implementation cold items are ignored and their contribution to the novelty is 0.\n\n A recommender with high novelty will be able to recommend also long queue (i.e. 
unpopular) items.\n\n Mean self-information (Zhou 2010)\n \"\"\"\n\n def __init__(self, URM_train):\n super(Novelty, self).__init__()\n\n URM_train = sps.csc_matrix(URM_train)\n URM_train.eliminate_zeros()\n self.item_popularity = np.ediff1d(URM_train.indptr)\n\n self.novelty = 0.0\n self.n_evaluated_users = 0\n self.n_items = len(self.item_popularity)\n self.n_interactions = self.item_popularity.sum()\n\n\n def add_recommendations(self, recommended_items_ids):\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids)>0:\n recommended_items_popularity = self.item_popularity[recommended_items_ids]\n\n probability = recommended_items_popularity/self.n_interactions\n probability = probability[probability!=0]\n\n self.novelty += np.sum(-np.log2(probability)/self.n_items)\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.novelty/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Novelty, \"Novelty: attempting to merge with a metric object of different type\"\n\n self.novelty = self.novelty + other_metric_object.novelty\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\n\n\n\nclass AveragePopularity(Metrics_Object):\n \"\"\"\n Average popularity the recommended items have in the train data.\n The popularity is normalized by setting as 1 the item with the highest popularity in the train data\n \"\"\"\n\n def __init__(self, URM_train):\n super(AveragePopularity, self).__init__()\n\n URM_train = sps.csc_matrix(URM_train)\n URM_train.eliminate_zeros()\n item_popularity = np.ediff1d(URM_train.indptr)\n\n\n self.cumulative_popularity = 0.0\n self.n_evaluated_users = 0\n self.n_items = URM_train.shape[0]\n self.n_interactions = item_popularity.sum()\n\n self.item_popularity_normalized = item_popularity/item_popularity.max()\n\n\n def add_recommendations(self, recommended_items_ids):\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids)>0:\n recommended_items_popularity = self.item_popularity_normalized[recommended_items_ids]\n\n self.cumulative_popularity += np.sum(recommended_items_popularity)/len(recommended_items_ids)\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.cumulative_popularity/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Novelty, \"AveragePopularity: attempting to merge with a metric object of different type\"\n\n self.cumulative_popularity = self.cumulative_popularity + other_metric_object.cumulative_popularity\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\n\n\nclass Diversity_similarity(Metrics_Object):\n \"\"\"\n Intra list diversity computes the diversity of items appearing in the recommendations received by each single user, by using an item_diversity_matrix.\n\n It can be used, for example, to compute the diversity in terms of features for a collaborative recommender.\n\n A content-based recommender will have low IntraList diversity if that is computed on the same features the recommender uses.\n A TopPopular recommender may exhibit high IntraList diversity.\n\n \"\"\"\n\n def __init__(self, item_diversity_matrix):\n super(Diversity_similarity, self).__init__()\n\n assert np.all(item_diversity_matrix >= 0.0) and np.all(item_diversity_matrix <= 1.0), \\\n \"item_diversity_matrix contains value greated than 1.0 or lower than 0.0\"\n\n 
self.item_diversity_matrix = item_diversity_matrix\n\n self.n_evaluated_users = 0\n self.diversity = 0.0\n\n\n def add_recommendations(self, recommended_items_ids):\n\n current_recommended_items_diversity = 0.0\n\n for item_index in range(len(recommended_items_ids)-1):\n\n item_id = recommended_items_ids[item_index]\n\n item_other_diversity = self.item_diversity_matrix[item_id, recommended_items_ids]\n item_other_diversity[item_index] = 0.0\n\n current_recommended_items_diversity += np.sum(item_other_diversity)\n\n\n self.diversity += current_recommended_items_diversity/(len(recommended_items_ids)*(len(recommended_items_ids)-1))\n\n self.n_evaluated_users += 1\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.diversity/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Diversity_similarity, \"Diversity: attempting to merge with a metric object of different type\"\n\n self.diversity = self.diversity + other_metric_object.diversity\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\nclass Diversity_MeanInterList(Metrics_Object):\n \"\"\"\n MeanInterList diversity measures the uniqueness of different users' recommendation lists.\n\n It can be used to measure how \"diversified\" are the recommendations different users receive.\n\n While the original proposal called this metric \"Personalization\", we do not use this name since the highest MeanInterList diversity\n is exhibited by a non personalized Random recommender.\n\n It can be demonstrated that this metric does not require to compute the common items all possible couples of users have in common\n but rather it is only sensitive to the total amount of time each item has been recommended.\n\n MeanInterList diversity is a function of the square of the probability an item has been recommended to any user, hence\n MeanInterList diversity is equivalent to the Herfindahl index as they measure the same quantity.\n\n A TopPopular recommender that does not remove seen items will have 0.0 MeanInterList diversity.\n\n\n pag. 3, http://www.pnas.org/content/pnas/107/10/4511.full.pdf\n\n @article{zhou2010solving,\n title={Solving the apparent diversity-accuracy dilemma of recommender systems},\n author={Zhou, Tao and Kuscsik, Zolt{\\'a}n and Liu, Jian-Guo and Medo, Mat{\\'u}{\\v{s}} and Wakeling, Joseph Rushton and Zhang, Yi-Cheng},\n journal={Proceedings of the National Academy of Sciences},\n volume={107},\n number={10},\n pages={4511--4515},\n year={2010},\n publisher={National Acad Sciences}\n }\n\n # The formula is diversity_cumulative += 1 - common_recommendations(user1, user2)/cutoff\n # for each couple of users, except the diagonal. It is VERY computationally expensive\n # We can move the 1 and cutoff outside of the summation. Remember to exclude the diagonal\n # co_counts = URM_predicted.dot(URM_predicted.T)\n # co_counts[np.arange(0, n_user, dtype=np.int):np.arange(0, n_user, dtype=np.int)] = 0\n # diversity = (n_user**2 - n_user) - co_counts.sum()/self.cutoff\n\n # If we represent the summation of co_counts separating it for each item, we will have:\n # co_counts.sum() = co_counts_item1.sum() + co_counts_item2.sum() ...\n # If we know how many times an item has been recommended, co_counts_item1.sum() can be computed as how many couples of\n # users have item1 in common. 
If item1 has been recommended n times, the number of couples is n*(n-1)\n # Therefore we can compute co_counts.sum() value as:\n # np.sum(np.multiply(item-occurrence, item-occurrence-1))\n\n # The naive implementation URM_predicted.dot(URM_predicted.T) might require an hour of computation\n # The last implementation has a negligible computational time even for very big datasets\n\n \"\"\"\n\n def __init__(self, n_items, cutoff):\n super(Diversity_MeanInterList, self).__init__()\n\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n\n self.n_evaluated_users = 0\n self.n_items = n_items\n self.diversity = 0.0\n self.cutoff = cutoff\n\n\n def add_recommendations(self, recommended_items_ids):\n\n assert len(recommended_items_ids) <= self.cutoff, \"Diversity_MeanInterList: recommended list is contains more elements than cutoff\"\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n\n\n\n def get_metric_value(self):\n\n # Requires to compute the number of common elements for all couples of users\n if self.n_evaluated_users == 0:\n return 1.0\n\n cooccurrences_cumulative = np.sum(self.recommended_counter**2) - self.n_evaluated_users*self.cutoff\n\n # All user combinations except diagonal\n all_user_couples_count = self.n_evaluated_users**2 - self.n_evaluated_users\n\n diversity_cumulative = all_user_couples_count - cooccurrences_cumulative/self.cutoff\n\n self.diversity = diversity_cumulative/all_user_couples_count\n\n return self.diversity\n\n\n def get_theoretical_max(self):\n\n global_co_occurrence_count = (self.n_evaluated_users*self.cutoff)**2/self.n_items - self.n_evaluated_users*self.cutoff\n\n mild = 1 - 1/(self.n_evaluated_users**2 - self.n_evaluated_users)*(global_co_occurrence_count/self.cutoff)\n\n return mild\n\n def merge_with_other(self, other_metric_object):\n\n assert other_metric_object is Diversity_MeanInterList, \"Diversity_MeanInterList: attempting to merge with a metric object of different type\"\n\n assert np.all(self.recommended_counter >= 0.0), \"Diversity_MeanInterList: self.recommended_counter contains negative counts\"\n assert np.all(other_metric_object.recommended_counter >= 0.0), \"Diversity_MeanInterList: other.recommended_counter contains negative counts\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n self.n_evaluated_users += other_metric_object.n_evaluated_users\n\n\n\n\n\ndef roc_auc(is_relevant):\n\n ranks = np.arange(len(is_relevant))\n pos_ranks = ranks[is_relevant]\n neg_ranks = ranks[~is_relevant]\n auc_score = 0.0\n\n if len(neg_ranks) == 0:\n return 1.0\n\n if len(pos_ranks) > 0:\n for pos_pred in pos_ranks:\n auc_score += np.sum(pos_pred < neg_ranks, dtype=np.float32)\n auc_score /= (pos_ranks.shape[0] * neg_ranks.shape[0])\n\n assert 0 <= auc_score <= 1, auc_score\n return auc_score\n\n\n\ndef arhr(is_relevant):\n # average reciprocal hit-rank (ARHR) of all relevant items\n # As opposed to MRR, ARHR takes into account all relevant items and not just the first\n # pag 17\n # http://glaros.dtc.umn.edu/gkhome/fetch/papers/itemrsTOIS04.pdf\n # https://emunix.emich.edu/~sverdlik/COSC562/ItemBasedTopTen.pdf\n\n p_reciprocal = 1/np.arange(1,len(is_relevant)+1, 1.0, dtype=np.float64)\n arhr_score = is_relevant.dot(p_reciprocal)\n\n #assert 0 <= arhr_score <= p_reciprocal.sum(), \"arhr_score {} should be between 0 and {}\".format(arhr_score, p_reciprocal.sum())\n assert not np.isnan(arhr_score), \"ARHR is NaN\"\n return arhr_score\n\n\n\ndef 
precision(is_relevant):\n\n if len(is_relevant) == 0:\n precision_score = 0.0\n else:\n precision_score = np.sum(is_relevant, dtype=np.float32) / len(is_relevant)\n\n assert 0 <= precision_score <= 1, precision_score\n return precision_score\n\n\ndef precision_recall_min_denominator(is_relevant, n_test_items):\n\n if len(is_relevant) == 0:\n precision_score = 0.0\n else:\n precision_score = np.sum(is_relevant, dtype=np.float32) / min(n_test_items, len(is_relevant))\n\n assert 0 <= precision_score <= 1, precision_score\n return precision_score\n\n\ndef rmse(all_items_predicted_ratings, relevant_items, relevant_items_rating):\n\n # Important, some items will have -np.inf score and are treated as if they did not exist\n\n # RMSE with test items\n relevant_items_error = (all_items_predicted_ratings[relevant_items]-relevant_items_rating)**2\n\n finite_prediction_mask = np.isfinite(relevant_items_error)\n\n if finite_prediction_mask.sum() == 0:\n rmse = np.nan\n\n else:\n relevant_items_error = relevant_items_error[finite_prediction_mask]\n\n squared_error = np.sum(relevant_items_error)\n\n # # Second the RMSE against all non-test items assumed having true rating 0\n # # In order to avoid the need of explicitly indexing all non-relevant items, use a difference\n # squared_error += np.sum(all_items_predicted_ratings[np.isfinite(all_items_predicted_ratings)]**2) - \\\n # np.sum(all_items_predicted_ratings[relevant_items][np.isfinite(all_items_predicted_ratings[relevant_items])]**2)\n\n mean_squared_error = squared_error/finite_prediction_mask.sum()\n rmse = np.sqrt(mean_squared_error)\n\n return rmse\n\n\ndef recall(is_relevant, pos_items):\n\n recall_score = np.sum(is_relevant, dtype=np.float32) / pos_items.shape[0]\n\n assert 0 <= recall_score <= 1, recall_score\n return recall_score\n\n\ndef rr(is_relevant):\n # reciprocal rank of the FIRST relevant item in the ranked list (0 if none)\n\n ranks = np.arange(1, len(is_relevant) + 1)[is_relevant]\n\n if len(ranks) > 0:\n return 1. 
/ ranks[0]\n else:\n return 0.0\n\n\ndef average_precision(is_relevant, pos_items):\n\n if len(is_relevant) == 0:\n a_p = 0.0\n else:\n p_at_k = is_relevant * np.cumsum(is_relevant, dtype=np.float32) / (1 + np.arange(is_relevant.shape[0]))\n a_p = np.sum(p_at_k) / np.min([pos_items.shape[0], is_relevant.shape[0]])\n\n assert 0 <= a_p <= 1, a_p\n return a_p\n\n\ndef ndcg(ranked_list, pos_items, relevance=None, at=None):\n\n if relevance is None:\n relevance = np.ones_like(pos_items)\n assert len(relevance) == pos_items.shape[0]\n\n # Create a dictionary associating item_id to its relevance\n # it2rel[item] -> relevance[item]\n it2rel = {it: r for it, r in zip(pos_items, relevance)}\n\n # Creates array of length \"at\" with the relevance associated to the item in that position\n rank_scores = np.asarray([it2rel.get(it, 0.0) for it in ranked_list[:at]], dtype=np.float32)\n\n # IDCG has all relevances to 1, up to the number of items in the test set\n ideal_dcg = dcg(np.sort(relevance)[::-1])\n\n # DCG uses the relevance of the recommended items\n rank_dcg = dcg(rank_scores)\n\n if rank_dcg == 0.0:\n return 0.0\n\n ndcg_ = rank_dcg / ideal_dcg\n # assert 0 <= ndcg_ <= 1, (rank_dcg, ideal_dcg, ndcg_)\n return ndcg_\n\n\ndef dcg(scores):\n return np.sum(np.divide(np.power(2, scores) - 1, np.log(np.arange(scores.shape[0], dtype=np.float32) + 2)),\n dtype=np.float32)\n\n\nmetrics = ['AUC', 'Precision' 'Recall', 'MAP', 'NDCG']\n\n\ndef pp_metrics(metric_names, metric_values, metric_at):\n \"\"\"\n Pretty-prints metric values\n :param metrics_arr:\n :return:\n \"\"\"\n assert len(metric_names) == len(metric_values)\n if isinstance(metric_at, int):\n metric_at = [metric_at] * len(metric_values)\n return ' '.join(['{}: {:.4f}'.format(mname, mvalue) if mcutoff is None or mcutoff == 0 else\n '{}@{}: {:.4f}'.format(mname, mcutoff, mvalue)\n for mname, mcutoff, mvalue in zip(metric_names, metric_at, metric_values)])\n\n\nclass TestAUC(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4])\n ranked_list = np.asarray([1, 2, 3, 4, 5])\n self.assertTrue(np.allclose(roc_auc(ranked_list, pos_items),\n (2. / 3 + 1. / 3) / 2))\n\n\nclass TestRecall(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(recall(ranked_list_1, pos_items), 3. / 4))\n self.assertTrue(np.allclose(recall(ranked_list_2, pos_items), 1.0))\n self.assertTrue(np.allclose(recall(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 4, 1. / 4, 2. / 4, 3. / 4]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(recall(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestPrecision(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(precision(ranked_list_1, pos_items), 3. / 5))\n self.assertTrue(np.allclose(precision(ranked_list_2, pos_items), 4. / 5))\n self.assertTrue(np.allclose(precision(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 2, 1. / 3, 2. / 4, 3. 
/ 5]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(precision(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestRR(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(rr(ranked_list_1, pos_items), 1. / 2))\n self.assertTrue(np.allclose(rr(ranked_list_2, pos_items), 1.))\n self.assertTrue(np.allclose(rr(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 2, 1. / 2, 1. / 2, 1. / 2]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(rr(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestMAP(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n ranked_list_4 = np.asarray([11, 12, 13, 14, 15, 16, 2, 4, 5, 10])\n ranked_list_5 = np.asarray([2, 11, 12, 13, 14, 15, 4, 5, 10, 16])\n self.assertTrue(np.allclose(map(ranked_list_1, pos_items), (1. / 2 + 2. / 4 + 3. / 5) / 4))\n self.assertTrue(np.allclose(map(ranked_list_2, pos_items), 1.0))\n self.assertTrue(np.allclose(map(ranked_list_3, pos_items), 0.0))\n self.assertTrue(np.allclose(map(ranked_list_4, pos_items), (1. / 7 + 2. / 8 + 3. / 9 + 4. / 10) / 4))\n self.assertTrue(np.allclose(map(ranked_list_5, pos_items), (1. + 2. / 7 + 3. / 8 + 4. / 9) / 4))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [\n 0.0,\n 1. / 2 / 2,\n 1. / 2 / 3,\n (1. / 2 + 2. / 4) / 4,\n (1. / 2 + 2. / 4 + 3. / 5) / 4\n ]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(map(ranked_list_1, pos_items, at)), val))\n\n\nclass TestNDCG(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n pos_relevances = np.asarray([5, 4, 3, 2])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5]) # rel = 0, 5, 0, 4, 3\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3]) # rel = 2, 3, 5, 4, 0\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8]) # rel = 0, 0, 0, 0, 0\n idcg = ((2 ** 5 - 1) / np.log(2) +\n (2 ** 4 - 1) / np.log(3) +\n (2 ** 3 - 1) / np.log(4) +\n (2 ** 2 - 1) / np.log(5))\n self.assertTrue(np.allclose(dcg(np.sort(pos_relevances)[::-1]), idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_1, pos_items, pos_relevances),\n ((2 ** 5 - 1) / np.log(3) +\n (2 ** 4 - 1) / np.log(5) +\n (2 ** 3 - 1) / np.log(6)) / idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_2, pos_items, pos_relevances),\n ((2 ** 2 - 1) / np.log(2) +\n (2 ** 3 - 1) / np.log(3) +\n (2 ** 5 - 1) / np.log(4) +\n (2 ** 4 - 1) / np.log(5)) / idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_3, pos_items, pos_relevances), 0.0))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
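The Diversity_MeanInterList docstring above argues that the expensive pairwise computation URM_predicted.dot(URM_predicted.T) is unnecessary, because an item recommended n times contributes n*(n-1) ordered user couples, so co_counts.sum() equals np.sum(occurrence * (occurrence - 1)). The numpy sketch below checks that identity and the resulting diversity value on random fixed-length recommendation lists; it is an illustration only, and the simulated data and variable names are not part of the file above.

import numpy as np

rng = np.random.default_rng(0)
n_users, n_items, cutoff = 200, 50, 10

# Each user receives `cutoff` distinct recommended items.
recommendations = np.stack(
    [rng.choice(n_items, size=cutoff, replace=False) for _ in range(n_users)])

# Naive route: binary user-item matrix, pairwise common-recommendation counts.
urm_predicted = np.zeros((n_users, n_items))
urm_predicted[np.repeat(np.arange(n_users), cutoff), recommendations.ravel()] = 1.0
co_counts = urm_predicted @ urm_predicted.T
np.fill_diagonal(co_counts, 0.0)
naive_sum = co_counts.sum()

# Counter route used by the metric: sum over items of n*(n-1).
item_occurrence = urm_predicted.sum(axis=0)
counter_sum = np.sum(item_occurrence * (item_occurrence - 1))
assert np.isclose(naive_sum, counter_sum)

# Same normalisation as get_metric_value().
all_user_couples = n_users ** 2 - n_users
diversity = (all_user_couples - counter_sum / cutoff) / all_user_couples
print(round(diversity, 4))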
[ [ "numpy.logical_not", "scipy.sparse.coo_matrix", "numpy.random.seed", "numpy.random.choice", "numpy.ediff1d", "scipy.sparse.csr_matrix", "numpy.mean", "numpy.argsort", "scipy.sparse.hstack", "sklearn.preprocessing.LabelEncoder", "numpy.where", "numpy.isin", "scipy.sparse.lil_matrix" ], [ "scipy.sparse.csc_matrix", "numpy.log", "numpy.log2", "numpy.ones_like", "numpy.sqrt", "numpy.isfinite", "numpy.min", "numpy.ediff1d", "numpy.arange", "numpy.isnan", "numpy.asarray", "numpy.power", "numpy.cumsum", "numpy.sort", "numpy.logical_or", "numpy.all", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
PatternRecognition/OpenBMI
[ "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c", "75daf901b2dbe215852cbff243606dcfcd10f05c" ]
[ "PR_BCI_team/Team_StarLab/DKHan/examples/Deep_Learning_Models/OpenBMI_dataset/Preprocessing/convert_gigamne_to_smt.py", "PR_BCI_team/Team_StarLab/DKHan/examples/basic_CNN/giga_csp_lda_100hz.py", "PR_BCI_team/Team_StarLab/DKHan/examples/CSDG/get_common.py", "PR_BCI_team/Team_StarLab/DKHan/examples/bcic_dg/plot_bcic_iv_2a_moabb_trial_DG.py", "PR_BCI_team/Team_StarLab/DKHan/examples/eeg_dg/main_subject_independent_chsel_subjsel_gpu1.py", "PR_BCI_team/Team_StarLab/DKHan/examples/speech/convert.py", "PR_BCI_team/Team_StarLab/YJKim/shallow_cnn_with_mne/gigadata2.py", "PR_BCI_team/Team_StarLab/DKHan/examples/bcic_dg/bcic_dk_ds_crop.py" ]
[ "import scipy.io as sio\nimport numpy as np\nimport os\nimport mne\nimport gigadata\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import ShuffleSplit, cross_val_score\n\nfrom pyriemann.estimation import Covariances\n\nfrom mne import Epochs, pick_types, find_events\nfrom mne.channels import read_layout\nfrom mne.io import concatenate_raws, read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.decoding import CSP\nfrom datetime import datetime\n\nimport pickle\n\nwith open('MI_62ch_100Hz_4-50.pkl', 'rb') as f:\n data = pickle.load(f)\n\nsess = 2\nsub= 43\n\nfor sess in [1,2]:\n print(\"session:\",sess)\n for sub in range(1,55):\n print(\"subject#\",sub)\n if sess == 1 :\n epochs = data[sub-1]\n else :\n epochs = data[sub+53]\n\n\n epochs_train = epochs.copy()\n\n\n if sess == 1 and sub ==1:\n epochs_data_train = epochs_train.get_data()\n labels = epochs.events[:, -1] - 1\n else:\n epoch_temp = epochs_train.get_data()\n epochs_data_train = np.append(epochs_data_train, epoch_temp,axis=0)\n label_temp = epochs.events[:, -1] - 1\n labels = np.hstack((labels, label_temp))\n\n print(epochs_data_train.shape)\n\n\nnp.save('x_data_450',epochs_data_train)\nnp.save('y_data',labels)\n\n", "import scipy.io as sio\nimport numpy as np\nimport os\nimport mne\nimport gigadata\nfrom mayavi import mlab\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import ShuffleSplit, cross_val_score\n\nfrom mne import Epochs, pick_types, find_events\nfrom mne.channels import read_layout\nfrom mne.io import concatenate_raws, read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.decoding import CSP\nfrom datetime import datetime\n\nimport pickle\n\n#giga science mi data csp-lda example\n\nwith open('C:\\\\Users\\dk\\PycharmProjects\\giga_cnn\\convert\\MI_62ch_250Hz.pkl', 'rb') as f:\n data = pickle.load(f)\n\nf = open(\"CSP_62ch_100hz\" + datetime.today().strftime(\"%m_%d_%H_%M\") + \".txt\", 'w')\n\nfor sess in [1,2]:\n print(\"session:\",sess)\n for sub in range(1,55):\n print(\"subject#\",sub)\n if sess == 1 :\n epochs = data[sub-1]\n else :\n epochs = data[sub+53]\n\n sess= 2\n sub = 2\n sub2epo = data[51+54-1].copy()\n epochs = sub2epo.copy()\n epochs.filter(l_freq=8, h_freq=30)\n\n idx = np.array(list(range(7, 11)) + list(range(12, 15)) + list(range(17, 21)) + list(range(32, 41)))\n chans = np.array(epochs.ch_names)[idx].tolist()\n epochs.pick_channels(chans)\n\n epochs_train = epochs.copy().crop(tmin=0, tmax=4.0)\n\n labels = epochs.events[:, -1] - 1\n\n scores = []\n epochs_data = epochs.get_data()\n\n epochs_data_train = epochs_train.get_data()[0:100,:,:]\n epochs_data_test = epochs_train.get_data()[100:200,:,:]\n labels_train = labels[0:100]\n labels_test = labels[100:200]\n csp = CSP(n_components=6, reg=None, log=True, norm_trace=False)\n X_train = csp.fit_transform(epochs_data_train, labels_train)\n X_test = csp.transform(epochs_data_test)\n\n # fit classifier\n lda.fit(X_train, labels_train)\n print(lda.score(X_test, labels_test))\n csp.plot_patterns(epochs.info, layout=layout, ch_type='eeg',\n units='Patterns (AU)', size=1.5)\n\n\n\n evoked = epochs.average()\n evoked.data = csp.patterns_.T\n evoked.times = np.arange(evoked.data.shape[0])\n evoked.plot_topomap()\n\n cv = ShuffleSplit(1, random_state=42)\n cv_split = cv.split(epochs_data_train)\n # Assemble a classifier\n lda = 
LinearDiscriminantAnalysis()\n csp = CSP(n_components=6, reg=None, log=True, norm_trace=False)\n # Use scikit-learn Pipeline with cross_val_score function\n\n#####################윈도우##########################\n sfreq = epochs.info['sfreq']\n w_length = int(sfreq * 3) # running classifier: window length\n w_step = int(sfreq * 0.1) # running classifier: window step size\n w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)\n\n scores_windows = []\n\n\n # fit classifier\n lda.fit(X_train, labels_train)\n\n # running classifier: test classifier on sliding window\n score_this_window = []\n for n in w_start:\n epochs_data_train = epochs_train.get_data()[0:100, :, n:(n + w_length)]\n epochs_data_test = epochs_train.get_data()[100:200, :, n:(n + w_length)]\n X_train = csp.fit_transform(epochs_data_train, labels_train)\n X_test = csp.transform(epochs_data_test)\n lda.fit(X_train, labels_train)\n score_this_window.append(lda.score(X_test, labels_test))\n scores_windows.append(score_this_window)\n\n # Plot scores over time\n w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin\n\n plt.figure()\n plt.plot(w_times, np.mean(scores_windows, 0), label='Score')\n plt.axvline(0, linestyle='--', color='k', label='Onset')\n plt.axhline(0.5, linestyle='-', color='k', label='Chance')\n plt.xlabel('time (s)')\n plt.ylabel('classification accuracy')\n plt.title('Classification score over time')\n plt.legend(loc='lower right')\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # clf = Pipeline([('CSP', csp), ('LDA', lda)])\n # scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=-1, )\n\n\n csp.fit_transform(epochs_data_test, labels_test)\n\n layout = read_layout('EEG1005')\n csp.plot_patterns(epochs.info, layout=layout, ch_type='eeg',\n units='Patterns (AU)', size=1.5)\n\n class_balance = np.mean(labels == labels[0])\n class_balance = max(class_balance, 1. 
- class_balance)\n m_score = np.mean(scores)\n print(\"Classification accuracy: %f / Chance level: %f\" % (m_score,\n class_balance))\n f.write(str(m_score) + '\\n')\n\nf.close()", "from networks_new import Deep4Net_origin, ConvClfNet, TripletNet, FcClfNet\nimport networks_new as nets\nfrom losses import TripletLoss_dev2, TripLoss, ContrastiveLoss_dk\nfrom torch.optim import lr_scheduler\nimport torch.optim as optim\n\n\nclass dgnet():\n # def __init__(self,gamma):\n # margin = 1.0\n # self.gamma = gamma\n # self.embedding_net = Deep4Net_origin(n_classes=2, input_ch=62, input_time=400)\n # self.clf_net = ConvClfNet(self.embedding_net)\n # self.model = TripletNet(self.clf_net)\n # self.optimizer = optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)\n # self.milestones = [30, 50, 70, 90]\n # self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n # self.loss_fn = ContrastiveLoss_dk(margin,self.gamma)\n # def __init__(self,gamma):\n # margin = 1.0\n # self.gamma = gamma\n # self.embedding_net = nets.Deep4Net()\n # self.clf_net = FcClfNet(self.embedding_net)\n # self.model = TripletNet(self.clf_net)\n # self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)\n # # self.optimizer = optim.Adam(self.model.parameters())\n # self.milestones = [50, 100, 150, 200]\n # self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n # self.loss_fn = TripletLoss_dev2(margin,self.gamma)\n # def __init__(self,gamma):\n # #실험조건\n # #brain decode랑 동일 모델 동일조건에서 스치듯 83프로 성능이 나오는지 확인코자함\n # margin = 1.0\n # self.gamma = gamma\n # self.embedding_net = nets.Deep4Net_origin(n_classes=2, input_ch=62, input_time=400)\n # self.clf_net = ConvClfNet(self.embedding_net)\n # self.model = TripletNet(self.clf_net)\n # self.optimizer = optim.Adam(self.model.parameters())\n # self.milestones = [50, 100, 150, 200]\n # self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n # self.loss_fn = TripletLoss_dev2(margin,self.gamma)\n # def __init__(self,gamma):\n # #실험 내용: deep4net + fc clf 성능 위의 fix 100hz로 검증\n # margin = 1.0\n # self.gamma = gamma\n # self.embedding_net = nets.Deep4Net()\n # self.clf_net = FcClfNet(self.embedding_net)\n # self.model = TripletNet(self.clf_net)\n # # self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)\n # self.optimizer = optim.Adam(self.model.parameters())\n # self.milestones = [50, 100, 150, 200]\n # self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n # self.loss_fn = TripletLoss_dev2(margin,self.gamma)\n # def __init__(self,gamma):\n # #실험 내용: deep4net + fc clf 성능 위의 fix 250hz로 검증 배치놈 안쓰고(좀더 유의한 특징뽑기 위해?>)\n # margin = 1.0\n # self.gamma = gamma\n # self.embedding_net = nets.Deep4Net(n_ch=62,n_time=1000,batch_norm=False)\n # self.clf_net = FcClfNet(self.embedding_net)\n # self.model = TripletNet(self.clf_net)\n # # self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)\n # self.optimizer = optim.Adam(self.model.parameters())\n # self.milestones = [50, 100, 150, 200]\n # self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n # self.loss_fn = TripletLoss_dev2(margin,self.gamma)\n\n\n def __init__(self, gamma):\n # 실험 내용: deep4net + fc clf 성능 위의 fix 250hz로 검증 배치놈 안쓰고(좀더 유의한 특징뽑기 위해?>)\n from models.model_3dcnn import Base_3dcnn\n margin = 1.0\n 
self.gamma = gamma\n self.embedding_net = nets.Deep4Net_origin(2,22,1000,batch_norm=True)\n self.clf_net = nets.FcClfNet(self.embedding_net, n_class=4)\n self.model = nets.TripletNet(self.clf_net)\n # self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005)\n self.optimizer = optim.Adam(self.model.parameters())\n self.milestones = [50]\n self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.milestones, gamma=0.1)\n self.loss_fn = TripletLoss_dev2(margin, self.gamma)", "\"\"\"\nTrialwise Decoding on BCIC IV 2a Dataset\n========================================\n\nThis tutorial shows you how to train and test deep learning models with\nBraindecode in a classical EEG setting: you have trials of data with\nlabels (e.g., Right Hand, Left Hand, etc.).\n\n\"\"\"\n\n\n######################################################################\n# Loading and preprocessing the dataset\n# -------------------------------------\n#\n\n\n######################################################################\n# Loading\n# ~~~~~~~\n#\n\n\n######################################################################\n# First, we load the data. In this tutorial, we use the functionality of\n# braindecode to load datasets through\n# `MOABB <https://github.com/NeuroTechX/moabb>`__ to load the BCI\n# Competition IV 2a data.\n#\n# .. note::\n# To load your own datasets either via mne or from\n# preprocessed X/y numpy arrays, see `MNE Dataset\n# Tutorial <./plot_mne_dataset_example.html>`__ and `Numpy Dataset\n# Tutorial <./plot_custom_dataset_example.html>`__.\n#\n\nfrom braindecode.datasets.moabb import MOABBDataset\nimport mne\nimport torch\n\n\ndataset = MOABBDataset(dataset_name=\"BNCI2014001\", subject_ids=list(range(1,10)))\n\n# subject_id = 3\n######################################################################\n# Preprocessing\n# ~~~~~~~~~~~~~\n#\n\n\n######################################################################\n# Now we apply preprocessing like bandpass filtering to our dataset. You\n# can either apply functions provided by\n# `mne.Raw <https://mne.tools/stable/generated/mne.io.Raw.html>`__ or\n# `mne.Epochs <https://mne.tools/0.11/generated/mne.Epochs.html#mne.Epochs>`__\n# or apply your own functions, either to the MNE object or the underlying\n# numpy array.\n#\n# .. note::\n# These prepocessings are now directly applied to the loaded\n# data, and not on-the-fly applied as transformations in\n# PyTorch-libraries like\n# `torchvision <https://pytorch.org/docs/stable/torchvision/index.html>`__.\n#\n\nfrom braindecode.datautil.preprocess import exponential_moving_standardize\nfrom braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess\n\nlow_cut_hz = 4. # low cut frequency for filtering\nhigh_cut_hz = 38. 
# high cut frequency for filtering\n# Parameters for exponential moving standardization\nfactor_new = 1e-3\ninit_block_size = 1000\n\npreprocessors = [\n # keep only EEG sensors\n MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),\n # convert from volt to microvolt, directly modifying the numpy array\n NumpyPreproc(fn=lambda x: x * 1e6),\n # bandpass filter\n MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),\n # exponential moving standardization\n NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,\n init_block_size=init_block_size)\n]\n\n# Transform the data\npreprocess(dataset, preprocessors)\n\n\n######################################################################\n# Cut Compute Windows\n# ~~~~~~~~~~~~~~~~~~~\n#\n\n\n######################################################################\n# Now we cut out compute windows, the inputs for the deep networks during\n# training. In the case of trialwise decoding, we just have to decide if\n# we want to cut out some part before and/or after the trial. For this\n# dataset, in our work, it often was beneficial to also cut out 500 ms\n# before the trial.\n#\n\nimport numpy as np\nfrom braindecode.datautil.windowers import create_windows_from_events\n\ntrial_start_offset_seconds = -0.5\n# Extract sampling frequency, check that they are same in all datasets\nsfreq = dataset.datasets[0].raw.info['sfreq']\nassert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])\n# Calculate the trial start offset in samples.\ntrial_start_offset_samples = int(trial_start_offset_seconds * sfreq)\n\n# Create windows using braindecode function for this. It needs parameters to define how\n# trials should be used.\nwindows_dataset = create_windows_from_events(\n dataset,\n trial_start_offset_samples=trial_start_offset_samples,\n trial_stop_offset_samples=0,\n preload=True,\n)\n\n\n######################################################################\n# Split dataset into train and valid\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\n######################################################################\n# We can easily split the dataset using additional info stored in the\n# description attribute, in this case ``session`` column. We select\n# ``session_T`` for training and ``session_E`` for validation.\n#\n\n\nfrom braindecode.datasets.base import BaseConcatDataset\nsplitted = windows_dataset.split('subject')\n# splitted = windows_dataset.split('session')\n\n\n\ndef exp(subject_id):\n import torch\n test_subj = np.r_[subject_id]\n print('test subj:' + str(test_subj))\n # train_subj = np.setdiff1d(np.r_[1:10], test_subj)\n train_subj = np.setdiff1d(np.r_[1, 3, 7, 8], test_subj)\n\n tr = []\n val = []\n for ids in train_subj:\n train_size = int(0.99 * len(splitted[ids]))\n test_size = len(splitted[ids]) - train_size\n tr_i, val_i = torch.utils.data.random_split(splitted[ids], [train_size, test_size])\n tr.append(tr_i)\n val.append(val_i)\n\n train_set = torch.utils.data.ConcatDataset(tr)\n valid_set = torch.utils.data.ConcatDataset(val)\n valid_set = BaseConcatDataset([splitted[ids] for ids in test_subj])\n\n\n ######################################################################\n # Create model\n # ------------\n #\n\n\n ######################################################################\n # Now we create the deep learning model! Braindecode comes with some\n # predefined convolutional neural network architectures for raw\n # time-domain EEG. 
Here, we use the shallow ConvNet model from `Deep\n # learning with convolutional neural networks for EEG decoding and\n # visualization <https://arxiv.org/abs/1703.05051>`__. These models are\n # pure `PyTorch <https://pytorch.org>`__ deep learning models, therefore\n # to use your own model, it just has to be a normal PyTorch\n # `nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__.\n #\n\n import torch\n from braindecode.util import set_random_seeds\n from braindecode.models import ShallowFBCSPNet,Deep4Net\n\n cuda = torch.cuda.is_available() # check if GPU is available, if True chooses to use it\n device = 'cuda:0' if cuda else 'cpu'\n if cuda:\n torch.backends.cudnn.benchmark = True\n seed = 20200220 # random seed to make results reproducible\n # Set random seed to be able to reproduce results\n set_random_seeds(seed=seed, cuda=cuda)\n\n n_classes=3\n # Extract number of chans and time steps from dataset\n n_chans = train_set[0][0].shape[0]\n input_window_samples = train_set[0][0].shape[1]\n #\n # model = ShallowFBCSPNet(\n # n_chans,\n # n_classes,\n # input_window_samples=input_window_samples,\n # final_conv_length='auto',\n # )\n\n\n from mynetworks import Deep4Net_origin, ConvClfNet, FcClfNet\n\n\n model = Deep4Net(\n n_chans,\n n_classes,\n input_window_samples=input_window_samples,\n final_conv_length=\"auto\",\n )\n\n #\n # embedding_net = Deep4Net_origin(4, 22, input_window_samples)\n # model = FcClfNet(embedding_net)\n # #\n\n print(model)\n\n\n\n\n # Send model to GPU\n if cuda:\n model.cuda()\n\n\n\n ######################################################################\n # Training\n # --------\n #\n\n\n ######################################################################\n # Now we train the network! EEGClassifier is a Braindecode object\n # responsible for managing the training of neural networks. It inherits\n # from skorch.NeuralNetClassifier, so the training logic is the same as in\n # `Skorch <https://skorch.readthedocs.io/en/stable/>`__.\n #\n\n\n ######################################################################\n # **Note**: In this tutorial, we use some default parameters that we\n # have found to work well for motor decoding, however we strongly\n # encourage you to perform your own hyperparameter optimization using\n # cross validation on your training data.\n #\n\n from skorch.callbacks import LRScheduler\n from skorch.helper import predefined_split\n\n from braindecode import EEGClassifier\n # # These values we found good for shallow network:\n lr = 0.0625 * 0.01\n weight_decay = 0\n\n # For deep4 they should be:\n # lr = 1 * 0.01\n # weight_decay = 0.5 * 0.001\n\n batch_size = 8\n n_epochs = 100\n\n\n clf = EEGClassifier(\n model,\n criterion=torch.nn.NLLLoss,\n optimizer=torch.optim.AdamW,\n train_split=predefined_split(valid_set), # using valid_set for validation\n optimizer__lr=lr,\n optimizer__weight_decay=weight_decay,\n batch_size=batch_size,\n callbacks=[\n \"accuracy\", (\"lr_scheduler\", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),\n ],\n device=device,\n )\n # Model training for a specified number of epochs. 
`y` is None as it is already supplied\n # in the dataset.\n clf.fit(train_set, y=None, epochs=n_epochs)\n\n\n ######################################################################\n # Plot Results\n # ------------\n #\n\n\n ######################################################################\n # Now we use the history stored by Skorch throughout training to plot\n # accuracy and loss curves.\n #\n\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n import pandas as pd\n # Extract loss and accuracy values for plotting from history object\n results_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']\n df = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,\n index=clf.history[:, 'epoch'])\n\n # get percent of misclass for better visual comparison to loss\n df = df.assign(train_misclass=100 - 100 * df.train_accuracy,\n valid_misclass=100 - 100 * df.valid_accuracy)\n\n plt.style.use('seaborn')\n fig, ax1 = plt.subplots(figsize=(8, 3))\n df.loc[:, ['train_loss', 'valid_loss']].plot(\n ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False, fontsize=14)\n\n ax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)\n ax1.set_ylabel(\"Loss\", color='tab:blue', fontsize=14)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n df.loc[:, ['train_misclass', 'valid_misclass']].plot(\n ax=ax2, style=['-', ':'], marker='o', color='tab:red', legend=False)\n ax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)\n ax2.set_ylabel(\"Misclassification Rate [%]\", color='tab:red', fontsize=14)\n ax2.set_ylim(ax2.get_ylim()[0], 85) # make some room for legend\n ax1.set_xlabel(\"Epoch\", fontsize=14)\n\n # where some data has already been plotted to ax\n handles = []\n handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))\n handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))\n plt.legend(handles, [h.get_label() for h in handles], fontsize=14)\n plt.tight_layout()\n\n # plt.show()\n\n\n\n return df\n\n\nif __name__ == '__main__':\n import pandas as pd\n df_all = pd.DataFrame()\n for id in range(1,10):\n df = exp(id)\n df_all = pd.concat([df_all, df], axis=1)\n df_all.to_csv(\"trial_DG_shallow_origin.csv\",mode='w')\n\n", "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\nfrom torch.utils.data import Dataset\nimport random\nimport argparse\nimport models\nfrom train_eval import *\nimport utils\nimport time\nimport hyperparameter as hp\nfrom logger import eegdg_logger\n\n\n\ndef exp(args,fold_idx, train_set,valid_set, test_set):\n\n path = args.save_root + args.result_dir\n if not os.path.isdir(path):\n os.makedirs(path)\n os.makedirs(path + '/models')\n os.makedirs(path + '/logs')\n\n logger = eegdg_logger(path +f'/logs/{fold_idx}')\n\n with open(path + '/args.txt', 'w') as f:\n f.write(str(args))\n\n import torch.cuda\n cuda = torch.cuda.is_available()\n # check if GPU is available, if True chooses to use it\n device = 'cuda' if cuda else 'cpu'\n\n if cuda:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n seed = args.seed\n random.seed(seed)\n torch.manual_seed(seed)\n if cuda:\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=args.batch_size, shuffle=True)\n test_loader = 
torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False)\n\n\n model = models.get_model(args)\n # model = FcClfNet(embedding_net)\n # model = torch.nn.DataParallel(model)\n\n mb_params= utils.param_size(model)\n print(f\"Model size = {mb_params:.4f} MB\")\n if cuda:\n model.cuda(device=device)\n print(model)\n optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=0.01)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs-1)\n\n results_columns = [f'valid_loss',f'test_loss',f'valid_accuracy',f'test_accuracy']\n df = pd.DataFrame(columns=results_columns)\n\n valid_acc = 0\n valid_min_loss = 100\n best_acc = 0\n best_acc_loss = 0\n max_acc = 0\n\n for epochidx in range(1, args.epochs):\n print(epochidx)\n start = time.time()\n train(10, model, device, train_loader,optimizer,scheduler,cuda, args.gpuidx)\n print(f'total time: {time.time()-start}')\n utils.blockPrint()\n train_loss, train_score = eval(model, device, train_loader)\n valid_loss, valid_score = eval(model, device, valid_loader)\n test_loss, test_score = eval(model, device, test_loader)\n utils.enablePrint()\n\n scheduler.step()\n lr = scheduler.get_last_lr()[0]\n\n print(f'LR : {lr}')\n logger.log_training(train_loss, train_score, test_loss, test_score, lr, epochidx)\n\n results = {f'valid_loss':valid_loss,f'test_loss': test_loss, f'valid_accuracy':valid_score,f'test_accuracy': test_score}\n df = df.append(results, ignore_index=True)\n print(results)\n\n if valid_score >= valid_acc:\n valid_acc = valid_score\n best_acc = test_score\n torch.save(model.state_dict(), os.path.join(\n path, 'models',\n f\"model_fold{fold_idx}_best.pt\"))\n best_epoch = epochidx\n\n if valid_loss <= valid_min_loss:\n valid_min_loss = valid_loss\n best_acc_loss = test_score\n torch.save(model.state_dict(), os.path.join(\n path, 'models',\n f\"model_fold{fold_idx}_best(loss).pt\"))\n best_loss_epoch = epochidx\n\n if test_score >= max_acc:\n max_acc = test_score\n torch.save(model.state_dict(), os.path.join(\n path, 'models',\n f\"model_fold{fold_idx}_max.pt\"))\n max_epoch = epochidx\n\n print(f'current best acc : {best_acc:.4f} at epoch {best_epoch}')\n print(f'current best(loss) acc : {best_acc_loss:.4f} at epoch {best_loss_epoch}')\n print(f'current max acc : {max_acc:.4f} at epoch {max_epoch}')\n\n\n best_model = models.get_model(args)\n best_model.load_state_dict(torch.load(os.path.join(\n path, 'models',\n f\"model_fold{fold_idx}_best.pt\"), map_location=device))\n if cuda:\n best_model.cuda(device=device)\n\n print(\"best accuracy\")\n _, _ = eval(best_model, device, test_loader)\n\n df = utils.get_testset_accuracy(best_model,device,test_set,args)\n\n return df\n\n\nif __name__ == '__main__':\n\n\n parser = argparse.ArgumentParser(description='openbmi_gigadb')\n parser.add_argument('--data-root',\n default='C:/Users/Starlab/Documents/onedrive/OneDrive - 고려대학교/untitled/convert/')\n parser.add_argument('--save-root', default='../data')\n parser.add_argument('--result-dir', default=f'/{hp.model}_norm')\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 8)')\n parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',\n help='input batch size for testing (default: 8)')\n parser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=hp.lr, metavar='LR',\n help='learning rate (default: 
0.05)')\n parser.add_argument('--save-model', action='store_true', default=True,\n help='For Saving the current best Model')\n\n args = parser.parse_args()\n args.gpuidx = 0\n args.model_name = hp.model\n import pandas as pd\n df_all = pd.DataFrame()\n args.seed = 0\n fold_idx = 0\n # subject_list = np.r_[0, 1, 2, 4, 5, 8, 17, 18, 20, 27, 28, 32, 35, 36, 42, 43, 44, 51]\n subject_list = np.r_[27:40]\n for fold_idx in subject_list:\n print(f\"subject : {fold_idx}\")\n train_set,valid_set, test_set, args = utils.get_data_eeg_subject_subset(args, fold_idx)\n df = exp(args,fold_idx,train_set,valid_set, test_set)\n df_all= pd.concat([df_all,df],axis=0)\n print(df_all)\n\n\n", "import scipy.io as sio\nimport numpy as np\nimport mne\nimport pickle\nsubj=1\nsubj_blocks = list()\nfor subj in range(1,17):\n print(subj)\n\n dataname = 's%d_sess3' % (subj)\n SP = sio.loadmat('C:\\\\Data_speech\\\\'+dataname\n , struct_as_record=False, squeeze_me=True)\n temp = SP['epo']\n x = temp.x\n x = np.transpose(x,[2,1,0])\n y = temp.y\n\n chan = temp.clab.tolist()\n\n # 채널정보 추가해야함\n\n\n n_channels = 64\n sfreq = 250\n info = mne.create_info(ch_names=chan, sfreq=sfreq, ch_types='eeg')\n epochs = mne.EpochsArray(x,info)\n # epochs.filter(l_freq=30,h_freq=100)\n\n subj_blocks.append(y.shape[0])\n if subj == 1:\n epochs_data_train = epochs.get_data()\n labels = y\n else:\n epoch_temp = epochs.get_data()\n epochs_data_train = np.append(epochs_data_train, epoch_temp, axis=0)\n label_temp = y\n labels = np.hstack((labels, label_temp))\n\n\n\n print(epochs_data_train.shape)\n\n\nwith open('epoch_sess3.pkl', 'wb') as f:\n pickle.dump(epochs_data_train, f, protocol=4)\n\nwith open('epoch_sess3_labels.pkl', 'wb') as f:\n pickle.dump(labels, f)\n\nwith open('epoch_sess3_sizes.pkl', 'wb') as f:\n pickle.dump(subj_blocks, f)", "import scipy.io as sio\nimport numpy as np\nimport mne\n\ndef load_gigadata(MI, variable_names, plot=False):\n temp = MI[variable_names]\n sfreq = 1000 # Sampling frequency\n chan = temp.chan.tolist()\n\n # 채널정보 추가해야함\n info = mne.create_info(ch_names=chan, sfreq=sfreq, ch_types='eeg')\n\n t = np.hstack((temp.t.reshape(100, 1), np.zeros((100, 1))))\n y_label = temp.y_dec.reshape(100, 1)\n events = np.hstack((t, y_label)).astype('int')\n\n raw = mne.io.RawArray(temp.x.T, info)\n if plot == True:\n scalings = 'auto' # Could also pass a dictionary with some value == 'auto'\n raw.plot(n_channels=62, scalings=scalings, title='Auto-scaled Data from arrays',\n show=True, block=True)\n\n return raw, events\n\ndef gigadata_epochs(raw,events,tmin=-1,tmax=3,plot=False):\n epochs = mne.Epochs(raw, events=events, event_id=[1, 2], tmin=tmin,\n tmax=tmax, baseline=None, verbose=True, preload=True)\n if plot == True:\n epochs.plot(scalings='auto', block=True)\n return epochs\n", "import torch\n\nfrom braindecode.datasets.moabb import MOABBDataset\nfrom braindecode.datautil.preprocess import exponential_moving_standardize\nfrom braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess\n\nfrom braindecode.util import set_random_seeds\nfrom braindecode.models import ShallowFBCSPNet, Deep4Net\nfrom mynetworks import Deep4Net_origin, ConvClfNet, FcClfNet\nfrom trte import *\n\ndataset = MOABBDataset(dataset_name=\"BNCI2014001\", subject_ids=list(range(1,3)))\n\nlow_cut_hz = 4. # low cut frequency for filtering\nhigh_cut_hz = 38. 
# high cut frequency for filtering\n# Parameters for exponential moving standardization\nfactor_new = 1e-3\ninit_block_size = 1000\n\npreprocessors = [\n # keep only EEG sensors\n MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),\n # convert from volt to microvolt, directly modifying the numpy array\n NumpyPreproc(fn=lambda x: x * 1e6),\n # bandpass filter\n MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),\n # exponential moving standardization\n NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,\n init_block_size=init_block_size)\n]\n\n# Transform the data\npreprocess(dataset, preprocessors)\n\ninput_window_samples = 1125\n\n\n\n\ncuda = torch.cuda.is_available() # check if GPU is available, if True chooses to use it\ndevice = 'cuda:1' if cuda else 'cpu'\nif cuda:\n torch.backends.cudnn.benchmark = True\nseed = 10 # random seed to make results reproducible\n# Set random seed to be able to reproduce results\nset_random_seeds(seed=seed, cuda=cuda)\n\nn_classes=4\n# Extract number of chans from dataset\nn_chans = dataset[0][0].shape[0]\n\nimport numpy as np\nfrom braindecode.datautil.windowers import create_windows_from_events\n\ntrial_start_offset_seconds = -0.5\n# Extract sampling frequency, check that they are same in all datasets\nsfreq = dataset.datasets[0].raw.info['sfreq']\nassert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])\n\n# Calculate the trial start offset in samples.\ntrial_start_offset_samples = int(trial_start_offset_seconds * sfreq)\n\n# Create windows using braindecode function for this. It needs parameters to define how\n# trials should be used.\nwindows_dataset = create_windows_from_events(\n dataset,\n trial_start_offset_samples=trial_start_offset_samples,\n trial_stop_offset_samples=0,\n preload=True,\n)\n\nfrom braindecode.datasets.base import BaseConcatDataset\nsplitted = windows_dataset.split('session')\n\ndef exp(subject_id):\n import torch\n test_subj = np.r_[subject_id]\n\n print('test subj:' + str(test_subj))\n\n #20% validation\n train_size = int(0.9* len(splitted['session_T']))\n test_size = len(splitted['session_T']) - train_size\n\n\n\n # train_set, valid_set = torch.utils.data.random_split(splitted['session_T'], [train_size, test_size])\n train_set = splitted['session_T']\n test_set = splitted['session_E']\n\n\n\n # model = Deep4Net(\n # n_chans,\n # n_classes,\n # input_window_samples=input_window_samples,\n # final_conv_length=\"auto\",\n # )\n\n from torch.utils.data import Dataset, ConcatDataset\n\n\n\n\n crop_size = 1000\n # embedding_net = Deep4Net_origin(n_classes, n_chans, crop_size)\n # model = FcClfNet(embedding_net)\n\n model = ShallowFBCSPNet(\n n_chans,\n n_classes,\n input_window_samples=input_window_samples,\n final_conv_length='auto',\n )\n\n from braindecode.models.util import to_dense_prediction_model, get_output_shape\n to_dense_prediction_model(model)\n\n n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]\n print(\"n_preds_per_input : \", n_preds_per_input)\n print(model)\n\n\n batch_size =8\n epochs = 200\n\n\n\n\n\n\n lr = 0.0625 * 0.01\n weight_decay = 0\n\n\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)\n\n\n\n # Send model to GPU\n if cuda:\n model.cuda()\n\n from torch.optim import lr_scheduler\n import torch.optim 
as optim\n\n import argparse\n parser = argparse.ArgumentParser(description='cross subject domain adaptation')\n parser.add_argument('--batch-size', type=int, default=50, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=True,\n help='For Saving the current Model')\n args = parser.parse_args()\n args.gpuidx = 0\n args.seed = 0\n args.use_tensorboard = False\n args.save_model = False\n\n optimizer = optim.AdamW(model.parameters(), lr=0.01, weight_decay=0.5 * 0.001)\n # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs-1)\n\n\n\n import pandas as pd\n results_columns = ['test_loss', 'test_accuracy']\n df = pd.DataFrame(columns=results_columns)\n\n for epochidx in range(1, epochs):\n print(epochidx)\n train_crop(10, model, device, train_loader,optimizer,scheduler,cuda, args.gpuidx)\n test_loss, test_score = eval_crop(model, device, test_loader)\n results = { 'test_loss': test_loss, 'test_accuracy': test_score}\n df = df.append(results, ignore_index=True)\n print(results)\n\n return df\n\nif __name__ == '__main__':\n import pandas as pd\n df_all = pd.DataFrame()\n for id in range(1,10):\n df = exp(id)\n df_all = pd.concat([df_all, df], axis=1)\n # df_all.to_csv(\"bcic_dk_ds_deep4net_fc_crop.csv\",mode='w')\n\n\n\n" ]
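giga_csp_lda_100hz.py above scores motor-imagery epochs by fitting mne's CSP and feeding the resulting log-variance features to an LDA classifier, optionally inside a scikit-learn Pipeline with cross_val_score (shown commented out in that file). Below is a standalone sketch of that pipeline on synthetic epochs, assuming mne and scikit-learn are installed; the array shapes, random data, and resulting scores are purely illustrative.

import numpy as np
from mne.decoding import CSP
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline

rng = np.random.RandomState(42)
n_epochs, n_channels, n_times = 100, 20, 250
epochs_data = rng.randn(n_epochs, n_channels, n_times)  # (epochs, channels, samples)
labels = rng.randint(0, 2, n_epochs)                    # two motor-imagery classes

csp = CSP(n_components=6, reg=None, log=True, norm_trace=False)
lda = LinearDiscriminantAnalysis()
clf = Pipeline([('CSP', csp), ('LDA', lda)])

cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
print("mean accuracy: %.3f" % scores.mean())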
[ [ "numpy.hstack", "numpy.append", "numpy.save" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "sklearn.model_selection.ShuffleSplit", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.mean", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "torch.optim.lr_scheduler.MultiStepLR" ], [ "pandas.concat", "matplotlib.pyplot.tight_layout", "matplotlib.lines.Line2D", "matplotlib.pyplot.subplots", "numpy.setdiff1d", "pandas.DataFrame", "torch.utils.data.ConcatDataset", "torch.utils.data.random_split", "torch.cuda.is_available", "matplotlib.pyplot.style.use" ], [ "pandas.concat", "pandas.DataFrame" ], [ "numpy.hstack", "numpy.append", "scipy.io.loadmat", "numpy.transpose" ], [ "numpy.hstack", "numpy.zeros" ], [ "pandas.concat", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
FanWangEcon/pyfan
[ "126e91c0c6d930f1c335a07396d1d2145b247cea" ]
[ "pyfan/amto/array/geomspace.py" ]
[ "'''\nCreated on May 24, 2018\n\n@author: fan\n\nTo have a better grid denser at the beginning\n'''\n\nimport time as time\nimport numpy as np\n\nfrom numba import jit\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# @vectorize([float64(float64, float64, float64, float64, float64, float64, float64, float64)])\ndef grid_to_geom_short(choice_grid, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, a):\n scaler = (choice_grid_max - choice_grid_min) / (stop - start)\n __, displacement, multiplier, a, b = gen_geom_grid(start, stop, num, geom_ratio, a)\n\n return grid_to_geom_short_core(choice_grid, a, scaler, displacement, multiplier, geom_ratio)\n\n\n@jit(nopython=True, parallel=True)\ndef grid_to_geom_short_core(choice_grid, a, scaler, displacement, multiplier, geom_ratio):\n # choice_grid_geom = ((choice_grid/scaler) + displacement)/multiplier\n # but a is 1\n # choice_grid_geom_base = (np.log(choice_grid_geom/a))/np.log(geom_ratio)\n\n return np.log((((choice_grid / scaler) + displacement) / multiplier) / a) / np.log(geom_ratio)\n\n\n# @njit\ndef grid_to_geom(choice_grid, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, a):\n \"\"\"\n the code now is under the assumption that initial start and end were 0 and 1 \n \n Given geom_grid results, how do we go back to actual data grid. \n So for interpolation. \n interpolate not on actual K and B scales, but on any even grid, as long\n as the grid count is right. \n \n interp_K_grid = np.linspace(0,1,n)\n \n but then there is a vector of actual choices kn_vec, how to map kn_vec to interp_K_grid?\n \n Parameters\n ----------\n choice_grid:\n this is the choice grid, on the actual choice scale\n start: float\n from gen_geom_grid\n stop: float\n from gen_geom_grid\n num: int\n from gen_geom_grid\n geom_ratio: float\n from gen_geom_grid \n \"\"\"\n # logger.debug('enter grid_to_geom')\n\n '''\n 0. Choice Grid Rescaling\n '''\n scaler = (choice_grid_max - choice_grid_min) / (stop - start)\n\n '''\n A. 
Reverse engineer from vector to geom scale\n '''\n startTime = time.time()\n __, displacement, multiplier, a, b = gen_geom_grid(start, stop, num, geom_ratio, a)\n # logger.debug('displacement:%s', displacement)\n # logger.debug('multiplier:%s', multiplier)\n # logger.debug('a:%s', a)\n # logger.debug('b:%s', b)\n # t = time.time() - startTime\n # print('Step aa:', t)\n\n '''choice_grid_geom is now between a and b'''\n # startTime = time.time()\n choice_grid_geom = ((choice_grid / scaler) + displacement) / multiplier\n # logger.debug('choice_grid:\\n%s', choice_grid)\n # logger.debug('choice_grid_geom:\\n%s', choice_grid_geom)\n t = time.time() - startTime\n print('Step aaa:', t)\n\n '''\n B.\n a <= choice_grid_geom = a*(geom_ratio)^{x} <= b\n \n solve for x\n \n log(choice_grid_geom) = log(a) + x*log(geom_ratio)\n x = (log(choice_grid_geom) - log(a))/log(geom_ratio)\n \n '''\n startTime = time.time()\n choice_grid_geom_base = (np.log(choice_grid_geom / a)) / np.log(geom_ratio)\n # choice_grid_geom_base = (np.log(choice_grid_geom))/np.log(geom_ratio)\n # logger.debug('choice_grid_geom_base:\\n%s', choice_grid_geom_base)\n t = time.time() - startTime\n print('Step bb:', t)\n\n return choice_grid_geom_base\n\n\n# @njit\ndef gen_geom_grid(start, stop, num, geom_ratio, a):\n \"\"\" \n Specify geom_ratio, the z below:\n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n Then generate the grid points that is consistent with the geom_ratio\n \n Parameters\n ----------\n start: float\n same as in linspace\n stop: float\n same as in linspace\n num: int\n same as in linspace\n geom_ratio: float\n z value below kind of except for rescaling\n \"\"\"\n\n '''\n A. Start with a and b\n '''\n # a = 1\n b = a * geom_ratio ** (num - 1)\n\n geom_base = a * (geom_ratio) ** np.arange(num)\n # geom_base2 = np.geomspace(a, b, num)\n # logger.debug('geom_base:\\n%s', geom_base)\n\n '''\n B. Rescaling\n '''\n multiplier = ((stop - start) / (b - a))\n geom_base_scaled = geom_base * multiplier\n # logger.debug('geom_base_scaled:\\n%s', geom_base_scaled)\n\n displacement = (np.min(geom_base_scaled) - start)\n geom_base_scaled = geom_base_scaled - displacement\n # logger.debug('geom_base_scaled:\\n%s', geom_base_scaled)\n\n # logger.debug('geom_base_scaled diff:\\n%s', np.diff(geom_base_scaled))\n\n return geom_base_scaled, displacement, multiplier, a, b\n\n\ndef tester(a=1, b=51, max_power=49):\n \"\"\"\n 1. 1 to 51, geomspace \n \"\"\"\n list_geom_1t51 = np.geomspace(a, b, max_power + 1)\n print('list_geom_1t51:', list_geom_1t51)\n\n \"\"\"\n 2. what is the list above, what does it mean?\n the point is to start from a=start, end at b=end, find:\n \n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n \n 50 numbers like above. \n a is determined by start of geomspace\n b is determined by: \n \n \"\"\"\n z = (b / a) ** (1 / max_power)\n print('z:', z)\n list_geom_fan = a * z ** np.linspace(0, max_power, max_power + 1)\n print('list_geom_fan:', list_geom_fan)\n\n return list_geom_fan\n\n\ndef tester_plus1(a=0, b=50, max_power=49, adjust=1):\n \"\"\"\n to accomndate zero, \n \"\"\"\n list_geom_1t51 = np.geomspace(a + adjust, b + adjust, max_power + 1)\n list_geom_1t51 = list_geom_1t51 - 1\n print('list_geom_1t51:', list_geom_1t51)\n\n \"\"\"\n 2. what is the list above, what does it mean?\n the point is to start from a=start, end at b=end, find:\n \n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n \n 50 numbers like above. 
\n a is determined by start of geomspace\n b is determined by: \n \n \"\"\"\n z = ((b + adjust) / (a + adjust)) ** (1 / max_power)\n print('z:', z)\n list_geom_fan = (a + adjust) * z ** np.linspace(0, max_power, max_power + 1) - 1\n print('list_geom_fan:', list_geom_fan)\n\n \"\"\"\n 3. So suppose I have list_geom now, how to I take it back to geomspace?\n \"\"\"\n lencount = len(list_geom_1t51)\n equi_space = np.linspace(1, lencount, lencount)\n print('equi_space:', equi_space)\n\n\nif __name__ == \"__main__\":\n FORMAT = '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'\n # np.set_printoptions(precision=4, linewidth=100, suppress=True, threshold=np.nan)\n np.set_printoptions(precision=3, linewidth=100, suppress=True, threshold=3000)\n logging.basicConfig(level=logging.DEBUG, format=FORMAT)\n\n tester(a=1, b=2, max_power=49)\n tester(a=1, b=3, max_power=49)\n tester(a=1, b=4, max_power=49)\n print((tester(a=1, b=5, max_power=49) - 1) / 4)\n print('')\n print('')\n print('')\n tester(a=1, b=51, max_power=49)\n tester(a=0.1, b=51, max_power=10)\n tester(a=100, b=200, max_power=3)\n\n tester_plus1()\n\n print('')\n print('')\n print('')\n\n start = 0\n stop = 1\n num = 11\n geom_ratio = 1.2\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = 10\n stop = 20\n num = 11\n geom_ratio = 1.00000001\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = -10\n stop = 20\n num = 11\n geom_ratio = 1.1\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = -3.5\n stop = -3.1\n num = 3\n geom_ratio = 1.1\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = 0\n stop = 1\n num = 50\n geom_ratio = 1.03\n geom_base_scaled, __, __, __, __, = gen_geom_grid(start, stop, num, geom_ratio, 1)\n choice_grid_max = stop\n choice_grid_min = start\n grid_to_geom(geom_base_scaled, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, 1)\n\n geom_base_scaled = np.linspace(0, 30, 22)\n choice_grid_max = 30\n choice_grid_min = 0\n grid_to_geom(geom_base_scaled, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, 1)\n" ]
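geomspace.py above builds a grid whose raw points are a * geom_ratio**k, rescales them onto [start, stop] so the grid is denser near start, and grid_to_geom inverts the rescaling back to fractional grid indices for interpolation. The sketch below re-derives that forward/backward algebra directly from the docstrings; it does not import the module, and it assumes the choice grid lives on the same [start, stop] scale (i.e. scaler == 1).

import numpy as np

start, stop, num, geom_ratio, a = 0.0, 30.0, 12, 1.2, 1.0

# Forward (gen_geom_grid): geometric points a*z^k rescaled onto [start, stop].
b = a * geom_ratio ** (num - 1)
geom_base = a * geom_ratio ** np.arange(num)
multiplier = (stop - start) / (b - a)
scaled = geom_base * multiplier
displacement = scaled.min() - start
grid = scaled - displacement            # grid[0] == start, grid[-1] == stop

# Inverse (grid_to_geom with scaler == 1): back to fractional indices 0..num-1.
indices = np.log(((grid + displacement) / multiplier) / a) / np.log(geom_ratio)

assert np.isclose(grid[0], start) and np.isclose(grid[-1], stop)
assert np.allclose(indices, np.arange(num))
print(np.round(grid, 3))   # spacing grows by a factor of geom_ratio between points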
[ [ "numpy.log", "numpy.linspace", "numpy.min", "numpy.arange", "numpy.set_printoptions", "numpy.geomspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amh28/NIF
[ "92a2f447738224fb10b83fa60c78a35e0c25ac34", "92a2f447738224fb10b83fa60c78a35e0c25ac34" ]
[ "niftynet/layer/discrete_label_normalisation.py", "niftynet/layer/activation.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport niftynet.utilities.histogram_standardisation as hs\nfrom niftynet.layer.base_layer import DataDependentLayer\nfrom niftynet.layer.base_layer import Invertible\nfrom niftynet.utilities.user_parameters_helper import standardise_string\nfrom niftynet.utilities.util_common import print_progress_bar\n\n\nclass DiscreteLabelNormalisationLayer(DataDependentLayer, Invertible):\n def __init__(self,\n image_name,\n modalities,\n model_filename,\n name='label_norm'):\n\n super(DiscreteLabelNormalisationLayer, self).__init__(name=name)\n # mapping is a complete cache of the model file, the total number of\n # modalities are listed in self.modalities\n self.image_name = image_name\n self.modalities = modalities\n self.model_file = os.path.abspath(model_filename)\n assert not os.path.isdir(self.model_file), \\\n \"model_filename is a directory, please change histogram_ref_file\"\n self.label_map = hs.read_mapping_file(self.model_file)\n\n @property\n def key(self):\n # provide a readable key for the label mapping item\n key_from = \"{}_{}-from\".format(self.image_name, self.modalities)\n key_to = \"{}_{}-to\".format(self.image_name, self.modalities)\n return standardise_string(key_from), standardise_string(key_to)\n\n def layer_op(self, image, mask=None):\n assert self.is_ready(), \\\n \"discrete_label_normalisation layer needs to be trained first.\"\n # mask is not used for label mapping\n if isinstance(image, dict):\n if self.image_name not in image:\n return image, mask\n label_data = np.asarray(image[self.image_name])\n else:\n label_data = np.asarray(image)\n\n mapping_from = self.label_map[self.key[0]]\n mapping_to = self.label_map[self.key[1]]\n\n image_shape = label_data.shape\n label_data = label_data.reshape(-1)\n mapped_data = np.zeros_like(label_data)\n for (original, new_id) in zip(mapping_from, mapping_to):\n mapped_data[label_data == original] = new_id\n label_data = mapped_data.reshape(image_shape)\n\n if isinstance(image, dict):\n image[self.image_name] = label_data\n return image, mask\n return label_data, mask\n\n def inverse_op(self, image, mask=None):\n assert self.is_ready(), \\\n \"discrete_label_normalisation layer needs to be trained first.\"\n # mask is not used for label mapping\n if isinstance(image, dict):\n label_data = np.asarray(image[self.image_name])\n else:\n label_data = np.asarray(image)\n\n mapping_from = self.label_map[self.key[0]]\n mapping_to = self.label_map[self.key[1]]\n\n image_shape = label_data.shape\n label_data = label_data.reshape(-1)\n mapped_data = np.zeros_like(label_data)\n for (new_id, original) in zip(mapping_from, mapping_to):\n mapped_data[label_data == original] = new_id\n label_data = mapped_data.reshape(image_shape)\n if isinstance(image, dict):\n image[self.image_name] = label_data\n return image, mask\n return label_data, mask\n\n def is_ready(self):\n mapping_from = self.label_map.get(self.key[0], None)\n if mapping_from is None:\n # tf.logging.warning('could not find mapping key %s', self.key[0])\n return False\n mapping_to = self.label_map.get(self.key[1], None)\n if mapping_to is None:\n # tf.logging.warning('could not find mapping key %s', self.key[1])\n return False\n assert len(mapping_from) == len(mapping_to), \\\n \"mapping is not one-to-one, \" \\\n \"corrupted mapping file? 
{}\".format(self.model_file)\n return True\n\n def train(self, image_list):\n # check modalities to train, using the first subject in subject list\n # to find input modality list\n assert image_list is not None, \"nothing to training for this layer\"\n if self.is_ready():\n tf.logging.info(\n \"label mapping ready for {}:{}, {} classes\".format(\n self.image_name,\n self.modalities,\n len(self.label_map[self.key[0]])))\n return\n tf.logging.info(\n \"Looking for the set of unique discrete labels from input {}\"\n \" using {} subjects\".format(self.image_name, len(image_list)))\n label_map = find_set_of_labels(image_list, self.image_name, self.key)\n # merging trained_mapping dict and self.mapping dict\n self.label_map.update(label_map)\n all_maps = hs.read_mapping_file(self.model_file)\n all_maps.update(self.label_map)\n hs.write_all_mod_mapping(self.model_file, all_maps)\n\n\ndef find_set_of_labels(image_list, field, output_key):\n label_set = set()\n for idx, image in enumerate(image_list):\n assert field in image, \\\n \"no {} data provided in for label mapping\".format(field)\n print_progress_bar(idx, len(image_list),\n prefix='searching unique labels from training files',\n decimals=1, length=10, fill='*')\n unique_label = np.unique(image[field].get_data())\n if len(unique_label) > 500 or len(unique_label) <= 1:\n tf.logging.warning(\n 'unusual discrete values: number of unique '\n 'labels to normalise %s', len(unique_label))\n label_set.update(set(unique_label))\n label_set = list(label_set)\n label_set.sort()\n try:\n mapping_from_to = dict()\n mapping_from_to[output_key[0]] = tuple(label_set)\n mapping_from_to[output_key[1]] = tuple(range(0, len(label_set)))\n except (IndexError, ValueError):\n tf.logging.fatal(\"unable to create mappings keys: %s, image name %s\",\n output_key, field)\n raise\n return mapping_from_to\n", "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nimport tensorflow as tf\n\nfrom niftynet.layer.base_layer import TrainableLayer\nfrom niftynet.utilities.util_common import look_up_operations\n\n\ndef prelu(f_in, channelwise_params):\n pos = tf.nn.relu(f_in)\n neg = channelwise_params * (f_in - tf.abs(f_in)) * 0.5\n return pos + neg\n\n\ndef selu(x, name):\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))\n\n\ndef leaky_relu(x, name):\n half_alpha = 0.01\n return (0.5 + half_alpha) * x + (0.5 - half_alpha) * abs(x)\n\n\nSUPPORTED_OP = {'relu': tf.nn.relu,\n 'relu6': tf.nn.relu6,\n 'elu': tf.nn.elu,\n 'softplus': tf.nn.softplus,\n 'softsign': tf.nn.softsign,\n 'sigmoid': tf.nn.sigmoid,\n 'tanh': tf.nn.tanh,\n 'prelu': prelu,\n 'selu': selu,\n 'leakyrelu': leaky_relu,\n 'dropout': tf.nn.dropout}\n\n\nclass ActiLayer(TrainableLayer):\n \"\"\"\n Apply an element-wise non-linear activation function.\n 'Prelu' uses trainable parameters and those are initialised to zeros\n Dropout function is also supported\n \"\"\"\n\n def __init__(self, func, regularizer=None, name='activation'):\n self.func = func.lower()\n self.layer_name = '{}_{}'.format(self.func, name)\n\n super(ActiLayer, self).__init__(name=self.layer_name)\n\n # these are used for prelu variables\n self.initializers = {'alpha': tf.constant_initializer(0.0)}\n self.regularizers = {'alpha': regularizer}\n\n def layer_op(self, input_tensor, keep_prob=None):\n func_ = look_up_operations(self.func, SUPPORTED_OP)\n if self.func == 'prelu':\n alphas = tf.get_variable(\n 'alpha', 
input_tensor.get_shape()[-1],\n initializer=self.initializers['alpha'],\n regularizer=self.regularizers['alpha'])\n output_tensor = func_(input_tensor, alphas)\n elif self.func == 'dropout':\n assert keep_prob > 0.0\n assert keep_prob <= 1.0\n output_tensor = func_(input_tensor,\n keep_prob=keep_prob,\n name='dropout')\n else:\n output_tensor = func_(input_tensor, name='acti')\n return output_tensor\n" ]
[ [ "numpy.asarray", "numpy.zeros_like", "tensorflow.logging.fatal" ], [ "tensorflow.constant_initializer", "tensorflow.nn.relu", "tensorflow.nn.elu", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
manopapad/legate.numpy
[ "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936", "896f4fd9b32db445da6cdabf7b78d523fca96936" ]
[ "tests/universal_functions_tests/true_divide_tests/broadcast.py", "tests/universal_functions_tests/sqrt_tests/normal.py", "tests/known_failures/random_creation.py", "tests/universal_functions_tests/log_tests/inplace_normal.py", "tests/reduction_axis.py", "tests/universal_functions_tests/multiply_tests/normal.py", "tests/universal_functions_tests/exp_tests/inplace_normal.py" ]
[ "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n anp = np.random.randn(4, 5)\n b = random.randint(1, 13)\n a = lg.array(anp)\n\n # test true_divide with scalar on rhs\n assert np.array_equal(lg.true_divide(a, b), np.true_divide(anp, b))\n\n # test divide with scalar on lhs\n assert np.array_equal(lg.true_divide(b, a), np.true_divide(b, anp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n npa = np.array([1, 4, 9])\n a = lg.array(npa)\n assert np.array_equal(lg.sqrt(a), np.sqrt(npa))\n\n npa = np.array([1, 4, 9], dtype=np.float)\n a = lg.array(npa)\n assert np.array_equal(lg.sqrt(a), np.sqrt(npa))\n\n npa = np.array([1, 4, 9], dtype=np.float32)\n a = lg.array(npa)\n assert np.array_equal(lg.sqrt(a), np.sqrt(npa))\n\n npa = np.array([1, 4, 9], dtype=np.float64)\n a = lg.array(npa)\n assert np.array_equal(lg.sqrt(a), np.sqrt(npa))\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n lg.random.seed(42)\n x = lg.random.randn(10)\n np.random.seed(42)\n xn = np.random.randn(10)\n assert np.allclose(x, xn)\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See 
the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n npa = np.array([1, np.e, np.e ** 2])\n a = lg.array(npa)\n assert np.array_equal(lg.log(a, out=a), np.log(npa, out=npa))\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n pythonX = np.reshape(np.linspace(0, 10001, 10000, dtype=int), (100, 100))\n x = lg.array(pythonX)\n\n pythonY = np.sum(pythonX, axis=0)\n y = lg.sum(x, axis=0)\n assert np.array_equal(pythonY, y)\n\n pythonY = np.sum(pythonX, axis=1)\n y = lg.sum(x, axis=1)\n assert np.array_equal(pythonY, y)\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n anp = np.random.randn(4, 5)\n bnp = np.random.randn(4, 5)\n a = lg.array(anp)\n b = lg.array(bnp)\n\n assert np.array_equal(lg.multiply(a, b), np.multiply(anp, bnp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n npa = np.array([-1.0, 0.0, 1.0])\n a = lg.array(npa)\n\n np.exp(npa, out=npa)\n lg.exp(a, out=a)\n\n assert np.array_equal(a, npa)\n return\n\n\nif __name__ == \"__main__\":\n test()\n" ]
[ [ "numpy.true_divide", "numpy.random.randn" ], [ "numpy.array", "numpy.sqrt" ], [ "numpy.random.randn", "numpy.allclose", "numpy.random.seed" ], [ "numpy.log", "numpy.array" ], [ "numpy.sum", "numpy.array_equal", "numpy.linspace" ], [ "numpy.random.randn", "numpy.multiply" ], [ "numpy.exp", "numpy.array", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Azmal16/Covid_Symptoms_Predict_with_Machine_Learning
[ "d1e1fde73aa307bdbcfeac27088f49e4c1cb4555" ]
[ "app.py" ]
[ "import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['GET', 'POST'])\ndef predict():\n '''\n For rendering results on HTML GUI\n '''\n int_features = [int(x) for x in request.form.getlist('comp_select')]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n # print(final_features)\n output = prediction[0]*10\n\n if (output <= 20):\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at Low Risk of getting Covid-19.\\n Please answer the questions below to predict again.'.format(output))\n\n elif (output > 20 and output <= 60):\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at Medium Risk of getting Covid-19.\\n We recommend you to have a Covid Test.\\n Please answer the questions below to predict again.'.format(output))\n\n else:\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at High Risk of getting Covid-19.\\n We recommend you to have a Covid Test as soon as possible.\\n Please answer the questions below to predict again.'.format(output))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yongsheng268/tfx
[ "6283fffb3ac81e2f213b4895fbe19623dfa9c4f5" ]
[ "tfx/utils/channel.py" ]
[ "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definition of TFX Channel type.\n\nDeprecated: please see the new location of this module at `tfx.types.channel`\nand `tfx.types.channel_utils`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Dict, Iterable, List, Union, Text\n\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\nfrom tfx import types\nfrom tfx.types import channel_utils\n\n\[email protected](\n None,\n 'tfx.utils.channel.Channel has been renamed to tfx.types.Channel as of '\n 'TFX 0.14.0.')\nclass Channel(types.Channel):\n pass\n\n\[email protected](None,\n 'tfx.utils.channel.as_channel has been renamed to '\n 'tfx.types.channel_utils.as_channel as of TFX 0.14.0.')\ndef as_channel(source: Union[Channel, Iterable[types.Artifact]]) -> Channel:\n return channel_utils.as_channel(source)\n\n\[email protected](\n None, 'tfx.utils.channel.unwrap_channel_dict has been renamed to '\n 'tfx.types.channel_utils.unwrap_channel_dict as of TFX 0.14.0.')\ndef unwrap_channel_dict(\n channel_dict: Dict[Text, Channel]) -> Dict[Text, List[types.Artifact]]:\n return channel_utils.unwrap_channel_dict(channel_dict)\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
everestocean/Algorithm
[ "a7e2ce796daf50488420290176dc8c1ccccb109f" ]
[ "machine_learning/deep_learning/deep_learning/tweet_sentiment.py" ]
[ "# -*- coding=utf-8 -*-\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers.convolutional import Conv1D\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport pandas as pd\nimport numpy as np\nimport spacy\n\nnlp=spacy.load(\"en\")\n\n#load the dataset\ntrain=pd.read_csv(\"../datasets/training.1600000.processed.noemoticon.csv\" , encoding= \"latin-1\")\nY_train = train[train.columns[0]]\nX_train = train[train.columns[5]]\n\n# split the data into test and train\nfrom sklearn.model_selection import train_test_split\ntrainset1x, trainset2x, trainset1y, trainset2y = train_test_split(X_train.values, Y_train.values, test_size=0.02,random_state=42 )\ntrainset2y=pd.get_dummies(trainset2y)\n\n# function to remove stopwords\ndef stopwords(sentence):\n new=[]\n sentence=nlp(sentence)\n for w in sentence:\n if (w.is_stop == False) & (w.pos_ !=\"PUNCT\"):\n new.append(w.string.strip())\n c=\" \".join(str(x) for x in new)\n return c\n\n# function to lemmatize the tweets\ndef lemmatize(sentence):\n sentence=nlp(sentence)\n str=\"\"\n for w in sentence:\n str+=\" \"+w.lemma_\n return nlp(str)\n\n#loading the glove model\ndef loadGloveModel(gloveFile):\n print(\"Loading Glove Model\")\n f = open(gloveFile,'r')\n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = [float(val) for val in splitLine[1:]]\n model[word] = embedding\n print (\"Done.\"),len(model),(\" words loaded!\")\n return model\n\n# save the glove model\nmodel=loadGloveModel(\"/mnt/hdd/datasets/glove/glove.twitter.27B.200d.txt\")\n\n#vectorising the sentences\ndef sent_vectorizer(sent, model):\n sent_vec = np.zeros(200)\n numw = 0\n for w in sent.split():\n try:\n sent_vec = np.add(sent_vec, model[str(w)])\n numw+=1\n except:\n pass\n return sent_vec\n\n#obtain a clean vector\ncleanvector=[]\nfor i in range(trainset2x.shape[0]):\n document=trainset2x[i]\n document=document.lower()\n document=lemmatize(document)\n document=str(document)\n cleanvector.append(sent_vectorizer(document,model))\n\n#Getting the input and output in proper shape\ncleanvector=np.array(cleanvector)\ncleanvector =cleanvector.reshape(len(cleanvector),200,1)\n\n#tokenizing the sequences\ntokenizer = Tokenizer(num_words=16000)\ntokenizer.fit_on_texts(trainset2x)\nsequences = tokenizer.texts_to_sequences(trainset2x)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\ndata = pad_sequences(sequences, maxlen=15, padding=\"post\")\nprint(data.shape)\n\n#reshape the data and preparing to train\ndata=data.reshape(len(cleanvector),15,1)\nfrom sklearn.model_selection import train_test_split\ntrainx, validx, trainy, validy = train_test_split(data, trainset2y, test_size=0.3,random_state=42)\n\n\n#calculate the number of words\nnb_words=len(tokenizer.word_index)+1\n\n#obtain theembedding matrix\nembedding_matrix = np.zeros((nb_words, 200))\nfor word, i in word_index.items():\n embedding_vector = model.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\ntrainy=np.array(trainy)\nvalidy=np.array(validy)\n\n\n# building a simple RNN model\ndef modelbuild():\n model = Sequential()\n model.add(keras.layers.InputLayer(input_shape=(15, 1)))\n keras.layers.embeddings.Embedding(nb_words, 15, weights=[embedding_matrix], input_length=15,\n trainable=False)\n\n model.add(keras.layers.recurrent.SimpleRNN(units=100, activation='relu',\n use_bias=True))\n model.add(keras.layers.Dense(units=1000, input_dim=2000, activation='sigmoid'))\n model.add(keras.layers.Dense(units=500, input_dim=1000, activation='relu'))\n model.add(keras.layers.Dense(units=2, input_dim=500, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# compiling the model\nfinalmodel = modelbuild()\nfinalmodel.fit(trainx, trainy, epochs=10, batch_size=120, validation_data=(validx, validy))" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "numpy.array", "numpy.zeros", "numpy.sum", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
xnone/CS224n-Natural-Language-Processing-with-Deep-Learning
[ "0ec63dcd6b8671712c0206cc64b3e4c2fc6f97a3" ]
[ "Assignments/assignment1/q3_sgd.py" ]
[ "#!/usr/bin/env python\n\n# Save parameters every a few SGD iterations as fail-safe\nSAVE_PARAMS_EVERY = 5000\n\nimport glob\nimport random\nimport numpy as np\nimport os.path as op\nimport pickle\n\n\ndef load_saved_params():\n \"\"\"\n A helper function that loads previously saved parameters and resets\n iteration start.\n \"\"\"\n st = 0\n for f in glob.glob(\"saved_params_*.npy\"):\n iter = int(op.splitext(op.basename(f))[0].split(\"_\")[2])\n if (iter > st):\n st = iter\n\n if st > 0:\n with open(\"saved_params_%d.npy\" % st, \"r\") as f:\n params = pickle.load(f)\n state = pickle.load(f)\n return st, params, state\n else:\n return st, None, None\n\n\ndef save_params(iter, params):\n with open(\"saved_params_%d.npy\" % iter, \"w\") as f:\n pickle.dump(params, f)\n pickle.dump(random.getstate(), f)\n\n\ndef sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,\n PRINT_EVERY=10):\n \"\"\" Stochastic Gradient Descent\n\n Implement the stochastic gradient descent method in this function.\n\n Arguments:\n f -- the function to optimize, it should take a single\n argument and yield two outputs, a cost and the gradient\n with respect to the arguments\n x0 -- the initial point to start SGD from\n step -- the step size for SGD\n iterations -- total iterations to run SGD for\n postprocessing -- postprocessing function for the parameters\n if necessary. In the case of word2vec we will need to\n normalize the word vectors to have unit length.\n PRINT_EVERY -- specifies how many iterations to output loss\n\n Return:\n x -- the parameter value after SGD finishes\n \"\"\"\n\n # Anneal learning rate every several iterations\n ANNEAL_EVERY = 20000\n\n if useSaved:\n start_iter, oldx, state = load_saved_params()\n if start_iter > 0:\n x0 = oldx\n step *= 0.5 ** (start_iter / ANNEAL_EVERY)\n\n if state:\n random.setstate(state)\n else:\n start_iter = 0\n\n x = x0\n\n if not postprocessing:\n postprocessing = lambda x: x\n\n expcost = None\n\n for iter in range(start_iter + 1, iterations + 1):\n # Don't forget to apply the postprocessing after every iteration!\n # You might want to print the progress every few iterations.\n\n cost = None\n ### YOUR CODE HERE\n cost, grad = f(x)\n x -= step * grad\n postprocessing(x)\n ### END YOUR CODE\n\n if iter % PRINT_EVERY == 0:\n if not expcost:\n expcost = cost\n else:\n expcost = .95 * expcost + .05 * cost\n print(\"iter %d: %f\" % (iter, expcost))\n\n if iter % SAVE_PARAMS_EVERY == 0 and useSaved:\n save_params(iter, x)\n\n if iter % ANNEAL_EVERY == 0:\n step *= 0.5\n\n return x\n\n\ndef sanity_check():\n quad = lambda x: (np.sum(x ** 2), x * 2)\n\n print(\"Running sanity checks...\")\n t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 1 result:\", t1)\n assert abs(t1) <= 1e-6\n\n t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 2 result:\", t2)\n assert abs(t2) <= 1e-6\n\n t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 3 result:\", t3)\n assert abs(t3) <= 1e-6\n\n print(\"\")\n\n\ndef your_sanity_checks():\n \"\"\"\n Use this space add any additional sanity checks by running:\n python q3_sgd.py\n This function will not be called by the autograder, nor will\n your additional tests be graded.\n \"\"\"\n print(\"Running your sanity checks...\")\n ### YOUR CODE HERE\n # raise NotImplementedError\n ### END YOUR CODE\n\n\nif __name__ == \"__main__\":\n sanity_check()\n your_sanity_checks()\n" ]
[ [ "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rcsmit/COVIDcases
[ "8952931ee8316644dee55aad3f94c98f510e2f14", "8952931ee8316644dee55aad3f94c98f510e2f14" ]
[ "not_active_on_streamlit/prepare_casuslandelijk.py", "input/prepare_google_mob_data.py" ]
[ "# PREPARE A CSV-FILE TO ENABLE AN STACKED PLOT FOR POSITIVE TESTS, HOSPITALIZATIONS AND DECEASED\n# Hospitalizations and deceased are not lagged in time, the date of the result of the \"desease onset\", positieve test or notification is leading\n# https://data.rivm.nl/geonetwork/srv/dut/catalog.search#/metadata/2c4357c8-76e4-4662-9574-1deb8a73f724\n\n# MARCH 2021, Rene Smit (@rcsmit) - MIT license\n\n# Fields in\n# Date_file;Date_statistics;Date_statistics_type;Agegroup;Sex;\n# Province;Hospital_admission;Deceased;Week_of_death;Municipal_health_service\n\n# Fields out\n# pos_test_Date_statistics,pos_test_0-9,pos_test_10-19,pos_test_20-29,pos_test_30-39,\n# pos_test_40-49,pos_test_50-59,pos_test_60-69,pos_test_70-79,pos_test_80-89,pos_test_90+,\n# pos_test_<50,pos_test_Unknown,hosp_Date_statistics,hosp_0-9,hosp_10-19,hosp_20-29,hosp_30-39,\n# hosp_40-49,hosp_50-59,hosp_60-69,hosp_70-79,hosp_80-89,hosp_90+,hosp_<50,hosp_Unknown,\n# deceased_Date_statistics,deceased_0-9,deceased_10-19,deceased_20-29,deceased_30-39,\n# deceased_40-49,deceased_50-59,deceased_60-69,deceased_70-79,deceased_80-89,deceased_90+,\n# deceased_<50,deceased_Unknown\n\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom datetime import datetime\n\ndef save_df(df, name):\n \"\"\" save dataframe on harddisk \"\"\"\n OUTPUT_DIR = (\n \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\output\\\\\"\n )\n OUTPUT_DIR = (\n \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\COVIDcases\\\\input\\\\\")\n name_ = OUTPUT_DIR + name + \".csv\"\n compression_opts = dict(method=None, archive_name=name_)\n df.to_csv(name_, index=False, compression=compression_opts)\n\n print(\"--- Saving \" + name_ + \" ---\")\n\n\ndef drop_columns(df, what_to_drop):\n \"\"\" drop columns. 
what_to_drop : list \"\"\"\n if what_to_drop != None:\n print(\"dropping \" + str(what_to_drop))\n for d in what_to_drop:\n df = df.drop(columns=[d], axis=1)\n return df\n\n\ndef main_x():\n # online version : https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv\n url1 = \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\COVID-19_casus_landelijk.csv\"\n df = pd.read_csv(url1, delimiter=\";\", low_memory=False)\n df[\"Date_statistics\"] = pd.to_datetime(df[\"Date_statistics\"], format=\"%Y-%m-%d\")\n df.rename(\n columns={\n \"Date_file\": \"count\",\n },\n inplace=True,\n )\n\n #until = dt.datetime.strptime(\"2021-1-1\", \"%Y-%m-%d\").date()\n #mask = (df[\"Date_statistics\"].dt.date >= dt.datetime.strptime(\"2020-1-1\", \"%Y-%m-%d\").date()) & (df[\"Date_statistics\"].dt.date <= until)\n #df = df.loc[mask]\n\n df_hospital = df[df[\"Hospital_admission\"] == \"Yes\"].copy(deep=False)\n df_deceased = df[df[\"Deceased\"] == \"Yes\"].copy(deep=False)\n\n df_all = df.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n df_hospital = df_hospital.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n df_deceased = df_deceased.groupby([\"Date_statistics\", \"Agegroup\"], sort=True).count().reset_index()\n #df_deceased = df_deceased.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n\n df = df.groupby([\"Date_statistics\", \"Agegroup\"], sort=True).count().reset_index()\n print (\"CASES\")\n #df_all = df_all[[\"Agegroup\", \"count\"]]\n #df_hospital = df_hospital[[\"Agegroup\", \"count\"]]\n print (df_all)\n print (\"ZIEKENHUISOPNAMES\")\n print (df_hospital)\n\n df_pivot = (\n pd.pivot_table(\n df,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot_hospital = (\n pd.pivot_table(\n df_hospital,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot_deceased = (\n pd.pivot_table(\n df_deceased,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot = df_pivot.add_prefix(\"pos_test_\")\n df_pivot_hospital = df_pivot_hospital.add_prefix(\"hosp_\")\n save_df(df_pivot_hospital, \"df_hospital_per_dag_vanuit_casus_landelijk\")\n df_pivot_deceased = df_pivot_deceased.add_prefix(\"deceased_\")\n print(df_pivot_deceased.dtypes)\n todrop = [\n \"Date_statistics_type\",\n \"Sex\",\n \"Province\",\n \"Hospital_admission\",\n \"Deceased\",\n \"Week_of_death\",\n \"Municipal_health_service\",\n ]\n df = drop_columns(df, todrop)\n save_df(df, \"landelijk_leeftijd_2_vanuit_casus_landelijk\")\n\n save_df(df_pivot, \"landelijk_leeftijd_pivot_vanuit_casus_landelijk\")\n save_df(df_pivot_hospital, \"landelijk_leeftijd_pivot_hospital_vanuit_casus_landelijk\")\n save_df(df_pivot_deceased, \"landelijk_leeftijd_pivot_deceased_vanuit_casus_landelijk\")\n\n\n df_pivot_cases_per_week = df_pivot.groupby(pd.Grouper(key='pos_test_Date_statistics', freq='W')).sum()\n df_pivot_cases_per_week.index -= pd.Timedelta(days=6)\n df_pivot_cases_per_week[\"weekstart\"]= df_pivot_cases_per_week.index\n save_df(df_pivot_cases_per_week, \"landelijk_leeftijd_pivot_per_week_vanuit_casus_landelijk\")\n\n df_temp = pd.merge(\n df_pivot,\n df_pivot_hospital,\n how=\"outer\",\n left_on=\"pos_test_Date_statistics\",\n right_on=\"hosp_Date_statistics\",\n )\n df_temp = pd.merge(\n df_temp,\n 
df_pivot_deceased,\n how=\"outer\",\n left_on=\"pos_test_Date_statistics\",\n right_on=\"deceased_Date_statistics\",\n )\n\n df_temp_per_week = df_temp.groupby(pd.Grouper(key='pos_test_Date_statistics', freq='W')).sum()\n df_temp_per_week.index -= pd.Timedelta(days=6)\n print(df_temp_per_week)\n df_temp_per_week[\"weekstart\"]= df_temp_per_week.index\n save_df(df_temp, \"final_result_vanuit_casus_landelijk\")\n save_df(df_temp_per_week, \"final_result_per_week_vanuit_casus_landelijk\")\n\n\ndef main_week_data():\n \"\"\"Het maken van weekcijfers en gemiddelden tbv cases_hospital_decased_NL.py\n \"\"\"\n # online version : https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv\n url1 = \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\COVID-19_casus_landelijk.csv\"\n df = pd.read_csv(url1, delimiter=\";\", low_memory=False)\n todrop = [\n \"Date_statistics_type\",\n \"Sex\",\n \"Province\",\n \"Week_of_death\",\n \"Municipal_health_service\",\n ]\n df = drop_columns(df, todrop)\n\n df[\"Date_statistics\"] = pd.to_datetime(df[\"Date_statistics\"], format=\"%Y-%m-%d\")\n df = df.replace(\"Yes\", 1)\n df = df.replace(\"No\", 0)\n df = df.replace(\"Unknown\", 0)\n df[\"cases\"] = 1\n print(df)\n #df = df.groupby([ \"Date_statistics\", \"Agegroup\"], sort=True).sum().reset_index()\n df_week = df.groupby([ pd.Grouper(key='Date_statistics', freq='W'), \"Agegroup\",] ).sum().reset_index()\n print (df)\n df_week[\"Hosp_per_reported\"] = df_week[\"Hospital_admission\"]/df_week[\"cases\"]\n df_week[\"Deceased_per_reported\"] = df_week[\"Deceased\"]/df_week[\"cases\"]\n save_df(df_week, \"landelijk_leeftijd_week_vanuit_casus_landelijk_20211006\")\n\nmain_week_data()\n", "import pandas as pd\n\n# original location https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv\nurl =\"C:\\\\Users\\\\rcxsm\\\\Documents\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\google_mob_world.csv\"\n\ndf = pd.read_csv(url, delimiter=\",\", low_memory=False)\nprint (df)\n#df = df.loc[df['sub_region_1'] == None]\ndf = df[df.sub_region_1.isnull()]\ndf = df[df.sub_region_2.isnull()]\ndf = df[df.metro_area.isnull()]\ndf = df[df.iso_3166_2_code.isnull()]\ndf = df[df.census_fips_code.isnull()]\n\nprint (df)\nname_ = \"C:\\\\Users\\\\rcxsm\\\\Documents\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\google_mob_world_new.csv\"\ncompression_opts = dict(method=None, archive_name=name_)\ndf.to_csv(name_, index=False, compression=compression_opts)\nprint(\"--- Saving \" + name_ + \" ---\")\n\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_datetime", "pandas.Grouper", "pandas.Timedelta", "pandas.pivot_table" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]